diff --git a/GNUmakefile b/GNUmakefile
index 5752276efb..91cf41b2de 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -1,382 +1,382 @@
#
# Copyright (C) 2008 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
default: $(shell test ! -e configure && echo init) $(shell test -e configure && echo core)
-include Makefile
PACKAGE ?= pacemaker
# Force 'make dist' to be consistent with 'make export'
distdir = $(PACKAGE)-$(TAG)
TARFILE = $(PACKAGE)-$(SHORTTAG).tar.gz
DIST_ARCHIVES = $(TARFILE)
RPM_ROOT = $(shell pwd)
RPM_OPTS = --define "_sourcedir $(RPM_ROOT)" \
--define "_specdir $(RPM_ROOT)" \
--define "_srcrpmdir $(RPM_ROOT)" \
MOCK_OPTIONS ?= --resultdir=$(RPM_ROOT)/mock --no-cleanup-after
# Default to building Fedora-compliant spec files
# SLES: /etc/SuSE-release
# openSUSE: /etc/SuSE-release
# RHEL: /etc/redhat-release, /etc/system-release
# Fedora: /etc/fedora-release, /etc/redhat-release, /etc/system-release
# CentOS: /etc/centos-release, /etc/redhat-release, /etc/system-release
F ?= $(shell test ! -e /etc/fedora-release && echo 0; test -e /etc/fedora-release && rpm --eval %{fedora})
ARCH ?= $(shell test -e /etc/fedora-release && rpm --eval %{_arch})
MOCK_CFG ?= $(shell test -e /etc/fedora-release && echo fedora-$(F)-$(ARCH))
DISTRO ?= $(shell test -e /etc/SuSE-release && echo suse; echo fedora)
COMMIT ?= HEAD
TAG ?= $(shell T=$$(git describe --all '$(COMMIT)' | sed -n 's|tags/\(.*\)|\1|p'); \
test -n "$${T}" && echo "$${T}" \
|| git log --pretty=format:%H -n 1 '$(COMMIT)')
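# Literal parentheses, so the "case" patterns below can be written inside
# $(shell ...) without unbalancing make's own function-call parsing.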
lparen = (
rparen = )
SHORTTAG ?= $(shell case $(TAG) in Pacemaker-*$(rparen) echo '$(TAG)' | cut -c11-;; \
*$(rparen) git log --pretty=format:%h -n 1 '$(TAG)';; esac)
SHORTTAG_ABBREV = $(shell printf %s '$(SHORTTAG)' | wc -c)
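# SHORTTAG is the bare version for release tags (the "Pacemaker-" prefix
# stripped) or an abbreviated commit hash otherwise; its length feeds the
# spec file's commit_abbrev below.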
WITH ?= --without doc
#WITH ?= --without=doc --with=gcov
LAST_RC ?= $(shell test -e /Volumes || git tag -l | grep Pacemaker | sort -Vr | grep rc | head -n 1)
ifneq ($(origin VERSION), undefined)
LAST_RELEASE ?= Pacemaker-$(VERSION)
else
LAST_RELEASE ?= $(shell git tag -l | grep Pacemaker | sort -Vr | grep -v rc | head -n 1)
endif
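# Next anticipated release: LAST_RELEASE with its micro version bumped by one.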
NEXT_RELEASE ?= $(shell echo $(LAST_RELEASE) | awk -F. '/[0-9]+\./{$$3+=1;OFS=".";print $$1,$$2,$$3}')
BUILD_COUNTER ?= build.counter
LAST_COUNT = $(shell test ! -e $(BUILD_COUNTER) && echo 0; test -e $(BUILD_COUNTER) && cat $(BUILD_COUNTER))
COUNT = $(shell expr 1 + $(LAST_COUNT))
SPECVERSION ?= $(COUNT)
# toplevel rsync destination for www targets (without trailing slash)
RSYNC_DEST ?= root@www.clusterlabs.org:/var/www/html
# recursive, preserve symlinks/permissions/times, verbose, compress,
# don't cross filesystems, sparse, show progress
RSYNC_OPTS = -rlptvzxS --progress
# rpmbuild wrapper that translates "--with[out] FEATURE" into RPM macros
#
# Unfortunately, at least recent versions of rpm do not support the mentioned
# switch. To work around this, we can emulate the mechanism that rpm uses
# internally: unfold the flags into the respective macro definitions:
#
# --with[out] FOO -> --define "_with[out]_FOO --with[out]-FOO"
#
# $(1) ... WITH string (e.g., --with pre_release --without cman)
# $(2) ... options following the initial "rpmbuild" in the command
# $(3) ... final arguments determined with $2 (e.g., pacemaker.spec)
#
# Note that if $(3) is a specfile, extra care is taken so as to reflect
# pcmkversion correctly (using in-place modification).
#
# Also note that both ways to specify a long option with an argument
# (i.e., what getopt and, importantly, rpm itself support) can be used:
#
# --with FOO
# --with=FOO
rpmbuild-with = \
WITH=$$(getopt -o "" -l with:,without: -- $(1)) || exit 1; \
CMD='rpmbuild $(2)'; PREREL=0; \
eval set -- "$${WITH}"; \
while true; do \
case "$$1" in \
--with) CMD="$${CMD} --define \"_with_$$2 --with-$$2\""; \
[ "$$2" != pre_release ] || PREREL=1; shift 2;; \
--without) CMD="$${CMD} --define \"_without_$$2 --without-$$2\""; \
[ "$$2" != pre_release ] || PREREL=0; shift 2;; \
--) shift ; break ;; \
*) echo "cannot parse WITH: $$1"; exit 1;; \
esac; \
done; \
case "$(3)" in \
*.spec) { [ $${PREREL} -eq 0 ] || [ $(LAST_RELEASE) = $(TAG) ]; } \
&& sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(LAST_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3) \
|| sed -i "s/^\(%global pcmkversion \).*/\1$$(echo $(NEXT_RELEASE) | sed -e s:Pacemaker-:: -e s:-.*::)/" $(3);; \
esac; \
CMD="$${CMD} $(3)"; \
eval "$${CMD}"
init:
./autogen.sh init
export:
rm -f $(PACKAGE)-dirty.tar.* $(PACKAGE)-tip.tar.* $(PACKAGE)-HEAD.tar.*
if [ ! -f $(TARFILE) ]; then \
rm -f $(PACKAGE).tar.*; \
if [ $(TAG) = dirty ]; then \
git commit -m "DO-NOT-PUSH" -a; \
git archive --prefix=$(distdir)/ HEAD | gzip > $(TARFILE); \
git reset --mixed HEAD^; \
else \
git archive --prefix=$(distdir)/ $(TAG) | gzip > $(TARFILE); \
fi; \
echo `date`: Rebuilt $(TARFILE); \
else \
echo `date`: Using existing tarball: $(TARFILE); \
fi
$(PACKAGE)-opensuse.spec: $(PACKAGE)-suse.spec
cp $^ $@
@echo Rebuilt $@
$(PACKAGE)-suse.spec: $(PACKAGE).spec.in GNUmakefile
rm -f $@
if [ x != x"`git ls-files -m | grep pacemaker.spec.in`" ]; then \
cp $(PACKAGE).spec.in $@; \
echo "Rebuilt $@ (local modifications)"; \
elif [ x = x"`git show $(TAG):pacemaker.spec.in 2>/dev/null`" ]; then \
cp $(PACKAGE).spec.in $@; \
echo "Rebuilt $@"; \
else \
git show $(TAG):$(PACKAGE).spec.in >> $@; \
echo "Rebuilt $@ from $(TAG)"; \
fi
sed -i s:%{_docdir}/%{name}:%{_docdir}/%{name}-%{version}:g $@
sed -i s:corosynclib:libcorosync:g $@
sed -i s:libexecdir}/lcrso:libdir}/lcrso:g $@
sed -i 's:%{name}-libs:lib%{name}3:g' $@
sed -i s:cluster-glue-libs:libglue:g $@
sed -i s:bzip2-devel:libbz2-devel:g $@
sed -i s:docbook-style-xsl:docbook-xsl-stylesheets:g $@
sed -i s:libtool-ltdl-devel::g $@
sed -i s:publican::g $@
sed -i s:byacc::g $@
sed -i s:gnutls-devel:libgnutls-devel:g $@
sed -i s:189:90:g $@
sed -i 's:python-devel:python-curses python-xml python-devel:' $@
sed -i 's@Requires: python@Requires: python-curses python-xml python@' $@
@echo "Applied SUSE-specific modifications"
# Works for all fedora based distros
$(PACKAGE)-%.spec: $(PACKAGE).spec.in
rm -f $@
if [ x != x"`git ls-files -m | grep pacemaker.spec.in`" ]; then \
cp $(PACKAGE).spec.in $(PACKAGE)-$*.spec; \
echo "Rebuilt $@ (local modifications)"; \
elif [ x = x"`git show $(TAG):pacemaker.spec.in 2>/dev/null`" ]; then \
cp $(PACKAGE).spec.in $(PACKAGE)-$*.spec; \
echo "Rebuilt $@"; \
else \
git show $(TAG):$(PACKAGE).spec.in >> $(PACKAGE)-$*.spec; \
echo "Rebuilt $@ from $(TAG)"; \
fi
srpm-%: export $(PACKAGE)-%.spec
rm -f *.src.rpm
cp $(PACKAGE)-$*.spec $(PACKAGE).spec
echo "* $(shell date +"%a %b %d %Y") Andrew Beekhof <andrew@beekhof.net> $(shell git describe --tags $(TAG) | sed -e s:Pacemaker-:: -e s:-.*::)-1" >> $(PACKAGE).spec
echo " - See included ChangeLog file or https://raw.github.com/ClusterLabs/pacemaker/master/ChangeLog for full details" >> $(PACKAGE).spec
if [ -e $(BUILD_COUNTER) ]; then \
echo $(COUNT) > $(BUILD_COUNTER); \
fi
sed -e 's/global\ specversion\ .*/global\ specversion\ $(SPECVERSION)/' \
-e 's/global\ commit\ .*/global\ commit\ $(TAG)/' \
-e 's/global\ commit_abbrev\ .*/global\ commit_abbrev\ $(SHORTTAG_ABBREV)/' \
-i $(PACKAGE).spec
$(call rpmbuild-with,$(WITH),-bs --define "dist .$*" $(RPM_OPTS),$(PACKAGE).spec)
chroot: mock-$(MOCK_CFG) mock-install-$(MOCK_CFG) mock-sh-$(MOCK_CFG)
echo "Done"
mock-next:
make F=$(shell expr 1 + $(F)) mock
mock-rawhide:
make F=rawhide mock
mock-install-%:
echo "Installing packages"
mock --root=$* $(MOCK_OPTIONS) --install $(RPM_ROOT)/mock/*.rpm vi sudo valgrind lcov gdb fence-agents psmisc
mock-install: mock-install-$(MOCK_CFG)
echo "Done"
mock-sh: mock-sh-$(MOCK_CFG)
echo "Done"
mock-sh-%:
echo "Connecting"
mock --root=$* $(MOCK_OPTIONS) --shell
echo "Done"
# eg. WITH="--with cman" make rpm
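# mock-<cfg> first builds an SRPM for the distro named by the first
# '-'-separated word of <cfg> (e.g. "fedora" out of "fedora-26-x86_64"),
# then rebuilds it inside the matching mock chroot.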
mock-%:
make srpm-$(firstword $(shell echo $(@:mock-%=%) | tr '-' ' '))
-rm -rf $(RPM_ROOT)/mock
@echo "mock --root=$* --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm"
mock --root=$* --no-cleanup-after --rebuild $(WITH) $(MOCK_OPTIONS) $(RPM_ROOT)/*.src.rpm
srpm: srpm-$(DISTRO)
echo "Done"
mock: mock-$(MOCK_CFG)
echo "Done"
rpm-dep: $(PACKAGE)-$(DISTRO).spec
if [ x != x`which yum-builddep 2>/dev/null` ]; then \
echo "Installing with yum-builddep"; \
sudo yum-builddep $(PACKAGE)-$(DISTRO).spec; \
elif [ x != x`which yum 2>/dev/null` ]; then \
echo -e "Installing: $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')\n\n"; \
sudo yum install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' '); \
elif [ x != x`which zypper` ]; then \
echo -e "Installing: $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')\n\n"; \
sudo zypper install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ');\
else \
echo "I don't know how to install $(shell grep BuildRequires pacemaker.spec.in | sed -e s/BuildRequires:// -e s:\>.*0:: | tr '\n' ' ')";\
fi
rpm: srpm
@echo To create custom builds, edit the flags and options in $(PACKAGE).spec first
$(call rpmbuild-with,$(WITH),$(RPM_OPTS),--rebuild $(RPM_ROOT)/*.src.rpm)
release:
make TAG=$(LAST_RELEASE) rpm
rc:
make TAG=$(LAST_RC) rpm
dirty:
make TAG=dirty mock
COVERITY_DIR = $(shell pwd)/coverity-$(TAG)
COVFILE = $(PACKAGE)-coverity-$(TAG).tgz
COVHOST ?= scan5.coverity.com
COVPASS ?= password
# Public coverity
coverity:
test -e configure || ./autogen.sh
test -e Makefile || ./configure
make core-clean
rm -rf $(COVERITY_DIR)
cov-build --dir $(COVERITY_DIR) make core
tar czf $(COVFILE) --transform=s@.*$(TAG)@cov-int@ $(COVERITY_DIR)
@echo "Uploading to public Coverity instance..."
curl --form file=@$(COVFILE) --form project=$(PACKAGE) --form password=$(COVPASS) --form email=andrew@beekhof.net http://$(COVHOST)/cgi-bin/upload.py
rm -rf $(COVFILE) $(COVERITY_DIR)
coverity-corp:
test -e configure || ./autogen.sh
test -e Makefile || ./configure
make core-clean
rm -rf $(COVERITY_DIR)
cov-build --dir $(COVERITY_DIR) make core
@echo "Waiting for a corporate Coverity license..."
cov-analyze --dir $(COVERITY_DIR) --wait-for-license
cov-format-errors --dir $(COVERITY_DIR) --emacs-style > $(TAG).coverity
cov-format-errors --dir $(COVERITY_DIR)
- rsync $(RSYNC_OPTS) "$(COVERITY_DIR)/c/output/errors/" "$(RSYNC_DEST)/coverity/$(PACKAGE)/$(TAG)"
+ rsync $(RSYNC_OPTS) "$(COVERITY_DIR)/c/output/errors/" "$(RSYNC_DEST)/$(PACKAGE)/coverity/$(TAG)/"
make core-clean
# cov-commit-defects --host $(COVHOST) --dir $(COVERITY_DIR) --stream $(PACKAGE) --user auto --password $(COVPASS)
rm -rf $(COVERITY_DIR)
global: clean-generic
gtags -q
global-upload: global
htags -sanhIT
- rsync $(RSYNC_OPTS) HTML/ "$(RSYNC_DEST)/global/$(PACKAGE)/$(TAG)"
+ rsync $(RSYNC_OPTS) HTML/ "$(RSYNC_DEST)/$(PACKAGE)/global/$(TAG)/"
%.8.html: %.8
echo groff -mandoc `man -w ./$<` -T html > $@
groff -mandoc `man -w ./$<` -T html > $@
- rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/man/$(PACKAGE)/"
+ rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/$(PACKAGE)/man/"
%.7.html: %.7
echo groff -mandoc `man -w ./$<` -T html > $@
groff -mandoc `man -w ./$<` -T html > $@
- rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/man/$(PACKAGE)/"
+ rsync $(RSYNC_OPTS) "$@" "$(RSYNC_DEST)/$(PACKAGE)/man/"
manhtml-upload: all
find . -name "[a-z]*.[78]" -exec make \{\}.html \;
doxygen: Doxyfile
doxygen Doxyfile
doxygen-upload: doxygen
- rsync $(RSYNC_OPTS) doc/api/html/ "$(RSYNC_DEST)/doxygen/$(PACKAGE)/$(TAG)"
+ rsync $(RSYNC_OPTS) doc/api/html/ "$(RSYNC_DEST)/$(PACKAGE)/doxygen/$(TAG)/"
abi:
./abi-check pacemaker $(LAST_RELEASE) $(TAG)
abi-www:
- ./abi-check -u pacemaker $(LAST_RELEASE) $(TAG)
+ export RSYNC_DEST=$(RSYNC_DEST); ./abi-check -u pacemaker $(LAST_RELEASE) $(TAG)
www: manhtml-upload global-upload doxygen-upload
make RSYNC_DEST=$(RSYNC_DEST) -C doc www
summary:
@printf "\n* `date +"%a %b %d %Y"` `git config user.name` <`git config user.email`> $(NEXT_RELEASE)-1"
@printf "\n- Update source tarball to revision: `git log --pretty=format:%h -n 1`"
@printf "\n- Changesets: `git log --pretty=oneline $(LAST_RELEASE)..HEAD | wc -l`"
@printf "\n- Diff: "
@git diff -r $(LAST_RELEASE)..HEAD --stat include lib mcp pengine/*.c pengine/*.h cib crmd fencing lrmd tools xml | tail -n 1
rc-changes:
@make NEXT_RELEASE=$(shell echo $(LAST_RC) | sed s:-rc.*::) LAST_RELEASE=$(LAST_RC) changes
changes: summary
@printf "\n- Features added since $(LAST_RELEASE)\n"
@git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e Feature: | sed -e 's@Feature:@@' | sort -uf
@printf "\n- Changes since $(LAST_RELEASE)\n"
@git log --pretty=format:' +%s' --abbrev-commit $(LAST_RELEASE)..HEAD | grep -e High: -e Fix: -e Bug | sed -e 's@Fix:@@' -e s@High:@@ -e s@Fencing:@fencing:@ -e 's@Bug@ Bug@' -e s@PE:@pengine:@ | sort -uf
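# "changes" filters the git log since LAST_RELEASE into feature and fix
# sections keyed on the "Feature:", "Fix:"/"High:"/"Bug" commit prefixes.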
changelog:
@make changes > ChangeLog
@printf "\n">> ChangeLog
git show $(LAST_RELEASE):ChangeLog >> ChangeLog
@echo -e "\033[1;35m -- Don't forget to run the bumplibs.sh script! --\033[0m"
indent:
find . -name "*.h" -exec ./p-indent \{\} \;
find . -name "*.c" -exec ./p-indent \{\} \;
git co HEAD crmd/fsa_proto.h lib/gnu
rel-tags: tags
find . -name TAGS -exec sed -i 's:\(.*\)/\(.*\)/TAGS:\2/TAGS:g' \{\} \;
CLANG_analyzer = $(shell which scan-build)
CLANG_checkers =
check: clang cppcheck
# Extra cppcheck options: --enable=all --inconclusive --std=posix
cppcheck:
for d in replace lib mcp attrd pengine cib crmd fencing lrmd tools; do cppcheck -q $$d; done
clang:
test -e $(CLANG_analyzer)
scan-build $(CLANG_checkers:%=-enable-checker %) make clean all
# V3 = scandir unsetenv alphasort xalloc
# V2 = setenv strerror strchrnul strndup
# http://www.gnu.org/software/gnulib/manual/html_node/Initial-import.html#Initial-import
GNU_MODS = crypto/md5
gnulib-update:
-test ! -e gnulib && git clone git://git.savannah.gnu.org/gnulib.git
cd gnulib && git pull
gnulib/gnulib-tool --source-base=lib/gnu --lgpl=2 --no-vc-files --import $(GNU_MODS)
diff --git a/abi-check b/abi-check
index d28192b1b0..48220e5bc3 100755
--- a/abi-check
+++ b/abi-check
@@ -1,103 +1,100 @@
#!/bin/bash
-UPLOAD=0
+# toplevel rsync destination for www targets (without trailing slash)
+: ${RSYNC_DEST:=root@www.clusterlabs.org:/var/www/html}
+
+UPLOAD=0
if [ $1 = "-u" ]; then
UPLOAD=1; shift
fi
PACKAGE=$1; shift
function tag() {
if [[ $1 =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} ]]; then
- echo Pacemaker-$1
+ echo Pacemaker-$1
else
- echo $1
+ echo $1
fi
}
function version() {
echo $1 | sed s:.*-::
}
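# tag() maps a bare x.y.z version to its Pacemaker-x.y.z tag (and passes
# anything else through); version() strips everything up to the last '-'
# to get the bare version back.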
function extract() {
- DUMP=1
TAG=$1
VERSION=$2
if [ $VERSION = HEAD ]; then
- rm -rf abi_dumps/$PACKAGE/${PACKAGE}_$VERSION.abi.tar.gz
+ rm -rf abi_dumps/$PACKAGE/${PACKAGE}_$VERSION.abi.tar.gz
elif [ -f abi_dumps/$PACKAGE/${PACKAGE}_$VERSION.abi.tar.gz ]; then
- return
+ return
fi
echo "Building ABI dump for $*"
BUILD_ROOT=.ABI-build
rm -rf $BUILD_ROOT
git archive --prefix $BUILD_ROOT/ $TAG | tar xv
- BUILD_ROOT=`pwd`/$BUILD_ROOT
+ BUILD_ROOT=$(pwd)/$BUILD_ROOT
DESC=$BUILD_ROOT/$VERSION.xml
sed -i.sed 's: doc::' $BUILD_ROOT/Makefile.am
- sed -i.sed 's: debian::' $BUILD_ROOT/Makefile.am
- cat<<EOF>$DESC
+ cat <<EOF >$DESC
<?xml version="1.0" encoding="utf-8"?>
<descriptor>
<version>
$VERSION
</version>
<headers>
- $BUILD_ROOT/root/usr/include/pacemaker/crm
+ $BUILD_ROOT/root/usr/include/pacemaker/crm
</headers>
<libs>
EOF
( cd $BUILD_ROOT && ./autogen.sh )
( cd $BUILD_ROOT && ./configure --disable-fatal-warnings )
make -C $BUILD_ROOT V=0 DESTDIR=${BUILD_ROOT}/root install
- if [ $? != 0 ]; then
- echo "Build for $TAG failed. Repair, populate <libs/> and re-run: "
- echo " abi-compliance-checker -l $PACKAGE -dump_abi $DESC"
- echo ""
- echo "To find libraries after building:"
- echo " find $BUILD_ROOT/root -name "*.so" -print"
+ if [ $? -ne 0 ]; then
+ echo "Build for $TAG failed. Repair, populate <libs/> and re-run: "
+ echo " abi-compliance-checker -l $PACKAGE -dump_abi $DESC"
+ echo ""
+ echo "To find libraries after building:"
+ echo " find $BUILD_ROOT/root -name "*.so" -print"
else
- find $BUILD_ROOT/root -name "*.so" -print >> $DESC
+ find $BUILD_ROOT/root -name "*.so" -print >> $DESC
fi
- cat<<EOF>>$DESC
+ cat <<EOF >>$DESC
</libs>
</descriptor>
EOF
- if [ $DUMP = 1 ]; then
- abi-compliance-checker -l $PACKAGE -dump_abi $DESC
- rm -rf $BUILD_ROOT
- else
- exit 1
- fi
+ abi-compliance-checker -l $PACKAGE -dump_abi $DESC
+ rm -rf $BUILD_ROOT
}
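# Dump the ABI for every requested tag, then, when exactly two tags were
# given, compare the two dumps and optionally upload the resulting report.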
for arg in $*; do
- T=`tag $arg`
- V=`version $T`
+ T=$(tag $arg)
+ V=$(version $T)
extract $T $V
done
-if [ $# = 2 ]; then
- V1=`version $1`
- V2=`version $2`
+if [ $# -eq 2 ]; then
+ V1=$(version $1)
+ V2=$(version $2)
abi-compliance-checker -l ${PACKAGE} \
- -d1 abi_dumps/${PACKAGE}/${PACKAGE}_${V1}.abi.tar.gz \
- -d2 abi_dumps/${PACKAGE}/${PACKAGE}_${V2}.abi.tar.gz
+ -d1 abi_dumps/${PACKAGE}/${PACKAGE}_${V1}.abi.tar.gz \
+ -d2 abi_dumps/${PACKAGE}/${PACKAGE}_${V2}.abi.tar.gz
if [ $UPLOAD = 1 -a -d compat_reports/pacemaker/${V1}_to_${V2} ]; then
- rsync -azxlSD --progress compat_reports/pacemaker/${V1}_to_${V2} root@www.clusterlabs.org:/var/www/html/abi/pacemaker/
+ rsync -azxlSD --progress compat_reports/pacemaker/${V1}_to_${V2} ${RSYNC_DEST}/${PACKAGE}/abi/
fi
fi
diff --git a/configure.ac b/configure.ac
index edc70f1b84..47400bf3bc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,1822 +1,1816 @@
dnl
dnl autoconf for Pacemaker
dnl
dnl License: GNU General Public License (GPL)
dnl ===============================================
dnl Bootstrap
dnl ===============================================
AC_PREREQ(2.59)
dnl Suggested structure:
dnl information on the package
dnl checks for programs
dnl checks for libraries
dnl checks for header files
dnl checks for types
dnl checks for structures
dnl checks for compiler characteristics
dnl checks for library functions
dnl checks for system services
m4_include([version.m4])
AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org],
[pacemaker], PCMK_URL)
dnl Workaround autoconf < 2.64
if test x"${PACKAGE_URL}" = x""; then
AC_SUBST([PACKAGE_URL], PCMK_URL)
fi
PCMK_FEATURES=""
AC_CONFIG_AUX_DIR(.)
AC_CANONICAL_HOST
dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below)
dnl
dnl Internal header: include/config.h
dnl - Contains ALL defines
dnl - include/config.h.in is generated automatically by autoheader
dnl - NOT to be included in any header files except crm_internal.h
dnl (which is also not to be included in any other header files)
dnl
dnl External header: include/crm_config.h
dnl - Contains a subset of defines checked here
dnl - Manually edit include/crm_config.h.in to have configure include
dnl new defines
dnl - Should not include HAVE_* defines
dnl - Safe to include anywhere
AM_CONFIG_HEADER(include/config.h include/crm_config.h)
ALL_LINGUAS="en fr"
AC_ARG_WITH(version,
[ --with-version=version Override package version (if you're a packager needing to pretend) ],
[ PACKAGE_VERSION="$withval" ])
AC_ARG_WITH(pkg-name,
[ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ],
[ PACKAGE_NAME="$withval" ])
dnl Older distros may need: AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION)
AM_INIT_AUTOMAKE([foreign])
AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version)
dnl Versioned attributes implementation is not yet production-ready
AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, Enable versioned attributes)
PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'`
AC_SUBST(PACKAGE_SERIES)
AC_SUBST(PACKAGE_VERSION)
dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from
dnl normal compilation. When a failure occurs, it will then display the full
dnl command line
dnl Wrap in m4_ifdef to avoid breaking on older platforms
m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])])
dnl Example 2.4. Silent Custom Rule to Generate a File
dnl %-bar.pc: %.pc
dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@
CC_IN_CONFIGURE=yes
export CC_IN_CONFIGURE
LDD=ldd
dnl ========================================================================
dnl Compiler characteristics
dnl ========================================================================
AC_PROG_CC dnl Can force other with environment variable "CC".
AM_PROG_CC_C_O
AC_PROG_CC_STDC
gl_EARLY
gl_INIT
LT_INIT([dlopen])
LTDL_INIT([convenience])
AC_PROG_YACC
AM_PROG_LEX
AC_C_STRINGIZE
AC_TYPE_SIZE_T
AC_CHECK_SIZEOF(char)
AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(long long)
AC_STRUCT_TIMEZONE
dnl ===============================================
dnl Helpers
dnl ===============================================
cc_supports_flag() {
local CFLAGS="-Werror $@"
AC_MSG_CHECKING(whether $CC supports "$@")
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)],[RC=1; AC_MSG_RESULT(no)])
return $RC
}
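dnl try_extract_header_define <header> <define> <default>:
dnl compile and run a tiny program that prints the value of <define> from
dnl <header>, falling back to <default> if the define is unset or the probe
dnl cannot be built or run.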
try_extract_header_define() {
AC_MSG_CHECKING(if $2 in $1 exists)
Cfile=$srcdir/extract_define.$2.${$}
printf "#include <stdio.h>\n" > ${Cfile}.c
printf "#include <%s>\n" $1 >> ${Cfile}.c
printf "int main(int argc, char **argv) {\n" >> ${Cfile}.c
printf "#ifdef %s\n" $2 >> ${Cfile}.c
printf "printf(\"%%s\", %s);\n" $2 >> ${Cfile}.c
printf "#endif \n return 0; }\n" >> ${Cfile}.c
$CC $CFLAGS ${Cfile}.c -o ${Cfile} 2>/dev/null
value=
if test -x ${Cfile}; then
value=`${Cfile} 2>/dev/null`
fi
if test x"${value}" == x""; then
value=$3
AC_MSG_RESULT(default: $value)
else
AC_MSG_RESULT($value)
fi
printf $value
rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM ${Cfile}.gcno
}
extract_header_define() {
AC_MSG_CHECKING(for $2 in $1)
Cfile=$srcdir/extract_define.$2.${$}
printf "#include <stdio.h>\n" > ${Cfile}.c
printf "#include <%s>\n" $1 >> ${Cfile}.c
printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c
$CC $CFLAGS ${Cfile}.c -o ${Cfile}
value=`${Cfile}`
AC_MSG_RESULT($value)
printf $value
rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM ${Cfile}.gcno
}
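dnl extract_header_define is the stricter variant: the define must exist and
dnl the probe must build, as there is no fallback value.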
dnl ===============================================
dnl Configure Options
dnl ===============================================
dnl Some systems, like Solaris, require a custom package name
AC_ARG_WITH(pkgname,
[ --with-pkgname=name name for pkg (typically for Solaris) ],
[ PKGNAME="$withval" ],
[ PKGNAME="LXHAhb" ],
)
AC_SUBST(PKGNAME)
AC_ARG_ENABLE([ansi],
[ --enable-ansi force GCC to compile to ANSI/ISO standard for older compilers.
[default=no]])
AC_ARG_ENABLE([fatal-warnings],
[ --enable-fatal-warnings very pedantic and fatal warnings for gcc
[default=yes]])
AC_ARG_ENABLE([quiet],
[ --enable-quiet
Suppress make output unless there is an error
[default=no]])
AC_ARG_ENABLE([no-stack],
[ --enable-no-stack
Only build the Policy Engine and pieces needed to support it [default=no]])
AC_ARG_ENABLE([upstart],
[ --enable-upstart
Enable support for managing resources via Upstart [default=try]],
[],
[enable_upstart=try],
)
AC_ARG_ENABLE([systemd],
[ --enable-systemd
Enable support for managing resources via systemd [default=try]],
[],
[enable_systemd=try],
)
AC_ARG_ENABLE(hardening,
[ --with-hardening
Harden the resulting executables/libraries (best effort by default)],
[ HARDENING="${enableval}" ],
[ HARDENING=try ],
)
AC_ARG_WITH(corosync,
[ --with-corosync
Support the Corosync messaging and membership layer ],
[ SUPPORT_CS=$withval ],
[ SUPPORT_CS=try ],
)
AC_ARG_WITH(nagios,
[ --with-nagios
Support nagios remote monitoring ],
[ SUPPORT_NAGIOS=$withval ],
[ SUPPORT_NAGIOS=try ],
)
AC_ARG_WITH(nagios-plugin-dir,
[ --with-nagios-plugin-dir=DIR
Directory for nagios plugins [${NAGIOS_PLUGIN_DIR}]],
[ NAGIOS_PLUGIN_DIR="$withval" ]
)
AC_ARG_WITH(nagios-metadata-dir,
[ --with-nagios-metadata-dir=DIR
Directory for nagios plugins metadata [${NAGIOS_METADATA_DIR}]],
[ NAGIOS_METADATA_DIR="$withval" ]
)
AC_ARG_WITH(acl,
[ --with-acl
Support CIB ACL ],
[ SUPPORT_ACL=$withval ],
[ SUPPORT_ACL=yes ],
)
AC_ARG_WITH(cibsecrets,
[ --with-cibsecrets
Support CIB secrets ],
[ SUPPORT_CIBSECRETS=$withval ],
[ SUPPORT_CIBSECRETS=no ],
)
INITDIR=""
AC_ARG_WITH(initdir,
[ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]],
[ INITDIR="$withval" ])
SUPPORT_PROFILING=0
AC_ARG_WITH(profiling,
[ --with-profiling
Disable optimizations for effective profiling ],
[ SUPPORT_PROFILING=$withval ])
AC_ARG_WITH(coverage,
[ --with-coverage
Disable optimizations for effective coverage reporting ],
[ SUPPORT_COVERAGE=$withval ])
PUBLICAN_BRAND="common"
AC_ARG_WITH(brand,
[ --with-brand=brand Brand to use for generated documentation (set empty for no docs) [$PUBLICAN_BRAND]],
[ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ])
AC_SUBST(PUBLICAN_BRAND)
-ASCIIDOC_CLI_TYPE="pcs"
-AC_ARG_WITH(doc-cli,
- [ --with-doc-cli=cli_type CLI type to use for generated documentation. [$ASCIIDOC_CLI_TYPE]],
- [ ASCIIDOC_CLI_TYPE="$withval" ])
-AC_SUBST(ASCIIDOC_CLI_TYPE)
-
CONFIGDIR=""
AC_ARG_WITH(configdir,
[ --with-configdir=DIR
Directory for Pacemaker configuration file [${CONFIGDIR}]],
[ CONFIGDIR="$withval" ]
)
CRM_LOG_DIR=""
AC_ARG_WITH(logdir,
[ --with-logdir=DIR
Directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@],
[ CRM_LOG_DIR="$withval" ]
)
CRM_BUNDLE_DIR=""
AC_ARG_WITH(bundledir,
[ --with-bundledir=DIR
Directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@],
[ CRM_BUNDLE_DIR="$withval" ]
)
dnl ===============================================
dnl General Processing
dnl ===============================================
INIT_EXT=""
echo Our Host OS: $host_os/$host
AC_MSG_NOTICE(Sanitizing prefix: ${prefix})
case $prefix in
NONE)
prefix=/usr
dnl Fix default variables - "prefix" variable if not specified
if test "$localstatedir" = "\${prefix}/var"; then
localstatedir="/var"
fi
if test "$sysconfdir" = "\${prefix}/etc"; then
sysconfdir="/etc"
fi
;;
esac
AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix})
case $exec_prefix in
dnl For consistency with Heartbeat, map NONE->$prefix
NONE) exec_prefix=$prefix;;
prefix) exec_prefix=$prefix;;
esac
AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR})
case $INITDIR in
prefix) INITDIR=$prefix;;
"")
AC_MSG_CHECKING(which init (rc) directory to use)
for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \
/usr/local/etc/rc.d /etc/rc.d
do
if
test -d $initdir
then
INITDIR=$initdir
break
fi
done
AC_MSG_RESULT($INITDIR);;
esac
AC_SUBST(INITDIR)
AC_MSG_NOTICE(Sanitizing libdir: ${libdir})
case $libdir in
dnl For consistency with Heartbeat, map NONE->$prefix
prefix|NONE)
AC_MSG_CHECKING(which lib directory to use)
for aDir in lib64 lib
do
trydir="${exec_prefix}/${aDir}"
if
test -d ${trydir}
then
libdir=${trydir}
break
fi
done
AC_MSG_RESULT($libdir);
;;
esac
dnl Expand autoconf variables so that we don't end up with '${prefix}'
dnl in #defines and python scripts
dnl NOTE: Autoconf deliberately leaves them unexpanded to allow
dnl make exec_prefix=/foo install
dnl No longer being able to do this seems like no great loss to me...
eval prefix="`eval echo ${prefix}`"
eval exec_prefix="`eval echo ${exec_prefix}`"
eval bindir="`eval echo ${bindir}`"
eval sbindir="`eval echo ${sbindir}`"
eval libexecdir="`eval echo ${libexecdir}`"
eval datadir="`eval echo ${datadir}`"
eval sysconfdir="`eval echo ${sysconfdir}`"
eval sharedstatedir="`eval echo ${sharedstatedir}`"
eval localstatedir="`eval echo ${localstatedir}`"
eval libdir="`eval echo ${libdir}`"
eval includedir="`eval echo ${includedir}`"
eval oldincludedir="`eval echo ${oldincludedir}`"
eval infodir="`eval echo ${infodir}`"
eval mandir="`eval echo ${mandir}`"
dnl Home-grown variables
eval INITDIR="${INITDIR}"
eval docdir="`eval echo ${docdir}`"
if test x"${docdir}" = x""; then
docdir=${datadir}/doc/${PACKAGE}-${VERSION}
#docdir=${datadir}/doc/packages/${PACKAGE}
fi
AC_SUBST(docdir)
if test x"${CONFIGDIR}" = x""; then
CONFIGDIR="${sysconfdir}/sysconfig"
fi
AC_SUBST(CONFIGDIR)
if test x"${CRM_LOG_DIR}" = x""; then
CRM_LOG_DIR="${localstatedir}/log/pacemaker"
fi
AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file)
AC_SUBST(CRM_LOG_DIR)
if test x"${CRM_BUNDLE_DIR}" = x""; then
CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles"
fi
AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs)
AC_SUBST(CRM_BUNDLE_DIR)
for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
sharedstatedir localstatedir libdir includedir oldincludedir infodir \
mandir INITDIR docdir CONFIGDIR
do
dirname=`eval echo '${'${j}'}'`
if
test ! -d "$dirname"
then
AC_MSG_WARN([$j directory ($dirname) does not exist!])
fi
done
dnl This OS-based decision-making is poor autotools practice;
dnl feature-based mechanisms are strongly preferred.
dnl
dnl So keep this section to a bare minimum; regard as a "necessary evil".
case "$host_os" in
*bsd*)
AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform)
LIBS="-L/usr/local/lib"
CPPFLAGS="$CPPFLAGS -I/usr/local/include"
INIT_EXT=".sh"
;;
*solaris*)
AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform)
;;
*linux*)
AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform)
;;
darwin*)
AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform)
LIBS="$LIBS -L${prefix}/lib"
CFLAGS="$CFLAGS -I${prefix}/include"
;;
esac
AC_SUBST(INIT_EXT)
AC_MSG_NOTICE(Host CPU: $host_cpu)
case "$host_cpu" in
ppc64|powerpc64)
case $CFLAGS in
*powerpc64*) ;;
*) if test "$GCC" = yes; then
CFLAGS="$CFLAGS -m64"
fi ;;
esac
esac
AC_MSG_CHECKING(which format is needed to print uint64_t)
ac_save_CFLAGS=$CFLAGS
CFLAGS="-Wall -Werror"
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM(
[
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
],
[
int max = 512;
uint64_t bignum = 42;
char *buffer = malloc(max);
const char *random = "random";
snprintf(buffer, max-1, "<quorum id=%lu quorate=%s/>", bignum, random);
fprintf(stderr, "Result: %s\n", buffer);
]
)],
[U64T="%lu"],
[U64T="%llu"]
)
CFLAGS=$ac_save_CFLAGS
AC_MSG_RESULT($U64T)
AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t)
dnl ===============================================
dnl Program Paths
dnl ===============================================
PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
export PATH
dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL
dnl was NOT being expanded all the time thus causing things to fail.
AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13)
AM_PATH_PYTHON
AC_CHECK_PROGS(MAKE, gmake make)
AC_PATH_PROGS(HTML2TXT, lynx w3m)
AC_PATH_PROGS(HELP2MAN, help2man)
AC_PATH_PROGS(POD2MAN, pod2man, pod2man)
AC_PATH_PROGS(ASCIIDOC, asciidoc)
AC_PATH_PROGS(PUBLICAN, publican)
AC_PATH_PROGS(INKSCAPE, inkscape)
AC_PATH_PROGS(XSLTPROC, xsltproc)
AC_PATH_PROGS(XMLCATALOG, xmlcatalog)
AC_PATH_PROGS(FOP, fop)
AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh)
AC_PATH_PROGS(SCP, scp, /usr/bin/scp)
AC_PATH_PROGS(TAR, tar)
AC_PATH_PROGS(MD5, md5)
AC_PATH_PROGS(TEST, test)
PKG_PROG_PKG_CONFIG
AC_PATH_PROGS(XML2CONFIG, xml2-config)
AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
dnl Disable these until we decide if the stonith config file should be supported
dnl AC_PATH_PROGS(BISON, bison)
dnl AC_PATH_PROGS(FLEX, flex)
dnl AC_PATH_PROGS(HAVE_YACC, $YACC)
if test x"${LIBTOOL}" = x""; then
AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE})
fi
if test x"${MAKE}" = x""; then
AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE})
fi
AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"")
if test x"${HELP2MAN}" != x""; then
PCMK_FEATURES="$PCMK_FEATURES generated-manpages"
fi
MANPAGE_XSLT=""
if test x"${XSLTPROC}" != x""; then
AC_MSG_CHECKING(docbook to manpage transform)
# first try to figure out correct template using xmlcatalog query,
# resort to extensive (semi-deterministic) file search if that fails
DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current'
DOCBOOK_XSL_PATH='manpages/docbook.xsl'
MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \
| sed -n 's|^file://||p;q')
if test x"${MANPAGE_XSLT}" = x""; then
DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \
-type d | LC_ALL=C sort)
XSLT=$(basename ${DOCBOOK_XSL_PATH})
for d in ${DIRS}; do
if test -f "${d}/${XSLT}"; then
MANPAGE_XSLT="${d}/${XSLT}"
break
fi
done
fi
fi
AC_MSG_RESULT($MANPAGE_XSLT)
AC_SUBST(MANPAGE_XSLT)
AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"")
if test x"${MANPAGE_XSLT}" != x""; then
PCMK_FEATURES="$PCMK_FEATURES agent-manpages"
fi
AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"")
if test x"${ASCIIDOC}" != x""; then
PCMK_FEATURES="$PCMK_FEATURES ascii-docs"
fi
SUPPORT_STONITH_CONFIG=0
if test x"${HAVE_YACC}" != x"" -a x"${FLEX}" != x"" -a x"${BISON}" != x""; then
SUPPORT_STONITH_CONFIG=1
PCMK_FEATURES="$PCMK_FEATURES st-conf"
fi
AM_CONDITIONAL(BUILD_STONITH_CONFIG, test $SUPPORT_STONITH_CONFIG = 1)
AC_DEFINE_UNQUOTED(SUPPORT_STONITH_CONFIG, $SUPPORT_STONITH_CONFIG, Support a stand-alone stonith config file in addition to the CIB)
publican_intree_brand=no
if test x"${PUBLICAN_BRAND}" != x"" \
&& test x"${PUBLICAN}" != x"" \
&& test x"${INKSCAPE}" != x""; then
dnl special handling for clusterlabs brand (possibly in-tree version used)
test "${PUBLICAN_BRAND}" != "clusterlabs" \
|| test -d /usr/share/publican/Common_Content/clusterlabs
if test $? -ne 0; then
dnl Unknown option: brand_dir vs. Option brand_dir requires an argument
if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then
AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common])
PUBLICAN_BRAND=common
else
publican_intree_brand=yes
fi
fi
AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand])
PCMK_FEATURES="$PCMK_FEATURES publican-docs"
fi
AM_CONDITIONAL([BUILD_DOCBOOK],
[test x"${PUBLICAN_BRAND}" != x"" \
&& test x"${PUBLICAN}" != x"" \
&& test x"${INKSCAPE}" != x""])
AM_CONDITIONAL([PUBLICAN_INTREE_BRAND],
[test x"${publican_intree_brand}" = x"yes"])
dnl ========================================================================
dnl checks for library functions to replace them
dnl
dnl NoSuchFunctionName:
dnl is a dummy function which no system supplies. It is here to make
dnl the system compile semi-correctly on OpenBSD which doesn't know
dnl how to create an empty archive
dnl
dnl scandir: Only on BSD.
dnl System-V systems may have it, but hidden and/or deprecated.
dnl A replacement function is supplied for it.
dnl
dnl setenv: is some bsdish function that should also be avoided (use
dnl putenv instead)
dnl On the other hand, putenv doesn't provide the right API for the
dnl code and has memory leaks designed in (sigh...). Fortunately,
dnl a replacement function is supplied for it.
dnl
dnl strerror: returns a string that corresponds to an errno.
dnl A replacement function is supplied for it.
dnl
dnl strnlen: is a gnu function similar to strlen, but safer.
dnl We wrote a tolerably-fast replacement function for it.
dnl
dnl strndup: is a gnu function similar to strdup, but safer.
dnl We wrote a tolerably-fast replacement function for it.
AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup)
dnl ===============================================
dnl Libraries
dnl ===============================================
AC_CHECK_LIB(socket, socket) dnl -lsocket
AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc...
AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux)
AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64)
AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available )
AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available)
AC_CHECK_FUNCS([sched_setscheduler])
AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary
AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions
AC_CHECK_HEADERS(uuid/uuid.h)
if test "x$ac_cv_func_uuid_unparse" != xyes; then
AC_MSG_ERROR(You do not have the libuuid development package installed)
fi
if test x"${PKG_CONFIG}" = x""; then
AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE})
fi
if
$PKG_CONFIG --exists glib-2.0
then
GLIBCONFIG="$PKG_CONFIG glib-2.0"
else
set -x
echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH
$PKG_CONFIG --exists glib-2.0; echo $?
$PKG_CONFIG --cflags glib-2.0; echo $?
$PKG_CONFIG glib-2.0; echo $?
set +x
AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE})
fi
AC_MSG_RESULT(using $GLIBCONFIG)
#
# Where is dlopen?
#
if test "$ac_cv_lib_c_dlopen" = yes; then
LIBADD_DL=""
elif test "$ac_cv_lib_dl_dlopen" = yes; then
LIBADD_DL=-ldl
else
LIBADD_DL=${lt_cv_dlopen_libs}
fi
if test "X$GLIBCONFIG" != X; then
AC_MSG_CHECKING(for special glib includes: )
GLIBHEAD=`$GLIBCONFIG --cflags`
AC_MSG_RESULT($GLIBHEAD)
CPPFLAGS="$CPPFLAGS $GLIBHEAD"
AC_MSG_CHECKING(for glib library flags)
GLIBLIB=`$GLIBCONFIG --libs`
AC_MSG_RESULT($GLIBLIB)
LIBS="$LIBS $GLIBLIB"
fi
dnl FreeBSD needs -lcompat for ftime() used by lrmd.c
AC_CHECK_LIB([compat], [ftime], [COMPAT_LIBS='-lcompat'])
AC_SUBST(COMPAT_LIBS)
dnl ========================================================================
dnl Headers
dnl ========================================================================
AC_HEADER_STDC
AC_CHECK_HEADERS(arpa/inet.h)
AC_CHECK_HEADERS(asm/types.h)
AC_CHECK_HEADERS(assert.h)
AC_CHECK_HEADERS(auth-client.h)
AC_CHECK_HEADERS(ctype.h)
AC_CHECK_HEADERS(dirent.h)
AC_CHECK_HEADERS(errno.h)
AC_CHECK_HEADERS(fcntl.h)
AC_CHECK_HEADERS(getopt.h)
AC_CHECK_HEADERS(glib.h)
AC_CHECK_HEADERS(grp.h)
AC_CHECK_HEADERS(limits.h)
AC_CHECK_HEADERS(linux/errqueue.h)
AC_CHECK_HEADERS(linux/swab.h)
AC_CHECK_HEADERS(malloc.h)
AC_CHECK_HEADERS(netdb.h)
AC_CHECK_HEADERS(netinet/in.h)
AC_CHECK_HEADERS(netinet/ip.h)
AC_CHECK_HEADERS(pam/pam_appl.h)
AC_CHECK_HEADERS(pthread.h)
AC_CHECK_HEADERS(pwd.h)
AC_CHECK_HEADERS(security/pam_appl.h)
AC_CHECK_HEADERS(sgtty.h)
AC_CHECK_HEADERS(signal.h)
AC_CHECK_HEADERS(stdarg.h)
AC_CHECK_HEADERS(stddef.h)
AC_CHECK_HEADERS(stdio.h)
AC_CHECK_HEADERS(stdlib.h)
AC_CHECK_HEADERS(string.h)
AC_CHECK_HEADERS(strings.h)
AC_CHECK_HEADERS(sys/dir.h)
AC_CHECK_HEADERS(sys/ioctl.h)
AC_CHECK_HEADERS(sys/param.h)
AC_CHECK_HEADERS(sys/poll.h)
AC_CHECK_HEADERS(sys/reboot.h)
AC_CHECK_HEADERS(sys/resource.h)
AC_CHECK_HEADERS(sys/select.h)
AC_CHECK_HEADERS(sys/socket.h)
AC_CHECK_HEADERS(sys/signalfd.h)
AC_CHECK_HEADERS(sys/sockio.h)
AC_CHECK_HEADERS(sys/stat.h)
AC_CHECK_HEADERS(sys/time.h)
AC_CHECK_HEADERS(sys/timeb.h)
AC_CHECK_HEADERS(sys/types.h)
AC_CHECK_HEADERS(sys/uio.h)
AC_CHECK_HEADERS(sys/un.h)
AC_CHECK_HEADERS(sys/utsname.h)
AC_CHECK_HEADERS(sys/wait.h)
AC_CHECK_HEADERS(time.h)
AC_CHECK_HEADERS(unistd.h)
AC_CHECK_HEADERS(winsock.h)
dnl These headers need prerequisites before the tests will pass
dnl AC_CHECK_HEADERS(net/if.h)
dnl AC_CHECK_HEADERS(netinet/icmp6.h)
dnl AC_CHECK_HEADERS(netinet/ip6.h)
dnl AC_CHECK_HEADERS(netinet/ip_icmp.h)
AC_MSG_CHECKING(for special libxml2 includes)
if test "x$XML2CONFIG" = "x"; then
AC_MSG_ERROR(libxml2 config not found)
else
XML2HEAD="`$XML2CONFIG --cflags`"
AC_MSG_RESULT($XML2HEAD)
AC_CHECK_LIB(xml2, xmlReadMemory)
AC_CHECK_LIB(xslt, xsltApplyStylesheet)
fi
CPPFLAGS="$CPPFLAGS $XML2HEAD"
AC_CHECK_HEADERS(libxml/xpath.h)
AC_CHECK_HEADERS(libxslt/xslt.h)
if test "$ac_cv_header_libxml_xpath_h" != "yes"; then
AC_MSG_ERROR(The libxml development headers were not found)
fi
if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then
AC_MSG_ERROR(The libxslt development headers were not found)
fi
AC_CACHE_CHECK(whether __progname and __progname_full are available,
pf_cv_var_progname,
AC_TRY_LINK([extern char *__progname, *__progname_full;],
[__progname = "foo"; __progname_full = "foo bar";],
pf_cv_var_progname="yes", pf_cv_var_progname="no"))
if test "$pf_cv_var_progname" = "yes"; then
AC_DEFINE(HAVE___PROGNAME,1,[ ])
fi
dnl ========================================================================
dnl Structures
dnl ========================================================================
AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include <lrm/lrm_api.h>]])
AC_CHECK_MEMBER([struct dirent.d_type],
AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
[#include <dirent.h>])
dnl ========================================================================
dnl Functions
dnl ========================================================================
AC_CHECK_FUNCS(g_log_set_default_handler)
AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function]))
AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function]))
dnl ========================================================================
dnl bzip2
dnl ========================================================================
AC_CHECK_HEADERS(bzlib.h)
AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress)
if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then
AC_MSG_ERROR(BZ2 libraries not found)
fi
if test x$ac_cv_header_bzlib_h != xyes; then
AC_MSG_ERROR(BZ2 Development headers not found)
fi
dnl ========================================================================
dnl sighandler_t is missing from Illumos, Solaris11 systems
dnl ========================================================================
AC_MSG_CHECKING([for sighandler_t])
AC_TRY_COMPILE([#include <signal.h>],[sighandler_t *f;],
has_sighandler_t=yes,has_sighandler_t=no)
AC_MSG_RESULT($has_sighandler_t)
if test "$has_sighandler_t" = "yes" ; then
AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] )
fi
dnl ========================================================================
dnl ncurses
dnl ========================================================================
dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS(curses.h)
AC_CHECK_HEADERS(curses/curses.h)
AC_CHECK_HEADERS(ncurses.h)
AC_CHECK_HEADERS(ncurses/ncurses.h)
dnl Although n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
if test "$ac_cv_header_ncurses_h" = "yes"; then
AC_CHECK_LIB(ncurses, printw,
[AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]
)
CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
fi
if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then
AC_CHECK_LIB(ncurses, printw,
[AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]
)
CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
fi
dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]
)
fi
dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]
)
fi
if test "x$CURSESLIBS" != "x"; then
PCMK_FEATURES="$PCMK_FEATURES ncurses"
fi
dnl Check for printw() prototype compatibility
if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then
ac_save_LIBS=$LIBS
LIBS="$CURSESLIBS"
ac_save_CFLAGS=$CFLAGS
CFLAGS="-Wcast-qual -Werror"
# avoid broken test because of hardened build environment in Fedora 23+
# - https://fedoraproject.org/wiki/Changes/Harden_All_Packages
# - https://bugzilla.redhat.com/1297985
if cc_supports_flag -fPIC; then
CFLAGS="$CFLAGS -fPIC"
fi
AC_MSG_CHECKING(whether printw() requires argument of "const char *")
AC_LINK_IFELSE(
[AC_LANG_PROGRAM(
[
#if defined(HAVE_NCURSES_H)
# include <ncurses.h>
#elif defined(HAVE_NCURSES_NCURSES_H)
# include <ncurses/ncurses.h>
#elif defined(HAVE_CURSES_H)
# include <curses.h>
#endif
],
[printw((const char *)"Test");]
)],
[ac_cv_compatible_printw=yes],
[ac_cv_compatible_printw=no]
)
LIBS=$ac_save_LIBS
CFLAGS=$ac_save_CFLAGS
AC_MSG_RESULT([$ac_cv_compatible_printw])
if test "$ac_cv_compatible_printw" = no; then
AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.])
AC_MSG_NOTICE([Disabling curses])
AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?])
fi
fi
AC_SUBST(CURSESLIBS)
dnl ========================================================================
dnl Profiling and GProf
dnl ========================================================================
AC_MSG_NOTICE(Old CFLAGS: $CFLAGS)
case $SUPPORT_COVERAGE in
1|yes|true)
SUPPORT_PROFILING=1
PCMK_FEATURES="$PCMK_FEATURES coverage"
CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage"
dnl During linking, make sure to specify -lgcov or -coverage
dnl Enable gprof
#LIBS="$LIBS -pg"
#CFLAGS="$CFLAGS -pg"
;;
esac
case $SUPPORT_PROFILING in
1|yes|true)
SUPPORT_PROFILING=1
dnl Disable various compiler optimizations
CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin "
dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls"
dnl Turn off optimization so tools can get accurate line numbers
CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'`
CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2"
dnl Update features
PCMK_FEATURES="$PCMK_FEATURES profile"
;;
*) SUPPORT_PROFILING=0;;
esac
AC_MSG_NOTICE(New CFLAGS: $CFLAGS)
AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling)
dnl ========================================================================
dnl Cluster infrastructure - LibQB
dnl ========================================================================
if test x${enable_no_stack} = xyes; then
SUPPORT_CS=no
fi
PKG_CHECK_MODULES(libqb, libqb >= 0.13)
CPPFLAGS="$libqb_CFLAGS $CPPFLAGS"
LIBS="$libqb_LIBS $LIBS"
AC_CHECK_HEADERS(qb/qbipc_common.h)
AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set)
PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc"
AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size, AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1, [Have qb_ipcc_get_buffer_size function]))
dnl Support Linux-HA fence agents if available
if test "$cross_compiling" != "yes"; then
CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat"
fi
AC_CHECK_HEADERS(stonith/stonith.h)
if test "$ac_cv_header_stonith_stonith_h" = "yes"; then
dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols
dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb
AC_CHECK_LIB(pils, PILLoadPlugin)
AC_CHECK_LIB(plumb, G_main_add_IPC_Channel)
PCMK_FEATURES="$PCMK_FEATURES lha-fencing"
fi
dnl ===============================================
dnl Variables needed for substitution
dnl ===============================================
CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
AC_SUBST(CRM_SCHEMA_DIRECTORY)
CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores"
AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons)
AC_SUBST(CRM_CORE_DIR)
CRM_DAEMON_USER="hacluster"
AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_USER)
CRM_DAEMON_GROUP="haclient"
AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_GROUP)
CRM_STATE_DIR=${localstatedir}/run/crm
AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets)
AC_SUBST(CRM_STATE_DIR)
CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker
AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons)
AC_SUBST(CRM_PACEMAKER_DIR)
CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps)
AC_SUBST(CRM_BLACKBOX_DIR)
PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs)
AC_SUBST(PE_STATE_DIR)
CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files)
AC_SUBST(CRM_CONFIG_DIR)
CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts"
AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data)
AC_SUBST(CRM_CONFIG_CTS)
CRM_DAEMON_DIR="${libexecdir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
AC_SUBST(CRM_DAEMON_DIR)
HA_STATE_DIR="${localstatedir}/run"
AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where sbd keeps its PID file)
AC_SUBST(HA_STATE_DIR)
CRM_RSCTMP_DIR=`try_extract_header_define agent_config.h HA_RSCTMPDIR $HA_STATE_DIR/resource-agents`
AC_MSG_CHECKING(Scratch dir for resource agents)
AC_MSG_RESULT($CRM_RSCTMP_DIR)
AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files)
AC_SUBST(CRM_RSCTMP_DIR)
OCF_ROOT_DIR="/usr/lib/ocf"
if test "X$OCF_ROOT_DIR" = X; then
AC_MSG_ERROR(Could not locate OCF directory)
fi
AC_SUBST(OCF_ROOT_DIR)
OCF_RA_DIR="$OCF_ROOT_DIR/resource.d"
AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs)
AC_SUBST(OCF_RA_DIR)
RH_STONITH_DIR="$sbindir"
AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents)
AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir", Location for system binaries)
RH_STONITH_PREFIX="fence_"
AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents)
AC_PATH_PROGS(GIT, git false)
AC_MSG_CHECKING(build version)
BUILD_VERSION=$Format:%h$
if test $BUILD_VERSION != ":%h$"; then
AC_MSG_RESULT(archive hash: $BUILD_VERSION)
elif test -x $GIT -a -d .git; then
BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
AC_MSG_RESULT(git hash: $BUILD_VERSION)
else
# The current directory name makes a reasonable default
# Most generated archives will include the hash or tag
BASE=`basename $PWD`
BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
AC_MSG_RESULT(directory based hash: $BUILD_VERSION)
fi
AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_SUBST(BUILD_VERSION)
HAVE_dbus=1
HAVE_upstart=0
HAVE_systemd=0
PKG_CHECK_MODULES(DBUS, dbus-1, ,HAVE_dbus=0)
AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus)
AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
if test $HAVE_dbus = 1; then
CFLAGS="$CFLAGS `$PKG_CONFIG --cflags dbus-1`"
fi
DBUS_LIBS="$CFLAGS `$PKG_CONFIG --libs dbus-1`"
AC_SUBST(DBUS_LIBS)
AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])
if test "x${enable_systemd}" != xno; then
if test $HAVE_dbus = 0; then
if test "x${enable_systemd}" = xyes; then
AC_MSG_FAILURE([cannot enable systemd without DBus])
else
enable_systemd=no
fi
fi
if test "x${enable_systemd}" = xtry; then
AC_MSG_CHECKING([for systemd version query result via dbus-send])
ret=$({ dbus-send --system --print-reply \
--dest=org.freedesktop.systemd1 \
/org/freedesktop/systemd1 \
org.freedesktop.DBus.Properties.Get \
string:org.freedesktop.systemd1.Manager \
string:Version 2>/dev/null \
|| echo "this borked"; } | tail -n1)
# sanitize output a bit (interested just in value, not type),
# ret is intentionally unenquoted so as to normalize whitespace
ret=$(echo ${ret} | cut -d' ' -f2-)
AC_MSG_RESULT([${ret}])
if test "x${ret}" != xborked \
|| systemctl --version 2>/dev/null | grep -q systemd; then
enable_systemd=yes
else
enable_systemd=no
fi
fi
fi
AC_MSG_CHECKING([whether to enable support for managing resources via systemd])
AC_MSG_RESULT([${enable_systemd}])
if test "x${enable_systemd}" = xyes; then
HAVE_systemd=1
PCMK_FEATURES="$PCMK_FEATURES systemd"
AC_MSG_CHECKING([for systemd path for system unit files])
systemdunitdir="${systemdunitdir-}"
PKG_CHECK_VAR([systemdunitdir], [systemd],
[systemdsystemunitdir], [],[
systemdunitdir=no
])
AC_MSG_RESULT([${systemdunitdir}])
if test "x${systemdunitdir}" = xno; then
AC_MSG_FAILURE([cannot enable systemd when systemdunitdir unresolved])
fi
fi
AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services)
AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1)
AC_SUBST(SUPPORT_SYSTEMD)
if test "x${enable_upstart}" != xno; then
if test $HAVE_dbus = 0; then
if test "x${enable_upstart}" = xyes; then
AC_MSG_FAILURE([cannot enable Upstart without DBus])
else
enable_upstart=no
fi
fi
if test "x${enable_upstart}" = xtry; then
AC_MSG_CHECKING([for Upstart version query result via dbus-send])
ret=$({ dbus-send --system --print-reply --dest=com.ubuntu.Upstart \
/com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \
string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \
|| echo "this borked"; } | tail -n1)
# sanitize output a bit (interested just in value, not type),
# ret is intentionally unenquoted so as to normalize whitespace
ret=$(echo ${ret} | cut -d' ' -f2-)
AC_MSG_RESULT([${ret}])
if test "x${ret}" != xborked \
|| initctl --version 2>/dev/null | grep -q upstart; then
enable_upstart=yes
else
enable_upstart=no
fi
fi
fi
AC_MSG_CHECKING([whether to enable support for managing resources via Upstart])
AC_MSG_RESULT([${enable_upstart}])
if test "x${enable_upstart}" = xyes; then
HAVE_upstart=1
PCMK_FEATURES="$PCMK_FEATURES upstart"
fi
AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services)
AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1)
AC_SUBST(SUPPORT_UPSTART)
case $SUPPORT_NAGIOS in
1|yes|true|try)
SUPPORT_NAGIOS=1;;
*)
SUPPORT_NAGIOS=0;;
esac
if test $SUPPORT_NAGIOS = 1; then
PCMK_FEATURES="$PCMK_FEATURES nagios"
fi
AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins)
AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1)
if test x"$NAGIOS_PLUGIN_DIR" = x""; then
NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"
fi
AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)
AC_SUBST(NAGIOS_PLUGIN_DIR)
if test x"$NAGIOS_METADATA_DIR" = x""; then
NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"
fi
AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata)
AC_SUBST(NAGIOS_METADATA_DIR)
STACKS=""
CLUSTERLIBS=""
dnl ========================================================================
dnl Cluster stack - Corosync
dnl ========================================================================
dnl Normalize the values
case $SUPPORT_CS in
1|yes|true)
SUPPORT_CS=yes
missingisfatal=1;;
try) missingisfatal=0;;
*) SUPPORT_CS=no;;
esac
AC_MSG_CHECKING(for native corosync)
COROSYNC_LIBS=""
if test $SUPPORT_CS = no; then
AC_MSG_RESULT(no (disabled))
SUPPORT_CS=0
else
AC_MSG_RESULT($SUPPORT_CS)
SUPPORT_CS=1
PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal
PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal
PKG_CHECK_MODULES(cmap, libcmap) dnl Fatal
PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal
CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS $cmap_CFLAGS $quorum_CFLAGS"
COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS"
CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS"
AC_CHECK_LIB(corosync_common, cs_strerror)
STACKS="$STACKS corosync-native"
fi
AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer)
AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1)
AC_SUBST(SUPPORT_COROSYNC)
dnl
dnl Cluster stack - Sanity
dnl
if test x${enable_no_stack} = xyes; then
AC_MSG_NOTICE(No cluster stack supported. Just building the Policy Engine)
PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack"
else
AC_MSG_CHECKING(for supported stacks)
if test x"$STACKS" = x; then
AC_MSG_FAILURE(You must support at least one cluster stack)
fi
AC_MSG_RESULT($STACKS)
PCMK_FEATURES="$PCMK_FEATURES $STACKS"
fi
PCMK_FEATURES="$PCMK_FEATURES atomic-attrd"
AC_SUBST(CLUSTERLIBS)
dnl ========================================================================
dnl ACL
dnl ========================================================================
case $SUPPORT_ACL in
1|yes|true) missingisfatal=1;;
try) missingisfatal=0;;
*) SUPPORT_ACL=no;;
esac
AC_MSG_CHECKING(for acl support)
if test $SUPPORT_ACL = no; then
AC_MSG_RESULT(no (disabled))
SUPPORT_ACL=0
else
AC_MSG_RESULT($SUPPORT_ACL)
SUPPORT_ACL=1
AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set)
if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then
SUPPORT_ACL=0
fi
if test $SUPPORT_ACL = 0; then
if test $missingisfatal = 0; then
AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0)
else
AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0)
fi
fi
fi
if test $SUPPORT_ACL = 1; then
PCMK_FEATURES="$PCMK_FEATURES acls"
fi
AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1")
AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL)
dnl ========================================================================
dnl CIB secrets
dnl ========================================================================
case $SUPPORT_CIBSECRETS in
1|yes|true|try)
SUPPORT_CIBSECRETS=1;;
*)
SUPPORT_CIBSECRETS=0;;
esac
AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets)
AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1)
if test $SUPPORT_CIBSECRETS = 1; then
PCMK_FEATURES="$PCMK_FEATURES cibsecrets"
LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets"
AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets)
AC_SUBST(LRM_CIBSECRETS_DIR)
fi
dnl ========================================================================
dnl GnuTLS
dnl ========================================================================
AC_CHECK_HEADERS(gnutls/gnutls.h)
AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h)
dnl GnuTLS library: attempt to determine flags via the 'libgnutls-config' program.
dnl If there is no 'libgnutls-config', fall back to traditional autoconf checks.
AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config)
if test -n "$LIBGNUTLS_CONFIG"; then
AC_MSG_CHECKING(for gnutls header flags)
GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`";
AC_MSG_RESULT($GNUTLSHEAD)
AC_MSG_CHECKING(for gnutls library flags)
GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`";
AC_MSG_RESULT($GNUTLSLIBS)
fi
AC_CHECK_LIB(gnutls, gnutls_init)
AC_CHECK_FUNCS(gnutls_priority_set_direct)
AC_SUBST(GNUTLSHEAD)
AC_SUBST(GNUTLSLIBS)
dnl ========================================================================
dnl System Health
dnl ========================================================================
dnl Check if servicelog development package is installed
SERVICELOG=servicelog-1
SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG packages)
if
$PKG_CONFIG --exists $SERVICELOG
then
PKG_CHECK_MODULES([SERVICELOG], [servicelog-1])
SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")
dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages)
if
$PKG_CONFIG --exists $OPENIPMI $SERVICELOG
then
PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix])
OPENIPMI_SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")
dnl ========================================================================
dnl Compiler flags
dnl ========================================================================
dnl Make sure that CFLAGS is not exported. If the user did
dnl not have CFLAGS in their environment then this should have
dnl no effect. However, if CFLAGS was exported from the user's
dnl environment, then the new CFLAGS will also be exported
dnl to subprocesses.
if export | fgrep " CFLAGS=" > /dev/null; then
SAVED_CFLAGS="$CFLAGS"
unset CFLAGS
CFLAGS="$SAVED_CFLAGS"
unset SAVED_CFLAGS
fi
AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])
CC_EXTRAS=""
if test "$GCC" != yes; then
CFLAGS="$CFLAGS -g"
enable_fatal_warnings=no
else
CFLAGS="$CFLAGS -ggdb"
dnl When we don't have diagnostic push / pull, we can't explicitly disable
dnl checking for nonliteral formats in the places where they occur on purpose,
dnl so we have to disable nonliteral format checking globally, because we
dnl abort on warnings.
dnl What makes things really ugly is that nonliteral format checking is
dnl available as an extra switch in very recent gcc, but for older
dnl gcc it is only part of -Wformat=2.
dnl So: if we have push/pull, we can enable -Wformat=2 -Wformat-nonliteral;
dnl if we don't have push/pull but do have -Wformat-nonliteral, we can enable
dnl just -Wformat=2; otherwise we enable neither.
gcc_diagnostic_push_pull=no
SAVED_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS -Werror"
AC_MSG_CHECKING([for gcc diagnostic push / pull])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#pragma GCC diagnostic push
#pragma GCC diagnostic pop
]])], [
AC_MSG_RESULT([yes])
gcc_diagnostic_push_pull=yes
], AC_MSG_RESULT([no]))
CFLAGS="$SAVED_CFLAGS"
if cc_supports_flag "-Wformat-nonliteral"; then
gcc_format_nonliteral=yes
else
gcc_format_nonliteral=no
fi
# We had to eliminate -Wnested-externs because of libtool changes
# Make sure to order options so that the former stand for prerequisites
# of the latter (e.g., -Wformat-nonliteral requires -Wformat).
EXTRA_FLAGS="-fgnu89-inline
-Wall
-Waggregate-return
-Wbad-function-cast
-Wcast-align
-Wdeclaration-after-statement
-Wendif-labels
-Wfloat-equal
-Wformat-security
-Wmissing-prototypes
-Wmissing-declarations
-Wnested-externs
-Wno-long-long
-Wno-strict-aliasing
-Wpointer-arith
-Wstrict-prototypes
-Wwrite-strings
-Wunused-but-set-variable
-Wunsigned-char"
if test "x$gcc_diagnostic_push_pull" = "xyes"; then
AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [],
[gcc can complain about nonliterals in format])
EXTRA_FLAGS="$EXTRA_FLAGS
-Wformat=2
-Wformat-nonliteral"
else
if test "x$gcc_format_nonliteral" = "xyes"; then
EXTRA_FLAGS="$EXTRA_FLAGS
-Wformat=2"
fi
fi
# Additional warnings it might be nice to enable one day
# -Wshadow
# -Wunreachable-code
for j in $EXTRA_FLAGS
do
if cc_supports_flag $CC_EXTRAS $j; then
    CC_EXTRAS="$CC_EXTRAS $j"
fi
done
dnl System specific options
case "$host_os" in
*linux*|*bsd*)
if test "${enable_fatal_warnings}" = "unknown"; then
enable_fatal_warnings=yes
fi
;;
esac
if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then
enable_fatal_warnings=yes
else
enable_fatal_warnings=no
fi
if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then
AC_MSG_NOTICE(Enabling ANSI Compatibility)
CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY"
fi
AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS})
fi
dnl
dnl Hardening flags
dnl
dnl The prime control of whether to apply (targeted) hardening build flags and
dnl which ones is --{enable,disable}-hardening option passed to ./configure:
dnl
dnl --enable-hardening=try (default):
dnl depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE,
dnl CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables
dnl (see below) is set and non-null, all these custom flags (even if not
dnl set) are used as they are; otherwise, a best effort is made to offer
dnl reasonably strong hardening in several categories (RELRO, PIE,
dnl "bind now", stack protector) according to what the selected toolchain
dnl can offer
dnl
dnl --enable-hardening:
dnl same effect as --enable-hardening=try, except that the environment
dnl variables in question are ignored (suppressed)
dnl
dnl --disable-hardening:
dnl do not apply any targeted hardening measures at all
dnl
dnl The user-injected environment variables that regulate the hardening in
dnl default case are as follows:
dnl
dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE
dnl compiler and linker flags (respectively) for daemon programs
dnl (attrd, cib, crmd, lrmd, stonithd, pacemakerd, pacemaker_remoted,
dnl pengine)
dnl
dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB
dnl compiler and linker flags (respectively) for libraries linked
dnl with the daemon programs
dnl
dnl Note that these are purposely targeted variables (addressing particular
dnl targets all over the scattered Makefiles) and have no effect outside of
dnl the intended scope (e.g., CLI utilities). For a global reach,
dnl use CFLAGS, LDFLAGS, etc. as usual.
dnl
dnl For guidance on the suitable flags consult, for instance:
dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description
dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils
dnl
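dnl As a minimal illustration (the flag values below are examples only, not
dnl recommendations or project defaults): with the default
dnl --enable-hardening=try, a caller may supply the targeted flags explicitly,
dnl in which case they are taken verbatim, e.g.:
dnl
dnl   ./configure \
dnl       CFLAGS_HARDENED_EXE="-fPIE" \
dnl       LDFLAGS_HARDENED_EXE="-pie -Wl,-z,relro -Wl,-z,now" \
dnl       LDFLAGS_HARDENED_LIB="-Wl,-z,relro -Wl,-z,now"
dnl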
if test "x${HARDENING}" != "xtry"; then
unset CFLAGS_HARDENED_EXE
unset CFLAGS_HARDENED_LIB
unset LDFLAGS_HARDENED_EXE
unset LDFLAGS_HARDENED_LIB
fi
if test "x${HARDENING}" = "xno"; then
AC_MSG_NOTICE([Hardening: explicitly disabled])
elif test "x${HARDENING}" = "xyes" \
|| test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then
dnl We'll figure out on our own...
CFLAGS_HARDENED_EXE=
CFLAGS_HARDENED_LIB=
LDFLAGS_HARDENED_EXE=
LDFLAGS_HARDENED_LIB=
relro=0
pie=0
bindnow=0
# daemons incl. libs: partial RELRO
flag="-Wl,-z,relro"
CC_CHECK_LDFLAGS(["${flag}"],
[LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}";
relro=1]
)
# daemons: PIE for both CFLAGS and LDFLAGS
if cc_supports_flag -fPIE; then
flag="-pie"
CC_CHECK_LDFLAGS(["${flag}"],
[CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE";
LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
pie=1]
)
fi
# daemons incl. libs: full RELRO if sensible + as-needed linking
# so as to possibly mitigate startup performance
# hit caused by excessive linking with unneeded
# libraries
if test "${relro}" = 1 && test "${pie}" = 1; then
flag="-Wl,-z,now"
CC_CHECK_LDFLAGS(["${flag}"],
[LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}";
bindnow=1]
)
fi
if test "${bindnow}" = 1; then
flag="-Wl,--as-needed"
CC_CHECK_LDFLAGS(["${flag}"],
[LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"]
)
fi
# universal: prefer strong > all > default stack protector if possible
flag=
if cc_supports_flag -fstack-protector-strong; then
flag="-fstack-protector-strong"
elif cc_supports_flag -fstack-protector-all; then
flag="-fstack-protector-all"
elif cc_supports_flag -fstack-protector; then
flag="-fstack-protector"
fi
if test -n "${flag}"; then
CC_EXTRAS="${CC_EXTRAS} ${flag}"
stackprot=1
fi
if test "${relro}" = 1 \
|| test "${pie}" = 1 \
|| test "${stackprot}" = 1; then
AC_MSG_NOTICE(
[Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}])
else
AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])
fi
else
AC_MSG_NOTICE([Hardening: using custom flags])
fi
CFLAGS="$CFLAGS $CC_EXTRAS"
NON_FATAL_CFLAGS="$CFLAGS"
AC_SUBST(NON_FATAL_CFLAGS)
dnl
dnl We reset CFLAGS to include our warnings *after* all function
dnl checking is done, so that our warning flags don't keep the
dnl AC_*FUNCS() calls above from working. In particular, -Werror will
dnl *always* cause us trouble if we set it before here.
dnl
dnl
if test "x${enable_fatal_warnings}" = xyes ; then
AC_MSG_NOTICE(Enabling Fatal Warnings)
CFLAGS="$CFLAGS -Werror"
fi
AC_SUBST(CFLAGS)
dnl This is useful for use in Makefiles that need to remove one specific flag
CFLAGS_COPY="$CFLAGS"
AC_SUBST(CFLAGS_COPY)
AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries
AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff...
AC_SUBST(LOCALE)
dnl Options for cleaning up the compiler output
QUIET_LIBTOOL_OPTS=""
QUIET_MAKE_OPTS=""
if test "x${enable_quiet}" = "xyes"; then
QUIET_LIBTOOL_OPTS="--quiet"
QUIET_MAKE_OPTS="--quiet"
fi
AC_MSG_RESULT(Suppress make details: ${enable_quiet})
dnl Put the above variables to use
LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
MAKE="${MAKE} \$(QUIET_MAKE_OPTS)"
AC_SUBST(CC)
AC_SUBST(MAKE)
AC_SUBST(LIBTOOL)
AC_SUBST(QUIET_MAKE_OPTS)
AC_SUBST(QUIET_LIBTOOL_OPTS)
AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features)
AC_SUBST(PCMK_FEATURES)
dnl The Makefiles and shell scripts we output
AC_CONFIG_FILES(Makefile \
Doxyfile \
coverage.sh \
cts/Makefile \
cts/CTSvars.py \
cts/LSBDummy \
cts/benchmark/Makefile \
cts/benchmark/clubench \
cts/lxc_autogen.sh \
cib/Makefile \
attrd/Makefile \
crmd/Makefile \
pengine/Makefile \
pengine/regression.core.sh \
doc/Makefile \
doc/Clusters_from_Scratch/publican.cfg \
doc/Pacemaker_Development/publican.cfg \
doc/Pacemaker_Explained/publican.cfg \
doc/Pacemaker_Remote/publican.cfg \
include/Makefile \
include/crm/Makefile \
include/crm/cib/Makefile \
include/crm/common/Makefile \
include/crm/cluster/Makefile \
include/crm/fencing/Makefile \
include/crm/pengine/Makefile \
replace/Makefile \
lib/Makefile \
lib/pacemaker.pc \
lib/pacemaker-cib.pc \
lib/pacemaker-lrmd.pc \
lib/pacemaker-service.pc \
lib/pacemaker-pengine.pc \
lib/pacemaker-fencing.pc \
lib/pacemaker-cluster.pc \
lib/common/Makefile \
lib/cluster/Makefile \
lib/cib/Makefile \
lib/pengine/Makefile \
lib/transition/Makefile \
lib/fencing/Makefile \
lib/lrmd/Makefile \
lib/services/Makefile \
mcp/Makefile \
mcp/pacemaker \
mcp/pacemaker.service \
mcp/pacemaker.upstart \
mcp/pacemaker.combined.upstart \
fencing/Makefile \
fencing/regression.py \
lrmd/Makefile \
lrmd/regression.py \
lrmd/pacemaker_remote.service \
lrmd/pacemaker_remote \
extra/Makefile \
extra/alerts/Makefile \
extra/resources/Makefile \
extra/logrotate/Makefile \
extra/logrotate/pacemaker \
tools/Makefile \
tools/crm_report \
tools/report.common \
tools/cibsecret \
tools/crm_mon.service \
tools/crm_mon.upstart \
xml/Makefile \
lib/gnu/Makefile \
)
dnl Now process the entire list of files added by previous
dnl calls to AC_CONFIG_FILES()
AC_OUTPUT()
dnl *****************
dnl Configure summary
dnl *****************
AC_MSG_RESULT([])
AC_MSG_RESULT([$PACKAGE configuration:])
AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)])
AC_MSG_RESULT([ Features =${PCMK_FEATURES}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ Prefix = ${prefix}])
AC_MSG_RESULT([ Executables = ${sbindir}])
AC_MSG_RESULT([ Man pages = ${mandir}])
AC_MSG_RESULT([ Libraries = ${libdir}])
AC_MSG_RESULT([ Header files = ${includedir}])
AC_MSG_RESULT([ Arch-independent files = ${datadir}])
AC_MSG_RESULT([ State information = ${localstatedir}])
AC_MSG_RESULT([ System configuration = ${sysconfdir}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}])
AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ CFLAGS = ${CFLAGS}])
AC_MSG_RESULT([ CFLAGS_HARDENED_EXE = ${CFLAGS_HARDENED_EXE}])
AC_MSG_RESULT([ CFLAGS_HARDENED_LIB = ${CFLAGS_HARDENED_LIB}])
AC_MSG_RESULT([ LDFLAGS_HARDENED_EXE = ${LDFLAGS_HARDENED_EXE}])
AC_MSG_RESULT([ LDFLAGS_HARDENED_LIB = ${LDFLAGS_HARDENED_LIB}])
AC_MSG_RESULT([ Libraries = ${LIBS}])
AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}])
diff --git a/doc/Clusters_from_Scratch/en-US/Author_Group.xml b/doc/Clusters_from_Scratch/en-US/Author_Group.xml
index 49aae807b1..898da71261 100644
--- a/doc/Clusters_from_Scratch/en-US/Author_Group.xml
+++ b/doc/Clusters_from_Scratch/en-US/Author_Group.xml
@@ -1,21 +1,11 @@
<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE authorgroup PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
]>
<authorgroup>
<author>
<firstname>Andrew</firstname><surname>Beekhof</surname>
<affiliation><orgname>Red Hat</orgname></affiliation>
<contrib>Primary author</contrib>
<email>andrew@beekhof.net</email>
</author>
- <othercredit class="translator">
- <firstname>Raoul</firstname><surname>Scarazzini</surname>
- <contrib>Italian translation</contrib>
- <email>rasca@miamammausalinux.org</email>
- </othercredit>
- <othercredit class="translator">
- <firstname>Dan</firstname><surname>Frîncu</surname>
- <contrib>Romanian translation</contrib>
- <email>df.cluster@gmail.com</email>
- </othercredit>
</authorgroup>
diff --git a/doc/Makefile.am b/doc/Makefile.am
index ceeb9f00e4..781c4c7b97 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,290 +1,286 @@
#
# doc: Pacemaker code
#
# Copyright (C) 2008 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
include $(top_srcdir)/Makefile.common
helpdir = $(datadir)/$(PACKAGE)
ascii = crm_fencing.txt acls.txt
docbook = Clusters_from_Scratch \
Pacemaker_Development \
Pacemaker_Explained \
Pacemaker_Remote
doc_DATA = $(ascii) $(generated_docs)
# toplevel rsync destination for www targets (without trailing slash)
-RSYNC_DEST ?= root@www.clusterlabs.org:/var/www/html/
+RSYNC_DEST ?= root@www.clusterlabs.org:/var/www/html
# recursive, preserve symlinks/permissions/times, verbose, compress,
# don't cross filesystems, sparse, show progress
RSYNC_OPTS = -rlptvzxS --progress
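# Note (illustrative only): RSYNC_DEST uses ?=, so the upload destination can
# be overridden per invocation, e.g.
#   make www RSYNC_DEST=user@example.org:/srv/www/html
# (the destination above is a placeholder, not a real upload target)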
publican_docs =
generated_docs =
generated_mans =
-ASCIIDOC_CLI_TYPE := pcs
+# What formats to build: pdf,html,html-single,html-desktop,epub
DOCBOOK_FORMATS := html-desktop
+
+# What languages to build
DOCBOOK_LANGS := en-US
+
+# What languages to build for uploading to website
+# (currently only en-US because translations aren't up-to-date)
+UPLOAD_LANGS = en-US
+
DOTs = $(wildcard */en-US/images/*.dot)
SVG = $(wildcard */en-US/images/pcmk-*.svg) $(DOTs:%.dot=%.svg)
generated_PNGS = $(SVG:%.svg=%-small.png) $(SVG:%.svg=%.png) $(SVG:%.svg=%-large.png) \
Pacemaker_Explained/en-US/images/Policy-Engine-big.png \
Pacemaker_Explained/en-US/images/Policy-Engine-small.png
PNGS = $(generated_PNGS) \
Pacemaker_Remote/en-US/images/pcmk-ha-cluster-stack.png \
Pacemaker_Remote/en-US/images/pcmk-ha-remote-stack.png
BRAND_PNGS = publican-clusterlabs/en-US/images/title_logo.png \
publican-clusterlabs/en-US/images/image_left.png \
publican-clusterlabs/en-US/images/image_right.png \
publican-clusterlabs/en-US/images/h1-bg.png
graphics: $(PNGS)
%.png: %.svg
$(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=90 -C --export-png=$@
%-small.png: %.svg
$(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=45 -C --export-png=$@
%-large.png: %.svg
$(AM_V_IMG)$(INKSCAPE) --file=$< --export-dpi=180 -C --export-png=$@
if BUILD_ASCIIDOC
generated_docs += $(ascii:%.txt=%.html)
if BUILD_DOCBOOK
publican_docs += $(docbook)
endif
endif
EXTRA_DIST = $(docbook:%=%.xml)
%.html: %.txt
$(AM_V_ASCII)$(ASCIIDOC) --unsafe --backend=xhtml11 $<
# publican-clusterlabs/xsl/{html,html-single,pdf}.xsl refer to URIs
# requiring Internet access, so we shadow them with an XML catalog-based
# redirect to the local files shipped with the Publican installation;
# this is what newer Publican normally does with the system-wide catalog
# upon its installation, but we provide compatibility for older
# or badly installed instances (by adding the created file to
# XML_CATALOG_FILES for the libxml2 backing Publican, as a fallback);
# note that the nextCatalog arrangement is needed to work around
# https://rt.cpan.org/Public/Bug/Display.html?id=113781
publican-catalog-fallback:
@exec >$@-t \
&& echo '<catalog xmlns="urn:oasis:names:tc:entity:xmlns:xml:catalog">' \
&& echo '<rewriteURI uriStartString="https://fedorahosted.org/released/publican/xsl/docbook4/" rewritePrefix="file:///usr/share/publican/xsl/"/>' \
&& echo '</catalog>'
$(AM_V_GEN)mv $@-t $@
publican-catalog: publican-catalog-fallback
@exec >$@-t \
&& echo '<catalog xmlns="urn:oasis:names:tc:entity:xmlns:xml:catalog">' \
&& echo '<nextCatalog catalog="file:///etc/xml/catalog"/>' \
&& echo '<nextCatalog catalog="file://$(CURDIR)/$<"/>' \
&& echo '</catalog>'
$(AM_V_GEN)mv $@-t $@
SHARED_TXT=$(wildcard shared/en-US/*.txt)
SHARED_XML=$(SHARED_TXT:%.txt=%.xml)
CFS_SHARED_TXT=$(addprefix shared/en-US/,pacemaker-intro.txt)
CFS_SHARED_XML=$(CFS_SHARED_TXT:%.txt=%.xml)
CFS_TXT=$(wildcard Clusters_from_Scratch/en-US/*.txt)
CFS_XML=$(CFS_TXT:%.txt=%.xml)
$(CFS_XML): $(CFS_SHARED_XML)
PUBLICAN_INTREE_DEPS =
if PUBLICAN_INTREE_BRAND
PUBLICAN_INTREE_DEPS += publican-catalog
endif
# We have to hardcode the book name
# With '%' the test for 'newness' fails
Clusters_from_Scratch.build: $(PNGS) $(wildcard Clusters_from_Scratch/en-US/*.xml) $(CFS_XML) $(CFS_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
$(PCMK_V) @echo Building $(@:%.build=%) because of $?
rm -rf $(@:%.build=%)/publish/*
if PUBLICAN_INTREE_BRAND
$(AM_V_PUB)cd $(@:%.build=%) \
&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
$(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \
$(PCMK_quiet)
else
$(AM_V_PUB)cd $(@:%.build=%) \
&& RPM_BUILD_DIR="" \
$(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \
$(PCMK_quiet)
endif
rm -rf $(@:%.build=%)/tmp
touch $@
PD_TXT=$(wildcard Pacemaker_Development/en-US/*.txt)
PD_XML=$(PD_TXT:%.txt=%.xml)
# We have to hardcode the book name
# With '%' the test for 'newness' fails
Pacemaker_Development.build: $(wildcard Pacemaker_Development/en-US/*.xml) $(PD_XML) $(PUBLICAN_INTREE_DEPS)
$(PCMK_V) @echo Building $(@:%.build=%) because of $?
rm -rf $(@:%.build=%)/publish/*
if PUBLICAN_INTREE_BRAND
$(AM_V_PUB)cd $(@:%.build=%) \
&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
$(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \
$(PCMK_quiet)
else
$(AM_V_PUB)cd $(@:%.build=%) \
&& RPM_BUILD_DIR="" \
$(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \
$(PCMK_quiet)
endif
rm -rf $(@:%.build=%)/tmp
touch $@
PE_SHARED_TXT=$(addprefix shared/en-US/,pacemaker-intro.txt)
PE_SHARED_XML=$(PE_SHARED_TXT:%.txt=%.xml)
PE_TXT=$(wildcard Pacemaker_Explained/en-US/*.txt)
PE_XML=$(PE_TXT:%.txt=%.xml)
$(PE_XML): $(PE_SHARED_XML)
# We have to hardcode the book name
# With '%' the test for 'newness' fails
Pacemaker_Explained.build: $(PNGS) $(wildcard Pacemaker_Explained/en-US/*.xml) $(PE_XML) $(PE_SHARED_XML) $(PUBLICAN_INTREE_DEPS)
$(PCMK_V) @echo Building $(@:%.build=%) because of $?
rm -rf $(@:%.build=%)/publish/*
if PUBLICAN_INTREE_BRAND
$(AM_V_PUB)cd $(@:%.build=%) \
&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
$(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \
$(PCMK_quiet)
else
$(AM_V_PUB)cd $(@:%.build=%) \
&& RPM_BUILD_DIR="" \
$(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \
$(PCMK_quiet)
endif
rm -rf $(@:%.build=%)/tmp
touch $@
PR_TXT=$(wildcard Pacemaker_Remote/en-US/*.txt)
PR_XML=$(PR_TXT:%.txt=%.xml)
# We have to hardcode the book name
# With '%' the test for 'newness' fails
Pacemaker_Remote.build: $(PNGS) $(wildcard Pacemaker_Remote/en-US/*.xml) $(PR_XML) $(PUBLICAN_INTREE_DEPS)
$(PCMK_V) @echo Building $(@:%.build=%) because of $?
rm -rf $(@:%.build=%)/publish/*
if PUBLICAN_INTREE_BRAND
$(AM_V_PUB)cd $(@:%.build=%) \
&& RPM_BUILD_DIR="" XML_CATALOG_FILES="$(CURDIR)/publican-catalog" \
$(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) --brand_dir=../publican-clusterlabs \
$(PCMK_quiet)
else
$(AM_V_PUB)cd $(@:%.build=%) \
&& RPM_BUILD_DIR="" \
$(PUBLICAN) build --publish --langs=$(DOCBOOK_LANGS) --formats=$(DOCBOOK_FORMATS) \
$(PCMK_quiet)
endif
rm -rf $(@:%.build=%)/tmp
touch $@
# Update the translation template
pot:
for book in $(docbook); do \
echo "Updating translation templates in: $$book"; \
( cd $$book && RPM_BUILD_DIR="" $(PUBLICAN) update_pot ); \
done
# Update the actual translations
po: pot
for book in $(docbook); do \
echo "Updating translations in: $$book"; \
( cd $$book && RPM_BUILD_DIR="" $(PUBLICAN) update_po --langs=all );\
done
if BUILD_DOCBOOK
docbook_build = $(docbook:%=%.build)
all-local: $(docbook_build) */publican.cfg
install-data-local: all-local
for book in $(docbook); do \
filelist=`find $$book/publish/* -print`; \
for f in $$filelist; do \
p=`echo $$f | sed s:publish/:: | sed s:Pacemaker/::`; \
if [ -d $$f ]; then \
$(INSTALL) -d -m 775 $(DESTDIR)$(docdir)/$$p; \
else \
$(INSTALL) -m 644 $$f $(DESTDIR)$(docdir)/$$p; \
fi \
done; \
done
endif
brand: $(BRAND_PNGS) $(wildcard publican-clusterlabs/en-US/*.xml)
cd publican-clusterlabs && publican build --formats=xml --langs=all --publish
echo "Installing..."
cd publican-clusterlabs && sudo publican install_brand --path=$(datadir)/publican/Common_Content
# find publican-clusterlabs -name "*.noarch.rpm" -exec rm -f \{\} \;
# cd publican-clusterlabs && $(PUBLICAN) package --binary
# find publican-clusterlabs -name "*.noarch.rpm" -exec sudo rpm -Uvh --force \{\} \;
pdf:
- make DOCBOOK_FORMATS="pdf" ASCIIDOC_CLI_TYPE=$(ASCIIDOC_CLI_TYPE) all-local
+ make DOCBOOK_FORMATS="pdf" all-local
www: clean-local $(generated_docs) $(ascii)
- make www-cli
- rsync $(RSYNC_OPTS) $(generated_docs) $(ascii) $(asciiman) "$(RSYNC_DEST)/doc/"
-
-www-pcs: www-cli
-
-www-cli:
- for book in $(docbook); do \
- sed -i.sed 's@brand:.*@brand: clusterlabs@' $$book/publican.cfg; \
- sed -i.sed 's@version:.*@version: $(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE)@' $$book/publican.cfg; \
+ for book in $(docbook); do \
+ sed -i.sed 's@^brand:.*@brand: clusterlabs@' $$book/publican.cfg; \
done
- make DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="all" ASCIIDOC_CLI_TYPE=$(ASCIIDOC_CLI_TYPE) all-local
- echo Uploading current $(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE) documentation set to clusterlabs.org
+ make DOCBOOK_FORMATS="pdf,html,html-single,epub" DOCBOOK_LANGS="$(UPLOAD_LANGS)" all-local
+ echo Uploading current $(PACKAGE_SERIES) documentation set to clusterlabs.org
if BUILD_DOCBOOK
for book in $(docbook); do \
echo Uploading $$book...; \
- echo "Generated on `date` from version: $(shell git log --pretty="format:%h %d" -n 1)" >> $$book/publish/build-$(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE).txt; \
- for lang in `ls -1 $$book/publish | grep [a-z][a-z]-[A-Z][A-Z]`; do \
- mv $$book/publish/$$lang/Pacemaker/$(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE)/epub/$$book/Pacemaker-1.1{-$(ASCIIDOC_CLI_TYPE),}-$$book-$$lang.epub; \
- mv $$book/publish/$$lang/Pacemaker/$(PACKAGE_SERIES)-$(ASCIIDOC_CLI_TYPE)/pdf/$$book/Pacemaker-1.1{-$(ASCIIDOC_CLI_TYPE),}-$$book-$$lang.pdf; \
- done; \
- rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/doc/"; \
- sed -i.sed 's@version:.*@version: $(PACKAGE_SERIES)@' $$book/publican.cfg; \
+ echo "Generated on `date` from version: $(shell git log --pretty="format:%h %d" -n 1)" >> $$book/publish/build-$(PACKAGE_SERIES).txt; \
+ rsync $(RSYNC_OPTS) $$book/publish/* "$(RSYNC_DEST)/$(PACKAGE)/doc/"; \
done
endif
+ rsync $(RSYNC_OPTS) $(generated_docs) $(ascii) "$(RSYNC_DEST)/$(PACKAGE)/doc/"
clean-local:
-rm -rf $(generated_docs) $(generated_mans) $(docbook_build) $(generated_PNGS)
-rm -rf $(SHARED_XML) $(CFS_XML) $(PE_XML) $(PR_XML)
-rm -rf publican-catalog-fallback publican-catalog
for book in $(docbook); do rm -rf $$book/tmp $$book/publish; done
diff --git a/doc/Pacemaker_Explained/en-US/Author_Group.xml b/doc/Pacemaker_Explained/en-US/Author_Group.xml
index f787962e07..7f3c297cee 100644
--- a/doc/Pacemaker_Explained/en-US/Author_Group.xml
+++ b/doc/Pacemaker_Explained/en-US/Author_Group.xml
@@ -1,58 +1,53 @@
<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE authorgroup PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
]>
<authorgroup>
<author>
<firstname>Andrew</firstname><surname>Beekhof</surname>
<affiliation><orgname>Red Hat</orgname></affiliation>
<contrib>Primary author</contrib>
<email>andrew@beekhof.net</email>
</author>
- <othercredit class="translator">
- <firstname>Dan</firstname><surname>Frîncu</surname>
- <contrib>Romanian translation</contrib>
- <email>df.cluster@gmail.com</email>
- </othercredit>
<othercredit>
<firstname>Philipp</firstname><surname>Marek</surname>
<affiliation><orgname>LINBit</orgname></affiliation>
<contrib>Style and formatting updates. Indexing.</contrib>
<email>philipp.marek@linbit.com</email>
</othercredit>
<othercredit>
<firstname>Tanja</firstname><surname>Roth</surname>
<affiliation><orgname>SUSE</orgname></affiliation>
<contrib>Utilization chapter</contrib>
<contrib>Resource Templates chapter</contrib>
<contrib>Multi-Site Clusters chapter</contrib>
<email>taroth@suse.com</email>
</othercredit>
<othercredit>
<firstname>Lars</firstname><surname>Marowsky-Bree</surname>
<affiliation><orgname>SUSE</orgname></affiliation>
<contrib>Multi-Site Clusters chapter</contrib>
<email>lmb@suse.com</email>
</othercredit>
<othercredit>
<firstname>Yan</firstname><surname>Gao</surname>
<affiliation><orgname>SUSE</orgname></affiliation>
<contrib>Utilization chapter</contrib>
<contrib>Resource Templates chapter</contrib>
<contrib>Multi-Site Clusters chapter</contrib>
<email>ygao@suse.com</email>
</othercredit>
<othercredit>
<firstname>Thomas</firstname><surname>Schraitle</surname>
<affiliation><orgname>SUSE</orgname></affiliation>
<contrib>Utilization chapter</contrib>
<contrib>Resource Templates chapter</contrib>
<contrib>Multi-Site Clusters chapter</contrib>
<email>toms@suse.com</email>
</othercredit>
<othercredit>
<firstname>Dejan</firstname><surname>Muhamedagic</surname>
<affiliation><orgname>SUSE</orgname></affiliation>
<contrib>Resource Templates chapter</contrib>
<email>dmuhamedagic@suse.com</email>
</othercredit>
</authorgroup>
diff --git a/doc/Pacemaker_Explained/en-US/Ch-Intro.txt b/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
index e61065115a..dad06350b7 100644
--- a/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
+++ b/doc/Pacemaker_Explained/en-US/Ch-Intro.txt
@@ -1,23 +1,25 @@
= Read-Me-First =
== The Scope of this Document ==
The purpose of this document is to definitively explain the concepts
used to configure Pacemaker. To achieve this, it will focus
exclusively on the XML syntax used to configure the CIB.
For those who are allergic to XML, several unified shells
and GUIs for Pacemaker exist. However, these tools will not be covered at all
in this document
footnote:[I hope, however, that the concepts explained here make the functionality of these tools more easily understood.]
, precisely because they hide the XML.
Additionally, this document is NOT a step-by-step how-to guide for
configuring a specific clustering scenario.
Although such guides exist,
-footnote:[For example, see the http://www.clusterlabs.org/doc/[Clusters from Scratch] guide.]
+footnote:[
+For example, see https://www.clusterlabs.org/pacemaker/doc/[Clusters from Scratch]
+]
the purpose of this document is to provide an understanding of the building
blocks that can be used to construct any type of Pacemaker cluster.
include::../../shared/en-US/pacemaker-intro.txt[]
diff --git a/doc/Pacemaker_Explained/en-US/NOTES b/doc/Pacemaker_Explained/en-US/NOTES
deleted file mode 100644
index d6c6f2f3a7..0000000000
--- a/doc/Pacemaker_Explained/en-US/NOTES
+++ /dev/null
@@ -1,16 +0,0 @@
-why sometimes <example> and sometimes <figure>? examples have title at top, figures have title at bottom
-
-Example 2.8 (and others) XML line too long, line broken
-
-some <command> are in <para>, some in <programlisting> ... I'd like the latter more, or perhaps in a <screen>.
-Indentation makes whitespace at start of lines ... remove?
-
-tables 3.1 and 3.2 incomplete (crm-feature-set, ...)
-
-Ch 7 missing?
-
-Remove Ex9.9?
-
-Ap-Debug.xml not used?
-
-<indexterm> alias for primary?
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 86d64a9a01..4dac1f8feb 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,824 +1,826 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <unpack.h>
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
resource_object_functions_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
native_parameter,
native_print,
native_active,
native_resource_state,
native_location,
native_free
},
{
group_unpack,
native_find_rsc,
native_parameter,
group_print,
group_active,
group_resource_state,
native_location,
group_free
},
{
clone_unpack,
native_find_rsc,
native_parameter,
clone_print,
clone_active,
clone_resource_state,
native_location,
clone_free
},
{
master_unpack,
native_find_rsc,
native_parameter,
clone_print,
clone_active,
clone_resource_state,
native_location,
clone_free
},
{
container_unpack,
native_find_rsc,
native_parameter,
container_print,
container_active,
container_resource_state,
native_location,
container_free
}
};
enum pe_obj_types
get_resource_type(const char *name)
{
if (safe_str_eq(name, XML_CIB_TAG_RESOURCE)) {
return pe_native;
} else if (safe_str_eq(name, XML_CIB_TAG_GROUP)) {
return pe_group;
} else if (safe_str_eq(name, XML_CIB_TAG_INCARNATION)) {
return pe_clone;
} else if (safe_str_eq(name, XML_CIB_TAG_MASTER)) {
return pe_master;
} else if (safe_str_eq(name, XML_CIB_TAG_CONTAINER)) {
return pe_container;
}
return pe_unknown;
}
const char *
get_resource_typename(enum pe_obj_types type)
{
switch (type) {
case pe_native:
return XML_CIB_TAG_RESOURCE;
case pe_group:
return XML_CIB_TAG_GROUP;
case pe_clone:
return XML_CIB_TAG_INCARNATION;
case pe_master:
return XML_CIB_TAG_MASTER;
case pe_container:
return XML_CIB_TAG_CONTAINER;
case pe_unknown:
return "unknown";
}
return "<unknown>";
}
static void
dup_attr(gpointer key, gpointer value, gpointer user_data)
{
add_hash_param(user_data, key, value);
}
void
get_meta_attributes(GHashTable * meta_hash, resource_t * rsc,
node_t * node, pe_working_set_t * data_set)
{
GHashTable *node_hash = NULL;
if (node) {
node_hash = node->details->attrs;
}
if (rsc->xml) {
xmlAttrPtr xIter = NULL;
for (xIter = rsc->xml->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(rsc->xml, prop_name);
add_hash_param(meta_hash, prop_name, prop_value);
}
}
unpack_instance_attributes(data_set->input, rsc->xml, XML_TAG_META_SETS, node_hash,
meta_hash, NULL, FALSE, data_set->now);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
}
/* and finally check the defaults */
unpack_instance_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_META_SETS,
node_hash, meta_hash, NULL, FALSE, data_set->now);
}
void
get_rsc_attributes(GHashTable * meta_hash, resource_t * rsc,
node_t * node, pe_working_set_t * data_set)
{
GHashTable *node_hash = NULL;
if (node) {
node_hash = node->details->attrs;
}
unpack_instance_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS, node_hash,
meta_hash, NULL, FALSE, data_set->now);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
} else {
/* and finally check the defaults */
unpack_instance_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_ATTR_SETS,
node_hash, meta_hash, NULL, FALSE, data_set->now);
}
}
#if ENABLE_VERSIONED_ATTRS
void
pe_get_versioned_attributes(xmlNode * meta_hash, resource_t * rsc,
node_t * node, pe_working_set_t * data_set)
{
GHashTable *node_hash = NULL;
if (node) {
node_hash = node->details->attrs;
}
pe_unpack_versioned_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS, node_hash,
meta_hash, data_set->now);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
pe_get_versioned_attributes(meta_hash, rsc->parent, node, data_set);
} else {
/* and finally check the defaults */
pe_unpack_versioned_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_ATTR_SETS,
node_hash, meta_hash, data_set->now);
}
}
#endif
static char *
template_op_key(xmlNode * op)
{
const char *name = crm_element_value(op, "name");
const char *role = crm_element_value(op, "role");
char *key = NULL;
if (role == NULL || crm_str_eq(role, RSC_ROLE_STARTED_S, TRUE)
|| crm_str_eq(role, RSC_ROLE_SLAVE_S, TRUE)) {
role = RSC_ROLE_UNKNOWN_S;
}
key = crm_concat(name, role, '-');
return key;
}
static gboolean
unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
xmlNode *new_xml = NULL;
xmlNode *child_xml = NULL;
xmlNode *rsc_ops = NULL;
xmlNode *template_ops = NULL;
const char *template_ref = NULL;
const char *clone = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for template unpacking");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have a id", crm_element_name(xml_obj));
return FALSE;
}
if (crm_str_eq(template_ref, id, TRUE)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
}
template = find_entity(cib_resources, XML_CIB_TAG_RSC_TEMPLATE, template_ref);
if (template == NULL) {
pe_err("No template named '%s'", template_ref);
return FALSE;
}
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
crm_xml_replace(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
}
template_ops = find_xml_node(new_xml, "operations", FALSE);
for (child_xml = __xml_first_child(xml_obj); child_xml != NULL;
child_xml = __xml_next_element(child_xml)) {
xmlNode *new_child = NULL;
new_child = add_node_copy(new_xml, child_xml);
if (crm_str_eq((const char *)new_child->name, "operations", TRUE)) {
rsc_ops = new_child;
}
}
if (template_ops && rsc_ops) {
xmlNode *op = NULL;
GHashTable *rsc_ops_hash =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, NULL);
for (op = __xml_first_child(rsc_ops); op != NULL; op = __xml_next_element(op)) {
char *key = template_op_key(op);
g_hash_table_insert(rsc_ops_hash, key, op);
}
for (op = __xml_first_child(template_ops); op != NULL; op = __xml_next_element(op)) {
char *key = template_op_key(op);
if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
add_node_copy(rsc_ops, op);
}
free(key);
}
if (rsc_ops_hash) {
g_hash_table_destroy(rsc_ops_hash);
}
free_xml(template_ops);
}
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
/* Disable multi-level templates for now */
/*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return FALSE;
} */
return TRUE;
}
static gboolean
add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
{
const char *template_ref = NULL;
const char *id = NULL;
if (xml_obj == NULL) {
pe_err("No resource object for processing resource list of template");
return FALSE;
}
template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
if (template_ref == NULL) {
return TRUE;
}
id = ID(xml_obj);
if (id == NULL) {
pe_err("'%s' object must have a id", crm_element_name(xml_obj));
return FALSE;
}
if (crm_str_eq(template_ref, id, TRUE)) {
pe_err("The resource object '%s' should not reference itself", id);
return FALSE;
}
if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
return TRUE;
}
gboolean
common_unpack(xmlNode * xml_obj, resource_t ** rsc,
resource_t * parent, pe_working_set_t * data_set)
{
bool isdefault = FALSE;
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
resource_t *top = NULL;
const char *value = NULL;
const char *rclass = NULL; /* Look for this after any templates have been expanded */
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
int container_remote_node = 0;
int baremetal_remote_node = 0;
bool has_versioned_params = FALSE;
crm_log_xml_trace(xml_obj, "Processing resource input...");
if (id == NULL) {
pe_err("Must specify id tag in <resource>");
return FALSE;
} else if (rsc == NULL) {
pe_err("Nowhere to unpack resource into");
return FALSE;
}
if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
return FALSE;
}
*rsc = calloc(1, sizeof(resource_t));
(*rsc)->cluster = data_set;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "Expanded resource...");
(*rsc)->xml = expanded_xml;
(*rsc)->orig_xml = xml_obj;
} else {
(*rsc)->xml = xml_obj;
(*rsc)->orig_xml = NULL;
}
/* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
rclass = crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS);
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
(*rsc)->ops_xml = expand_idref(ops, data_set->input);
(*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
if ((*rsc)->variant == pe_unknown) {
pe_err("Unknown resource type: %s", crm_element_name((*rsc)->xml));
free(*rsc);
return FALSE;
}
(*rsc)->parameters = crm_str_table_new();
#if ENABLE_VERSIONED_ATTRS
(*rsc)->versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
#endif
(*rsc)->meta = crm_str_table_new();
(*rsc)->allowed_nodes =
g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
(*rsc)->known_on = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
if (value) {
(*rsc)->id = crm_concat(id, value, ':');
add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
} else {
(*rsc)->id = strdup(id);
}
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
pe_rsc_trace((*rsc), "Unpacking resource...");
get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
get_rsc_attributes((*rsc)->parameters, *rsc, NULL, data_set);
#if ENABLE_VERSIONED_ATTRS
pe_get_versioned_attributes((*rsc)->versioned_parameters, *rsc, NULL, data_set);
#endif
(*rsc)->flags = 0;
set_bit((*rsc)->flags, pe_rsc_runnable);
set_bit((*rsc)->flags, pe_rsc_provisional);
if (is_not_set(data_set->flags, pe_flag_maintenance_mode)) {
set_bit((*rsc)->flags, pe_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
(*rsc)->role = RSC_ROLE_STOPPED;
(*rsc)->next_role = RSC_ROLE_UNKNOWN;
(*rsc)->recovery_type = recovery_stop_start;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
(*rsc)->priority = crm_parse_int(value, "0");
(*rsc)->effective_priority = (*rsc)->priority;
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
set_bit((*rsc)->flags, pe_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
(*rsc)->is_remote_node = TRUE;
if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
container_remote_node = 1;
} else {
baremetal_remote_node = 1;
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
#if ENABLE_VERSIONED_ATTRS
has_versioned_params = xml_has_children((*rsc)->versioned_parameters);
#endif
if (crm_is_true(value) && has_versioned_params) {
pe_rsc_trace((*rsc), "Migration is disabled for resources with versioned parameters");
} else if (crm_is_true(value)) {
set_bit((*rsc)->flags, pe_rsc_allow_migrate);
} else if ((value == NULL) && baremetal_remote_node && !has_versioned_params) {
/* by default, we want baremetal remote-nodes to be able
* to float around the cluster without having to stop all the
* resources within the remote-node before moving. Allowing
* migration support enables this feature. If this ever causes
* problems, migration support can be explicitly turned off with
* allow-migrate=false.
* We don't support migration for versioned resources, though. */
set_bit((*rsc)->flags, pe_rsc_allow_migrate);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && safe_str_neq("default", value)) {
gboolean bool_value = TRUE;
crm_str_to_boolean(value, &bool_value);
if (bool_value == FALSE) {
clear_bit((*rsc)->flags, pe_rsc_managed);
} else {
set_bit((*rsc)->flags, pe_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (value != NULL && safe_str_neq("default", value)) {
gboolean bool_value = FALSE;
crm_str_to_boolean(value, &bool_value);
if (bool_value == TRUE) {
clear_bit((*rsc)->flags, pe_rsc_managed);
set_bit((*rsc)->flags, pe_rsc_maintenance);
}
} else if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
clear_bit((*rsc)->flags, pe_rsc_managed);
set_bit((*rsc)->flags, pe_rsc_maintenance);
}
pe_rsc_trace((*rsc), "Options for %s", (*rsc)->id);
top = uber_parent(*rsc);
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value) || pe_rsc_is_clone(top) == FALSE) {
set_bit((*rsc)->flags, pe_rsc_unique);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
if (safe_str_eq(value, "restart")) {
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "\tDependency restart handling: restart");
+ pe_warn_once(pe_wo_restart_type,
+ "Support for restart-type is deprecated and will be removed in a future release");
} else {
(*rsc)->restart_type = pe_restart_ignore;
pe_rsc_trace((*rsc), "\tDependency restart handling: ignore");
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (safe_str_eq(value, "stop_only")) {
(*rsc)->recovery_type = recovery_stop_only;
pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop only");
} else if (safe_str_eq(value, "block")) {
(*rsc)->recovery_type = recovery_block;
pe_rsc_trace((*rsc), "\tMultiple running resource recovery: block");
} else {
(*rsc)->recovery_type = recovery_stop_start;
pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop/start");
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
if (value != NULL && safe_str_neq("default", value)) {
(*rsc)->stickiness = char2score(value);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
if (value != NULL && safe_str_neq("default", value)) {
(*rsc)->migration_threshold = char2score(value);
}
if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
set_bit(data_set->flags, pe_flag_have_stonith_resource);
set_bit((*rsc)->flags, pe_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
handle_requires_pref:
if (safe_str_eq(value, "nothing")) {
} else if (safe_str_eq(value, "quorum")) {
set_bit((*rsc)->flags, pe_rsc_needs_quorum);
} else if (safe_str_eq(value, "unfencing")) {
if (is_set((*rsc)->flags, pe_rsc_fence_device)) {
crm_config_warn("%s is a fencing device but requires (un)fencing", (*rsc)->id);
value = "quorum";
isdefault = TRUE;
goto handle_requires_pref;
} else if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
crm_config_warn("%s requires (un)fencing but fencing is disabled", (*rsc)->id);
value = "quorum";
isdefault = TRUE;
goto handle_requires_pref;
} else {
set_bit((*rsc)->flags, pe_rsc_needs_fencing);
set_bit((*rsc)->flags, pe_rsc_needs_unfencing);
}
} else if (safe_str_eq(value, "fencing")) {
set_bit((*rsc)->flags, pe_rsc_needs_fencing);
if (is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
crm_config_warn("%s requires fencing but fencing is disabled", (*rsc)->id);
}
} else {
if (value) {
crm_config_err("Invalid value for %s->requires: %s%s",
(*rsc)->id, value,
is_set(data_set->flags, pe_flag_stonith_enabled) ? "" : " (stonith-enabled=false)");
}
isdefault = TRUE;
if(is_set((*rsc)->flags, pe_rsc_fence_device)) {
value = "quorum";
} else if (is_set(data_set->flags, pe_flag_enable_unfencing)) {
value = "unfencing";
} else if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
value = "fencing";
} else if (data_set->no_quorum_policy == no_quorum_ignore) {
value = "nothing";
} else {
value = "quorum";
}
goto handle_requires_pref;
}
pe_rsc_trace((*rsc), "\tRequired to start: %s%s", value, isdefault?" (default)":"");
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
if (value != NULL) {
/* call crm_get_msec() and convert back to seconds */
(*rsc)->failure_timeout = (crm_get_msec(value) / 1000);
}
if (baremetal_remote_node) {
value = g_hash_table_lookup((*rsc)->parameters, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
if (value) {
/* reconnect delay works by setting failure_timeout and preventing the
* connection from starting until the failure is cleared. */
(*rsc)->remote_reconnect_interval = (crm_get_msec(value) / 1000);
/* we want to override any default failure_timeout in use when remote
* reconnect_interval is in use. */
(*rsc)->failure_timeout = (*rsc)->remote_reconnect_interval;
}
}
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "\tDesired next state: %s",
(*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
return FALSE;
}
if (is_set(data_set->flags, pe_flag_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
} else if (container_remote_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
}
pe_rsc_trace((*rsc), "\tAction notification: %s",
is_set((*rsc)->flags, pe_rsc_notify) ? "required" : "not required");
(*rsc)->utilization = crm_str_table_new();
unpack_instance_attributes(data_set->input, (*rsc)->xml, XML_TAG_UTILIZATION, NULL,
(*rsc)->utilization, NULL, FALSE, data_set->now);
/* data_set->resources = g_list_append(data_set->resources, (*rsc)); */
if (expanded_xml) {
if (add_template_rsc(xml_obj, data_set) == FALSE) {
return FALSE;
}
}
return TRUE;
}
void
common_update_score(resource_t * rsc, const char *id, int score)
{
node_t *node = NULL;
node = pe_hash_table_lookup(rsc->allowed_nodes, id);
if (node != NULL) {
pe_rsc_trace(rsc, "Updating score for %s on %s: %d + %d", rsc->id, id, node->weight, score);
node->weight = merge_weights(node->weight, score);
}
if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
common_update_score(child_rsc, id, score);
}
}
}
gboolean
is_parent(resource_t *child, resource_t *rsc)
{
resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
}
while (parent->parent != NULL) {
if (parent->parent == rsc) {
return TRUE;
}
parent = parent->parent;
}
return FALSE;
}
resource_t *
uber_parent(resource_t * rsc)
{
resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL && parent->parent->variant != pe_container) {
parent = parent->parent;
}
return parent;
}
void
common_free(resource_t * rsc)
{
if (rsc == NULL) {
return;
}
pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
g_list_free(rsc->rsc_cons);
g_list_free(rsc->rsc_cons_lhs);
g_list_free(rsc->rsc_tickets);
g_list_free(rsc->dangling_migrations);
if (rsc->parameters != NULL) {
g_hash_table_destroy(rsc->parameters);
}
#if ENABLE_VERSIONED_ATTRS
if (rsc->versioned_parameters != NULL) {
free_xml(rsc->versioned_parameters);
}
#endif
if (rsc->meta != NULL) {
g_hash_table_destroy(rsc->meta);
}
if (rsc->utilization != NULL) {
g_hash_table_destroy(rsc->utilization);
}
if (rsc->parent == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
rsc->orig_xml = NULL;
/* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
} else if (rsc->orig_xml) {
free_xml(rsc->xml);
rsc->xml = NULL;
}
if (rsc->running_on) {
g_list_free(rsc->running_on);
rsc->running_on = NULL;
}
if (rsc->known_on) {
g_hash_table_destroy(rsc->known_on);
rsc->known_on = NULL;
}
if (rsc->actions) {
g_list_free(rsc->actions);
rsc->actions = NULL;
}
if (rsc->allowed_nodes) {
g_hash_table_destroy(rsc->allowed_nodes);
rsc->allowed_nodes = NULL;
}
g_list_free(rsc->fillers);
g_list_free(rsc->rsc_location);
pe_rsc_trace(rsc, "Resource freed");
free(rsc->id);
free(rsc->clone_name);
free(rsc->allocated_to);
free(rsc->variant_opaque);
free(rsc->pending_task);
free(rsc);
}
diff --git a/lib/pengine/unpack.h b/lib/pengine/unpack.h
index 2d8eba7715..71c4530ec7 100644
--- a/lib/pengine/unpack.h
+++ b/lib/pengine/unpack.h
@@ -1,119 +1,117 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef PENGINE_UNPACK__H
# define PENGINE_UNPACK__H
extern gboolean unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set);
extern gboolean unpack_resources(xmlNode * xml_resources, pe_working_set_t * data_set);
extern gboolean unpack_config(xmlNode * config, pe_working_set_t * data_set);
extern gboolean unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set);
extern gboolean unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set);
extern gboolean unpack_status(xmlNode * status, pe_working_set_t * data_set);
extern gboolean unpack_remote_status(xmlNode * status, pe_working_set_t * data_set);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
extern gboolean unpack_lrm_resources(node_t * node, xmlNode * lrm_state,
pe_working_set_t * data_set);
extern gboolean add_node_attrs(xmlNode * attrs, node_t * node, gboolean overwrite,
pe_working_set_t * data_set);
extern gboolean determine_online_status(xmlNode * node_state, node_t * this_node,
pe_working_set_t * data_set);
/*
* The man pages for both curses and ncurses suggest inclusion of "curses.h".
* We believe the following to be acceptable and portable.
*/
# if defined(HAVE_LIBNCURSES) || defined(HAVE_LIBCURSES)
# if defined(HAVE_NCURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
# include <ncurses.h>
# define CURSES_ENABLED 1
# elif defined(HAVE_NCURSES_NCURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
# include <ncurses/ncurses.h>
# define CURSES_ENABLED 1
# elif defined(HAVE_CURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
# include <curses.h>
# define CURSES_ENABLED 1
# elif defined(HAVE_CURSES_CURSES_H) && !defined(HAVE_INCOMPATIBLE_PRINTW)
# include <curses/curses.h>
# define CURSES_ENABLED 1
# else
# define CURSES_ENABLED 0
# endif
# else
# define CURSES_ENABLED 0
# endif
# if CURSES_ENABLED
# define status_printw(fmt, args...) printw(fmt, ##args)
# else
# define status_printw(fmt, args...) \
crm_err("printw support requires ncurses to be available during configure"); \
do_crm_log(LOG_WARNING, fmt, ##args);
# endif
# define status_print(fmt, args...) \
if(options & pe_print_html) { \
FILE *stream = print_data; \
fprintf(stream, fmt, ##args); \
} else if(options & pe_print_ncurses) { \
status_printw(fmt, ##args); \
} else if(options & pe_print_printf) { \
FILE *stream = print_data; \
fprintf(stream, fmt, ##args); \
} else if(options & pe_print_xml) { \
FILE *stream = print_data; \
fprintf(stream, fmt, ##args); \
} else if(options & pe_print_log) { \
int log_level = *(int*)print_data; \
do_crm_log(log_level, fmt, ##args); \
}
// Some warnings we don't want to print every transition
enum pe_warn_once_e {
pe_wo_blind = 0x0001,
- pe_wo_requires = 0x0010,
- pe_wo_rsc_failstick = 0x0200,
- pe_wo_default_rscfs = 0x0400,
- pe_wo_legacy_notifs = 0x0800,
+ pe_wo_restart_type = 0x0002,
+ pe_wo_role_after = 0x0004,
};
extern uint32_t pe_wo;
#define pe_warn_once(pe_wo_bit, fmt...) do { \
if (is_not_set(pe_wo, pe_wo_bit)) { \
if (pe_wo_bit == pe_wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
set_bit(pe_wo, pe_wo_bit); \
} \
} while (0);
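/* Usage sketch (illustrative, not part of this change): key a deprecation
 * warning to one of the pe_wo_* bits so it is logged at most once per run:
 *
 *   pe_warn_once(pe_wo_role_after,
 *                "Support for role_after_failure is deprecated"
 *                " and will be removed in a future release");
 *
 * The macro sets the bit in pe_wo after the first call, so repeat calls with
 * the same bit stay silent.
 */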
#endif
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index 5fdc9c1bed..e67cf76858 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,2339 +1,2343 @@
/*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <crm/crm.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/util.h>
#include <glib.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/internal.h>
#include <unpack.h>
pe_working_set_t *pe_dataset = NULL;
extern xmlNode *get_object_root(const char *object_type, xmlNode * the_root);
void print_str_str(gpointer key, gpointer value, gpointer user_data);
gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
void unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container,
pe_working_set_t * data_set);
static xmlNode *find_rsc_op_entry_helper(resource_t * rsc, const char *key,
gboolean include_disabled);
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *
pe_rsc_action_details(pe_action_t *action)
{
pe_rsc_action_details_t *details;
CRM_CHECK(action != NULL, return NULL);
if (action->action_details == NULL) {
action->action_details = calloc(1, sizeof(pe_rsc_action_details_t));
CRM_CHECK(action->action_details != NULL, return NULL);
}
details = (pe_rsc_action_details_t *) action->action_details;
if (details->versioned_parameters == NULL) {
details->versioned_parameters = create_xml_node(NULL,
XML_TAG_OP_VER_ATTRS);
}
if (details->versioned_meta == NULL) {
details->versioned_meta = create_xml_node(NULL, XML_TAG_OP_VER_META);
}
return details;
}
static void
pe_free_rsc_action_details(pe_action_t *action)
{
pe_rsc_action_details_t *details;
if ((action == NULL) || (action->action_details == NULL)) {
return;
}
details = (pe_rsc_action_details_t *) action->action_details;
if (details->versioned_parameters) {
free_xml(details->versioned_parameters);
}
if (details->versioned_meta) {
free_xml(details->versioned_meta);
}
action->action_details = NULL;
}
#endif
/*!
* \internal
* \brief Check whether we can fence a particular node
*
* \param[in] data_set Working set for cluster
* \param[in] node Name of node to check
*
* \return TRUE if node can be fenced, FALSE otherwise
*
* \note This function should only be called for cluster nodes and baremetal
* remote nodes; guest nodes are fenced by stopping their container
* resource, so fence execution requirements do not apply to them.
*/
bool pe_can_fence(pe_working_set_t * data_set, node_t *node)
{
if(is_not_set(data_set->flags, pe_flag_stonith_enabled)) {
return FALSE; /* Turned off */
} else if (is_not_set(data_set->flags, pe_flag_have_stonith_resource)) {
return FALSE; /* No devices */
} else if (is_set(data_set->flags, pe_flag_have_quorum)) {
return TRUE;
} else if (data_set->no_quorum_policy == no_quorum_ignore) {
return TRUE;
} else if(node == NULL) {
return FALSE;
} else if(node->details->online) {
crm_notice("We can fence %s without quorum because they're in our membership", node->details->uname);
return TRUE;
}
crm_trace("Cannot fence %s", node->details->uname);
return FALSE;
}
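/* Caller sketch (hypothetical, for illustration only): gate a fencing request
 * on pe_can_fence() so fencing is only scheduled when it can actually execute:
 *
 *   if (node->details->unclean && pe_can_fence(data_set, node)) {
 *       pe_fence_op(node, NULL, FALSE, "node is unclean", data_set);
 *   }
 */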
node_t *
node_copy(const node_t *this_node)
{
node_t *new_node = NULL;
CRM_CHECK(this_node != NULL, return NULL);
new_node = calloc(1, sizeof(node_t));
CRM_ASSERT(new_node != NULL);
crm_trace("Copying %p (%s) to %p", this_node, this_node->details->uname, new_node);
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed;
new_node->details = this_node->details;
return new_node;
}
/* any node in list1 or list2 and not in the other gets a score of -INFINITY */
void
node_list_exclude(GHashTable * hash, GListPtr list, gboolean merge_scores)
{
GHashTable *result = hash;
node_t *other_node = NULL;
GListPtr gIter = list;
GHashTableIter iter;
node_t *node = NULL;
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
other_node = pe_find_node_id(list, node->details->id);
if (other_node == NULL) {
node->weight = -INFINITY;
} else if (merge_scores) {
node->weight = merge_weights(node->weight, other_node->weight);
}
}
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
other_node = pe_hash_table_lookup(result, node->details->id);
if (other_node == NULL) {
node_t *new_node = node_copy(node);
new_node->weight = -INFINITY;
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
}
}
GHashTable *
node_hash_from_list(GListPtr list)
{
GListPtr gIter = list;
GHashTable *result = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
node_t *n = node_copy(node);
g_hash_table_insert(result, (gpointer) n->details->id, n);
}
return result;
}
GListPtr
node_list_dup(GListPtr list1, gboolean reset, gboolean filter)
{
GListPtr result = NULL;
GListPtr gIter = list1;
for (; gIter != NULL; gIter = gIter->next) {
node_t *new_node = NULL;
node_t *this_node = (node_t *) gIter->data;
if (filter && this_node->weight < 0) {
continue;
}
new_node = node_copy(this_node);
if (reset) {
new_node->weight = 0;
}
if (new_node != NULL) {
result = g_list_prepend(result, new_node);
}
}
return result;
}
gint
sort_node_uname(gconstpointer a, gconstpointer b)
{
const node_t *node_a = a;
const node_t *node_b = b;
return strcmp(node_a->details->uname, node_b->details->uname);
}
void
dump_node_scores_worker(int level, const char *file, const char *function, int line,
resource_t * rsc, const char *comment, GHashTable * nodes)
{
GHashTable *hash = nodes;
GHashTableIter iter;
node_t *node = NULL;
if (rsc) {
hash = rsc->allowed_nodes;
}
if (rsc && is_set(rsc->flags, pe_rsc_orphan)) {
/* Don't show the allocation scores for orphans */
return;
}
if (level == 0) {
char score[128];
int len = sizeof(score);
/* For now we want this in sorted order to keep the regression tests happy */
GListPtr gIter = NULL;
GListPtr list = g_hash_table_get_values(hash);
list = g_list_sort(list, sort_node_uname);
gIter = list;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node = (node_t *) gIter->data;
/* This function is called a lot, so use a stack-allocated score buffer */
score2char_stack(node->weight, score, len);
if (rsc) {
printf("%s: %s allocation score on %s: %s\n",
comment, rsc->id, node->details->uname, score);
} else {
printf("%s: %s = %s\n", comment, node->details->uname, score);
}
}
g_list_free(list);
} else if (hash) {
char score[128];
int len = sizeof(score);
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
/* This function is called a lot, so use a stack-allocated score buffer */
score2char_stack(node->weight, score, len);
if (rsc) {
do_crm_log_alias(LOG_TRACE, file, function, line,
"%s: %s allocation score on %s: %s", comment, rsc->id,
node->details->uname, score);
} else {
do_crm_log_alias(LOG_TRACE, file, function, line + 1, "%s: %s = %s", comment,
node->details->uname, score);
}
}
}
if (rsc && rsc->children) {
GListPtr gIter = NULL;
gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child = (resource_t *) gIter->data;
dump_node_scores_worker(level, file, function, line, child, comment, nodes);
}
}
}
static void
append_dump_text(gpointer key, gpointer value, gpointer user_data)
{
char **dump_text = user_data;
int len = 0;
char *new_text = NULL;
len = strlen(*dump_text) + strlen(" ") + strlen(key) + strlen("=") + strlen(value) + 1;
new_text = calloc(1, len);
sprintf(new_text, "%s %s=%s", *dump_text, (char *)key, (char *)value);
free(*dump_text);
*dump_text = new_text;
}
void
dump_node_capacity(int level, const char *comment, node_t * node)
{
int len = 0;
char *dump_text = NULL;
len = strlen(comment) + strlen(": ") + strlen(node->details->uname) + strlen(" capacity:") + 1;
dump_text = calloc(1, len);
sprintf(dump_text, "%s: %s capacity:", comment, node->details->uname);
g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
if (level == 0) {
fprintf(stdout, "%s\n", dump_text);
} else {
crm_trace("%s", dump_text);
}
free(dump_text);
}
void
dump_rsc_utilization(int level, const char *comment, resource_t * rsc, node_t * node)
{
int len = 0;
char *dump_text = NULL;
len = strlen(comment) + strlen(": ") + strlen(rsc->id) + strlen(" utilization on ")
+ strlen(node->details->uname) + strlen(":") + 1;
dump_text = calloc(1, len);
sprintf(dump_text, "%s: %s utilization on %s:", comment, rsc->id, node->details->uname);
g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
if (level == 0) {
fprintf(stdout, "%s\n", dump_text);
} else {
crm_trace("%s", dump_text);
}
free(dump_text);
}
gint
sort_rsc_index(gconstpointer a, gconstpointer b)
{
const resource_t *resource1 = (const resource_t *)a;
const resource_t *resource2 = (const resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->sort_index > resource2->sort_index) {
return -1;
}
if (resource1->sort_index < resource2->sort_index) {
return 1;
}
return 0;
}
gint
sort_rsc_priority(gconstpointer a, gconstpointer b)
{
const resource_t *resource1 = (const resource_t *)a;
const resource_t *resource2 = (const resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
}
if (a == NULL) {
return 1;
}
if (b == NULL) {
return -1;
}
if (resource1->priority > resource2->priority) {
return -1;
}
if (resource1->priority < resource2->priority) {
return 1;
}
return 0;
}
action_t *
custom_action(resource_t * rsc, char *key, const char *task,
node_t * on_node, gboolean optional, gboolean save_action,
pe_working_set_t * data_set)
{
action_t *action = NULL;
GListPtr possible_matches = NULL;
CRM_CHECK(key != NULL, return NULL);
CRM_CHECK(task != NULL, free(key); return NULL);
if (save_action && rsc != NULL) {
possible_matches = find_actions(rsc->actions, key, on_node);
} else if(save_action) {
#if 0
action = g_hash_table_lookup(data_set->singletons, key);
#else
/* More expensive but takes 'node' into account */
possible_matches = find_actions(data_set->actions, key, on_node);
#endif
}
if(data_set->singletons == NULL) {
data_set->singletons = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, NULL);
}
if (possible_matches != NULL) {
if (g_list_length(possible_matches) > 1) {
pe_warn("Action %s for %s on %s exists %d times",
task, rsc ? rsc->id : "<NULL>",
on_node ? on_node->details->uname : "<NULL>", g_list_length(possible_matches));
}
action = g_list_nth_data(possible_matches, 0);
pe_rsc_trace(rsc, "Found existing action (%d) %s for %s on %s",
action->id, task, rsc ? rsc->id : "<NULL>",
on_node ? on_node->details->uname : "<NULL>");
g_list_free(possible_matches);
}
if (action == NULL) {
if (save_action) {
pe_rsc_trace(rsc, "Creating%s action %d: %s for %s on %s %d",
optional ? "" : " mandatory", data_set->action_id, key,
rsc ? rsc->id : "<NULL>", on_node ? on_node->details->uname : "<NULL>", optional);
}
action = calloc(1, sizeof(action_t));
if (save_action) {
action->id = data_set->action_id++;
} else {
action->id = 0;
}
action->rsc = rsc;
CRM_ASSERT(task != NULL);
action->task = strdup(task);
if (on_node) {
action->node = node_copy(on_node);
}
action->uuid = strdup(key);
pe_set_action_bit(action, pe_action_runnable);
if (optional) {
pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
pe_set_action_bit(action, pe_action_optional);
} else {
pe_clear_action_bit(action, pe_action_optional);
pe_rsc_trace(rsc, "Unset optional on %s", action->uuid);
}
/*
Implied by calloc()...
action->actions_before = NULL;
action->actions_after = NULL;
action->pseudo = FALSE;
action->dumped = FALSE;
action->processed = FALSE;
action->seen_count = 0;
*/
action->extra = crm_str_table_new();
action->meta = crm_str_table_new();
if (save_action) {
data_set->actions = g_list_prepend(data_set->actions, action);
if(rsc == NULL) {
g_hash_table_insert(data_set->singletons, action->uuid, action);
}
}
if (rsc != NULL) {
action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
unpack_operation(action, action->op_entry, rsc->container, data_set);
if (save_action) {
rsc->actions = g_list_prepend(rsc->actions, action);
}
}
if (save_action) {
pe_rsc_trace(rsc, "Action %d created", action->id);
}
}
if (optional == FALSE) {
pe_rsc_trace(rsc, "Unset optional on %s", action->uuid);
pe_clear_action_bit(action, pe_action_optional);
}
if (rsc != NULL) {
enum action_tasks a_task = text2task(action->task);
int warn_level = LOG_TRACE;
if (save_action) {
warn_level = LOG_WARNING;
}
if (is_set(action->flags, pe_action_have_node_attrs) == FALSE
&& action->node != NULL && action->op_entry != NULL) {
pe_set_action_bit(action, pe_action_have_node_attrs);
unpack_instance_attributes(data_set->input, action->op_entry, XML_TAG_ATTR_SETS,
action->node->details->attrs,
action->extra, NULL, FALSE, data_set->now);
}
if (is_set(action->flags, pe_action_pseudo)) {
/* leave untouched */
} else if (action->node == NULL) {
pe_rsc_trace(rsc, "Unset runnable on %s", action->uuid);
pe_clear_action_bit(action, pe_action_runnable);
} else if (is_not_set(rsc->flags, pe_rsc_managed)
&& g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL) == NULL) {
crm_debug("Action %s (unmanaged)", action->uuid);
pe_rsc_trace(rsc, "Set optional on %s", action->uuid);
pe_set_action_bit(action, pe_action_optional);
/* action->runnable = FALSE; */
} else if (action->node->details->online == FALSE
&& (!is_container_remote_node(action->node) || action->node->details->remote_requires_reset)) {
pe_clear_action_bit(action, pe_action_runnable);
do_crm_log(warn_level, "Action %s on %s is unrunnable (offline)",
action->uuid, action->node->details->uname);
if (is_set(action->rsc->flags, pe_rsc_managed)
&& save_action && a_task == stop_rsc
&& action->node->details->unclean == FALSE) {
pe_fence_node(data_set, action->node, "resource actions are unrunnable");
}
} else if (action->node->details->pending) {
pe_clear_action_bit(action, pe_action_runnable);
do_crm_log(warn_level, "Action %s on %s is unrunnable (pending)",
action->uuid, action->node->details->uname);
} else if (action->needs == rsc_req_nothing) {
pe_rsc_trace(rsc, "Action %s does not require anything", action->uuid);
pe_action_set_reason(action, NULL, TRUE);
pe_set_action_bit(action, pe_action_runnable);
#if 0
/*
* No point checking this
* - if we don't have quorum we can't stonith anyway
*/
} else if (action->needs == rsc_req_stonith) {
crm_trace("Action %s requires only stonith", action->uuid);
action->runnable = TRUE;
#endif
} else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
&& data_set->no_quorum_policy == no_quorum_stop) {
pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "no quorum", pe_action_runnable, TRUE);
crm_debug("%s\t%s (cancelled : quorum)", action->node->details->uname, action->uuid);
} else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE
&& data_set->no_quorum_policy == no_quorum_freeze) {
pe_rsc_trace(rsc, "Check resource is already active: %s %s %s %s", rsc->id, action->uuid, role2text(rsc->next_role), role2text(rsc->role));
if (rsc->fns->active(rsc, TRUE) == FALSE || rsc->next_role > rsc->role) {
pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, "quorum freeze", pe_action_runnable, TRUE);
pe_rsc_debug(rsc, "%s\t%s (cancelled : quorum freeze)",
action->node->details->uname, action->uuid);
}
} else if(is_not_set(action->flags, pe_action_runnable)) {
pe_rsc_trace(rsc, "Action %s is runnable", action->uuid);
//pe_action_set_reason(action, NULL, TRUE);
pe_set_action_bit(action, pe_action_runnable);
}
if (save_action) {
switch (a_task) {
case stop_rsc:
set_bit(rsc->flags, pe_rsc_stopping);
break;
case start_rsc:
clear_bit(rsc->flags, pe_rsc_starting);
if (is_set(action->flags, pe_action_runnable)) {
set_bit(rsc->flags, pe_rsc_starting);
}
break;
default:
break;
}
}
}
free(key);
return action;
}
static const char *
unpack_operation_on_fail(action_t * action)
{
const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
if (safe_str_eq(action->task, CRMD_ACTION_STOP) && safe_str_eq(value, "standby")) {
crm_config_err("on-fail=standby is not allowed for stop actions: %s", action->rsc->id);
return NULL;
} else if (safe_str_eq(action->task, CRMD_ACTION_DEMOTE) && !value) {
/* demote on_fail defaults to master monitor value if present */
xmlNode *operation = NULL;
const char *name = NULL;
const char *role = NULL;
const char *on_fail = NULL;
const char *interval = NULL;
const char *enabled = NULL;
CRM_CHECK(action->rsc != NULL, return NULL);
for (operation = __xml_first_child(action->rsc->ops_xml);
operation && !value; operation = __xml_next_element(operation)) {
if (!crm_str_eq((const char *)operation->name, "op", TRUE)) {
continue;
}
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
enabled = crm_element_value(operation, "enabled");
interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
if (!on_fail) {
continue;
} else if (enabled && !crm_is_true(enabled)) {
continue;
} else if (safe_str_neq(name, "monitor") || safe_str_neq(role, "Master")) {
continue;
} else if (crm_get_interval(interval) <= 0) {
continue;
}
value = on_fail;
}
}
return value;
}
static xmlNode *
find_min_interval_mon(resource_t * rsc, gboolean include_disabled)
{
int number = 0;
int min_interval = -1;
const char *name = NULL;
const char *value = NULL;
const char *interval = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
name = crm_element_value(operation, "name");
interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
value = crm_element_value(operation, "enabled");
if (!include_disabled && value && crm_is_true(value) == FALSE) {
continue;
}
if (safe_str_neq(name, RSC_STATUS)) {
continue;
}
number = crm_get_interval(interval);
if (number < 0) {
continue;
}
if (min_interval < 0 || number < min_interval) {
min_interval = number;
op = operation;
}
}
}
return op;
}
static int
unpack_start_delay(const char *value, GHashTable *meta)
{
int start_delay = 0;
if (value != NULL) {
start_delay = crm_get_msec(value);
if (start_delay < 0) {
start_delay = 0;
}
if (meta) {
g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY), crm_itoa(start_delay));
}
}
return start_delay;
}
static int
unpack_interval_origin(const char *value, GHashTable *meta, xmlNode *xml_obj,
unsigned long long interval, crm_time_t *now)
{
int start_delay = 0;
if (interval > 0 && value) {
crm_time_t *origin = crm_time_new(value);
if (origin && now) {
crm_time_t *delay = NULL;
int rc = crm_time_compare(origin, now);
long long delay_s = 0;
int interval_s = (interval / 1000);
crm_trace("Origin: %s, interval: %d", value, interval_s);
/* If 'origin' is in the future, find the most recent "multiple" that occurred in the past */
while(rc > 0) {
crm_time_add_seconds(origin, -interval_s);
rc = crm_time_compare(origin, now);
}
/* Now find the first "multiple" that occurs after 'now' */
while (rc < 0) {
crm_time_add_seconds(origin, interval_s);
rc = crm_time_compare(origin, now);
}
delay = crm_time_calculate_duration(origin, now);
crm_time_log(LOG_TRACE, "origin", origin,
crm_time_log_date | crm_time_log_timeofday |
crm_time_log_with_timezone);
crm_time_log(LOG_TRACE, "now", now,
crm_time_log_date | crm_time_log_timeofday |
crm_time_log_with_timezone);
crm_time_log(LOG_TRACE, "delay", delay, crm_time_log_duration);
delay_s = crm_time_get_seconds(delay);
CRM_CHECK(delay_s >= 0, delay_s = 0);
start_delay = delay_s * 1000;
if (xml_obj) {
crm_info("Calculated a start delay of %llds for %s", delay_s, ID(xml_obj));
}
if (meta) {
g_hash_table_replace(meta, strdup(XML_OP_ATTR_START_DELAY),
crm_itoa(start_delay));
}
crm_time_free(origin);
crm_time_free(delay);
} else if (!origin && xml_obj) {
crm_config_err("Operation %s contained an invalid " XML_OP_ATTR_ORIGIN ": %s",
ID(xml_obj), value);
}
}
return start_delay;
}
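/* Worked example (illustrative values): with interval-origin="02:00" and a
 * 24h interval, a run at 14:32 steps 'origin' forward in 24h increments until
 * it first passes 'now' (02:00 the next day); the remaining ~11h28m is stored
 * in milliseconds as the operation's start-delay.
 */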
static int
unpack_timeout(const char *value)
{
int timeout = 0;
if (value == NULL) {
value = CRM_DEFAULT_OP_TIMEOUT_S;
}
timeout = crm_get_msec(value);
if (timeout < 0) {
timeout = 0;
}
return timeout;
}
int
pe_get_configured_timeout(resource_t *rsc, const char *action, pe_working_set_t *data_set)
{
xmlNode *child = NULL;
const char *timeout = NULL;
int timeout_ms = 0;
for (child = first_named_child(rsc->ops_xml, XML_ATTR_OP);
child != NULL; child = crm_next_same_xml(child)) {
if (safe_str_eq(action, crm_element_value(child, XML_NVPAIR_ATTR_NAME))) {
timeout = crm_element_value(child, XML_ATTR_TIMEOUT);
break;
}
}
if (timeout == NULL && data_set->op_defaults) {
GHashTable *action_meta = crm_str_table_new();
unpack_instance_attributes(data_set->input, data_set->op_defaults, XML_TAG_META_SETS,
NULL, action_meta, NULL, FALSE, data_set->now);
timeout = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
if (timeout == NULL) {
timeout = CRM_DEFAULT_OP_TIMEOUT_S;
}
timeout_ms = crm_get_msec(timeout);
if (timeout_ms < 0) {
timeout_ms = 0;
}
return timeout_ms;
}
#if ENABLE_VERSIONED_ATTRS
static void
unpack_versioned_meta(xmlNode *versioned_meta, xmlNode *xml_obj, unsigned long long interval, crm_time_t *now)
{
xmlNode *attrs = NULL;
xmlNode *attr = NULL;
for (attrs = __xml_first_child(versioned_meta); attrs != NULL; attrs = __xml_next_element(attrs)) {
for (attr = __xml_first_child(attrs); attr != NULL; attr = __xml_next_element(attr)) {
const char *name = crm_element_value(attr, XML_NVPAIR_ATTR_NAME);
const char *value = crm_element_value(attr, XML_NVPAIR_ATTR_VALUE);
if (safe_str_eq(name, XML_OP_ATTR_START_DELAY)) {
int start_delay = unpack_start_delay(value, NULL);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
} else if (safe_str_eq(name, XML_OP_ATTR_ORIGIN)) {
int start_delay = unpack_interval_origin(value, NULL, xml_obj, interval, now);
crm_xml_add(attr, XML_NVPAIR_ATTR_NAME, XML_OP_ATTR_START_DELAY);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, start_delay);
} else if (safe_str_eq(name, XML_ATTR_TIMEOUT)) {
int timeout = unpack_timeout(value);
crm_xml_add_int(attr, XML_NVPAIR_ATTR_VALUE, timeout);
}
}
}
}
#endif
/*!
* \brief Unpack operation XML into an action structure
*
* Unpack an operation's meta-attributes (normalizing the interval, timeout,
* and start delay values as integer milliseconds), requirements, and
* failure policy.
*
* \param[in,out] action Action to unpack into
* \param[in] xml_obj Operation XML (or NULL if all defaults)
* \param[in] container Resource that contains affected resource, if any
* \param[in] data_set Cluster state
*/
void
unpack_operation(action_t * action, xmlNode * xml_obj, resource_t * container,
pe_working_set_t * data_set)
{
unsigned long long interval = 0;
int timeout = 0;
char *value_ms = NULL;
const char *value = NULL;
const char *field = NULL;
#if ENABLE_VERSIONED_ATTRS
pe_rsc_action_details_t *rsc_details = NULL;
#endif
CRM_CHECK(action->rsc != NULL, return);
// Probe timeouts default to minimum-interval monitor's
if ((xml_obj == NULL) && action &&
safe_str_eq(action->task, RSC_STATUS) && (interval == 0)) {
xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
if (min_interval_mon) {
value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
if (value) {
crm_trace("\t%s defaults to minimum-interval monitor's timeout '%s'",
action->uuid, value);
g_hash_table_insert(action->meta, strdup(XML_ATTR_TIMEOUT),
strdup(value));
}
}
}
// Cluster-wide <op_defaults> <meta_attributes>
unpack_instance_attributes(data_set->input, data_set->op_defaults, XML_TAG_META_SETS, NULL,
action->meta, NULL, FALSE, data_set->now);
// <op> <meta_attributes> take precedence over defaults
unpack_instance_attributes(data_set->input, xml_obj, XML_TAG_META_SETS,
NULL, action->meta, NULL, TRUE, data_set->now);
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
*/
if (xml_obj) {
xmlAttrPtr xIter = NULL;
for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
const char *prop_value = crm_element_value(xml_obj, prop_name);
g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
}
}
#if ENABLE_VERSIONED_ATTRS
rsc_details = pe_rsc_action_details(action);
pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_ATTR_SETS, NULL,
rsc_details->versioned_parameters, data_set->now);
pe_unpack_versioned_attributes(data_set->input, xml_obj, XML_TAG_META_SETS, NULL,
rsc_details->versioned_meta, data_set->now);
#endif
g_hash_table_remove(action->meta, "id");
field = XML_LRM_ATTR_INTERVAL;
value = g_hash_table_lookup(action->meta, field);
if (value != NULL) {
interval = crm_get_interval(value);
if (interval > 0) {
value_ms = crm_itoa(interval);
g_hash_table_replace(action->meta, strdup(field), value_ms);
} else {
g_hash_table_remove(action->meta, field);
}
}
if (safe_str_neq(action->task, RSC_START)
&& safe_str_neq(action->task, RSC_PROMOTE)) {
action->needs = rsc_req_nothing;
value = "nothing (not start/promote)";
} else if (is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
action->needs = rsc_req_stonith;
value = "fencing (resource)";
} else if (is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
action->needs = rsc_req_quorum;
value = "quorum (resource)";
} else {
action->needs = rsc_req_nothing;
value = "nothing (resource)";
}
pe_rsc_trace(action->rsc, "\tAction %s requires: %s", action->task, value);
value = unpack_operation_on_fail(action);
if (value == NULL) {
} else if (safe_str_eq(value, "block")) {
action->on_fail = action_fail_block;
g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
} else if (safe_str_eq(value, "fence")) {
action->on_fail = action_fail_fence;
value = "node fencing";
if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
crm_config_err("Specifying on_fail=fence and" " stonith-enabled=false makes no sense");
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
}
} else if (safe_str_eq(value, "standby")) {
action->on_fail = action_fail_standby;
value = "node standby";
} else if (safe_str_eq(value, "ignore")
|| safe_str_eq(value, "nothing")) {
action->on_fail = action_fail_ignore;
value = "ignore";
} else if (safe_str_eq(value, "migrate")) {
action->on_fail = action_fail_migrate;
value = "force migration";
} else if (safe_str_eq(value, "stop")) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop resource";
} else if (safe_str_eq(value, "restart")) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate)";
} else if (safe_str_eq(value, "restart-container")) {
if (container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate)";
} else {
value = NULL;
}
} else {
pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
value = NULL;
}
/* defaults */
if (value == NULL && container) {
action->on_fail = action_fail_restart_container;
value = "restart container (and possibly migrate) (default)";
/* for baremetal remote nodes, ensure that any failure that results in
* dropping an active connection to a remote node results in fencing of
* the remote node.
*
* There are only two action failures that don't result in fencing.
* 1. probes - probe failures are expected.
* 2. start - a start failure indicates that an active connection does not already
* exist. The user can set op on-fail=fence if they really want to fence start
* failures. */
} else if (((value == NULL) || !is_set(action->rsc->flags, pe_rsc_managed)) &&
(is_rsc_baremetal_remote_node(action->rsc, data_set) &&
!(safe_str_eq(action->task, CRMD_ACTION_STATUS) && interval == 0) &&
(safe_str_neq(action->task, CRMD_ACTION_START)))) {
if (!is_set(action->rsc->flags, pe_rsc_managed)) {
action->on_fail = action_fail_stop;
action->fail_role = RSC_ROLE_STOPPED;
value = "stop unmanaged baremetal remote node (enforcing default)";
} else {
if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
value = "fence baremetal remote node (default)";
} else {
value = "recover baremetal remote node connection (default)";
}
if (action->rsc->remote_reconnect_interval) {
action->fail_role = RSC_ROLE_STOPPED;
}
action->on_fail = action_fail_reset_remote;
}
} else if (value == NULL && safe_str_eq(action->task, CRMD_ACTION_STOP)) {
if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
action->on_fail = action_fail_fence;
value = "resource fence (default)";
} else {
action->on_fail = action_fail_block;
value = "resource block (default)";
}
} else if (value == NULL) {
action->on_fail = action_fail_recover;
value = "restart (and possibly migrate) (default)";
}
pe_rsc_trace(action->rsc, "\t%s failure handling: %s", action->task, value);
value = NULL;
if (xml_obj != NULL) {
value = g_hash_table_lookup(action->meta, "role_after_failure");
+ if (value) {
+ pe_warn_once(pe_wo_role_after,
+ "Support for role_after_failure is deprecated and will be removed in a future release");
+ }
}
if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
action->fail_role = text2role(value);
}
/* defaults */
if (action->fail_role == RSC_ROLE_UNKNOWN) {
if (safe_str_eq(action->task, CRMD_ACTION_PROMOTE)) {
action->fail_role = RSC_ROLE_SLAVE;
} else {
action->fail_role = RSC_ROLE_STARTED;
}
}
pe_rsc_trace(action->rsc, "\t%s failure results in: %s", action->task,
role2text(action->fail_role));
field = XML_OP_ATTR_START_DELAY;
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
if (value) {
unpack_start_delay(value, action->meta);
} else {
value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
unpack_interval_origin(value, action->meta, xml_obj, interval, data_set->now);
}
field = XML_ATTR_TIMEOUT;
value = g_hash_table_lookup(action->meta, field);
timeout = unpack_timeout(value);
g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT), crm_itoa(timeout));
#if ENABLE_VERSIONED_ATTRS
unpack_versioned_meta(rsc_details->versioned_meta, xml_obj, interval,
data_set->now);
#endif
}
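/* Example of the operation XML this unpacks (sketch; values are illustrative):
 *
 *   <op id="db-monitor-master" name="monitor" role="Master"
 *       interval="10s" timeout="30s" on-fail="restart"/>
 *
 * interval and timeout are normalized to milliseconds in action->meta,
 * on-fail selects action->on_fail, and a Master-role monitor like this is
 * also what unpack_operation_on_fail() consults for the demote default.
 */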
static xmlNode *
find_rsc_op_entry_helper(resource_t * rsc, const char *key, gboolean include_disabled)
{
unsigned long long number = 0;
gboolean do_retry = TRUE;
char *local_key = NULL;
const char *name = NULL;
const char *value = NULL;
const char *interval = NULL;
char *match_key = NULL;
xmlNode *op = NULL;
xmlNode *operation = NULL;
retry:
for (operation = __xml_first_child(rsc->ops_xml); operation != NULL;
operation = __xml_next_element(operation)) {
if (crm_str_eq((const char *)operation->name, "op", TRUE)) {
name = crm_element_value(operation, "name");
interval = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
value = crm_element_value(operation, "enabled");
if (!include_disabled && value && crm_is_true(value) == FALSE) {
continue;
}
number = crm_get_interval(interval);
match_key = generate_op_key(rsc->id, name, number);
if (safe_str_eq(key, match_key)) {
op = operation;
}
free(match_key);
if (rsc->clone_name) {
match_key = generate_op_key(rsc->clone_name, name, number);
if (safe_str_eq(key, match_key)) {
op = operation;
}
free(match_key);
}
if (op != NULL) {
free(local_key);
return op;
}
}
}
free(local_key);
if (do_retry == FALSE) {
return NULL;
}
do_retry = FALSE;
if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
local_key = generate_op_key(rsc->id, "migrate", 0);
key = local_key;
goto retry;
} else if (strstr(key, "_notify_")) {
local_key = generate_op_key(rsc->id, "notify", 0);
key = local_key;
goto retry;
}
return NULL;
}
xmlNode *
find_rsc_op_entry(resource_t * rsc, const char *key)
{
return find_rsc_op_entry_helper(rsc, key, FALSE);
}
void
print_node(const char *pre_text, node_t * node, gboolean details)
{
if (node == NULL) {
crm_trace("%s%s: <NULL>", pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
return;
}
CRM_ASSERT(node->details);
crm_trace("%s%s%sNode %s: (weight=%d, fixed=%s)",
pre_text == NULL ? "" : pre_text,
pre_text == NULL ? "" : ": ",
node->details->online ? "" : "Unavailable/Unclean ",
node->details->uname, node->weight, node->fixed ? "True" : "False");
if (details) {
char *pe_mutable = strdup("\t\t");
GListPtr gIter = node->details->running_rsc;
crm_trace("\t\t===Node Attributes");
g_hash_table_foreach(node->details->attrs, print_str_str, pe_mutable);
free(pe_mutable);
crm_trace("\t\t=== Resources");
for (; gIter != NULL; gIter = gIter->next) {
resource_t *rsc = (resource_t *) gIter->data;
print_resource(LOG_DEBUG_4, "\t\t", rsc, FALSE);
}
}
}
/*
* Used by the HashTable for-loop
*/
void
print_str_str(gpointer key, gpointer value, gpointer user_data)
{
crm_trace("%s%s %s ==> %s",
user_data == NULL ? "" : (char *)user_data,
user_data == NULL ? "" : ": ", (char *)key, (char *)value);
}
void
print_resource(int log_level, const char *pre_text, resource_t * rsc, gboolean details)
{
long options = pe_print_log | pe_print_pending;
if (rsc == NULL) {
do_crm_log(log_level - 1, "%s%s: <NULL>",
pre_text == NULL ? "" : pre_text, pre_text == NULL ? "" : ": ");
return;
}
if (details) {
options |= pe_print_details;
}
rsc->fns->print(rsc, pre_text, options, &log_level);
}
void
pe_free_action(action_t * action)
{
if (action == NULL) {
return;
}
g_list_free_full(action->actions_before, free); /* action_wrapper_t* */
g_list_free_full(action->actions_after, free); /* action_wrapper_t* */
if (action->extra) {
g_hash_table_destroy(action->extra);
}
if (action->meta) {
g_hash_table_destroy(action->meta);
}
#if ENABLE_VERSIONED_ATTRS
if (action->rsc) {
pe_free_rsc_action_details(action);
}
#endif
free(action->cancel_task);
free(action->reason);
free(action->task);
free(action->uuid);
free(action->node);
free(action);
}
GListPtr
find_recurring_actions(GListPtr input, node_t * not_on_node)
{
const char *value = NULL;
GListPtr result = NULL;
GListPtr gIter = input;
CRM_CHECK(input != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
value = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
if (value == NULL) {
/* skip */
} else if (safe_str_eq(value, "0")) {
/* skip */
} else if (safe_str_eq(CRMD_ACTION_CANCEL, action->task)) {
/* skip */
} else if (not_on_node == NULL) {
crm_trace("(null) Found: %s", action->uuid);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
/* skip */
} else if (action->node->details != not_on_node->details) {
crm_trace("Found: %s", action->uuid);
result = g_list_prepend(result, action);
}
}
return result;
}
enum action_tasks
get_complex_task(resource_t * rsc, const char *name, gboolean allow_non_atomic)
{
enum action_tasks task = text2task(name);
if (rsc == NULL) {
return task;
} else if (allow_non_atomic == FALSE || rsc->variant == pe_native) {
switch (task) {
case stopped_rsc:
case started_rsc:
case action_demoted:
case action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s", name, rsc->id);
return task - 1;
break;
default:
break;
}
}
return task;
}
action_t *
find_first_action(GListPtr input, const char *uuid, const char *task, node_t * on_node)
{
GListPtr gIter = NULL;
CRM_CHECK(uuid || task, return NULL);
for (gIter = input; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (uuid != NULL && safe_str_neq(uuid, action->uuid)) {
continue;
} else if (task != NULL && safe_str_neq(task, action->task)) {
continue;
} else if (on_node == NULL) {
return action;
} else if (action->node == NULL) {
continue;
} else if (on_node->details == action->node->details) {
return action;
}
}
return NULL;
}
GListPtr
find_actions(GListPtr input, const char *key, const node_t *on_node)
{
GListPtr gIter = input;
GListPtr result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
if (safe_str_neq(key, action->uuid)) {
crm_trace("%s does not match action %s", key, action->uuid);
continue;
} else if (on_node == NULL) {
crm_trace("Action %s matches (ignoring node)", key);
result = g_list_prepend(result, action);
} else if (action->node == NULL) {
crm_trace("Action %s matches (unallocated, assigning to %s)",
key, on_node->details->uname);
action->node = node_copy(on_node);
result = g_list_prepend(result, action);
} else if (on_node->details == action->node->details) {
crm_trace("Action %s on %s matches", key, on_node->details->uname);
result = g_list_prepend(result, action);
} else {
crm_trace("Action %s on node %s does not match requested node %s",
key, action->node->details->uname,
on_node->details->uname);
}
}
return result;
}
GListPtr
find_actions_exact(GListPtr input, const char *key, node_t * on_node)
{
GListPtr gIter = input;
GListPtr result = NULL;
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
action_t *action = (action_t *) gIter->data;
crm_trace("Matching %s against %s", key, action->uuid);
if (safe_str_neq(key, action->uuid)) {
crm_trace("Key mismatch: %s vs. %s", key, action->uuid);
continue;
} else if (on_node == NULL || action->node == NULL) {
crm_trace("on_node=%p, action->node=%p", on_node, action->node);
continue;
} else if (safe_str_eq(on_node->details->id, action->node->details->id)) {
result = g_list_prepend(result, action);
}
crm_trace("Node mismatch: %s vs. %s", on_node->details->id, action->node->details->id);
}
return result;
}
static void
resource_node_score(resource_t * rsc, node_t * node, int score, const char *tag)
{
node_t *match = NULL;
if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
&& safe_str_eq(tag, "symmetric_default")) {
/* This string comparison may be fragile, but exclusive resources and
* exclusive nodes should not have the symmetric_default constraint
* applied to them.
*/
return;
} else if (rsc->children) {
GListPtr gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
pe_rsc_trace(rsc, "Setting %s for %s on %s: %d", tag, rsc->id, node->details->uname, score);
match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = node_copy(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
}
match->weight = merge_weights(match->weight, score);
}
void
resource_location(resource_t * rsc, node_t * node, int score, const char *tag,
pe_working_set_t * data_set)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
} else if (data_set != NULL) {
GListPtr gIter = data_set->nodes;
for (; gIter != NULL; gIter = gIter->next) {
node_t *node_iter = (node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
resource_node_score(rsc, node_iter, score, tag);
}
}
if (node == NULL && score == -INFINITY) {
if (rsc->allocated_to) {
crm_info("Deallocating %s from %s", rsc->id, rsc->allocated_to->details->uname);
free(rsc->allocated_to);
rsc->allocated_to = NULL;
}
}
}
#define sort_return(an_int, why) do { \
free(a_uuid); \
free(b_uuid); \
crm_trace("%s (%d) %c %s (%d) : %s", \
a_xml_id, a_call_id, an_int>0?'>':an_int<0?'<':'=', \
b_xml_id, b_call_id, why); \
return an_int; \
} while(0)
gint
sort_op_by_callid(gconstpointer a, gconstpointer b)
{
int a_call_id = -1;
int b_call_id = -1;
char *a_uuid = NULL;
char *b_uuid = NULL;
const xmlNode *xml_a = a;
const xmlNode *xml_b = b;
const char *a_xml_id = crm_element_value_const(xml_a, XML_ATTR_ID);
const char *b_xml_id = crm_element_value_const(xml_b, XML_ATTR_ID);
if (safe_str_eq(a_xml_id, b_xml_id)) {
/* We have duplicate lrm_rsc_op entries in the status
* section, which is unlikely to be a good thing
* - we can handle it easily enough, but we need to get
* to the bottom of why it's happening.
*/
pe_err("Duplicate lrm_rsc_op entries named %s", a_xml_id);
sort_return(0, "duplicate");
}
crm_element_value_const_int(xml_a, XML_LRM_ATTR_CALLID, &a_call_id);
crm_element_value_const_int(xml_b, XML_LRM_ATTR_CALLID, &b_call_id);
if (a_call_id == -1 && b_call_id == -1) {
/* both are pending ops so it doesn't matter since
* stops are never pending
*/
sort_return(0, "pending");
} else if (a_call_id >= 0 && a_call_id < b_call_id) {
sort_return(-1, "call id");
} else if (b_call_id >= 0 && a_call_id > b_call_id) {
sort_return(1, "call id");
} else if (b_call_id >= 0 && a_call_id == b_call_id) {
/*
* The op and last_failed_op are the same
* Order on last-rc-change
*/
int last_a = -1;
int last_b = -1;
crm_element_value_const_int(xml_a, XML_RSC_OP_LAST_CHANGE, &last_a);
crm_element_value_const_int(xml_b, XML_RSC_OP_LAST_CHANGE, &last_b);
crm_trace("rc-change: %d vs %d", last_a, last_b);
if (last_a >= 0 && last_a < last_b) {
sort_return(-1, "rc-change");
} else if (last_b >= 0 && last_a > last_b) {
sort_return(1, "rc-change");
}
sort_return(0, "rc-change");
} else {
/* One of the inputs is a pending operation
* Attempt to use XML_ATTR_TRANSITION_MAGIC to determine its age relative to the other
*/
int a_id = -1;
int b_id = -1;
int dummy = -1;
const char *a_magic = crm_element_value_const(xml_a, XML_ATTR_TRANSITION_MAGIC);
const char *b_magic = crm_element_value_const(xml_b, XML_ATTR_TRANSITION_MAGIC);
CRM_CHECK(a_magic != NULL && b_magic != NULL, sort_return(0, "No magic"));
if(!decode_transition_magic(a_magic, &a_uuid, &a_id, &dummy, &dummy, &dummy, &dummy)) {
sort_return(0, "bad magic a");
}
if(!decode_transition_magic(b_magic, &b_uuid, &b_id, &dummy, &dummy, &dummy, &dummy)) {
sort_return(0, "bad magic b");
}
/* try to determine the relative age of the operation...
* some pending operations (e.g. a start) may have been superseded
* by a subsequent stop
*
* [a|b]_id == -1 means it's a shutdown operation and _always_ comes last
*/
if (safe_str_neq(a_uuid, b_uuid) || a_id == b_id) {
/*
* some of the logic in here may be redundant...
*
* if the UUID from the TE doesn't match then one better
* be a pending operation.
* pending operations don't survive between elections and joins
* because we query the LRM directly
*/
if (b_call_id == -1) {
sort_return(-1, "transition + call");
} else if (a_call_id == -1) {
sort_return(1, "transition + call");
}
} else if ((a_id >= 0 && a_id < b_id) || b_id == -1) {
sort_return(-1, "transition");
} else if ((b_id >= 0 && a_id > b_id) || a_id == -1) {
sort_return(1, "transition");
}
}
/* we should never end up here */
CRM_CHECK(FALSE, sort_return(0, "default"));
}
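/* Usage sketch (hypothetical caller): order a resource's lrm_rsc_op history
 * entries oldest-first before replaying them:
 *
 *   op_list = g_list_sort(op_list, sort_op_by_callid);
 */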
time_t
get_effective_time(pe_working_set_t * data_set)
{
if(data_set) {
if (data_set->now == NULL) {
crm_trace("Recording a new 'now'");
data_set->now = crm_time_new(NULL);
}
return crm_time_get_seconds_since_epoch(data_set->now);
}
crm_trace("Defaulting to 'now'");
return time(NULL);
}
gboolean
get_target_role(resource_t * rsc, enum rsc_role_e * role)
{
enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
if (value == NULL || safe_str_eq("started", value)
|| safe_str_eq("default", value)) {
return FALSE;
}
local_role = text2role(value);
if (local_role == RSC_ROLE_UNKNOWN) {
crm_config_err("%s: Unknown value for %s: %s", rsc->id, XML_RSC_ATTR_TARGET_ROLE, value);
return FALSE;
} else if (local_role > RSC_ROLE_STARTED) {
if (uber_parent(rsc)->variant == pe_master) {
if (local_role > RSC_ROLE_SLAVE) {
/* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
return FALSE;
}
} else {
crm_config_err("%s is not part of a master/slave resource, a %s of '%s' makes no sense",
rsc->id, XML_RSC_ATTR_TARGET_ROLE, value);
return FALSE;
}
}
*role = local_role;
return TRUE;
}
gboolean
order_actions(action_t * lh_action, action_t * rh_action, enum pe_ordering order)
{
GListPtr gIter = NULL;
action_wrapper_t *wrapper = NULL;
GListPtr list = NULL;
if (order == pe_order_none) {
return FALSE;
}
if (lh_action == NULL || rh_action == NULL) {
return FALSE;
}
crm_trace("Ordering Action %s before %s", lh_action->uuid, rh_action->uuid);
/* Ensure we never create a dependency on ourselves... it's happened */
CRM_ASSERT(lh_action != rh_action);
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
action_wrapper_t *after = (action_wrapper_t *) gIter->data;
if (after->action == rh_action && (after->type & order)) {
return FALSE;
}
}
wrapper = calloc(1, sizeof(action_wrapper_t));
wrapper->action = rh_action;
wrapper->type = order;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
wrapper = NULL;
/* order |= pe_order_implies_then; */
/* order ^= pe_order_implies_then; */
wrapper = calloc(1, sizeof(action_wrapper_t));
wrapper->action = lh_action;
wrapper->type = order;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
return TRUE;
}
action_t *
get_pseudo_op(const char *name, pe_working_set_t * data_set)
{
action_t *op = NULL;
if(data_set->singletons) {
op = g_hash_table_lookup(data_set->singletons, name);
}
if (op == NULL) {
op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
set_bit(op->flags, pe_action_pseudo);
set_bit(op->flags, pe_action_runnable);
}
return op;
}
void
destroy_ticket(gpointer data)
{
ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
}
free(ticket->id);
free(ticket);
}
ticket_t *
ticket_new(const char *ticket_id, pe_working_set_t * data_set)
{
ticket_t *ticket = NULL;
if (ticket_id == NULL || strlen(ticket_id) == 0) {
return NULL;
}
if (data_set->tickets == NULL) {
data_set->tickets =
g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, destroy_ticket);
}
ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
if (ticket == NULL) {
ticket = calloc(1, sizeof(ticket_t));
if (ticket == NULL) {
crm_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
}
crm_trace("Creaing ticket entry for %s", ticket_id);
ticket->id = strdup(ticket_id);
ticket->granted = FALSE;
ticket->last_granted = -1;
ticket->standby = FALSE;
ticket->state = crm_str_table_new();
g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
static void
filter_parameters(xmlNode * param_set, const char *param_string, bool need_present)
{
if (param_set && param_string) {
xmlAttrPtr xIter = param_set->properties;
while (xIter) {
const char *prop_name = (const char *)xIter->name;
char *name = crm_strdup_printf(" %s ", prop_name);
char *match = strstr(param_string, name);
free(name);
// Advance the iterator now, because the current entry might get removed below
xIter = xIter->next;
if (need_present && match == NULL) {
crm_trace("%s not found in %s", prop_name, param_string);
xml_remove_prop(param_set, prop_name);
} else if (need_present == FALSE && match) {
crm_trace("%s found in %s", prop_name, param_string);
xml_remove_prop(param_set, prop_name);
}
}
}
}
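/* Example (illustrative): with param_string = " passwd password " and
 * need_present = FALSE, any 'passwd' or 'password' property is removed from
 * param_set; this is how the secure digest drops sensitive parameters. Names
 * are matched as " name " substrings, hence the surrounding spaces.
 */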
#if ENABLE_VERSIONED_ATTRS
static void
append_versioned_params(xmlNode *versioned_params, const char *ra_version, xmlNode *params)
{
GHashTable *hash = pe_unpack_versioned_parameters(versioned_params, ra_version);
char *key = NULL;
char *value = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, hash);
while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
crm_xml_add(params, key, value);
}
g_hash_table_destroy(hash);
}
#endif
static op_digest_cache_t *
rsc_action_digest(resource_t * rsc, const char *task, const char *key,
node_t * node, xmlNode * xml_op, pe_working_set_t * data_set)
{
op_digest_cache_t *data = NULL;
data = g_hash_table_lookup(node->details->digest_cache, key);
if (data == NULL) {
GHashTable *local_rsc_params = crm_str_table_new();
action_t *action = custom_action(rsc, strdup(key), task, node, TRUE, FALSE, data_set);
#if ENABLE_VERSIONED_ATTRS
xmlNode *local_versioned_params = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
const char *ra_version = NULL;
#endif
const char *op_version;
const char *restart_list = NULL;
const char *secure_list = " passwd password ";
data = calloc(1, sizeof(op_digest_cache_t));
CRM_ASSERT(data != NULL);
get_rsc_attributes(local_rsc_params, rsc, node, data_set);
#if ENABLE_VERSIONED_ATTRS
pe_get_versioned_attributes(local_versioned_params, rsc, node, data_set);
#endif
data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
if (container_fix_remote_addr_in(rsc, data->params_all, "addr")) {
crm_trace("Fixed addr for %s on %s", rsc->id, node->details->uname);
}
g_hash_table_foreach(local_rsc_params, hash2field, data->params_all);
g_hash_table_foreach(action->extra, hash2field, data->params_all);
g_hash_table_foreach(rsc->parameters, hash2field, data->params_all);
g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
if(xml_op) {
secure_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_SECURE);
restart_list = crm_element_value(xml_op, XML_LRM_ATTR_OP_RESTART);
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
#if ENABLE_VERSIONED_ATTRS
ra_version = crm_element_value(xml_op, XML_ATTR_RA_VERSION);
#endif
} else {
op_version = CRM_FEATURE_SET;
}
#if ENABLE_VERSIONED_ATTRS
append_versioned_params(local_versioned_params, ra_version, data->params_all);
append_versioned_params(rsc->versioned_parameters, ra_version, data->params_all);
{
pe_rsc_action_details_t *details = pe_rsc_action_details(action);
append_versioned_params(details->versioned_parameters, ra_version, data->params_all);
}
#endif
filter_action_parameters(data->params_all, op_version);
g_hash_table_destroy(local_rsc_params);
pe_free_action(action);
data->digest_all_calc = calculate_operation_digest(data->params_all, op_version);
if (is_set(data_set->flags, pe_flag_sanitized)) {
data->params_secure = copy_xml(data->params_all);
if(secure_list) {
filter_parameters(data->params_secure, secure_list, FALSE);
}
data->digest_secure_calc = calculate_operation_digest(data->params_secure, op_version);
}
if(xml_op && crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST) != NULL) {
data->params_restart = copy_xml(data->params_all);
if (restart_list) {
filter_parameters(data->params_restart, restart_list, TRUE);
}
data->digest_restart_calc = calculate_operation_digest(data->params_restart, op_version);
}
g_hash_table_insert(node->details->digest_cache, strdup(key), data);
}
return data;
}
op_digest_cache_t *
rsc_action_digest_cmp(resource_t * rsc, xmlNode * xml_op, node_t * node,
pe_working_set_t * data_set)
{
op_digest_cache_t *data = NULL;
char *key = NULL;
int interval = 0;
const char *op_version;
const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
const char *interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
const char *digest_all;
const char *digest_restart;
CRM_ASSERT(node != NULL);
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
interval = crm_parse_int(interval_s, "0");
key = generate_op_key(rsc->id, task, interval);
data = rsc_action_digest(rsc, task, key, node, xml_op, data_set);
data->rc = RSC_DIGEST_MATCH;
if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) {
pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s",
key, node->details->uname,
crm_str(digest_restart), data->digest_restart_calc,
op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
data->rc = RSC_DIGEST_RESTART;
} else if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
data->rc = RSC_DIGEST_UNKNOWN;
} else if (strcmp(digest_all, data->digest_all_calc) != 0) {
pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (%s:%s) %s",
key, node->details->uname,
crm_str(digest_all), data->digest_all_calc,
(interval > 0)? "reschedule" : "reload",
op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
data->rc = RSC_DIGEST_ALL;
}
free(key);
return data;
}
#define STONITH_DIGEST_TASK "stonith-on"
static op_digest_cache_t *
fencing_action_digest_cmp(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
{
char *key = generate_op_key(rsc->id, STONITH_DIGEST_TASK, 0);
op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, key, node, NULL, data_set);
const char *digest_all = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
const char *digest_secure = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
/* No 'reloads' for fencing device changes
*
* We use the resource id + agent + digest so that we can detect
* changes to the agent and/or the parameters used
*/
char *search_all = crm_strdup_printf("%s:%s:%s", rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_all_calc);
char *search_secure = crm_strdup_printf("%s:%s:%s", rsc->id, (const char*)g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE), data->digest_secure_calc);
data->rc = RSC_DIGEST_ALL;
if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
data->rc = RSC_DIGEST_UNKNOWN;
} else if (strstr(digest_all, search_all)) {
data->rc = RSC_DIGEST_MATCH;
} else if(digest_secure && data->digest_secure_calc) {
if(strstr(digest_secure, search_secure)) {
if (is_set(data_set->flags, pe_flag_sanitized)) {
printf("Only 'private' parameters to %s for unfencing %s changed\n",
rsc->id, node->details->uname);
}
data->rc = RSC_DIGEST_MATCH;
}
}
if (data->rc == RSC_DIGEST_ALL && is_set(data_set->flags, pe_flag_sanitized) && data->digest_secure_calc) {
if (is_set(data_set->flags, pe_flag_sanitized)) {
printf("Parameters to %s for unfencing %s changed, try '%s:%s:%s'\n",
rsc->id, node->details->uname, rsc->id,
(const char *) g_hash_table_lookup(rsc->meta, XML_ATTR_TYPE),
data->digest_secure_calc);
}
}
free(key);
free(search_all);
free(search_secure);
return data;
}
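/* Comparison sketch (illustrative values): CRM_ATTR_DIGESTS_ALL on the node
 * holds a comma-separated list such as "fence1:fence_ipmilan:0123abcd,", and
 * the device counts as unchanged when "<rsc-id>:<agent-type>:<calculated digest>"
 * appears in that list; otherwise the caller treats the device definition as
 * changed and forces unfencing.
 */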
const char *rsc_printable_id(resource_t *rsc)
{
if (is_not_set(rsc->flags, pe_rsc_unique)) {
return ID(rsc->xml);
}
return rsc->id;
}
void
clear_bit_recursive(resource_t * rsc, unsigned long long flag)
{
GListPtr gIter = rsc->children;
clear_bit(rsc->flags, flag);
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
clear_bit_recursive(child_rsc, flag);
}
}
void
set_bit_recursive(resource_t * rsc, unsigned long long flag)
{
GListPtr gIter = rsc->children;
set_bit(rsc->flags, flag);
for (; gIter != NULL; gIter = gIter->next) {
resource_t *child_rsc = (resource_t *) gIter->data;
set_bit_recursive(child_rsc, flag);
}
}
static GListPtr
find_unfencing_devices(GListPtr candidates, GListPtr matches)
{
for (GListPtr gIter = candidates; gIter != NULL; gIter = gIter->next) {
resource_t *candidate = gIter->data;
const char *provides = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_PROVIDES);
const char *requires = g_hash_table_lookup(candidate->meta, XML_RSC_ATTR_REQUIRES);
if(candidate->children) {
matches = find_unfencing_devices(candidate->children, matches);
} else if (is_not_set(candidate->flags, pe_rsc_fence_device)) {
continue;
} else if (crm_str_eq(provides, "unfencing", FALSE) || crm_str_eq(requires, "unfencing", FALSE)) {
matches = g_list_prepend(matches, candidate);
}
}
return matches;
}
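/* Create (or reuse, via the singletons table) the fencing action for a node.
 * For remote nodes with unfencing enabled, also record the parameter digests
 * of all unfencing devices in the action's meta-attributes so that device
 * changes can be detected later.
 */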
action_t *
pe_fence_op(node_t * node, const char *op, bool optional, const char *reason, pe_working_set_t * data_set)
{
char *op_key = NULL;
action_t *stonith_op = NULL;
if(op == NULL) {
op = data_set->stonith_action;
}
op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
if(data_set->singletons) {
stonith_op = g_hash_table_lookup(data_set->singletons, op_key);
}
if(stonith_op == NULL) {
stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
if(is_remote_node(node) && is_set(data_set->flags, pe_flag_enable_unfencing)) {
/* Extra work to detect device changes on remotes
*
* We may do this for all nodes in the future, but for now
* the check_action_definition()-based logic works fine.
*
* Use "stonith-on" to avoid creating cache entries for
* operations check_action_definition() would look for.
*/
long max = 1024;
long digests_all_offset = 0;
long digests_secure_offset = 0;
/* Use calloc() so the buffers start out as empty strings, even if no
 * unfencing devices are matched below */
char *digests_all = calloc(max, sizeof(char));
char *digests_secure = calloc(max, sizeof(char));
GListPtr matches = find_unfencing_devices(data_set->resources, NULL);
for (GListPtr gIter = matches; gIter != NULL; gIter = gIter->next) {
resource_t *match = gIter->data;
op_digest_cache_t *data = fencing_action_digest_cmp(match, node, data_set);
if(data->rc == RSC_DIGEST_ALL) {
optional = FALSE;
crm_notice("Unfencing %s (remote): because the definition of %s changed", node->details->uname, match->id);
if (is_set(data_set->flags, pe_flag_sanitized)) {
/* Extra detail for those running from the command line */
fprintf(stdout, " notice: Unfencing %s (remote): because the definition of %s changed\n", node->details->uname, match->id);
}
}
digests_all_offset += snprintf(
digests_all+digests_all_offset, max-digests_all_offset,
"%s:%s:%s,", match->id, (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE), data->digest_all_calc);
digests_secure_offset += snprintf(
digests_secure+digests_secure_offset, max-digests_secure_offset,
"%s:%s:%s,", match->id, (const char*)g_hash_table_lookup(match->meta, XML_ATTR_TYPE), data->digest_secure_calc);
}
g_hash_table_insert(stonith_op->meta,
strdup(XML_OP_ATTR_DIGESTS_ALL),
digests_all);
g_hash_table_insert(stonith_op->meta,
strdup(XML_OP_ATTR_DIGESTS_SECURE),
digests_secure);
}
} else {
free(op_key);
}
if(optional == FALSE && pe_can_fence(data_set, node)) {
pe_action_required(stonith_op, NULL, reason);
} else if(reason && stonith_op->reason == NULL) {
stonith_op->reason = strdup(reason);
}
return stonith_op;
}
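/* Schedule unfencing where needed: for the given node if it is online, clean,
 * and not shutting down, or otherwise for every such node the fencing device
 * is allowed to run on
 */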
void
trigger_unfencing(
resource_t * rsc, node_t *node, const char *reason, action_t *dependency, pe_working_set_t * data_set)
{
if(is_not_set(data_set->flags, pe_flag_enable_unfencing)) {
/* No resources require it */
return;
} else if (rsc != NULL && is_not_set(rsc->flags, pe_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
} else if(node
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
action_t *unfence = pe_fence_op(node, "on", FALSE, reason, data_set);
if(dependency) {
order_actions(unfence, dependency, pe_order_optional);
}
} else if(rsc) {
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
trigger_unfencing(rsc, node, reason, dependency, data_set);
}
}
}
}
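/* Add obj_ref to the tag named tag_name in tags, creating the tag entry if it
 * does not exist yet; duplicate references are silently ignored
 */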
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
tag_t *tag = NULL;
GListPtr gIter = NULL;
gboolean is_existing = FALSE;
CRM_CHECK(tags && tag_name && obj_ref, return FALSE);
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
tag = calloc(1, sizeof(tag_t));
if (tag == NULL) {
return FALSE;
}
tag->id = strdup(tag_name);
tag->refs = NULL;
g_hash_table_insert(tags, strdup(tag_name), tag);
}
for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
const char *existing_ref = (const char *) gIter->data;
if (crm_str_eq(existing_ref, obj_ref, TRUE)){
is_existing = TRUE;
break;
}
}
if (is_existing == FALSE) {
tag->refs = g_list_append(tag->refs, strdup(obj_ref));
crm_trace("Added: tag=%s ref=%s", tag->id, obj_ref);
}
return TRUE;
}
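/* Set or clear an action flag (whichever the given flag requires) and, when
 * the flag actually changes, record a human-readable reason derived from the
 * triggering action
 */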
void pe_action_set_flag_reason(const char *function, long line,
pe_action_t *action, pe_action_t *reason, const char *text,
enum pe_action_flags flags, bool overwrite)
{
bool unset = FALSE;
bool update = FALSE;
const char *change = NULL;
if(is_set(flags, pe_action_runnable)) {
unset = TRUE;
change = "unrunnable";
} else if(is_set(flags, pe_action_optional)) {
unset = TRUE;
change = "required";
} else if(is_set(flags, pe_action_failure_is_fatal)) {
change = "fatally failed";
} else if(is_set(flags, pe_action_migrate_runnable)) {
unset = TRUE;
overwrite = TRUE;
change = "unrunnable";
} else if(is_set(flags, pe_action_dangle)) {
change = "dangling";
} else if(is_set(flags, pe_action_requires_any)) {
change = "required";
} else {
crm_err("Unknown flag change to %s by %s: 0x%.16x",
flags, action->uuid, (reason? reason->uuid : 0));
}
if(unset) {
if(is_set(action->flags, flags)) {
action->flags = crm_clear_bit(function, line, action->uuid, action->flags, flags);
update = TRUE;
}
} else {
if(is_not_set(action->flags, flags)) {
action->flags = crm_set_bit(function, line, action->uuid, action->flags, flags);
update = TRUE;
}
}
if((change && update) || text) {
char *reason_text = NULL;
if(reason == NULL) {
pe_action_set_reason(action, text, overwrite);
} else if(reason->rsc == NULL) {
reason_text = crm_strdup_printf("%s %s%c %s", change, reason->task, text?':':0, text?text:"");
} else {
reason_text = crm_strdup_printf("%s %s %s%c %s", change, reason->rsc->id, reason->task, text?':':0, text?text:"NA");
}
if(reason_text && action->rsc != reason->rsc) {
pe_action_set_reason(action, reason_text, overwrite);
}
free(reason_text);
}
}
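/* Record why an action is needed, replacing any existing reason only when
 * overwrite is TRUE
 */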
void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
{
if(action->reason && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'", action->uuid, action->reason, reason);
free(action->reason);
action->reason = NULL;
}
if(action->reason == NULL) {
if(reason) {
pe_rsc_trace(action->rsc, "Set %s reason to '%s'", action->uuid, reason);
action->reason = strdup(reason);
} else {
action->reason = NULL;
}
}
}
