diff --git a/Makefile.am b/Makefile.am index 1769c6e39..fccaca493 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,178 +1,178 @@ # # Copyright (C) 2008 Andrew Beekhof # Copyright (C) 2011 Fabio M. Di Nitto # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure DRF/config-h.in \ missing install-sh autoscan.log configure.scan \ DRF/stamp-h.in libtool.m4 ltdl.m4 libltdl.tar \ compile config.guess config.sub depcomp SPEC = $(PACKAGE_NAME).spec TARFILES = $(PACKAGE_NAME)-$(VERSION).tar.bz2 \ $(PACKAGE_NAME)-$(VERSION).tar.gz SUBDIRS = if BUILD_RGMANAGER SUBDIRS += rgmanager RGMANAGER = without else RGMANAGER = with endif if BUILD_LINUX_HA -SUBDIRS += include heartbeat tools ldirectord doc +SUBDIRS += include heartbeat tools ldirectord doc systemd LINUX_HA = without else LINUX_HA = with endif if WITH_COMPAT_HABINDIR COMPAT_HABINDIR = without else COMPAT_HABINDIR = with endif EXTRA_DIST = autogen.sh .version make/release.mk \ make/git-version-gen make/gitlog-to-changelog \ AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog \ $(SPEC).in install-exec-local: if BUILD_LINUX_HA $(INSTALL) -d -m 1755 $(DESTDIR)$(HA_RSCTMPDIR) $(LN_S) ../../lib/heartbeat/ocf-binaries $(DESTDIR)${OCF_RA_DIR_PREFIX}/heartbeat/.ocf-binaries $(LN_S) ../../lib/heartbeat/ocf-directories $(DESTDIR)${OCF_RA_DIR_PREFIX}/heartbeat/.ocf-directories $(LN_S) ../../lib/heartbeat/ocf-returncodes $(DESTDIR)${OCF_RA_DIR_PREFIX}/heartbeat/.ocf-returncodes $(LN_S) ../../lib/heartbeat/ocf-shellfuncs $(DESTDIR)${OCF_RA_DIR_PREFIX}/heartbeat/.ocf-shellfuncs endif if BUILD_RGMANAGER if BUILD_LINUX_HA $(LN_S) ${CLUSTERDATA} $(DESTDIR)${OCF_RA_DIR_PREFIX}/redhat endif $(INSTALL) -d $(DESTDIR)/$(LOGDIR) endif dist-clean-local: rm -f autoconf automake autoheader $(TARFILES) uninstall-local: rmdir $(DESTDIR)/$(LOGDIR) || :; if BUILD_LINUX_HA rm -f $(DESTDIR)${OCF_RA_DIR_PREFIX}/heartbeat/.ocf-binaries rm -f $(DESTDIR)${OCF_RA_DIR_PREFIX}/heartbeat/.ocf-directories rm -f $(DESTDIR)${OCF_RA_DIR_PREFIX}/heartbeat/.ocf-returncodes rm -f $(DESTDIR)${OCF_RA_DIR_PREFIX}/heartbeat/.ocf-shellfuncs if BUILD_RGMANAGER rm -f $(DESTDIR)${OCF_RA_DIR_PREFIX}/redhat endif endif BUILT_SOURCES = .version .version: echo $(VERSION) > $@-t && mv $@-t $@ dist-hook: gen-ChangeLog $(SPEC) echo $(VERSION) > $(distdir)/.tarball-version rm -f $(distdir)/$(SPEC) && \ cp $(top_srcdir)/$(SPEC) $(distdir)/$(SPEC) gen_start_date = 2000-01-01 .PHONY: gen-ChangeLog gen-ChangeLog: if test -d .git; then \ LC_ALL=C $(top_srcdir)/make/gitlog-to-changelog \ --since=$(gen_start_date) > $(distdir)/cl-t; \ rm -f $(distdir)/ChangeLog.devel; \ mv $(distdir)/cl-t $(distdir)/ChangeLog.devel; \ fi ## make rpm/srpm section. 
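As a point of reference for the sed pipeline in the $(SPEC) rule that follows, this is roughly how a git-version-gen style string is expected to decompose; the example value is hypothetical and plain shell parameter expansion stands in for the rule's sed calls:

    # Illustrative sketch only, not the rule itself: split a dirty snapshot
    # version into the fields substituted into the spec file.
    ver="4.1.0.15-gabc1234-dirty"        # hypothetical git-version-gen output
    case $ver in *-dirty) dirty=dirty; ver=${ver%-dirty};; esac
    alphatag=${ver##*-}                  # gabc1234 (text after the last dash)
    ver=${ver%%-*}                       # 4.1.0.15 (text before the first dash)
    numcomm=${ver##*.}                   # 15       (trailing commit count)
    rpmver=${ver%.$numcomm}              # 4.1.0
    echo "rpmver=$rpmver numcomm=$numcomm alphatag=$alphatag dirty=$dirty"

In the rule itself, a plain release string such as "4.1.0" takes none of these branches, so rpmver stays 4.1.0 and the dirty/alphatag/numcomm/rcver %global lines are stripped from the generated spec.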
$(SPEC): $(SPEC).in rm -f $@-t $@ date="$(shell LC_ALL=C date "+%a %b %d %Y")" && \ specver="1" && \ rcver="" && \ dirty="" && \ alphatag="" && \ numcomm="" && \ ver="$(VERSION)" && \ if echo $$ver | grep -q -- "-dirty$$"; then \ dirty="dirty" && \ echo VERSION IS DIRTY && \ ver=`echo $$ver | sed -e "s/-dirty$$//"`; \ fi && \ echo $$ver && \ if echo $$ver | grep -q -- "-"; then \ alphatag=`echo $$ver | sed -e "s/.*-//"` && \ echo VERSION HAS ALPHATAG && \ ver=`echo $$ver | sed -e "s/-.*//"`; \ fi && \ echo $$ver && \ if [ -n "$$alphatag" ]; then \ echo VERSION HAS NUMCOMMITS && \ numcomm=`echo $$ver | sed -e 's/.*\.//'` && \ ver=`echo $$ver | sed -e 's/.'$$numcomm'$$//'`; \ fi && \ if echo $$ver | grep -q "\.[[:digit:]]rc[[:digit:]]"; then \ rpmver=`echo $$ver | sed -e "s/rc[[:digit:]].*//g"` && \ rcver=`echo $$ver | sed -e "s/.*\(rc[[:digit:]]\).*/\1/g"` && \ specver="0"; \ else \ rpmver=$$ver; \ fi && \ echo $$rpmver $$rcver && \ sed \ -e "s#@version@#$$rpmver#g" \ -e "s#@alphatag@#$$alphatag#g" \ -e "s#@numcomm@#$$numcomm#g" \ -e "s#@dirty@#$$dirty#g" \ -e "s#@date@#$$date#g" \ -e "s#@specver@#$$specver#g" \ -e "s#@rcver@#$$rcver#g" \ -e "s#@rgmanager@#$(RGMANAGER)#g" \ -e "s#@linux-ha@#$(LINUX_HA)#g" \ -e "s#@compat-habindir@#$(COMPAT_HABINDIR)#g" \ $< > $@-t; \ if [ -z "$$dirty" ]; then sed -i -e "s#%glo.*dirty.*##g" $@-t; fi; \ if [ -z "$$alphatag" ]; then sed -i -e "s#%glo.*alphatag.*##g" $@-t; fi; \ if [ -z "$$numcomm" ]; then sed -i -e "s#%glo.*numcomm.*##g" $@-t; fi; \ if [ -z "$$rcver" ]; then sed -i -e "s#%glo.*rcver.*##g" $@-t; fi chmod a-w $@-t mv $@-t $@ $(TARFILES): $(MAKE) dist RPMBUILDOPTS = --define "_sourcedir $(abs_builddir)" \ --define "_specdir $(abs_builddir)" \ --define "_builddir $(abs_builddir)" \ --define "_srcrpmdir $(abs_builddir)" \ --define "_rpmdir $(abs_builddir)" srpm: clean $(MAKE) $(SPEC) $(TARFILES) rpmbuild $(RPMBUILDOPTS) --nodeps -bs $(SPEC) rpm: clean $(MAKE) $(SPEC) $(TARFILES) rpmbuild $(RPMBUILDOPTS) -ba $(SPEC) clean-generic: rm -rf $(SPEC) $(TARFILES) diff --git a/configure.ac b/configure.ac index 98c58a895..a050946af 100644 --- a/configure.ac +++ b/configure.ac @@ -1,923 +1,924 @@ dnl dnl autoconf for Agents dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.63) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services AC_INIT([resource-agents], m4_esyscmd([make/git-version-gen .tarball-version]), [to_be_defined@foobar.org]) AC_USE_SYSTEM_EXTENSIONS CRM_DTD_VERSION="1.0" PKG_FEATURES="" AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. 
`AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except lha_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/agent_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/agent_config.h.in to have configure include new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/agent_config.h) ALL_LINGUAS="en fr" AC_ARG_WITH(version, [ --with-version=version Override package version (if you're a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) AC_PATH_PROGS(PKGCONFIG, pkg-config) if test x"${PKGCONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi AC_ARG_WITH([systemdsystemunitdir], [AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [Directory for systemd service files])],, [with_systemdsystemunitdir=auto]) AS_IF([test "x$with_systemdsystemunitdir" = "xyes" -o "x$with_systemdsystemunitdir" = "xauto"], [ def_systemdsystemunitdir=$($PKGCONFIG --variable=systemdsystemunitdir systemd) AS_IF([test "x$def_systemdsystemunitdir" = "x"], [AS_IF([test "x$with_systemdsystemunitdir" = "xyes"], [AC_MSG_ERROR([systemd support requested but pkg-config unable to query systemd package])]) with_systemdsystemunitdir=no], [with_systemdsystemunitdir="$def_systemdsystemunitdir"])]) AS_IF([test "x$with_systemdsystemunitdir" != "xno"], [AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])]) AM_CONDITIONAL([HAVE_SYSTEMD], [test "x$with_systemdsystemunitdir" != "xno"]) dnl dnl AM_INIT_AUTOMAKE([1.11.1 foreign dist-bzip2 dist-xz]) dnl AM_INIT_AUTOMAKE([1.10.1 foreign dist-bzip2]) AC_DEFINE_UNQUOTED(AGENTS_VERSION, "$PACKAGE_VERSION", Current agents version) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== # check stolen from gnulib/m4/gnu-make.m4 if ! ${MAKE-make} --version /cannot/make/this >/dev/null 2>&1; then AC_MSG_ERROR([you don't seem to have GNU make; it is required]) fi AC_PROG_CC dnl Can force other with environment variable "CC". 
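The --with-systemdsystemunitdir handling above boils down to a single pkg-config query; a standalone rendering of the same probe, for illustration only:

    # Same query configure performs; an empty result means the systemd
    # pkg-config metadata is not installed on the build host.
    unitdir=$(pkg-config --variable=systemdsystemunitdir systemd)
    if [ -n "$unitdir" ]; then
        echo "systemd unit files will be installed to $unitdir"
    else
        echo "systemd support disabled (an error only if it was explicitly requested)"
    fi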
AM_PROG_CC_C_O AC_PROG_CC_STDC AC_PROG_AWK AC_PROG_LN_S AC_PROG_INSTALL AC_PROG_MAKE_SET AC_C_STRINGIZE AC_C_INLINE AC_TYPE_SIZE_T AC_TYPE_SSIZE_T AC_TYPE_UID_T AC_TYPE_UINT16_T AC_TYPE_UINT8_T AC_TYPE_UINT32_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CPPFLAGS="$@" AC_MSG_CHECKING(whether $CC supports "$@") AC_PREPROC_IFELSE([AC_LANG_PROGRAM([])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } extract_header_define() { AC_MSG_CHECKING(for $2 in $1) Cfile=$srcdir/extract_define.$2.${$} printf "#include \n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` AC_MSG_RESULT($value) printf $value rm -f ${Cfile}.c ${Cfile} } AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi force GCC to compile to ANSI/ANSI standard for older compilers. [default=yes]]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]]) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]], [ INITDIR="$withval" ]) OCF_ROOT_DIR="${prefix}/lib/ocf" AC_ARG_WITH(ocf-root, [ --with-ocf-root=DIR directory for OCF scripts [${OCF_ROOT_DIR}]], [ OCF_ROOT_DIR="$withval" ]) HA_RSCTMPDIR=${localstatedir}/run/resource-agents AC_ARG_WITH(rsctmpdir, [ --with-rsctmpdir=DIR directory for resource agents state files [${HA_RSCTMPDIR}]], [ HA_RSCTMPDIR="$withval" ]) AC_ARG_ENABLE([libnet], [ --enable-libnet Use libnet for ARP based funcationality, [default=try]], [enable_libnet="$enableval"], [enable_libnet=try]) BUILD_RGMANAGER=0 BUILD_LINUX_HA=0 RASSET=all AC_ARG_WITH(ras-set, [ --with-ras-set=SET build/install only linux-ha or rgmanager resource-agents [default: all]], [ RASSET="$withval" ]) if test x$RASSET = xyes || test x$RASSET = xall ; then BUILD_RGMANAGER=1 BUILD_LINUX_HA=1 fi if test x$RASSET = xlinux-ha; then BUILD_LINUX_HA=1 fi if test x$RASSET = xrgmanager; then BUILD_RGMANAGER=1 fi if test $BUILD_LINUX_HA -eq 0 && test $BUILD_RGMANAGER -eq 0; then AC_MSG_ERROR([Are you really sure you want this package?]) exit 1 fi AM_CONDITIONAL(BUILD_LINUX_HA, test $BUILD_LINUX_HA -eq 1) AM_CONDITIONAL(BUILD_RGMANAGER, test $BUILD_RGMANAGER -eq 1) AC_ARG_WITH(compat-habindir, [ --with-compat-habindir use HA_BIN directory with compatibility for the Heartbeat stack [${libexecdir}]], [], [with_compat_habindir=no]) AM_CONDITIONAL(WITH_COMPAT_HABINDIR, test "x$with_compat_habindir" != "xno") dnl =============================================== dnl General Processing dnl =============================================== echo Our Host OS: 
$host_os/$host AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Heartbeat, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done if test -z $INITDIR then INITDIR=${sysconfdir}/init.d fi AC_MSG_RESULT($INITDIR);; esac AC_SUBST(INITDIR) if test "${prefix}" = "/usr"; then INITDIRPREFIX="$INITDIR" else INITDIRPREFIX="${prefix}/$INITDIR" fi AC_SUBST(INITDIRPREFIX) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in dnl For consistency with Heartbeat, map NONE->$prefix *prefix*|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac if test "x$with_compat_habindir" != "xno" ; then libexecdir=${libdir} fi dnl Expand autoconf variables so that we dont end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl docdir is a recent addition to autotools eval docdir="`eval echo ${docdir}`" if test "x$docdir" = "x"; then docdir="`eval echo ${datadir}/doc`" fi AC_SUBST(docdir) dnl Home-grown variables eval INITDIR="${INITDIR}" for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
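The chain of eval statements above needs two rounds of expansion because autoconf leaves directory variables nested one inside another; a minimal sketch of why, with hypothetical values:

    # Why "eval x=`eval echo ${x}`" is used above: each eval peels one layer.
    prefix=/usr
    exec_prefix='${prefix}'
    bindir='${exec_prefix}/bin'
    eval bindir="`eval echo ${bindir}`"   # inner eval -> ${prefix}/bin, outer -> /usr/bin
    echo "$bindir"                        # prints /usr/bin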
REBOOT_OPTIONS="-f" POWEROFF_OPTIONS="-f" case "$host_os" in *bsd*) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" ;; *solaris*) REBOOT_OPTIONS="-n" POWEROFF_OPTIONS="-n" LDFLAGS+=" -lssp -lssp_nonshared" ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) POWEROFF_OPTIONS="-nf" REBOOT_OPTIONS="-nf" ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_DEFINE_UNQUOTED(HA_LOG_FACILITY, LOG_DAEMON, Default logging facility) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) case "$host_cpu" in s390x)U64T="%lu";; *64*) U64T="%lu";; *) U64T="%llu";; esac AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl Variables needed for substitution AC_CHECK_HEADERS(heartbeat/glue_config.h) if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then OCF_ROOT_DIR=`extract_header_define heartbeat/glue_config.h OCF_ROOT_DIR` else enable_libnet=no fi AC_DEFINE_UNQUOTED(OCF_ROOT_DIR,"$OCF_ROOT_DIR", OCF root directory - specified by the OCF standard) AC_SUBST(OCF_ROOT_DIR) GLUE_STATE_DIR=${localstatedir}/run AC_DEFINE_UNQUOTED(GLUE_STATE_DIR,"$GLUE_STATE_DIR", Where to keep state files and sockets) AC_SUBST(GLUE_STATE_DIR) AC_DEFINE_UNQUOTED(HA_VARRUNDIR,"$GLUE_STATE_DIR", Where Heartbeat keeps state files and sockets - old name) HA_VARRUNDIR="$GLUE_STATE_DIR" AC_SUBST(HA_VARRUNDIR) # Expand $prefix eval HA_RSCTMPDIR="`eval echo ${HA_RSCTMPDIR}`" AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resouce agents keep state files) AC_SUBST(HA_RSCTMPDIR) dnl Eventually move out of the heartbeat dir tree and create symlinks when needed HA_VARLIBHBDIR=${localstatedir}/lib/heartbeat AC_DEFINE_UNQUOTED(HA_VARLIBHBDIR,"$HA_VARLIBHBDIR", Whatever this used to mean) AC_SUBST(HA_VARLIBHBDIR) OCF_RA_DIR="${OCF_ROOT_DIR}/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) OCF_RA_DIR_PREFIX="$OCF_RA_DIR" AC_SUBST(OCF_RA_DIR_PREFIX) OCF_LIB_DIR="${OCF_ROOT_DIR}/lib" AC_DEFINE_UNQUOTED(OCF_LIB_DIR,"$OCF_LIB_DIR", Location for shared code for OCF RAs) AC_SUBST(OCF_LIB_DIR) OCF_LIB_DIR_PREFIX="$OCF_LIB_DIR" AC_SUBST(OCF_LIB_DIR_PREFIX) dnl =============================================== dnl rgmanager ras bits dnl =============================================== LOGDIR=${localstatedir}/log/cluster CLUSTERDATA=${datadir}/cluster AC_SUBST([LOGDIR]) AC_SUBST([CLUSTERDATA]) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH AM_PATH_PYTHON AC_CHECK_PROGS(MAKE, gmake make) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PING, ping, /bin/ping) AC_PATH_PROGS(IFCONFIG, ifconfig, /sbin/ifconfig) AC_PATH_PROGS(MAILCMD, mailx mail, mail) AC_PATH_PROGS(EGREP, egrep) AC_SUBST(MAILCMD) AC_SUBST(EGREP) AC_SUBST(SHELL) AC_SUBST(PING) AC_SUBST(TEST) AC_PATH_PROGS(ROUTE, route) AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command) AC_MSG_CHECKING(ifconfig option to list interfaces) for IFCONFIG_A_OPT in "-A" "-a" "" do $IFCONFIG $IFCONFIG_A_OPT > /dev/null 2>&1 if test 
"$?" = 0 then AC_DEFINE_UNQUOTED(IFCONFIG_A_OPT, "$IFCONFIG_A_OPT", option for ifconfig command) AC_MSG_RESULT($IFCONFIG_A_OPT) break fi done AC_SUBST(IFCONFIG_A_OPT) if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) AC_CHECK_LIB(gnugetopt, getopt_long) dnl if available if test "x${enable_thread_safe}" = "xyes"; then GPKGNAME="gthread-2.0" else GPKGNAME="glib-2.0" fi if $PKGCONFIG --exists $GPKGNAME then GLIBCONFIG="$PKGCONFIG $GPKGNAME" else set -x echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH $PKGCONFIG --exists $GPKGNAME; echo $? $PKGCONFIG --cflags $GPKGNAME; echo $? $PKGCONFIG $GPKGNAME; echo $? set +x AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE}) fi AC_MSG_RESULT(using $GLIBCONFIG) if test "X$GLIBCONFIG" != X; then AC_MSG_CHECKING(for special glib includes: ) GLIBHEAD=`$GLIBCONFIG --cflags` AC_MSG_RESULT($GLIBHEAD) CPPFLAGS="$CPPFLAGS $GLIBHEAD" AC_MSG_CHECKING(for glib library flags) GLIBLIB=`$GLIBCONFIG --libs` AC_MSG_RESULT($GLIBLIB) LIBS="$LIBS $GLIBLIB" fi dnl ======================================================================== dnl Headers dnl ======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS([arpa/inet.h]) AC_CHECK_HEADERS([fcntl.h]) AC_CHECK_HEADERS([limits.h]) AC_CHECK_HEADERS([malloc.h]) AC_CHECK_HEADERS([netdb.h]) AC_CHECK_HEADERS([netinet/in.h]) AC_CHECK_HEADERS([sys/file.h]) AC_CHECK_HEADERS([sys/ioctl.h]) AC_CHECK_HEADERS([sys/param.h]) AC_CHECK_HEADERS([sys/time.h]) AC_CHECK_HEADERS([syslog.h]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_FUNC_FORK AC_FUNC_STRNLEN AC_CHECK_FUNCS([alarm gettimeofday inet_ntoa memset mkdir socket uname]) AC_CHECK_FUNCS([strcasecmp strchr strdup strerror strrchr strspn strstr strtol strtoul]) AC_PATH_PROGS(REBOOT, reboot, /sbin/reboot) AC_SUBST(REBOOT) AC_SUBST(REBOOT_OPTIONS) AC_DEFINE_UNQUOTED(REBOOT, "$REBOOT", path to the reboot command) AC_DEFINE_UNQUOTED(REBOOT_OPTIONS, "$REBOOT_OPTIONS", reboot options) AC_PATH_PROGS(POWEROFF_CMD, poweroff, /sbin/poweroff) AC_SUBST(POWEROFF_CMD) AC_SUBST(POWEROFF_OPTIONS) AC_DEFINE_UNQUOTED(POWEROFF_CMD, "$POWEROFF_CMD", path to the poweroff command) AC_DEFINE_UNQUOTED(POWEROFF_OPTIONS, "$POWEROFF_OPTIONS", poweroff options) AC_PATH_PROGS(XSLTPROC, xsltproc) AM_CONDITIONAL(BUILD_DOC, test "x$XSLTPROC" != "x" ) if test "x$XSLTPROC" = "x"; then AC_MSG_WARN([xsltproc not installed, unable to (re-)build manual pages]) fi AC_SUBST(XSLTPROC) AC_PATH_PROGS(POD2MAN, pod2man) AM_CONDITIONAL(BUILD_POD_DOC, test "x$POD2MAN" != "x" ) if test "x$POD2MAN" = "x"; then AC_MSG_WARN([pod2man not installed, unable to (re-)build ldirector manual page]) fi AC_SUBST(POD2MAN) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) dnl ======================================================================== dnl sfex dnl ======================================================================== build_sfex=no case $host_os in *Linux*|*linux*) if test 
"$ac_cv_header_heartbeat_glue_config_h" = "yes"; then build_sfex=yes fi ;; esac AM_CONDITIONAL(BUILD_SFEX, test "$build_sfex" = "yes" ) dnl ======================================================================== dnl tickle (needs port to BSD platforms) dnl ======================================================================== AC_CHECK_MEMBERS([struct iphdr.saddr],,,[[#include ]]) AM_CONDITIONAL(BUILD_TICKLE, test "$ac_cv_member_struct_iphdr_saddr" = "yes" ) dnl ======================================================================== dnl libnet dnl ======================================================================== libnet="" libnet_version="none" LIBNETLIBS="" LIBNETDEFINES="" AC_MSG_CHECKING(if libnet is required) libnet_fatal=$enable_libnet case $enable_libnet in no) ;; yes|libnet10|libnet11|10|11) libnet_fatal=yes;; try) case $host_os in *Linux*|*linux*) libnet_fatal=no;; *) libnet_fatal=yes;; dnl legacy behavior esac ;; *) libnet_fatal=yes; enable_libnet=try;; esac AC_MSG_RESULT($libnet_fatal) if test "x$enable_libnet" != "xno"; then AC_PATH_PROGS(LIBNETCONFIG, libnet-config) AC_CHECK_LIB(nsl, t_open) dnl -lnsl AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(net, libnet_get_hwaddr, LIBNETLIBS=" -lnet", []) fi AC_MSG_CHECKING(for libnet) if test "x$LIBNETLIBS" != "x" -o "x$enable_libnet" = "xlibnet11"; then LIBNETDEFINES="" if test "$ac_cv_lib_nsl_t_open" = yes; then LIBNETLIBS="-lnsl $LIBNETLIBS" fi if test "$ac_cv_lib_socket_socket" = yes; then LIBNETLIBS="-lsocket $LIBNETLIBS" fi libnet=net libnet_version="libnet1.1" fi if test "x$enable_libnet" = "xtry" -o "x$enable_libnet" = "xlibnet10"; then if test "x$LIBNETLIBS" = x -a "x${LIBNETCONFIG}" != "x" ; then LIBNETDEFINES="`$LIBNETCONFIG --defines` `$LIBNETCONFIG --cflags`"; LIBNETLIBS="`$LIBNETCONFIG --libs`"; libnet_version="libnet1.0 (old)" case $LIBNETLIBS in *-l*) libnet=`echo $LIBNETLIBS | sed 's%.*-l%%'`;; *) libnet_version=none;; esac CPPFLAGS="$CPPFLAGS $LIBNETDEFINES" AC_CHECK_HEADERS(libnet.h) if test "$ac_cv_header_libnet_h" = no; then libnet_version=none fi fi fi AC_MSG_RESULT(found $libnet_version) if test "$libnet_version" = none; then LIBNETLIBS="" LIBNETDEFINES="" if test $libnet_fatal = yes; then AC_MSG_ERROR(libnet not found) fi else AC_CHECK_LIB($libnet,libnet_init, [new_libnet=yes; AC_DEFINE(HAVE_LIBNET_1_1_API, 1, Libnet 1.1 API)], [new_libnet=no; AC_DEFINE(HAVE_LIBNET_1_0_API, 1, Libnet 1.0 API)],$LIBNETLIBS) AC_SUBST(LIBNETLIBS) fi if test "$new_libnet" = yes; then AC_MSG_CHECKING(for libnet API 1.1.4: ) save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -fgnu89-inline -Wall -Werror" AC_COMPILE_IFELSE([ AC_LANG_SOURCE(#include int main(){libnet_t *l=NULL; libnet_pblock_record_ip_offset(l, l->total_size); return(0); })], [AC_MSG_RESULT(no)], [AC_DEFINE(HAVE_LIBNET_1_1_4_API, 1, Libnet 1.1.4 API) AC_MSG_RESULT(yes)]) CFLAGS="$save_CFLAGS" fi sendarp_linux=0 case $host_os in *Linux*|*linux*) sendarp_linux=1;; esac AC_SUBST(LIBNETLIBS) AC_SUBST(LIBNETDEFINES) AM_CONDITIONAL(SENDARP_LINUX, test $sendarp_linux = 1 ) AM_CONDITIONAL(USE_LIBNET, test "x$libnet_version" != "xnone" ) dnl ************************************************************************ dnl * Check for netinet/icmp6.h to enable the IPv6addr resource agent AC_CHECK_HEADERS(netinet/icmp6.h,[],[],[#include ]) AM_CONDITIONAL(USE_IPV6ADDR_AGENT, test "$ac_cv_header_netinet_icmp6_h" = yes && test "$ac_cv_header_heartbeat_glue_config_h" = yes) AM_CONDITIONAL(IPV6ADDR_COMPATIBLE, test "$ac_cv_header_netinet_icmp6_h" = yes) dnl 
======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. CC_ERRORS="" CC_EXTRAS="" if export -p | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" enable_fatal_warnings=no else CFLAGS="$CFLAGS -ggdb3" # We had to eliminate -Wnested-externs because of libtool changes # Also remove -Waggregate-return because we use one libnet # call which returns a struct EXTRA_FLAGS="-fgnu89-inline -fstack-protector-all -Wall -Wbad-function-cast -Wcast-qual -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat=2 -Wformat-security -Wformat-nonliteral -Winline -Wmissing-prototypes -Wmissing-declarations -Wmissing-format-attribute -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wunsigned-char -Wwrite-strings" # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $j then CC_EXTRAS="$CC_EXTRAS $j" fi done dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'` AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4) dnl System specific options case "$host_os" in *linux*|*bsd*) if test "${enable_fatal_warnings}" = "unknown"; then enable_fatal_warnings=yes fi ;; esac if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then enable_fatal_warnings=yes else enable_fatal_warnings=no fi if test "x${enable_ansi}" != xno && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
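cc_supports_flag above probes each candidate warning flag before it is added to CC_EXTRAS; a standalone approximation of that probe follows (the configure helper drives it through AC_PREPROC_IFELSE, this sketch simply compiles an empty program instead):

    # Accept a flag only if the compiler takes it without complaint.
    flag="-Wwrite-strings"                       # any candidate from EXTRA_FLAGS
    if echo 'int main(void){return 0;}' | \
        ${CC:-cc} -Werror $flag -x c - -o /dev/null 2>/dev/null; then
        echo "adding $flag"
    fi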
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LOCALE) AC_SUBST(CC) AC_SUBST(MAKE) dnl The Makefiles and shell scripts we output AC_CONFIG_FILES(Makefile \ include/Makefile \ heartbeat/Makefile \ heartbeat/ocf-binaries \ heartbeat/ocf-directories \ heartbeat/ocf-shellfuncs \ heartbeat/shellfuncs \ +systemd/Makefile \ tools/Makefile \ tools/ocf-tester \ tools/ocft/Makefile \ tools/ocft/ocft \ tools/ocft/caselib \ tools/ocft/README \ tools/ocft/README.zh_CN \ ldirectord/Makefile \ ldirectord/ldirectord \ ldirectord/init.d/Makefile \ ldirectord/init.d/ldirectord \ ldirectord/init.d/ldirectord.debian \ ldirectord/init.d/ldirectord.debian.default \ ldirectord/systemd/Makefile \ ldirectord/systemd/ldirectord.service \ ldirectord/logrotate.d/Makefile \ ldirectord/OCF/Makefile \ ldirectord/OCF/ldirectord \ doc/Makefile \ doc/man/Makefile \ rgmanager/Makefile \ rgmanager/src/Makefile \ rgmanager/src/resources/Makefile \ rgmanager/src/resources/utils/Makefile \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION}]) AC_MSG_RESULT([ Build Version = $Format:%H$]) AC_MSG_RESULT([ Features =${PKG_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ Documentation = ${docdir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([ HA_BIN directory prefix = ${libexecdir}]) AC_MSG_RESULT([ RA state files = ${HA_RSCTMPDIR}]) AC_MSG_RESULT([ AIS Plugins = ${LCRSODIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff --git a/heartbeat/LVM b/heartbeat/LVM index 7ebedac6f..7ef1a7e31 100755 --- a/heartbeat/LVM +++ b/heartbeat/LVM @@ -1,721 +1,727 @@ #!/bin/sh # # # LVM # # Description: Manages an LVM volume as an HA resource # # # Author: Alan Robertson # Support: users@clusterlabs.org # License: GNU General Public License (GPL) # Copyright: (C) 2002 - 2005 International Business Machines, Inc. # # This code significantly inspired by the LVM resource # in FailSafe by Lars Marowsky-Bree # # # An example usage in /etc/ha.d/haresources: # node1 10.0.0.170 ServeRAID::1::1 LVM::myvolname # # See usage() function below for more details... # # OCF parameters are as below: # OCF_RESKEY_volgrpname # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### usage() { methods=`LVM_methods` methods=`echo $methods | tr ' ' '|'` cat < 1.0 Resource script for LVM. It manages an Linux Volume Manager volume (LVM) as an HA resource. Controls the availability of an LVM Volume Group The name of volume group. Volume group name If set, the volume group will be activated exclusively. 
This option works one of two ways. If the volume group has the cluster attribute set, then the volume group will be activated exclusively using clvmd across the cluster. If the cluster attribute is not set, the volume group will be activated exclusively using a tag and the volume_list filter. When the tag option is in use, the volume_list in lvm.con must be initialized. This can be as simple as setting 'volume_list = []' depending on your setup. Exclusive activation If "exclusive" is set on a non clustered volume group, this overrides the tag to be used. Exclusive activation tag If set, the volume group will be activated partially even with some physical volumes missing. It helps to set to true when using mirrored logical volumes. Activate VG partially when missing PVs EOF } # # methods: What methods/operations do we support? # LVM_methods() { cat < /dev/null 2>&1 if [ $? -ne 0 ]; then return fi ## # Now check to see if the initrd has been updated. # If not, the machine could boot and activate the VG outside # the control of pacemaker ## if [ "$(find /boot -name *.img -newer /etc/lvm/lvm.conf)" = "" ]; then ocf_log warn "LVM: Improper setup detected" ocf_log warn "* initrd image needs to be newer than lvm.conf" # While dangerous if not done the first time, there are many # cases where we don't simply want to fail here. Instead, # keep warning until the user remakes the initrd - or has # it done for them by upgrading the kernel. # # initrd can be updated using this command. # dracut -H -f /boot/initramfs-$(uname -r).img $(uname -r) # fi } ## # does this vg have our tag ## check_tags() { local owner=`vgs -o tags --noheadings $OCF_RESKEY_volgrpname | tr -d ' '` if [ -z "$owner" ]; then # No-one owns this VG yet return 1 fi if [ "$OUR_TAG" = "$owner" ]; then # yep, this is ours return 0 fi # some other tag is set on this vg return 2 } strip_tags() { local i for i in `vgs --noheadings -o tags $OCF_RESKEY_volgrpname | sed s/","/" "/g`; do ocf_log info "Stripping tag, $i" # LVM version 2.02.98 allows changing tags if PARTIAL vgchange --deltag $i $OCF_RESKEY_volgrpname done if [ ! -z `vgs -o tags --noheadings $OCF_RESKEY_volgrpname | tr -d ' '` ]; then ocf_exit_reason "Failed to remove ownership tags from $OCF_RESKEY_volgrpname" return $OCF_ERR_GENERIC fi return $OCF_SUCCESS } set_tags() { check_tags case $? in 0) # we already own it. return $OCF_SUCCESS ;; 2) # other tags are set, strip them before setting if ! strip_tags; then return $OCF_ERR_GENERIC fi ;; *) : ;; esac vgchange --addtag $OUR_TAG $OCF_RESKEY_volgrpname if [ $? -ne 0 ]; then ocf_exit_reason "Failed to add ownership tag to $OCF_RESKEY_volgrpname" return $OCF_ERR_GENERIC fi ocf_log info "New tag \"$OUR_TAG\" added to $OCF_RESKEY_volgrpname" return $OCF_SUCCESS } # # Return LVM status (silently) # LVM_status() { local rc=1 loglevel="debug" # Set the log level of the error message if [ "X${2}" = "X" ]; then loglevel="err" if ocf_is_probe; then loglevel="warn" else if [ ${OP_METHOD} = "stop" ]; then loglevel="info" fi fi fi if [ -d /dev/$1 ]; then test "`cd /dev/$1 && ls`" != "" rc=$? if [ $rc -ne 0 ]; then ocf_exit_reason "VG $1 with no logical volumes is not supported by this RA!" fi fi if [ $rc -ne 0 ]; then ocf_log $loglevel "LVM Volume $1 is not available (stopped)" rc=$OCF_NOT_RUNNING else case $(get_vg_mode) in 1) # exclusive with tagging. # If vg is running, make sure the correct tag is present. Otherwise we # can not guarantee exclusive activation. if ! 
check_tags; then ocf_exit_reason "WARNING: $OCF_RESKEY_volgrpname is active without the cluster tag, \"$OUR_TAG\"" rc=$OCF_ERR_GENERIC fi # make sure the environment for tags activation is still valid if ! verify_tags_environment; then rc=$OCF_ERR_GENERIC fi # let the user know if their initrd is older than lvm.conf. check_initrd_warning ;; *) : ;; esac fi if [ "X${2}" = "X" ]; then # status call return return $rc fi # Report on LVM volume status to stdout... if [ $rc -eq 0 ]; then echo "Volume $1 is available (running)" else echo "Volume $1 is not available (stopped)" fi return $rc } get_activate_options() { local options="-a" case $(get_vg_mode) in 0) options="${options}ly";; 1) options="${options}y --config activation{volume_list=[\"@${OUR_TAG}\"]}";; 2) options="${options}ey";; esac if ocf_is_true "$OCF_RESKEY_partial_activation" ; then options="${options} --partial" fi # for clones (clustered volume groups), we'll also have to force # monitoring, even if disabled in lvm.conf. if ocf_is_clone; then options="$options --monitor y" fi echo $options } ## # Attempt to deactivate vg cluster wide and then start the vg exclusively ## retry_exclusive_start() { local vgchange_options="$(get_activate_options)" # Deactivate each LV in the group one by one cluster wide set -- $(lvs -o name,attr --noheadings $OCF_RESKEY_volgrpname 2> /dev/null) while [ $# -ge 2 ]; do case $2 in ????ao*) # open LVs cannot be deactivated. return $OCF_ERR_GENERIC;; *) if ! lvchange -an $OCF_RESKEY_volgrpname/$1; then ocf_exit_reason "Unable to perform required deactivation of $OCF_RESKEY_volgrpname/$1 before starting" return $OCF_ERR_GENERIC fi ;; esac shift 2 done ocf_run vgchange $vgchange_options $OCF_RESKEY_volgrpname } # # Enable LVM volume # LVM_start() { local vgchange_options="$(get_activate_options)" local vg=$1 local clvmd=0 + # systemd drop-in to stop process before storage services during + # shutdown/reboot + if ps -p 1 | grep -q systemd ; then + systemd_drop_in "99-LVM" "After" "blk-availability.service" + fi + # TODO: This MUST run vgimport as well ocf_log info "Activating volume group $vg" if [ "$LVM_MAJOR" -eq "1" ]; then ocf_run vgscan $vg else ocf_run vgscan --cache fi case $(get_vg_mode) in 2) clvmd=1 ;; 1) if ! set_tags; then return $OCF_ERR_GENERIC fi ;; *) : ;; esac if ! ocf_run vgchange $vgchange_options $vg; then if [ $clvmd -eq 0 ]; then return $OCF_ERR_GENERIC fi # Failure to exclusively activate cluster vg.: # This could be caused by a remotely active LV, Attempt # to disable volume group cluster wide and try again. # Allow for some settling sleep 5 if ! retry_exclusive_start; then return $OCF_ERR_GENERIC fi fi if LVM_status $vg; then : OK Volume $vg activated just fine! return $OCF_SUCCESS else ocf_exit_reason "LVM: $vg did not activate correctly" return $OCF_NOT_RUNNING fi } # # Disable the LVM volume # LVM_stop() { local res=$OCF_ERR_GENERIC local vgchange_options="-aln" local vg=$1 if ! vgs $vg > /dev/null 2>&1; then ocf_log info "Volume group $vg not found" return $OCF_SUCCESS fi ocf_log info "Deactivating volume group $vg" case $(get_vg_mode) in 1) vgchange_options="-an" ;; esac for i in $(seq 10) do ocf_run vgchange $vgchange_options $vg res=$? 
if LVM_status $vg; then ocf_exit_reason "LVM: $vg did not stop correctly" res=1 fi if [ $res -eq 0 ]; then break fi res=$OCF_ERR_GENERIC ocf_log warn "$vg still Active" ocf_log info "Retry deactivating volume group $vg" sleep 1 which udevadm > /dev/null 2>&1 && udevadm settle --timeout=5 done case $(get_vg_mode) in 1) if [ $res -eq 0 ]; then strip_tags res=$? fi ;; esac return $res } # # Check whether the OCF instance parameters are valid # LVM_validate_all() { check_binary $AWK ## # lvmetad is a daemon that caches lvm metadata to improve the # performance of LVM commands. This daemon should never be used when # volume groups exist that are being managed by the cluster. The lvmetad # daemon introduces a response lag, where certain LVM commands look like # they have completed (like vg activation) when in fact the command # is still in progress by the lvmetad. This can cause reliability issues # when managing volume groups in the cluster. For Example, if you have a # volume group that is a dependency for another application, it is possible # the cluster will think the volume group is activated and attempt to start # the application before volume group is really accesible... lvmetad is bad. ## lvm dumpconfig global/use_lvmetad | grep 'use_lvmetad.*=.*1' > /dev/null 2>&1 if [ $? -eq 0 ]; then # for now warn users that lvmetad is enabled and that they should disable it. In the # future we may want to consider refusing to start, or killing the lvmetad daemon. ocf_log warn "Disable lvmetad in lvm.conf. lvmetad should never be enabled in a clustered environment. Set use_lvmetad=0 and kill the lvmetad process" fi ## # Off-the-shelf tests... ## VGOUT=`vgck ${VOLUME} 2>&1` if [ $? -ne 0 ]; then # Inconsistency might be due to missing physical volumes, which doesn't # automatically mean we should fail. If partial_activation=true then # we should let start try to handle it, or if no PVs are listed as # "unknown device" then another node may have marked a device missing # where we have access to all of them and can start without issue. if vgs -o pv_attr --noheadings $OCF_RESKEY_volgrpname 2>/dev/null | grep 'm' > /dev/null 2>&1; then case $(vgs -o attr --noheadings $OCF_RESKEY_volgrpname | tr -d ' ') in ???p??*) if ! ocf_is_true "$OCF_RESKEY_partial_activation" ; then # We are missing devices and cannot activate partially ocf_exit_reason "Volume group [$VOLUME] has devices missing. Consider partial_activation=true to attempt to activate partially" exit $OCF_ERR_GENERIC else # We are missing devices but are allowed to activate partially. # Assume that caused the vgck failure and carry on ocf_log warn "Volume group inconsistency detected with missing device(s) and partial_activation enabled. Proceeding with requested action." fi ;; esac # else the vg is partial but all devices are accounted for, so another # node must have marked the device missing. Proceed. else # vgck failure was for something other than missing devices ocf_exit_reason "Volume group [$VOLUME] does not exist or contains error! ${VGOUT}" exit $OCF_ERR_GENERIC fi fi ## # Does the Volume Group exist? ## if [ "$LVM_MAJOR" = "1" ]; then VGOUT=`vgdisplay ${VOLUME} 2>&1` else VGOUT=`vgdisplay -v ${VOLUME} 2>&1` fi if [ $? -ne 0 ]; then ocf_exit_reason "Volume group [$VOLUME] does not exist or contains error! ${VGOUT}" exit $OCF_ERR_GENERIC fi if lvs --noheadings -o segtype | grep -q "cache"; then if ! lvs --noheadings -o cache_mode "$OCF_RESKEY_volgrpname" | grep -q "writethrough"; then ocf_log warn "LVM CACHE IS NOT IN WRITETHROUGH MODE. 
THIS IS NOT A SUPPORTED CONFIGURATION." fi fi ## # If exclusive activation is not enabled, then # further checking of proper setup is not necessary ## if ! ocf_is_true "$OCF_RESKEY_exclusive"; then return $OCF_SUCCESS; fi ## # Having cloned lvm resources with exclusive vg activation makes no sense at all. ## if ocf_is_clone; then ocf_exit_reason "cloned lvm resources can not be activated exclusively" exit $OCF_ERR_CONFIGURED fi ## # Make sure the cluster attribute is set and clvmd is up when exclusive # activation is enabled. Otherwise we can't exclusively activate the volume group. ## case $(get_vg_mode) in 1) # exclusive activation using tags if ! verify_tags_environment; then exit $OCF_ERR_GENERIC fi ;; 2) # exclusive activation with clvmd ## # verify is clvmd running ## if ! ps -C clvmd > /dev/null 2>&1; then ocf_exit_reason "$OCF_RESKEY_volgrpname has the cluster attribute set, but 'clvmd' is not running" exit $OCF_ERR_GENERIC fi ;; *) : ;; esac return $OCF_SUCCESS } # # 'main' starts here... # if [ $# -ne 1 ] then usage exit $OCF_ERR_ARGS fi case $1 in meta-data) meta_data exit $OCF_SUCCESS;; methods) LVM_methods exit $?;; usage) usage exit $OCF_SUCCESS;; *) ;; esac if [ -z "$OCF_RESKEY_volgrpname" ] then ocf_exit_reason "You must identify the volume group name!" exit $OCF_ERR_CONFIGURED fi # Get the LVM version number, for this to work we assume(thanks to panjiam): # # LVM1 outputs like this # # # vgchange --version # vgchange: Logical Volume Manager 1.0.3 # Heinz Mauelshagen, Sistina Software 19/02/2002 (IOP 10) # # LVM2 and higher versions output in this format # # # vgchange --version # LVM version: 2.00.15 (2004-04-19) # Library version: 1.00.09-ioctl (2004-03-31) # Driver version: 4.1.0 LVM_VERSION=`vgchange --version 2>&1 | \ $AWK '/Logical Volume Manager/ {print $5"\n"; exit; } /LVM version:/ {printf $3"\n"; exit;}'` rc=$? if ( [ $rc -ne 0 ] || [ -z "$LVM_VERSION" ] ) then ocf_exit_reason "LVM: $1 could not determine LVM version. Try 'vgchange --version' manually and modify $0 ?" exit $OCF_ERR_INSTALLED fi LVM_MAJOR="${LVM_VERSION%%.*}" VOLUME=$OCF_RESKEY_volgrpname OP_METHOD=$1 if [ -n "$OCF_RESKEY_tag" ]; then OUR_TAG=$OCF_RESKEY_tag fi # What kind of method was invoked? case "$1" in start) LVM_validate_all LVM_start $VOLUME exit $?;; stop) LVM_stop $VOLUME exit $?;; status) LVM_status $VOLUME $1 exit $?;; monitor) LVM_status $VOLUME exit $?;; validate-all) LVM_validate_all ;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac diff --git a/heartbeat/Raid1 b/heartbeat/Raid1 index ffd71cb1c..c7a0d760d 100755 --- a/heartbeat/Raid1 +++ b/heartbeat/Raid1 @@ -1,556 +1,562 @@ #!/bin/sh # # # License: GNU General Public License (GPL) # Support: users@clusterlabs.org # # Raid1 # Description: Manages a Linux software RAID device on a shared storage medium. # Original Author: Eric Z. Ayers (eric.ayers@compgen.com) # Original Release: 25 Oct 2000 # RAID patches: http://people.redhat.com/mingo/raid-patches/ # Word to the Wise: http://lwn.net/2000/0810/a/raid-faq.php3 # Sympathetic Ear: mailto:linux-raid@vger.kernel.org # # usage: $0 {start|stop|status|monitor|validate-all|usage|meta-data} # # # EXAMPLE config file /etc/raidtab.md0 # This file must exist on both machines! 
# # raiddev /dev/md0 # raid-level 1 # nr-raid-disks 2 # chunk-size 64k # persistent-superblock 1 # #nr-spare-disks 0 # device /dev/sda1 # raid-disk 0 # device /dev/sdb1 # raid-disk 1 # # EXAMPLE config file /etc/mdadm.conf (for more info:man mdadm.conf) # # DEVICE /dev/sdb1 /dev/sdc1 # ARRAY /dev/md0 UUID=4a865b55:ba27ef8d:29cd5701:6fb42799 ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### usage() { cat <<-EOT usage: $0 {start|stop|status|monitor|validate-all|usage|meta-data} EOT } meta_data() { cat < 1.0 This resource agent manages Linux software RAID (MD) devices on a shared storage medium. It uses mdadm(8) to start, stop, and monitor the MD devices. Raidtools are supported, but deprecated. See https://raid.wiki.kernel.org/index.php/Linux_Raid for more information. Manages Linux software RAID (MD) devices on shared storage The RAID configuration file, e.g. /etc/mdadm.conf. RAID config file One or more block devices to use, space separated. Alternatively, set to "auto" to manage all devices specified in raidconf. block device The value for the homehost directive; this is an mdadm feature to protect RAIDs against being activated by accident. It is recommended to create RAIDs managed by the cluster with "homehost" set to a special value, so they are not accidentially auto-assembled by nodes not supposed to own them. Homehost for mdadm If processes or kernel threads are using the array, it cannot be stopped. We will try to stop processes, first by sending TERM and then, if that doesn't help in $PROC_CLEANUP_TIME seconds, using KILL. The lsof(8) program is required to get the list of array users. Of course, the kernel threads cannot be stopped this way. If the processes are critical for data integrity, then set this parameter to false. Note that in that case the stop operation will fail and the node will be fenced. force stop processes using the array Wait until udevd creates a device in the start operation. On a normally loaded host this should happen quickly, but you may be unlucky. If you are not using udev set this to "no". udev Activating the same md RAID array on multiple nodes at the same time will result in data corruption and thus is forbidden by default. A safe example could be an array that is only named identically across all nodes, but is in fact distinct. Only set this to "true" if you know what you are doing! force ability to run as a clone END } udev_settle() { if ocf_is_true $WAIT_FOR_UDEV; then udevadm settle $* fi } list_conf_arrays() { test -f $RAIDCONF || { ocf_exit_reason "$RAIDCONF gone missing!" exit $OCF_ERR_GENERIC } grep ^ARRAY $RAIDCONF | awk '{print $2}' } forall() { local func=$1 local checkall=$2 local mddev rc=0 for mddev in $RAIDDEVS; do $func $mddev rc=$(($rc | $?)) [ "$checkall" = all ] && continue [ $rc -ne 0 ] && return $rc done return $rc } are_arrays_stopped() { local rc mddev for mddev in $RAIDDEVS; do raid1_monitor_one $mddev rc=$? [ $rc -ne $OCF_NOT_RUNNING ] && break done test $rc -eq $OCF_NOT_RUNNING } md_assemble() { local mddev=$1 $MDADM --assemble $mddev --config=$RAIDCONF $MDADM_HOMEHOST udev_settle --exit-if-exists=$mddev } # # START: Start up the RAID device # raid1_start() { local rc raid1_monitor rc=$? if [ $rc -eq $OCF_SUCCESS ]; then # md already online, nothing to do. 
return $OCF_SUCCESS fi if [ $rc -ne $OCF_NOT_RUNNING ]; then # If the array is in a broken state, this agent doesn't # know how to repair that. ocf_exit_reason "$RAIDDEVS in a broken state; cannot start (rc=$rc)" return $OCF_ERR_GENERIC fi if [ $HAVE_RAIDTOOLS = "true" ]; then # Run raidstart to start up the RAID array $RAIDSTART --configfile $RAIDCONF $MDDEV else forall md_assemble all fi raid1_monitor if [ $? -eq $OCF_SUCCESS ]; then return $OCF_SUCCESS else ocf_exit_reason "Couldn't start RAID for $RAIDDEVS" return $OCF_ERR_GENERIC fi } # # STOP: stop the RAID device # mark_readonly() { local mddev=$1 local rc ocf_log info "Attempting to mark array $mddev readonly" $MDADM --readonly $mddev --config=$RAIDCONF rc=$? if [ $rc -ne 0 ]; then ocf_exit_reason "Failed to set $mddev readonly (rc=$rc)" fi return $rc } mknod_raid1_stop() { # first create a block device file, then try to stop the # array local rc n tmp_block_file n=`echo $1 | sed 's/[^0-9]*//'` if ! ocf_is_decimal "$n"; then ocf_log warn "could not get the minor device number from $1" return 1 fi tmp_block_file="$HA_RSCTMP/${OCF_RESOURCE_INSTANCE}-`basename $1`" rm -f $tmp_block_file ocf_log info "block device file $1 missing, creating one in order to stop the array" mknod $tmp_block_file b 9 $n $MDADM --stop $tmp_block_file --config=$RAIDCONF --wait-clean -W rc=$? rm -f $tmp_block_file return $rc } raid1_stop_one() { ocf_log info "Stopping array $1" if [ -b "$1" ]; then $MDADM --stop $1 --config=$RAIDCONF --wait-clean -W && return else # newer mdadm releases can stop arrays when given the # basename; try that first $MDADM --stop `basename $1` --config=$RAIDCONF --wait-clean -W && return # otherwise create a block device file mknod_raid1_stop $1 fi } get_users_pids() { local mddev=$1 local outp l ocf_log debug "running lsof to list $mddev users..." outp=`lsof $mddev | tail -n +2` echo "$outp" | awk '{print $2}' | sort -u echo "$outp" | while read l; do ocf_log warn "$l" done } stop_raid_users() { local pids pids=`forall get_users_pids all | sort -u` if [ -z "$pids" ]; then ocf_log warn "lsof reported no users holding arrays" return 2 else ocf_stop_processes TERM $PROC_CLEANUP_TIME $pids fi } stop_arrays() { if [ $HAVE_RAIDTOOLS = "true" ]; then $RAIDSTOP --configfile $RAIDCONF $MDDEV else forall raid1_stop_one all fi } showusers() { local disk for disk; do if have_binary lsof; then ocf_log info "running lsof to list $disk users..." ocf_run -warn lsof $disk fi if [ -d /sys/block/$disk/holders ]; then ocf_log info "ls -l /sys/block/$disk/holders" ocf_run -warn ls -l /sys/block/$disk/holders fi done } raid1_stop() { local rc # See if the MD device is already cleanly stopped: if are_arrays_stopped; then return $OCF_SUCCESS fi # Turn off raid if ! stop_arrays; then if ocf_is_true $FORCESTOP; then if have_binary lsof; then stop_raid_users case $? in 2) false;; *) stop_arrays;; esac else ocf_log warn "install lsof(8) to list users holding the disk" false fi else false fi fi rc=$? if [ $rc -ne 0 ]; then ocf_log warn "Couldn't stop RAID for $RAIDDEVS (rc=$rc)" showusers $RAIDDEVS if [ $HAVE_RAIDTOOLS != "true" ]; then forall mark_readonly all fi return $OCF_ERR_GENERIC fi if are_arrays_stopped; then return $OCF_SUCCESS fi ocf_exit_reason "RAID $RAIDDEVS still active after stop command!" 
return $OCF_ERR_GENERIC } # # monitor: a less noisy status # raid1_monitor_one() { local mddev=$1 - local md=`echo $mddev | sed 's,/dev/,,'` + local md= local rc local TRY_READD=0 local pbsize # check if the md device exists first # but not if we are in the stop operation # device existence is important only for the running arrays - if [ "$__OCF_ACTION" != "stop" -a ! -b $mddev ]; then - ocf_log info "$mddev is not a block device" - return $OCF_NOT_RUNNING + if [ "$__OCF_ACTION" != "stop" ]; then + if [ -h "$mddev" ]; then + md=$(ls $mddev -l | awk -F'/' '{print $NF}') + elif [ -b "$mddev" ]; then + md=$(echo $mddev | sed 's,/dev/,,') + else + ocf_log info "$mddev is not a block device" + return $OCF_NOT_RUNNING + fi fi if ! grep -e "^$md[ \t:]" /proc/mdstat >/dev/null ; then ocf_log info "$md not found in /proc/mdstat" return $OCF_NOT_RUNNING fi if [ $HAVE_RAIDTOOLS != "true" ]; then $MDADM --detail --test $mddev >/dev/null 2>&1 ; rc=$? case $rc in 0) ;; 1) ocf_log warn "$mddev has at least one failed device." TRY_READD=1 ;; 2) ocf_exit_reason "$mddev has failed." return $OCF_ERR_GENERIC ;; 4) ocf_exit_reason "mdadm failed on $mddev." return $OCF_ERR_GENERIC ;; *) ocf_exit_reason "mdadm returned an unknown result ($rc)." return $OCF_ERR_GENERIC ;; esac fi if [ "$__OCF_ACTION" = "monitor" -a "$OCF_RESKEY_CRM_meta_interval" != 0 \ -a $TRY_READD -eq 1 -a $OCF_CHECK_LEVEL -gt 0 ]; then ocf_log info "Attempting recovery sequence to re-add devices on $mddev:" $MDADM $mddev --fail detached $MDADM $mddev --remove failed $MDADM $mddev --re-add missing # TODO: At this stage, there's nothing to actually do # here. Either this worked or it did not. fi pbsize=`(blockdev --getpbsz $mddev || stat -c "%o" $mddev) 2>/dev/null` if [ -z "$pbsize" ]; then ocf_log warn "both blockdev and stat could not get the block size (will use 4k)" pbsize=4096 # try with 4k fi if ! dd if=$mddev count=1 bs=$pbsize of=/dev/null \ iflag=direct >/dev/null 2>&1 ; then ocf_exit_reason "$mddev: I/O error on read" return $OCF_ERR_GENERIC fi return $OCF_SUCCESS } raid1_monitor() { forall raid1_monitor_one } # # STATUS: is the raid device online or offline? # raid1_status() { # See if the MD device is online local rc raid1_monitor rc=$? if [ $rc -ne $OCF_SUCCESS ]; then echo "stopped" else echo "running" fi return $rc } raid1_validate_all() { return $OCF_SUCCESS } PROC_CLEANUP_TIME=3 if ( [ $# -ne 1 ] ) then usage exit $OCF_ERR_ARGS fi case "$1" in meta-data) meta_data exit $OCF_SUCCESS ;; usage) usage exit $OCF_SUCCESS ;; *) ;; esac RAIDCONF="$OCF_RESKEY_raidconf" MDDEV="$OCF_RESKEY_raiddev" FORCESTOP="${OCF_RESKEY_force_stop:-1}" WAIT_FOR_UDEV="${OCF_RESKEY_udev:-1}" if [ -z "$RAIDCONF" ] ; then ocf_exit_reason "Please set OCF_RESKEY_raidconf!" exit $OCF_ERR_CONFIGURED fi if [ ! -r "$RAIDCONF" ] ; then ocf_exit_reason "Configuration file [$RAIDCONF] does not exist, or can not be opened!" exit $OCF_ERR_INSTALLED fi if [ -z "$MDDEV" ] ; then ocf_exit_reason "Please set OCF_RESKEY_raiddev to the Raid device you want to control!" exit $OCF_ERR_CONFIGURED fi if ocf_is_clone && ! ocf_is_true "$OCF_RESKEY_force_clones"; then ocf_exit_reason "md RAID arrays are NOT safe to run as a clone!" ocf_log err "Please read the comment on the force_clones parameter." exit $OCF_ERR_CONFIGURED fi if ocf_is_true $WAIT_FOR_UDEV && ! have_binary udevadm; then if [ "$__OCF_ACTION" = "start" ]; then ocf_log warn "either install udevadm or set udev to false" ocf_log info "setting udev to false!" fi WAIT_FOR_UDEV=0 fi if ! 
ocf_is_true $WAIT_FOR_UDEV; then export MDADM_NO_UDEV=1 fi if ocf_is_true $FORCESTOP && ! have_binary lsof; then ocf_log warn "Please install lsof(8), we may need it when stopping Raid device! Now continuing anyway ..." fi HAVE_RAIDTOOLS=false if have_binary $MDADM >/dev/null 2>&1 ; then if [ -n "$OCF_RESKEY_homehost" ]; then MDADM_HOMEHOST="--homehost=${OCF_RESKEY_homehost}" else MDADM_HOMEHOST="" fi else check_binary $RAIDSTART HAVE_RAIDTOOLS=true fi if [ $HAVE_RAIDTOOLS = true ]; then if [ "$MDDEV" = "auto" ]; then ocf_exit_reason "autoconf supported only with mdadm!" exit $OCF_ERR_INSTALLED elif [ `echo $MDDEV|wc -w` -gt 1 ]; then ocf_exit_reason "multiple devices supported only with mdadm!" exit $OCF_ERR_INSTALLED fi fi if [ "$MDDEV" = "auto" ]; then RAIDDEVS=`list_conf_arrays` else RAIDDEVS="$MDDEV" fi # At this stage, # [ $HAVE_RAIDTOOLS = false ] <=> we have $MDADM, # otherwise we have raidtools (raidstart and raidstop) # Look for how we are called case "$1" in start) raid1_start ;; stop) raid1_stop ;; status) raid1_status ;; monitor) raid1_monitor ;; validate-all) raid1_validate_all ;; *) usage exit $OCF_ERR_UNIMPLEMENTED ;; esac exit $? diff --git a/heartbeat/VirtualDomain b/heartbeat/VirtualDomain index 0b614f5f3..c1857a16e 100755 --- a/heartbeat/VirtualDomain +++ b/heartbeat/VirtualDomain @@ -1,842 +1,914 @@ #!/bin/sh # # Support: users@clusterlabs.org # License: GNU General Public License (GPL) # # Resource Agent for domains managed by the libvirt API. # Requires a running libvirt daemon (libvirtd). # # (c) 2008-2010 Florian Haas, Dejan Muhamedagic, # and Linux-HA contributors # # usage: $0 {start|stop|status|monitor|migrate_to|migrate_from|meta-data|validate-all} # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # Defaults OCF_RESKEY_migration_downtime_default=0 OCF_RESKEY_migration_speed_default=0 OCF_RESKEY_force_stop_default=0 OCF_RESKEY_autoset_utilization_cpu_default="true" OCF_RESKEY_autoset_utilization_hv_memory_default="true" OCF_RESKEY_migrateport_default=$(( 49152 + $(ocf_maybe_random) % 64 )) OCF_RESKEY_CRM_meta_timeout_default=90000 OCF_RESKEY_save_config_on_stop_default=false OCF_RESKEY_sync_config_on_stop_default=false : ${OCF_RESKEY_migration_downtime=${OCF_RESKEY_migration_downtime_default}} : ${OCF_RESKEY_migration_speed=${OCF_RESKEY_migration_speed_default}} : ${OCF_RESKEY_force_stop=${OCF_RESKEY_force_stop_default}} : ${OCF_RESKEY_autoset_utilization_cpu=${OCF_RESKEY_autoset_utilization_cpu_default}} : ${OCF_RESKEY_autoset_utilization_hv_memory=${OCF_RESKEY_autoset_utilization_hv_memory_default}} : ${OCF_RESKEY_migrateport=${OCF_RESKEY_migrateport_default}} : ${OCF_RESKEY_CRM_meta_timeout=${OCF_RESKEY_CRM_meta_timeout_default}} : ${OCF_RESKEY_save_config_on_stop=${OCF_RESKEY_save_config_on_stop_default}} : ${OCF_RESKEY_sync_config_on_stop=${OCF_RESKEY_sync_config_on_stop_default}} if ocf_is_true ${OCF_RESKEY_sync_config_on_stop}; then OCF_RESKEY_save_config_on_stop="true" fi ####################################################################### ## I'd very much suggest to make this RA use bash, ## and then use magic $SECONDS. ## But for now: NOW=$(date +%s) usage() { echo "usage: $0 {start|stop|status|monitor|migrate_to|migrate_from|meta-data|validate-all}" } VirtualDomain_meta_data() { cat < 1.1 Resource agent for a virtual domain (a.k.a. domU, virtual machine, virtual environment etc., depending on context) managed by libvirtd. 
Manages virtual domains through the libvirt virtualization framework Absolute path to the libvirt configuration file, for this virtual domain. Virtual domain configuration file Hypervisor URI to connect to. See the libvirt documentation for details on supported URI formats. The default is system dependent. Determine the system's default uri by running 'virsh --quiet uri'. Hypervisor URI Always forcefully shut down ("destroy") the domain on stop. The default behavior is to resort to a forceful shutdown only after a graceful shutdown attempt has failed. You should only set this to true if your virtual domain (or your virtualization backend) does not support graceful shutdown. Always force shutdown on stop Transport used to connect to the remote hypervisor while migrating. Please refer to the libvirt documentation for details on transports available. If this parameter is omitted, the resource will use libvirt's default transport to connect to the remote hypervisor. Remote hypervisor transport + + +The username will be used in the remote libvirt remoteuri/migrateuri. No user will be +given (which means root) in the username if omitted + +If remoteuri is set, migration_user will be ignored. + +Remote username for the remoteuri + + + Define max downtime during live migration in milliseconds Live migration downtime Define live migration speed per resource in MiB/s Live migration speed Use a dedicated migration network. The migration URI is composed by adding this parameters value to the end of the node name. If the node name happens to be an FQDN (as opposed to an unqualified host name), insert the suffix immediately prior to the first period (.) in the FQDN. At the moment Qemu/KVM and Xen migration via a dedicated network is supported. Note: Be sure this composed host name is locally resolveable and the -associated IP is reachable through the favored network. +associated IP is reachable through the favored network. This suffix will +be added to the remoteuri and migrateuri parameters. See also the migrate_options parameter below. Migration network host name suffix + + +You can also specify here if the calculated migrate URI is unsuitable for your +environment. + +If migrateuri is set then migration_network_suffix, migrateport and +--migrateuri in migrate_options are effectively ignored. Use "%n" as the +placeholder for the target node name. + +Please refer to the libvirt documentation for details on guest +migration. + +Custom migrateuri for migration state transfer + + + Extra virsh options for the guest live migration. You can also specify here --migrateuri if the calculated migrate URI is unsuitable for your environment. If --migrateuri is set then migration_network_suffix and migrateport are effectively ignored. Use "%n" as the placeholder for the target node name. Please refer to the libvirt documentation for details on guest migration. live migrate options To additionally monitor services within the virtual domain, add this parameter with a list of scripts to monitor. Note: when monitor scripts are used, the start and migrate_from operations will complete only when all monitor scripts have completed successfully. Be sure to set the timeout of these operations to accommodate this delay. space-separated list of monitor scripts If set true, the agent will detect the number of domainU's vCPUs from virsh, and put it into the CPU utilization of the resource when the monitor is executed. 
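Illustrative note (not part of the patch): a minimal sketch of how the new migrateuri "%n" placeholder and the migration_network_suffix are expanded, using a hypothetical target node "bar.example.com" and suffix "-mig":

target_node="bar.example.com"
echo "tcp://%n:49152" | sed "s/%n/$target_node/g"       # -> tcp://bar.example.com:49152
echo "$target_node" | sed -e "s,^\([^.]\+\),\1-mig,"    # -> bar-mig.example.com (suffix inserted before the first dot)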
Enable auto-setting the CPU utilization of the resource If set true, the agent will detect the number of *Max memory* from virsh, and put it into the hv_memory utilization of the resource when the monitor is executed. Enable auto-setting the hv_memory utilization of the resource This port will be used in the qemu migrateuri. If unset, the port will be a random highport. Port for migrateuri + + +Use this URI as virsh connection URI to commuicate with a remote hypervisor. + +If remoteuri is set then migration_user and migration_network_suffix are +effectively ignored. Use "%n" as the placeholder for the target node name. + +Please refer to the libvirt documentation for details on guest +migration. + +Custom remoteuri to communicate with a remote hypervisor + + + Changes to a running VM's config are normally lost on stop. This parameter instructs the RA to save the configuration back to the xml file provided in the "config" parameter. Save running VM's config back to its config file Setting this automatically enables save_config_on_stop. When enabled this parameter instructs the RA to call csync2 -x to synchronize the file to all nodes. csync2 must be properly set up for this to work. Save running VM's config back to its config file Path to the snapshot directory where the virtual machine image will be stored. When this parameter is set, the virtual machine's RAM state will be saved to a file in the snapshot directory when stopped. If on start a state file is present for the domain, the domain will be restored to the same state it was in right before it stopped last. This option is incompatible with the 'force_stop' option. Restore state on start/stop EOF } set_util_attr() { local attr=$1 val=$2 local cval outp cval=$(crm_resource -Q -r $OCF_RESOURCE_INSTANCE -z -g $attr 2>/dev/null) if [ $? -ne 0 ] && [ -z "$cval" ]; then crm_resource -Q -r $OCF_RESOURCE_INSTANCE -z -g $attr 2>&1 | grep -e "not connected" > /dev/null 2>&1 if [ $? 
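Illustrative note (not part of the patch): one way the new remoteuri parameter might be configured from the cluster shell; the resource name, config path and URI below are hypothetical. As described above, a set remoteuri takes precedence over migration_user and migration_network_suffix.

# crm configure primitive vm1 ocf:heartbeat:VirtualDomain \
#   params config="/etc/libvirt/qemu/vm1.xml" remoteuri="qemu+ssh://%n/system" \
#   op monitor interval=10s timeout=30s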
-eq 0 ]; then ocf_log debug "Unable to set utilization attribute, cib is not available" return fi fi if [ "$cval" != "$val" ]; then outp=$(crm_resource -r $OCF_RESOURCE_INSTANCE -z -p $attr -v $val 2>&1) || ocf_log warn "crm_resource failed to set utilization attribute $attr: $outp" fi } update_utilization() { local dom_cpu dom_mem if ocf_is_true "$OCF_RESKEY_autoset_utilization_cpu"; then dom_cpu=$(LANG=C virsh $VIRSH_OPTIONS dominfo ${DOMAIN_NAME} 2>/dev/null | awk '/CPU\(s\)/{print $2}') test -n "$dom_cpu" && set_util_attr cpu $dom_cpu fi if ocf_is_true "$OCF_RESKEY_autoset_utilization_hv_memory"; then dom_mem=$(LANG=C virsh $VIRSH_OPTIONS dominfo ${DOMAIN_NAME} 2>/dev/null | awk '/Max memory/{printf("%d", $3/1024)}') test -n "$dom_mem" && set_util_attr hv_memory "$dom_mem" fi } get_emulator() { local emulator="" emulator=$(virsh $VIRSH_OPTIONS dumpxml $DOMAIN_NAME 2>/dev/null | sed -n -e 's/^.*\(.*\)<\/emulator>.*$/\1/p') if [ -z "$emulator" ] && [ -e "$EMULATOR_STATE" ]; then emulator=$(cat $EMULATOR_STATE) fi if [ -z "$emulator" ]; then emulator=$(cat ${OCF_RESKEY_config} | sed -n -e 's/^.*\(.*\)<\/emulator>.*$/\1/p') fi if [ -n "$emulator" ]; then basename $emulator fi } update_emulator_cache() { local emulator emulator=$(get_emulator) if [ -n "$emulator" ]; then echo $emulator > $EMULATOR_STATE fi } # attempt to check domain status outside of libvirt using the emulator process pid_status() { local rc=$OCF_ERR_GENERIC local emulator=$(get_emulator) # An emulator is not required, so only report message in debug mode local loglevel="debug" if ocf_is_probe; then loglevel="notice" fi case "$emulator" in qemu-kvm|qemu-dm|qemu-system-*) rc=$OCF_NOT_RUNNING ps awx | grep -E "[q]emu-(kvm|dm|system).*-name $DOMAIN_NAME " > /dev/null 2>&1 if [ $? -eq 0 ]; then rc=$OCF_SUCCESS fi ;; libvirt_lxc) rc=$OCF_NOT_RUNNING ps awx | grep -E "[l]ibvirt_lxc.*-name $DOMAIN_NAME " > /dev/null 2>&1 if [ $? -eq 0 ]; then rc=$OCF_SUCCESS fi ;; # This can be expanded to check for additional emulators *) # We may be running xen with PV domains, they don't # have an emulator set. try xl list or xen-lists if have_binary xl; then rc=$OCF_NOT_RUNNING xl list $DOMAIN_NAME >/dev/null 2>&1 if [ $? -eq 0 ]; then rc=$OCF_SUCCESS fi elif have_binary xen-list; then rc=$OCF_NOT_RUNNING xen-list $DOMAIN_NAME 2>/dev/null | grep -qs "State.*[-r][-b][-p]--" 2>/dev/null if [ $? -eq 0 ]; then rc=$OCF_SUCCESS fi else ocf_log $loglevel "Unable to determine emulator for $DOMAIN_NAME" fi ;; esac if [ $rc -eq $OCF_SUCCESS ]; then ocf_log debug "Virtual domain $DOMAIN_NAME is currently running." elif [ $rc -eq $OCF_NOT_RUNNING ]; then ocf_log debug "Virtual domain $DOMAIN_NAME is currently not running." 
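Illustrative note (not part of the patch): the utilization values derived above can be checked by hand with the same commands the agent uses (domain and resource names are examples):

LANG=C virsh --quiet dominfo demo-vm | awk '/CPU\(s\)/{print $2}'                    # vCPU count -> cpu attribute
LANG=C virsh --quiet dominfo demo-vm | awk '/Max memory/{printf("%d\n", $3/1024)}'   # MiB -> hv_memory attribute
crm_resource -Q -r demo-vm -z -g cpu                                                 # utilization value currently stored in the CIB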
fi return $rc } VirtualDomain_status() { local try=0 rc=$OCF_ERR_GENERIC status="no state" while [ "$status" = "no state" ]; do try=$(($try + 1 )) status=$(LANG=C virsh $VIRSH_OPTIONS domstate $DOMAIN_NAME 2>&1 | tr 'A-Z' 'a-z') case "$status" in *"error:"*"domain not found"|*"error:"*"failed to get domain"*|"shut off") # shut off: domain is defined, but not started, will not happen if # domain is created but not defined # "Domain not found" or "failed to get domain": domain is not defined # and thus not started ocf_log debug "Virtual domain $DOMAIN_NAME is not running: $(echo $status | sed s/error://g)" rc=$OCF_NOT_RUNNING ;; running|paused|idle|blocked|"in shutdown") # running: domain is currently actively consuming cycles # paused: domain is paused (suspended) # idle: domain is running but idle # blocked: synonym for idle used by legacy Xen versions # in shutdown: the domain is in process of shutting down, but has not completely shutdown or crashed. ocf_log debug "Virtual domain $DOMAIN_NAME is currently $status." rc=$OCF_SUCCESS ;; ""|*"failed to "*"connect to the hypervisor"*|"no state") # Empty string may be returned when virsh does not # receive a reply from libvirtd. # "no state" may occur when the domain is currently # being migrated (on the migration target only), or # whenever virsh can't reliably obtain the domain # state. status="no state" if [ "$__OCF_ACTION" = "stop" ] && [ $try -ge 3 ]; then # During the stop operation, we want to bail out # quickly, so as to be able to force-stop (destroy) # the domain if necessary. ocf_log error "Virtual domain $DOMAIN_NAME has no state during stop operation, bailing out." return $OCF_ERR_GENERIC; elif [ "$__OCF_ACTION" = "monitor" ]; then pid_status rc=$? if [ $rc -ne $OCF_ERR_GENERIC ]; then # we've successfully determined the domains status outside of libvirt return $rc fi else # During all other actions, we just wait and try # again, relying on the CRM/LRM to time us out if # this takes too long. ocf_log info "Virtual domain $DOMAIN_NAME currently has no state, retrying." fi sleep 1 ;; *) # any other output is unexpected. ocf_log error "Virtual domain $DOMAIN_NAME has unknown status \"$status\"!" sleep 1 ;; esac done return $rc } # virsh undefine removes configuration files if they are in # directories which are managed by libvirt. such directories # include also subdirectories of /etc (for instance # /etc/libvirt/*) which may be surprising. VirtualDomain didn't # include the undefine call before, hence this wasn't an issue # before. # # There seems to be no way to find out which directories are # managed by libvirt. # verify_undefined() { local tmpf if virsh --connect=${OCF_RESKEY_hypervisor} list --all --name 2>/dev/null | grep -wqs "$DOMAIN_NAME" then tmpf=$(mktemp -t vmcfgsave.XXXXXX) if [ ! -r "$tmpf" ]; then ocf_log warn "unable to create temp file, disk full?" # we must undefine the domain virsh $VIRSH_OPTIONS undefine $DOMAIN_NAME > /dev/null 2>&1 else cp -p $OCF_RESKEY_config $tmpf virsh $VIRSH_OPTIONS undefine $DOMAIN_NAME > /dev/null 2>&1 [ -f $OCF_RESKEY_config ] || cp -f $tmpf $OCF_RESKEY_config rm -f $tmpf fi fi } VirtualDomain_start() { local snapshotimage if VirtualDomain_status; then ocf_log info "Virtual domain $DOMAIN_NAME already running." return $OCF_SUCCESS fi snapshotimage="$OCF_RESKEY_snapshot/${DOMAIN_NAME}.state" if [ -n "$OCF_RESKEY_snapshot" -a -f "$snapshotimage" ]; then virsh restore $snapshotimage if [ $? 
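Illustrative note (not part of the patch): the raw state string that VirtualDomain_status parses can be reproduced by hand; the connection URI and domain name are examples.

LANG=C virsh --connect=qemu:///system --quiet domstate demo-vm 2>&1 | tr 'A-Z' 'a-z'
# expected values include: running, paused, idle, blocked, "in shutdown", "shut off",
# or an "error: ... domain not found" / "failed to get domain" message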
-eq 0 ]; then rm -f $snapshotimage return $OCF_SUCCESS fi ocf_exit_reason "Failed to restore ${DOMAIN_NAME} from state file in ${OCF_RESKEY_snapshot} directory." return $OCF_ERR_GENERIC fi # Make sure domain is undefined before creating. # The 'create' command guarantees that the domain will be # undefined on shutdown, but requires the domain to be undefined. # if a user defines the domain # outside of this agent, we have to ensure that the domain # is restored to an 'undefined' state before creating. verify_undefined virsh $VIRSH_OPTIONS create ${OCF_RESKEY_config} rc=$? if [ $rc -ne 0 ]; then ocf_exit_reason "Failed to start virtual domain ${DOMAIN_NAME}." return $OCF_ERR_GENERIC fi while ! VirtualDomain_monitor; do sleep 1 done return $OCF_SUCCESS } force_stop() { local out ex translate local status=0 ocf_log info "Issuing forced shutdown (destroy) request for domain ${DOMAIN_NAME}." out=$(LANG=C virsh $VIRSH_OPTIONS destroy ${DOMAIN_NAME} 2>&1) ex=$? translate=$(echo $out|tr 'A-Z' 'a-z') echo >&2 "$translate" case $ex$translate in *"error:"*"domain is not running"*|*"error:"*"domain not found"*|\ *"error:"*"failed to get domain"*) : ;; # unexpected path to the intended outcome, all is well [!0]*) ocf_exit_reason "forced stop failed" return $OCF_ERR_GENERIC ;; 0*) while [ $status != $OCF_NOT_RUNNING ]; do VirtualDomain_status status=$? done ;; esac return $OCF_SUCCESS } sync_config(){ ocf_log info "Syncing $DOMAIN_NAME config file with csync2 -x ${OCF_RESKEY_config}" if ! csync2 -x ${OCF_RESKEY_config}; then ocf_log warn "Syncing ${OCF_RESKEY_config} failed."; fi } save_config(){ CFGTMP=$(mktemp -t vmcfgsave.XXX) virsh $VIRSH_OPTIONS dumpxml --inactive --security-info ${DOMAIN_NAME} > ${CFGTMP} if [ -s ${CFGTMP} ]; then if ! cmp -s ${CFGTMP} ${OCF_RESKEY_config}; then if virt-xml-validate ${CFGTMP} domain 2>/dev/null ; then ocf_log info "Saving domain $DOMAIN_NAME to ${OCF_RESKEY_config}. Please make sure it's present on all nodes or sync_config_on_stop is on." if cat ${CFGTMP} > ${OCF_RESKEY_config} ; then ocf_log info "Saved $DOMAIN_NAME domain's configuration to ${OCF_RESKEY_config}." if ocf_is_true "$OCF_RESKEY_sync_config_on_stop"; then sync_config fi else ocf_log warn "Moving ${CFGTMP} to ${OCF_RESKEY_config} failed." fi else ocf_log warn "Domain $DOMAIN_NAME config failed to validate after dump. Skipping config update." fi fi else ocf_log warn "Domain $DOMAIN_NAME config has 0 size. Skipping config update." fi rm -f ${CFGTMP} } VirtualDomain_stop() { local i local status local shutdown_timeout local needshutdown=1 VirtualDomain_status status=$? case $status in $OCF_SUCCESS) if ocf_is_true $OCF_RESKEY_force_stop; then # if force stop, don't bother attempting graceful shutdown. force_stop return $? fi ocf_log info "Issuing graceful shutdown request for domain ${DOMAIN_NAME}." if [ -n "$OCF_RESKEY_snapshot" ]; then virsh save $DOMAIN_NAME "$OCF_RESKEY_snapshot/${DOMAIN_NAME}.state" if [ $? 
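Illustrative note (not part of the patch): a by-hand sketch of what save_config does, using example paths; the agent additionally skips the copy when the dump is empty or identical to the existing file.

virsh --connect=qemu:///system --quiet dumpxml --inactive --security-info demo-vm > /tmp/demo-vm.xml
virt-xml-validate /tmp/demo-vm.xml domain && cat /tmp/demo-vm.xml > /etc/libvirt/qemu/demo-vm.xml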
-eq 0 ]; then needshutdown=0 else ocf_log error "Failed to save snapshot state of ${DOMAIN_NAME} on stop" fi fi # save config if needed if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then save_config fi # issue the shutdown if save state didn't shutdown for us if [ $needshutdown -eq 1 ]; then # Issue a graceful shutdown request virsh $VIRSH_OPTIONS shutdown ${DOMAIN_NAME} fi # The "shutdown_timeout" we use here is the operation # timeout specified in the CIB, minus 5 seconds shutdown_timeout=$(( $NOW + ($OCF_RESKEY_CRM_meta_timeout/1000) -5 )) # Loop on status until we reach $shutdown_timeout while [ $NOW -lt $shutdown_timeout ]; do VirtualDomain_status status=$? case $status in $OCF_NOT_RUNNING) # This was a graceful shutdown. return $OCF_SUCCESS ;; $OCF_SUCCESS) # Domain is still running, keep # waiting (until shutdown_timeout # expires) sleep 1 ;; *) # Something went wrong. Bail out and # resort to forced stop (destroy). break; esac NOW=$(date +%s) done ;; $OCF_NOT_RUNNING) ocf_log info "Domain $DOMAIN_NAME already stopped." return $OCF_SUCCESS esac # OK. Now if the above graceful shutdown hasn't worked, kill # off the domain with destroy. If that too does not work, # have the LRM time us out. force_stop } mk_migrateuri() { local target_node local migrate_target local hypervisor target_node="$OCF_RESKEY_CRM_meta_migrate_target" # A typical migration URI via a special migration network looks # like "tcp://bar-mig:49152". The port would be randomly chosen # by libvirt from the range 49152-49215 if omitted, at least since # version 0.7.4 ... if [ -n "${OCF_RESKEY_migration_network_suffix}" ]; then hypervisor="${OCF_RESKEY_hypervisor%%[+:]*}" # Hostname might be a FQDN migrate_target=$(echo ${target_node} | sed -e "s,^\([^.]\+\),\1${OCF_RESKEY_migration_network_suffix},") case $hypervisor in qemu) # For quiet ancient libvirt versions a migration port is needed # and the URI must not contain the "//". Newer versions can handle # the "bad" URI. echo "tcp:${migrate_target}:${OCF_RESKEY_migrateport}" ;; xen) echo "xenmigr://${migrate_target}" ;; *) ocf_log warn "$DOMAIN_NAME: Migration via dedicated network currently not supported for ${hypervisor}." ;; esac fi } VirtualDomain_migrate_to() { local rc local target_node local remoteuri local transport_suffix local migrateuri local migrate_opts local migrate_pid target_node="$OCF_RESKEY_CRM_meta_migrate_target" if VirtualDomain_status; then # Find out the remote hypervisor to connect to. That is, turn # something like "qemu://foo:9999/system" into # "qemu+tcp://bar:9999/system" - if [ -n "${OCF_RESKEY_migration_transport}" ]; then - transport_suffix="+${OCF_RESKEY_migration_transport}" + + if [ -n "${OCF_RESKEY_remoteuri}" ]; then + remoteuri=`echo "${OCF_RESKEY_remoteuri}" | + sed "s/%n/$target_node/g"` + else + if [ -n "${OCF_RESKEY_migration_transport}" ]; then + transport_suffix="+${OCF_RESKEY_migration_transport}" + fi + + # append user defined suffix if virsh target should differ from cluster node name + if [ -n "${OCF_RESKEY_migration_network_suffix}" ]; then + # Hostname might be a FQDN + target_node=$(echo ${target_node} | sed -e "s,^\([^.]\+\),\1${OCF_RESKEY_migration_network_suffix},") + fi + + # a remote user has been defined to connect to target_node + if echo ${OCF_RESKEY_migration_user} | grep -q "^[a-z][-a-z0-9]*$" ; then + target_node="${OCF_RESKEY_migration_user}@${target_node}" + fi + + # Scared of that sed expression? So am I. 
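Illustrative note (not part of the patch): the graceful-shutdown window computed in VirtualDomain_stop, shown with a sample CIB operation timeout of 90000 ms:

NOW=$(date +%s)
OCF_RESKEY_CRM_meta_timeout=90000
shutdown_timeout=$(( NOW + (OCF_RESKEY_CRM_meta_timeout/1000) - 5 ))
echo $(( shutdown_timeout - NOW ))   # -> 85 seconds left for the graceful path before force_stop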
:-) + remoteuri=$(echo ${OCF_RESKEY_hypervisor} | sed -e "s,\(.*\)://[^/:]*\(:\?[0-9]*\)/\(.*\),\1${transport_suffix}://${target_node}\2/\3,") fi # User defined migrateuri or do we make one? migrate_opts="$OCF_RESKEY_migrate_options" - if echo "$migrate_opts" | fgrep -qs -- "--migrateuri="; then + + # migration_uri is directly set + if [ -n "${OCF_RESKEY_migrateuri}" ]; then + migrateuri=`echo "${OCF_RESKEY_migrateuri}" | + sed "s/%n/$target_node/g"` + + # extract migrationuri from options + elif echo "$migrate_opts" | fgrep -qs -- "--migrateuri="; then migrateuri=`echo "$migrate_opts" | sed "s/.*--migrateuri=\([^ ]*\).*/\1/;s/%n/$target_node/g"` - migrate_opts=`echo "$migrate_opts" | - sed "s/\(.*\)--migrateuri=[^ ]*\(.*\)/\1\2/"` + + # auto generate else migrateuri=`mk_migrateuri` fi - # Scared of that sed expression? So am I. :-) - remoteuri=$(echo ${OCF_RESKEY_hypervisor} | sed -e "s,\(.*\)://[^/:]*\(:\?[0-9]*\)/\(.*\),\1${transport_suffix}://${target_node}\2/\3,") + + # remove --migrateuri from migration_opts + migrate_opts=`echo "$migrate_opts" | + sed "s/\(.*\)--migrateuri=[^ ]*\(.*\)/\1\2/"` + # save config if needed if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then save_config fi # Live migration speed limit if [ ${OCF_RESKEY_migration_speed} -ne 0 ]; then ocf_log info "$DOMAIN_NAME: Setting live migration speed limit for $DOMAIN_NAME (using: virsh ${VIRSH_OPTIONS} migrate-setspeed $DOMAIN_NAME ${OCF_RESKEY_migration_speed})." virsh ${VIRSH_OPTIONS} migrate-setspeed $DOMAIN_NAME ${OCF_RESKEY_migration_speed} fi # OK, we know where to connect to. Now do the actual migration. ocf_log info "$DOMAIN_NAME: Starting live migration to ${target_node} (using: virsh ${VIRSH_OPTIONS} migrate --live $migrate_opts $DOMAIN_NAME $remoteuri $migrateuri)." virsh ${VIRSH_OPTIONS} migrate --live $migrate_opts $DOMAIN_NAME $remoteuri $migrateuri & migrate_pid=${!} # Live migration downtime interval # Note: You can set downtime only while live migration is in progress if [ ${OCF_RESKEY_migration_downtime} -ne 0 ]; then sleep 2 ocf_log info "$DOMAIN_NAME: Setting live migration downtime for $DOMAIN_NAME (using: virsh ${VIRSH_OPTIONS} migrate-setmaxdowntime $DOMAIN_NAME ${OCF_RESKEY_migration_downtime})." virsh ${VIRSH_OPTIONS} migrate-setmaxdowntime $DOMAIN_NAME ${OCF_RESKEY_migration_downtime} fi wait ${migrate_pid} rc=$? if [ $rc -ne 0 ]; then ocf_exit_reason "$DOMAIN_NAME: live migration to ${target_node} failed: $rc" return $OCF_ERR_GENERIC else ocf_log info "$DOMAIN_NAME: live migration to ${target_node} succeeded." return $OCF_SUCCESS fi else ocf_exit_reason "$DOMAIN_NAME: migrate_to: Not active locally!" return $OCF_ERR_GENERIC fi } VirtualDomain_migrate_from() { while ! VirtualDomain_monitor; do sleep 1 done ocf_log info "$DOMAIN_NAME: live migration from ${OCF_RESKEY_CRM_meta_migrate_source} succeeded." # save config if needed if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then save_config fi return $OCF_SUCCESS } VirtualDomain_monitor() { # First, check the domain status. If that returns anything other # than $OCF_SUCCESS, something is definitely wrong. VirtualDomain_status rc=$? if [ ${rc} -eq ${OCF_SUCCESS} ]; then # OK, the generic status check turned out fine. Now, if we # have monitor scripts defined, run them one after another. for script in ${OCF_RESKEY_monitor_scripts}; do script_output="$($script 2>&1)" script_rc=$? if [ ${script_rc} -ne ${OCF_SUCCESS} ]; then # A monitor script returned a non-success exit # code. 
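Illustrative note (not part of the patch): the sed expression above, applied to example values, turns the local hypervisor URI into the remote one when remoteuri is not set:

OCF_RESKEY_hypervisor="qemu:///system"
transport_suffix="+ssh"
target_node="bar-mig.example.com"
echo "$OCF_RESKEY_hypervisor" | \
    sed -e "s,\(.*\)://[^/:]*\(:\?[0-9]*\)/\(.*\),\1${transport_suffix}://${target_node}\2/\3,"
# -> qemu+ssh://bar-mig.example.com/system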
Stop iterating over the list of scripts, log a # warning message, and propagate $OCF_ERR_GENERIC. ocf_exit_reason "Monitor command \"${script}\" for domain ${DOMAIN_NAME} returned ${script_rc} with output: ${script_output}" rc=$OCF_ERR_GENERIC break else ocf_log debug "Monitor command \"${script}\" for domain ${DOMAIN_NAME} completed successfully with output: ${script_output}" fi done fi update_emulator_cache update_utilization # Save configuration on monitor as well, so we will have a better chance of # having fresh and up to date config files on all nodes. if ocf_is_true "$OCF_RESKEY_save_config_on_stop"; then save_config fi return ${rc} } VirtualDomain_validate_all() { if ocf_is_true $OCF_RESKEY_force_stop && [ -n "$OCF_RESKEY_snapshot" ]; then ocf_exit_reason "The 'force_stop' and 'snapshot' options can not be used together." return $OCF_ERR_CONFIGURED fi # check if we can read the config file (otherwise we're unable to # deduce $DOMAIN_NAME from it, see below) if [ ! -r $OCF_RESKEY_config ]; then if ocf_is_probe; then ocf_log info "Configuration file $OCF_RESKEY_config not readable during probe." elif [ "$__OCF_ACTION" = "stop" ]; then ocf_log info "Configuration file $OCF_RESKEY_config not readable, resource considered stopped." else ocf_exit_reason "Configuration file $OCF_RESKEY_config does not exist or not readable." fi return $OCF_ERR_INSTALLED fi if [ -z $DOMAIN_NAME ]; then ocf_exit_reason "Unable to determine domain name." return $OCF_ERR_INSTALLED fi # Check if csync2 is available when config tells us we might need it. if ocf_is_true $OCF_RESKEY_sync_config_on_stop; then check_binary csync2 fi # Check if migration_speed is a decimal value if ! ocf_is_decimal ${OCF_RESKEY_migration_speed}; then ocf_exit_reason "migration_speed has to be a decimal value" return $OCF_ERR_CONFIGURED fi # Check if migration_downtime is a decimal value if ! ocf_is_decimal ${OCF_RESKEY_migration_downtime}; then ocf_exit_reason "migration_downtime has to be a decimal value" return $OCF_ERR_CONFIGURED fi } VirtualDomain_getconfig() { # Grab the virsh uri default, but only if hypervisor isn't set : ${OCF_RESKEY_hypervisor=$(virsh --quiet uri 2>/dev/null)} # Set options to be passed to virsh: VIRSH_OPTIONS="--connect=${OCF_RESKEY_hypervisor} --quiet" # Retrieve the domain name from the xml file. DOMAIN_NAME=`egrep '[[:space:]]*.*[[:space:]]*$' ${OCF_RESKEY_config} 2>/dev/null | sed -e 's/[[:space:]]*\(.*\)<\/name>[[:space:]]*$/\1/'` EMULATOR_STATE="${HA_RSCTMP}/VirtualDomain-${DOMAIN_NAME}-emu.state" } OCF_REQUIRED_PARAMS="config" OCF_REQUIRED_BINARIES="virsh sed" ocf_rarun $* diff --git a/heartbeat/anything b/heartbeat/anything index 96dc14e8a..fbf8d2c82 100755 --- a/heartbeat/anything +++ b/heartbeat/anything @@ -1,314 +1,328 @@ #!/bin/sh # # OCF Resource Agent compliant resource script. # # Copyright (c) 2009 IN-telegence GmbH & Co. KG, Dominik Klein # All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. 
Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # OCF instance parameters # OCF_RESKEY_binfile # OCF_RESKEY_cmdline_options # OCF_RESKEY_workdir # OCF_RESKEY_pidfile # OCF_RESKEY_logfile # OCF_RESKEY_errlogfile # OCF_RESKEY_user # OCF_RESKEY_monitor_hook # OCF_RESKEY_stop_timeout # # This RA starts $binfile with $cmdline_options as $user in $workdir and writes a $pidfile from that. # If you want it to, it logs: # - stdout to $logfile, stderr to $errlogfile or # - stdout and stderr to $logfile # - or to will be captured by lrmd if these options are omitted. # Monitoring is done through $pidfile or your custom $monitor_hook script. # The RA expects the program to keep running "daemon-like" and # not just quit and exit. So this is NOT (yet - feel free to # enhance) a way to just run a single one-shot command which just # does something and then exits. # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs getpid() { grep -o '[0-9]*' $1 } anything_status() { if test -f "$pidfile" then if pid=`getpid $pidfile` && [ "$pid" ] && kill -s 0 $pid then return $OCF_SUCCESS else # pidfile w/o process means the process died return $OCF_ERR_GENERIC fi else return $OCF_NOT_RUNNING fi } anything_start() { if ! anything_status then + #Make sure that PID Directory exists and is writable by proper user + piddir=`dirname $pidfile` + if ! su -s /bin/sh - $user -c "test -w $piddir"; then + #PID Directory is not writeable by user + ocf_log warn "Directory $piddir is not writable by $user, attempting to fix." + ocf_log info "Creating directory $piddir" + mkdir -p $piddir + ocf_log info "Changing permissions for $piddir for user $user" + chown $user: $piddir + else + ocf_log debug "Directory $piddir exists, and is writeable by $user. All fine" + fi if [ -n "$logfile" -a -n "$errlogfile" ] then # We have logfile and errlogfile, so redirect STDOUT und STDERR to different files cmd="su - $user -c \"cd $workdir; nohup $binfile $cmdline_options >> $logfile 2>> $errlogfile & \"'echo \$!' " else # We only have logfile so redirect STDOUT and STDERR to the same file cmd="su - $user -c \"cd $workdir; nohup $binfile $cmdline_options >> $logfile 2>&1 & \"'echo \$!' " fi ocf_log debug "Starting $process: $cmd" # Execute the command as created above eval $cmd > $pidfile if anything_status then - ocf_log debug "$process: $cmd started successfully" - return $OCF_SUCCESS + ocf_log debug "$process: $cmd started successfully, calling monitor" + anything_monitor + myres=$? + return $myres else ocf_log err "$process: $cmd could not be started" return $OCF_ERR_GENERIC fi else # If already running, consider start successful ocf_log debug "$process: $cmd is already running" return $OCF_SUCCESS fi } anything_stop() { local rc=$OCF_SUCCESS if [ -n "$OCF_RESKEY_stop_timeout" ] then stop_timeout=$OCF_RESKEY_stop_timeout elif [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then # Allow 2/3 of the action timeout for the orderly shutdown # (The origin unit is ms, hence the conversion) stop_timeout=$((OCF_RESKEY_CRM_meta_timeout/1500)) else stop_timeout=10 fi if anything_status then pid=`getpid $pidfile` kill $pid i=0 while [ $i -lt $stop_timeout ] do if ! 
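Illustrative note (not part of the patch): roughly the command string anything_start builds and evals when both logfile and errlogfile are set; all values below are examples.

user=daemon binfile=/usr/local/bin/mydaemon cmdline_options="--foreground"
workdir=/var/lib/mydaemon logfile=/var/log/mydaemon.log errlogfile=/var/log/mydaemon.err
cmd="su - $user -c \"cd $workdir; nohup $binfile $cmdline_options >> $logfile 2>> $errlogfile & \"'echo \$!' "
echo "$cmd"   # the PID echoed by the inner shell is what the agent redirects into $pidfile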
anything_status then rm -f $pidfile return $OCF_SUCCESS fi sleep 1 i=$((i+1)) done ocf_log warn "Stop with SIGTERM failed/timed out, now sending SIGKILL." kill -s 9 $pid while : do if ! anything_status then ocf_log warn "SIGKILL did the job." rc=$OCF_SUCCESS break fi ocf_log info "The job still hasn't stopped yet. Waiting..." sleep 1 done fi rm -f $pidfile return $rc } anything_monitor() { anything_status ret=$? if [ $ret -eq $OCF_SUCCESS ] then if [ -n "$OCF_RESKEY_monitor_hook" ]; then eval "$OCF_RESKEY_monitor_hook" if [ $? -ne $OCF_SUCCESS ]; then return ${OCF_ERR_GENERIC} fi return $OCF_SUCCESS else true fi else return $ret fi } # FIXME: Attributes special meaning to the resource id process="$OCF_RESOURCE_INSTANCE" binfile="$OCF_RESKEY_binfile" cmdline_options="$OCF_RESKEY_cmdline_options" workdir="$OCF_RESKEY_workdir" pidfile="$OCF_RESKEY_pidfile" [ -z "$pidfile" ] && pidfile=${HA_VARRUN}/anything_${process}.pid logfile="${OCF_RESKEY_logfile:-/dev/null}" errlogfile="$OCF_RESKEY_errlogfile" user="$OCF_RESKEY_user" [ -z "$user" ] && user=root anything_validate() { if ! su - $user -c "test -x $binfile" then ocf_log err "binfile $binfile does not exist or is not executable by $user." exit $OCF_ERR_INSTALLED fi if ! getent passwd $user >/dev/null 2>&1 then ocf_log err "user $user does not exist." exit $OCF_ERR_INSTALLED fi for logfilename in "$logfile" "$errlogfile" do if [ -n "$logfilename" ]; then mkdir -p `dirname $logfilename` || { ocf_log err "cannot create $(dirname $logfilename)" exit $OCF_ERR_INSTALLED } fi done [ "x$workdir" != x -a ! -d "$workdir" ] && { ocf_log err "working directory $workdir doesn't exist" exit $OCF_ERR_INSTALLED } return $OCF_SUCCESS } anything_meta() { cat < 1.0 This is a generic OCF RA to manage almost anything. Manages an arbitrary service - + The full name of the binary to be executed. This is expected to keep running with the same pid and not just do something and exit. Full path name of the binary to be executed Command line options to pass to the binary Command line options The path from where the binfile will be executed. Full path name of the work directory File to read/write the PID from/to. File to write STDOUT to File to write STDOUT to File to write STDOUT to File to write STDERR to File to write STDERR to User to run the command as User to run the command as Command to run in monitor operation Command to run in monitor operation In the stop operation: Seconds to wait for kill -TERM to succeed before sending kill -SIGKILL. Defaults to 2/3 of the stop operation timeout. Seconds to wait after having sent SIGTERM before sending SIGKILL in stop operation END exit 0 } case "$1" in meta-data|metadata|meta_data) anything_meta ;; start) anything_start ;; stop) anything_stop ;; monitor) anything_monitor ;; validate-all) anything_validate ;; *) ocf_log err "$0 was called with unsupported arguments: $*" exit $OCF_ERR_UNIMPLEMENTED ;; esac diff --git a/heartbeat/clvm b/heartbeat/clvm index 8eae6c32c..43b930287 100755 --- a/heartbeat/clvm +++ b/heartbeat/clvm @@ -1,427 +1,433 @@ #!/bin/bash # # Copyright (c) 2014 David Vossel # All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
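Illustrative note (not part of the patch): when stop_timeout is unset, anything_stop allows 2/3 of the operation timeout (given in ms) before escalating from SIGTERM to SIGKILL; with a sample 30000 ms timeout:

OCF_RESKEY_CRM_meta_timeout=30000
echo $((OCF_RESKEY_CRM_meta_timeout/1500))   # -> 20 seconds of SIGTERM grace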
# # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs . ${OCF_FUNCTIONS_DIR}/ocf-directories ####################################################################### meta_data() { cat < 1.0 This agent manages the clvmd daemon. clvmd Start with cmirrord (cluster mirror log daemon). activate cmirrord Options to clvmd. Refer to clvmd.8 for detailed descriptions. Daemon Options Whether or not to activate all cluster volume groups after starting the clvmd or not. Note that clustered volume groups will always be deactivated before the clvmd stops regardless of what this option is set to. Activate volume groups END } ####################################################################### : ${OCF_RESKEY_daemon_options:="-d0"} : ${OCF_RESKEY_activate_vgs:="true"} sbindir=$HA_SBIN_DIR if [ -z $sbindir ]; then sbindir=/usr/sbin fi DAEMON="clvmd" CMIRROR="cmirrord" DAEMON_PATH="${sbindir}/clvmd" CMIRROR_PATH="${sbindir}/cmirrord" LVMCONF="${sbindir}/lvmconf" LOCK_FILE="/var/lock/subsys/$DAEMON" # attempt to detect where the vg tools are located # for some reason this isn't consistent with sbindir # in some distros. vgtoolsdir=$(dirname $(which vgchange 2> /dev/null) 2> /dev/null) if [ -z "$vgtoolsdir" ]; then vgtoolsdir="$sbindir" fi LVM_VGCHANGE=${vgtoolsdir}/vgchange LVM_VGDISPLAY=${vgtoolsdir}/vgdisplay LVM_VGSCAN=${vgtoolsdir}/vgscan # Leaving this in for legacy. We do not want to advertize # the abilty to set options in the systconfig exists, we want # to expand the OCF style options as necessary instead. [ -f /etc/sysconfig/cluster ] && . /etc/sysconfig/cluster [ -f /etc/sysconfig/$DAEMON ] && . /etc/sysconfig/$DAEMON CLVMD_TIMEOUT="90" if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then CLVMD_TIMEOUT=$(($OCF_RESKEY_CRM_meta_timeout/1000)) fi clvmd_usage() { cat </dev/null | grep -a "${binary}" > /dev/null 2>&1 if [ $? -eq 0 ];then # shortcut without requiring pgrep to search through all procs return $OCF_SUCCESS fi fi pid=$(pgrep ${binary}) case $? in 0) ocf_log info "PID file (pid:${pid} at $pidfile) created for ${binary}." echo "$pid" > $pidfile return $OCF_SUCCESS;; 1) rm -f "$pidfile" > /dev/null 2>&1 ocf_log info "$binary is not running" return $OCF_NOT_RUNNING;; *) rm -f "$pidfile" > /dev/null 2>&1 ocf_exit_reason "Error encountered detecting pid status of $binary" return $OCF_ERR_GENERIC;; esac } clvmd_status() { local rc local mirror_rc clvmd_validate if [ $? -ne $OCF_SUCCESS ]; then ocf_exit_reason "Unable to monitor, Environment validation failed." return $? fi check_process $DAEMON rc=$? mirror_rc=$rc if ocf_is_true $OCF_RESKEY_with_cmirrord; then check_process $CMIRROR mirror_rc=$? 
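Illustrative note (not part of the patch): a hypothetical clone configuration in crm shell syntax using the parameters documented above; names and values are examples only.

# crm configure primitive clvmd ocf:heartbeat:clvm \
#   params with_cmirrord=true daemon_options="-d0" activate_vgs=true \
#   op monitor interval=30s
# crm configure clone clvmd-clone clvmd meta interleave=true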
fi # If these ever don't match, return error to force recovery if [ $mirror_rc -ne $rc ]; then return $OCF_ERR_GENERIC fi return $rc } # NOTE: replace this with vgs, once display filter per attr is implemented. clustered_vgs() { ${LVM_VGDISPLAY} 2>/dev/null | awk 'BEGIN {RS="VG Name"} {if (/Clustered/) print $1;}' } wait_for_process() { local binary=$1 local timeout=$2 local count=0 ocf_log info "Waiting for $binary to exit" while [ $count -le $timeout ]; do check_process $binary if [ $? -eq $OCF_NOT_RUNNING ]; then ocf_log info "$binary terminated" return $OCF_SUCCESS fi sleep 1 count=$((count+1)) done return $OCF_ERR_GENERIC } time_left() { local end=$1 local default=$2 local now=$SECONDS local result=0 result=$(( $end - $now )) if [ $result -lt $default ]; then return $default fi return $result } clvmd_stop() { local LVM_VGS local rc=$OCF_SUCCESS local end=$(( $SECONDS + $CLVMD_TIMEOUT )) clvmd_status if [ $? -eq $OCF_NOT_RUNNING ]; then return $OCF_SUCCESS fi check_process $DAEMON if [ $? -ne $OCF_NOT_RUNNING ]; then LVM_VGS="$(clustered_vgs)" if [ -n "$LVM_VGS" ]; then ocf_log info "Deactivating clustered VG(s):" ocf_run ${LVM_VGCHANGE} -anl $LVM_VGS if [ $? -ne 0 ]; then ocf_exit_reason "Failed to deactivate volume groups, cluster vglist = $LVM_VGS" return $OCF_ERR_GENERIC fi fi ocf_log info "Signaling $DAEMON to exit" killall -TERM $DAEMON if [ $? != 0 ]; then ocf_exit_reason "Failed to signal -TERM to $DAEMON" return $OCF_ERR_GENERIC fi wait_for_process $DAEMON $CLVMD_TIMEOUT rc=$? if [ $rc -ne $OCF_SUCCESS ]; then ocf_exit_reason "$DAEMON failed to exit" return $rc fi rm -f $LOCK_FILE fi check_process $CMIRROR if [ $? -ne $OCF_NOT_RUNNING ] && ocf_is_true $OCF_RESKEY_with_cmirrord; then local timeout ocf_log info "Signaling $CMIRROR to exit" killall -INT $CMIRROR time_left $end 10; timeout=$? wait_for_process $CMIRROR $timeout rc=$? if [ $rc -ne $OCF_SUCCESS ]; then killall -KILL $CMIRROR time_left $end 10; timeout=$? wait_for_process $CMIRROR $(time_left $end 10) rc=$? fi fi return $rc } start_process() { local binary_path=$1 local opts=$2 check_process "$(basename $binary_path)" if [ $? -ne $OCF_SUCCESS ]; then ocf_log info "Starting $binary_path: " ocf_run $binary_path $opts rc=$? if [ $rc -ne 0 ]; then ocf_exit_reason "Failed to launch $binary_path, exit code $rc" exit $OCF_ERR_GENERIC fi fi return $OCF_SUCCESS } clvmd_activate_all() { if ! ocf_is_true "$OCF_RESKEY_activate_vgs"; then ocf_log info "skipping vg activation, activate_vgs is set to $OCF_RESKEY_activate_vgs" return $OCF_SUCCESS fi # Activate all volume groups by leaving the # "volume group name" parameter empty ocf_run ${LVM_VGCHANGE} -aay if [ $? -ne 0 ]; then ocf_log info "Failed to activate VG(s):" clvmd_stop return $OCF_ERR_GENERIC fi return $OCF_SUCCESS } clvmd_start() { local rc=0 local CLVMDOPTS="-T${CLVMD_TIMEOUT} $OCF_RESKEY_daemon_options" clvmd_validate if [ $? -ne $OCF_SUCCESS ]; then ocf_exit_reason "Unable to start, Environment validation failed." return $? fi + # systemd drop-in to stop process before storage services during + # shutdown/reboot + if ps -p 1 | grep -q systemd ; then + systemd_drop_in "99-clvmd" "After" "blk-availability.service" + fi + clvmd_status if [ $? 
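Illustrative note (not part of the patch): the clustered-VG detection used on the stop path can be run by hand to see which volume groups would be deactivated:

vgdisplay 2>/dev/null | awk 'BEGIN {RS="VG Name"} {if (/Clustered/) print $1;}'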
-eq $OCF_SUCCESS ]; then ocf_log debug "$DAEMON already started" clvmd_activate_all return $?; fi # autoset locking type to clusted when lvmconf tool is available if [ -x "$LVMCONF" ]; then $LVMCONF --enable-cluster > /dev/null 2>&1 fi # if either of these fail, script will exit OCF_ERR_GENERIC if ocf_is_true $OCF_RESKEY_with_cmirrord; then start_process $CMIRROR_PATH fi start_process $DAEMON_PATH "$CLVMDOPTS" # Refresh local cache. # # It's possible that new PVs were added to this, or other VGs # while this node was down. So we run vgscan here to avoid # any potential "Missing UUID" messages with subsequent # LVM commands. # The following step would be better and more informative to the user: # 'action "Refreshing VG(s) local cache:" ${LVM_VGSCAN}' # but it could show warnings such as: # 'clvmd not running on node x-y-z Unable to obtain global lock.' # and the action would be shown as FAILED when in reality it didn't. # Ideally vgscan should have a startup mode that would not print # unnecessary warnings. ${LVM_VGSCAN} > /dev/null 2>&1 touch $LOCK_FILE clvmd_activate_all clvmd_status return $? } case $__OCF_ACTION in meta-data) meta_data exit $OCF_SUCCESS;; start) clvmd_start;; stop) clvmd_stop;; monitor) clvmd_status;; validate-all) clvmd_validate;; usage|help) clvmd_usage;; *) clvmd_usage exit $OCF_ERR_UNIMPLEMENTED;; esac rc=$? ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" exit $rc diff --git a/heartbeat/iSCSILogicalUnit b/heartbeat/iSCSILogicalUnit index e0701594c..11bee9c7c 100755 --- a/heartbeat/iSCSILogicalUnit +++ b/heartbeat/iSCSILogicalUnit @@ -1,701 +1,739 @@ #!/bin/bash # # # iSCSILogicalUnit OCF RA. Exports and manages iSCSI Logical Units. # # (c) 2013 LINBIT, Lars Ellenberg # (c) 2009-2010 Florian Haas, Dejan Muhamedagic, # and Linux-HA contributors # # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # Defaults # Set a default implementation based on software installed if have_binary ietadm; then OCF_RESKEY_implementation_default="iet" elif have_binary tgtadm; then OCF_RESKEY_implementation_default="tgt" elif have_binary lio_node; then OCF_RESKEY_implementation_default="lio" elif have_binary targetcli; then OCF_RESKEY_implementation_default="lio-t" fi : ${OCF_RESKEY_implementation=${OCF_RESKEY_implementation_default}} # Use a default SCSI ID and SCSI SN that is unique across the cluster, # and persistent in the event of resource migration. 
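Illustrative note (not part of the patch): the manual equivalent of what clvmd_start does around daemon startup, assuming lvmconf lives in the default sbin directory:

/usr/sbin/lvmconf --enable-cluster   # switch LVM to clustered locking
vgscan > /dev/null 2>&1              # refresh the local cache, as in clvmd_start
vgchange -aay                        # activate all VGs (clvmd_activate_all)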
# SCSI IDs are limited to 24 bytes, but only 16 bytes are known to be # supported by all iSCSI implementations this RA cares about. Thus, # for a default, use the first 16 characters of # $OCF_RESOURCE_INSTANCE. OCF_RESKEY_scsi_id_default="${OCF_RESOURCE_INSTANCE:0:16}" : ${OCF_RESKEY_scsi_id=${OCF_RESKEY_scsi_id_default}} # To have a reasonably unique default SCSI SN, use the first 8 bytes # of an MD5 hash of of $OCF_RESOURCE_INSTANCE sn=`echo -n "${OCF_RESOURCE_INSTANCE}" | md5sum | sed -e 's/ .*//'` OCF_RESKEY_scsi_sn_default=${sn:0:8} : ${OCF_RESKEY_scsi_sn=${OCF_RESKEY_scsi_sn_default}} # set 0 as a default value for lio iblock device number OCF_RESKEY_lio_iblock_default=0 OCF_RESKEY_lio_iblock=${OCF_RESKEY_lio_iblock:-$OCF_RESKEY_lio_iblock_default} ## tgt specifics # tgt has "backing store type" and "backing store open flags", # as well as device-type. # # suggestions how to make this generic accross all supported implementations? # how should they be named, how should they be mapped to implementation specifics? # # OCF_RESKEY_tgt_bstype # OCF_RESKEY_tgt_bsoflags # OCF_RESKEY_tgt_bsopts # OCF_RESKEY_tgt_device_type # targetcli: iSCSITarget and iSCSILogicalUnit must use the same lockfile TARGETLOCKFILE=${HA_RSCTMP}/targetcli.lock ####################################################################### meta_data() { cat < 0.9 Manages iSCSI Logical Unit. An iSCSI Logical unit is a subdivision of an SCSI Target, exported via a daemon that speaks the iSCSI protocol. Manages iSCSI Logical Units (LUs) The iSCSI target daemon implementation. Must be one of "iet", "tgt", "lio", or "lio-t". If unspecified, an implementation is selected based on the availability of management utilities, with "iet" being tried first, then "tgt", then "lio", then "lio-t". iSCSI target daemon implementation The iSCSI Qualified Name (IQN) that this Logical Unit belongs to. iSCSI target IQN The Logical Unit number (LUN) exposed to initiators. Logical Unit number (LUN) The path to the block device exposed. Some implementations allow this to be a regular file, too. Block device (or file) path The SCSI ID to be configured for this Logical Unit. The default is the resource name, truncated to 24 bytes. SCSI ID The SCSI serial number to be configured for this Logical Unit. The default is a hash of the resource name, truncated to 8 bytes. SCSI serial number + + +The SCSI UNMAP command to be configured for this Logical Unit. +Setting this integer to 1 will enable TPU IOCTL emulation. + +SCSI UNMAP (for TRIM / DISCARD) + + + + + +The SCSI EXTENDED COPY command to be configured for this Logical Unit. +Setting this integer to 1 will enable 3PC IOCTL emulation. + +SCSI extended write + + + + + +The SCSI Compare and Write command to be configured for this Logical Unit. +Setting this integer to 1 will enable CAW IOCTL emulation. + +SCSI compare and write + + + The SCSI vendor ID to be configured for this Logical Unit. SCSI vendor ID The SCSI product ID to be configured for this Logical Unit. SCSI product ID TGT specific backing store type. If you want to use aio, make sure your tgtadm is built against libaio. See tgtadm(8). TGT backing store type TGT specific backing store open flags (direct|sync). See tgtadm(8). TGT backing store open flags TGT specific backing store options. See tgtadm(8). TGT backing store options TGT specific device type. See tgtadm(8). TGT device type Additional LU parameters. A space-separated list of "name=value" pairs which will be passed through to the iSCSI daemon's management interface. 
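Illustrative note (not part of the patch): how the default scsi_id and scsi_sn are derived from the resource instance name; the instance name below is an example.

OCF_RESOURCE_INSTANCE=lun1
sn=$(echo -n "$OCF_RESOURCE_INSTANCE" | md5sum | sed -e 's/ .*//')
echo "scsi_id default: ${OCF_RESOURCE_INSTANCE:0:16}"
echo "scsi_sn default: ${sn:0:8}"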
The supported parameters are implementation dependent. Neither the name nor the value may contain whitespace. List of iSCSI LU parameters Allowed initiators. A space-separated list of initiators allowed to connect to this lun. Initiators may be listed in any syntax the target implementation allows. If this parameter is empty or not set, access to this lun will not be allowed from any initiator, if target is not in demo mode. This parameter is only necessary when using LIO. List of iSCSI initiators allowed to connect to this lun. LIO iblock device name, a number starting from 0. Using distinct values here avoids a warning in LIO "LEGACY: SHARED HBA"; and it is necessary when using multiple LUNs started at the same time (eg. on node failover) to prevent a race condition in tcm_core on mkdir() in /sys/kernel/config/target/core/. LIO iblock device number END } ####################################################################### iSCSILogicalUnit_usage() { cat < /sys/kernel/config/target/core/iblock_${OCF_RESKEY_lio_iblock}/${OCF_RESOURCE_INSTANCE}/wwn/vpd_unit_serial fi ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/luns create /backstores/block/${OCF_RESOURCE_INSTANCE} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC if $(ip a | grep -q inet6); then ocf_run -q targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/portals delete 0.0.0.0 3260 ocf_run -q targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/portals create ::0 fi if [ -n "${OCF_RESKEY_allowed_initiators}" ]; then for initiator in ${OCF_RESKEY_allowed_initiators}; do ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls create ${initiator} add_mapped_luns=False || exit $OCF_ERR_GENERIC ocf_run targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls/${initiator} create ${OCF_RESKEY_lun} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC done fi + + if [ -n "${OCF_RESKEY_emulate_tpu}" ]; then + echo ${OCF_RESKEY_emulate_tpu} > ${iblock_attrib_path}/emulate_tpu || exit $OCF_ERR_GENERIC + fi + if [ -n "${OCF_RESKEY_emulate_3pc}" ]; then + echo ${OCF_RESKEY_emulate_3pc} > ${iblock_attrib_path}/emulate_3pc || exit $OCF_ERR_GENERIC + fi + if [ -n "${OCF_RESKEY_emulate_caw}" ]; then + echo ${OCF_RESKEY_emulate_caw} > ${iblock_attrib_path}/emulate_caw || exit $OCF_ERR_GENERIC + fi ;; esac # Force the monitor operation to pass before start is considered a success. iSCSILogicalUnit_monitor } iSCSILogicalUnit_stop() { iSCSILogicalUnit_monitor if [ $? -eq $OCF_NOT_RUNNING ]; then return $OCF_SUCCESS fi case $OCF_RESKEY_implementation in iet) # IET allows us to remove LUs while they are in use ocf_run ietadm --op delete \ --tid=${TID} \ --lun=${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC ;; tgt) # tgt will fail to remove an LU while it is in use, # but at the same time does not allow us to # selectively shut down a connection that is using a # specific LU. Thus, we need to loop here until tgtd # decides that the LU is no longer in use, or we get # timed out by the LRM. while ! 
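Illustrative note (not part of the patch): after a start with emulate_tpu=1 on the lio-t implementation, the written attribute should be visible in configfs; the backstore name, iblock index and attrib path below are assumptions for illustration only.

cat /sys/kernel/config/target/core/iblock_0/lun1/attrib/emulate_tpu   # expected: 1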
ocf_run -warn tgtadm --lld iscsi --op delete --mode logicalunit \ --tid ${TID} \ --lun=${OCF_RESKEY_lun}; do sleep 1 done ;; lio) acls_configfs_path="/sys/kernel/config/target/iscsi/${OCF_RESKEY_target_iqn}/tpgt_1/acls" for initiatorpath in ${acls_configfs_path}/*; do initiator=$(basename "${initiatorpath}") if [ -e "${initiatorpath}/lun_${OCF_RESKEY_lun}" ]; then ocf_log info "deleting acl at ${initiatorpath}/lun_${OCF_RESKEY_lun}" ocf_run lio_node --dellunacl=${OCF_RESKEY_target_iqn} 1 \ ${initiator} ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC fi done lun_configfs_path="/sys/kernel/config/target/iscsi/${OCF_RESKEY_target_iqn}/tpgt_1/lun/lun_${OCF_RESKEY_lun}/" if [ -e "${lun_configfs_path}" ]; then ocf_run lio_node --dellun=${OCF_RESKEY_target_iqn} 1 ${OCF_RESKEY_lun} || exit $OCF_ERR_GENERIC fi block_configfs_path="/sys/kernel/config/target/core/iblock_${OCF_RESKEY_lio_iblock}/${OCF_RESOURCE_INSTANCE}/udev_path" if [ -e "${block_configfs_path}" ]; then ocf_run tcm_node --freedev=iblock_${OCF_RESKEY_lio_iblock}/${OCF_RESOURCE_INSTANCE} || exit $OCF_ERR_GENERIC fi ;; lio-t) ocf_take_lock $TARGETLOCKFILE ocf_release_lock_on_exit $TARGETLOCKFILE # "targetcli delete" will fail if the LUN is already # gone. Log a warning and still push ahead. ocf_run -warn targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/luns delete ${OCF_RESKEY_lun} if [ -n "${OCF_RESKEY_allowed_initiators}" ]; then for initiator in ${OCF_RESKEY_allowed_initiators}; do if targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls/${initiator} status | grep "Mapped LUNs: 0" >/dev/null ; then ocf_run -warn targetcli /iscsi/${OCF_RESKEY_target_iqn}/tpg1/acls/ delete ${initiator} fi done fi # If we've proceeded down to here and we're unable to # delete the backstore, then something is seriously # wrong and we need to fail the stop operation # (potentially causing fencing) ocf_run targetcli /backstores/block delete ${OCF_RESOURCE_INSTANCE} || exit $OCF_ERR_GENERIC ;; esac return $OCF_SUCCESS } iSCSILogicalUnit_monitor() { if [ x"${OCF_RESKEY_tgt_bstype}" != x"rbd" ]; then # If our backing device (or file) doesn't even exist, we're not running [ -e ${OCF_RESKEY_path} ] || return $OCF_NOT_RUNNING fi case $OCF_RESKEY_implementation in iet) # Figure out and set the target ID TID=`sed -ne "s/tid:\([[:digit:]]\+\) name:${OCF_RESKEY_target_iqn}$/\1/p" < /proc/net/iet/volume` if [ -z "${TID}" ]; then # Our target is not configured, thus we're not # running. return $OCF_NOT_RUNNING fi # FIXME: this looks for a matching LUN and path, but does # not actually test for the correct target ID. grep -E -q "[[:space:]]+lun:${OCF_RESKEY_lun}.*path:${OCF_RESKEY_path}$" /proc/net/iet/volume && return $OCF_SUCCESS ;; tgt) # Figure out and set the target ID TID=`tgtadm --lld iscsi --op show --mode target \ | sed -ne "s/^Target \([[:digit:]]\+\): ${OCF_RESKEY_target_iqn}$/\1/p"` if [ -z "$TID" ]; then # Our target is not configured, thus we're not # running. return $OCF_NOT_RUNNING fi # This only looks for the backing store, but does not test # for the correct target ID and LUN. tgtadm --lld iscsi --op show --mode target \ | grep -E -q "[[:space:]]+Backing store.*: ${OCF_RESKEY_path}$" && return $OCF_SUCCESS ;; lio) configfs_path="/sys/kernel/config/target/iscsi/${OCF_RESKEY_target_iqn}/tpgt_1/lun/lun_${OCF_RESKEY_lun}/${OCF_RESOURCE_INSTANCE}/udev_path" [ -e ${configfs_path} ] && [ `cat ${configfs_path}` = "${OCF_RESKEY_path}" ] && return $OCF_SUCCESS # if we aren't activated, is a block device still left over? 
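Illustrative note (not part of the patch): the by-hand equivalent of the tgt monitor check above, i.e. resolve the target ID for the IQN and confirm the backing store is exported; the IQN and device path are examples.

tgtadm --lld iscsi --op show --mode target | \
    sed -ne "s/^Target \([[:digit:]]\+\): iqn.2013-01.example.com:target1$/\1/p"
tgtadm --lld iscsi --op show --mode target | \
    grep -E "[[:space:]]+Backing store.*: /dev/drbd0$"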
block_configfs_path="/sys/kernel/config/target/core/iblock_${OCF_RESKEY_lio_iblock}/${OCF_RESOURCE_INSTANCE}/udev_path" [ -e ${block_configfs_path} ] && ocf_log warn "existing block without an active lun: ${block_configfs_path}" [ -e ${block_configfs_path} ] && return $OCF_ERR_GENERIC ;; lio-t) configfs_path="/sys/kernel/config/target/iscsi/${OCF_RESKEY_target_iqn}/tpgt_1/lun/lun_${OCF_RESKEY_lun}/*/udev_path" [ -e ${configfs_path} ] && [ `cat ${configfs_path}` = "${OCF_RESKEY_path}" ] && return $OCF_SUCCESS # if we aren't activated, is a block device still left over? block_configfs_path="/sys/kernel/config/target/core/iblock_*/${OCF_RESOURCE_INSTANCE}/udev_path" [ -e ${block_configfs_path} ] && ocf_log warn "existing block without an active lun: ${block_configfs_path}" [ -e ${block_configfs_path} ] && return $OCF_ERR_GENERIC ;; esac return $OCF_NOT_RUNNING } iSCSILogicalUnit_validate() { # Do we have all required variables? for var in target_iqn lun path; do param="OCF_RESKEY_${var}" if [ -z "${!param}" ]; then ocf_exit_reason "Missing resource parameter \"$var\"!" exit $OCF_ERR_CONFIGURED fi done # Is the configured implementation supported? case "$OCF_RESKEY_implementation" in "iet"|"tgt"|"lio"|"lio-t") ;; "") # The user didn't specify an implementation, and we were # unable to determine one from installed binaries (in # other words: no binaries for any supported # implementation could be found) ocf_exit_reason "Undefined iSCSI target implementation" exit $OCF_ERR_INSTALLED ;; *) ocf_exit_reason "Unsupported iSCSI target implementation \"$OCF_RESKEY_implementation\"!" exit $OCF_ERR_CONFIGURED ;; esac # Do we have a valid LUN? case $OCF_RESKEY_implementation in iet) # IET allows LUN 0 and up [ $OCF_RESKEY_lun -ge 0 ] case $? in 0) # OK ;; 1) ocf_log err "Invalid LUN $OCF_RESKEY_lun (must be a non-negative integer)." exit $OCF_ERR_CONFIGURED ;; *) ocf_log err "Invalid LUN $OCF_RESKEY_lun (must be an integer)." exit $OCF_ERR_CONFIGURED ;; esac ;; tgt) # tgt reserves LUN 0 for its own purposes [ $OCF_RESKEY_lun -ge 1 ] case $? in 0) # OK ;; 1) ocf_log err "Invalid LUN $OCF_RESKEY_lun (must be greater than 0)." exit $OCF_ERR_CONFIGURED ;; *) ocf_log err "Invalid LUN $OCF_RESKEY_lun (must be an integer)." exit $OCF_ERR_CONFIGURED ;; esac ;; esac # Do we have any configuration parameters that the current # implementation does not support? 
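Illustrative note (not part of the patch): the configfs path the lio-t monitor compares against the configured backing device; the IQN, LUN and device path are examples.

cat /sys/kernel/config/target/iscsi/iqn.2013-01.example.com:target1/tpgt_1/lun/lun_1/*/udev_path
# should print the value of the "path" parameter, e.g. /dev/drbd0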
local unsupported_params local var local envar case $OCF_RESKEY_implementation in iet) # IET does not support setting the vendor and product ID # (it always uses "IET" and "VIRTUAL-DISK") - unsupported_params="vendor_id product_id allowed_initiators lio_iblock tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type" + unsupported_params="vendor_id product_id allowed_initiators lio_iblock tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type emulate_tpu emulate_3pc emulate_caw" ;; tgt) - unsupported_params="allowed_initiators lio_iblock" + unsupported_params="allowed_initiators lio_iblock emulate_tpu emulate_3pc emulate_caw" ;; lio) - unsupported_params="scsi_id vendor_id product_id tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type" + unsupported_params="scsi_id vendor_id product_id tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type emulate_tpu emulate_3pc emulate_caw" ;; lio-t) unsupported_params="scsi_id vendor_id product_id tgt_bstype tgt_bsoflags tgt_bsopts tgt_device_type lio_iblock" ;; esac for var in ${unsupported_params}; do envar=OCF_RESKEY_${var} defvar=OCF_RESKEY_${var}_default if [ -n "${!envar}" ]; then if [[ "${!envar}" != "${!defvar}" ]];then case "$__OCF_ACTION" in start|validate-all) ocf_log warn "Configuration parameter \"${var}\"" \ "is not supported by the iSCSI implementation" \ "and will be ignored." ;; esac fi fi done if ! ocf_is_probe; then # Do we have all required binaries? case $OCF_RESKEY_implementation in iet) check_binary ietadm ;; tgt) check_binary tgtadm ;; lio) check_binary tcm_node check_binary lio_node ;; lio-t) check_binary targetcli ;; esac # Is the required kernel functionality available? case $OCF_RESKEY_implementation in iet) [ -d /proc/net/iet ] if [ $? -ne 0 ]; then ocf_log err "/proc/net/iet does not exist or is not a directory -- check if required modules are loaded." exit $OCF_ERR_INSTALLED fi ;; tgt) # tgt is userland only ;; esac fi return $OCF_SUCCESS } case $1 in meta-data) meta_data exit $OCF_SUCCESS ;; usage|help) iSCSILogicalUnit_usage exit $OCF_SUCCESS ;; esac # Everything except usage and meta-data must pass the validate test iSCSILogicalUnit_validate case $__OCF_ACTION in start) iSCSILogicalUnit_start;; stop) iSCSILogicalUnit_stop;; monitor|status) iSCSILogicalUnit_monitor;; reload) ocf_log err "Reloading..." iSCSILogicalUnit_start ;; validate-all) ;; *) iSCSILogicalUnit_usage exit $OCF_ERR_UNIMPLEMENTED ;; esac rc=$? ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" exit $rc diff --git a/heartbeat/kamailio b/heartbeat/kamailio index ac99fd2c0..e05c3b68a 100755 --- a/heartbeat/kamailio +++ b/heartbeat/kamailio @@ -1,698 +1,741 @@ #!/bin/bash # # OCF resource agent for Kamailio for pacemaker # # Copyright (c) 2013 FREQUENTIS AG, # Authors: Stefan Wenk # Rainer Brestan # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # # OCF input parameters: # OCF_RESKEY_binary # OCF_RESKEY_conffile # OCF_RESKEY_pidfile # OCF_RESKEY_monitoring_ip # OCF_RESKEY_listen_address # OCF_RESKEY_port # OCF_RESKEY_proto # OCF_RESKEY_sipsak +# OCF_RESKEY_kamctl # OCF_RESKEY_kamctlrc # OCF_RESKEY_kamuser +# OCF_RESKEY_kamgroup +# OCF_RESKEY_extra_options # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### # Defaults RESKEY_binary_default="/usr/sbin/kamailio" RESKEY_conffile_default="/etc/kamailio/kamailio.cfg" RESKEY_pidfile_default="/var/run/kamailio_${OCF_RESOURCE_INSTANCE}/kamailio.pid" RESKEY_monitoring_ip_default=127.0.0.1 RESKEY_port_default=5060 RESKEY_proto_default="udptcp" RESKEY_sipsak_default="/usr/bin/sipsak" RESKEY_kamctl_default="/usr/bin/kamctl" RESKEY_kamctlrc_default="/etc/kamailio/kamctlrc" RESKEY_kamuser_default="" +RESKEY_kamgroup_default="" +RESKEY_extra_options_default="" ####################################################################### : ${OCF_RESKEY_binary=${RESKEY_binary_default}} : ${OCF_RESKEY_conffile=${RESKEY_conffile_default}} : ${OCF_RESKEY_pidfile=${RESKEY_pidfile_default}} : ${OCF_RESKEY_monitoring_ip=${RESKEY_monitoring_ip_default}} : ${OCF_RESKEY_port=${RESKEY_port_default}} : ${OCF_RESKEY_proto=${RESKEY_proto_default}} : ${OCF_RESKEY_sipsak=${RESKEY_sipsak_default}} : ${OCF_RESKEY_kamctl=${RESKEY_kamctl_default}} : ${OCF_RESKEY_kamctlrc=${RESKEY_kamctlrc_default}} : ${OCF_RESKEY_kamuser=${RESKEY_kamuser_default}} +: ${OCF_RESKEY_kamgroup=${RESKEY_kamgroup_default}} +: ${OCF_RESKEY_extra_options=${RESKEY_extra_options_default}} ####################################################################### usage() { - cat < 1.0 Resource agent for the Kamailio SIP proxy/registrar. Multiple instances are possible when using following parameter combinations: Parameters for Kamailio instance 1: listen_address=192.168.159.128 monitoring_ip=192.168.159.128 proto=udptcp port=5060 Parameters for Kamailio instance 2: listen_address=192.168.159.128 monitoring_ip=192.168.159.128 proto=udp port=5070 conffile=/etc/kamailio/kamailio2.cfg kamctlrc="" Only one instance can be monitored via the command "kamctl monitor" because the kamctl tool of kamailio 4.x is not designed for multiple instances. Therefore, the provided kamctrlrc file path needs to be empty for instance 2, 3 ... Parameters for a third Kamailio instance: listen_address=192.168.159.128 monitoring_ip=192.168.159.128 proto=tcp port=5080 conffile=/etc/kamailio/kamailio3.cfg kamctlrc="" Resource agent for Kamailio The kamailio binary The kamailio binary - The kamailio configuration file name with full path. + The kamailio configuration file name with full path. For example, "/etc/kamailio/kamailio.cfg" , which is the default value. Make sure to use unique names in case of having multiple instances. Configuration file name with full path The kamailio PID file. The directory used must be writable by kamailio process user. Be sure to use unique name for running more than one instance. Try to use absolute path names. If empty, resource agent create a unique directory from the resource instance name for the PID file and assign it to the process user. 
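As an illustrative sketch only (not taken from the agent's own documentation),
the second instance described in the long description above could be defined
with crmsh roughly as follows; the primitive name p_kamailio2 is an assumption:

    primitive p_kamailio2 ocf:heartbeat:kamailio \
        params listen_address=192.168.159.128 monitoring_ip=192.168.159.128 \
               proto=udp port=5070 conffile=/etc/kamailio/kamailio2.cfg kamctlrc=""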
PID file SIP IP Address of the kamailio instance used for SIP OPTIONS polling monitoring. Usually the same IP address value as for parameter listen_address should be provided. In order to respond with a 200 OK response to the SIP OOPTION requests, the kamailio.cfg file needs to contain following section: Note: The following "kamailio.cfg" code sniplet is part of an XML section. Therefore it contains two & characters, which need to be replaced with two ampersand characters within "kamailio.cfg": if (is_method("OPTIONS") && ($ru=~"sip:monitor@.*")) { ## ## If the method is an OPTIONS we are simply going to respond ## with a 200 OK. # xlog("L_INFO", "Method is an OPTIONS, probably just monitoring\n"); sl_send_reply("200", "Kamailio is alive"); exit; } Monitoring IP address used for SIP OPTIONS polling. SIP IP address the kamailio will listen on. Listening SIP address SIP port for the kamailio instance. SIP Port + + + Extra options to add to kamailio start. + + extra_options + + + + - The protocol used for SIP proto = udp|tcp|udptcp. + The protocol used for SIP proto = udp|tcp|udptcp|conf_udp|conf_tcp|conf_udptcp. + Using the options "conf_*" does not add any "-l" parameters to the kamailio command, + the "listen" parameters from kamailio.conf are used instead. The sipsak checks are + performed depending what protocol is defined after the underscore. protocol The installation path of the sipsak tool, which is used for monitoring Kamailio via SIP OPTIONS polling. sipsak path The installation path of the "kamctl" control tool. kamctl path The location of the "kamctlrc" file for the Kamailio instance. The file "kamctlrc" is the Kamailio configuration file for its "kamctl" control tool. This parameter only needs to be provided in case of using multiple Kamailio server instances on a single cluster node: In case that the parameter "kamctlrc" is not empty, this ressource agent monitors the health state of the Kamailio server via the command "kamctl monitor 1". This setting is recommended in case of using a single Kamailio server instance. In case that the parameter "kamctlrc" is empty, the ressource agent does not monitor the health state of the Kamailio server instance via the "kamctl" command. Please note that the "kamctl" control command of Kamailio 4.x does not support running multiple Kamailio instances on one host. Nevertheless this resource agent does allow multiple Kamailio instances per host. The result of the "kamctl" limitation in terms of number of Kamailio server instances is that the health check via "kamctl monitor 1" can be configured for a single Kamailio instance only. Please refer to the long description of this resoure agent for an example of parameter combinations in case that multiple instances are to be configured per cluster node. kamctlrc path The user account for kamailio process to run with. Uses the current user, if not specified or empty. There is no check, if running kamailio with the specified user account is possible. kamailio user + + + + The group for kamailio process to run with. + Uses the current group, if not specified or empty. + + kamailio group + + END exit $OCF_SUCCESS } ####################################################################### ### #Check if a process with given PID is running # Parameter 1: PID ### isRunning_PID() { kill -s 0 "$1" > /dev/null 2>&1 } ### #Check if an instance with given command line is running # Parameter 1: command line. ### isRunning_cmd() { pkill -s 0 "$1" > /dev/null 2>&1 } ### # Formats the result of a command. 
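# Illustrative example of the formatting performed by this helper (editorial
# sketch; the values are made up):
#   kamailio_format_result 1 "" "connection refused"
#   -> exit status: 1, error: connection refused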
# # Parameter 1: Exit status. # Parameter 2: Standard output (stdout). # Parameter 3: Error output (stderr). # Returns: Formatted result. kamailio_format_result() { local exitstatus="$1" local value="$2" local error="$3" echo -n "exit status: ${exitstatus}" if [ -n "$value" ]; then echo -n ", value: ${value}" fi if [ -n "$error" ]; then echo -n ", error: ${error}" fi echo } ### # Put the command line, how the kamailio process is started according # to the configured parameters, into the variable "kam_cmd". ### kamailio_cmd() { case ${OCF_RESKEY_proto} in udp) listen_param="-T -l udp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l udp:127.0.0.1:${OCF_RESKEY_port}" ;; tcp) listen_param="-l tcp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l tcp:127.0.0.1:${OCF_RESKEY_port}" ;; udptcp) listen_param1="-l udp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l udp:127.0.0.1:${OCF_RESKEY_port}" listen_param2="-l tcp:${OCF_RESKEY_listen_address}:${OCF_RESKEY_port} -l tcp:127.0.0.1:${OCF_RESKEY_port}" listen_param="${listen_param1} ${listen_param2}" ;; + conf_*) + # doing nothing, no listen_param set + ;; *) listen_param="-T" ;; esac - kam_cmd="${OCF_RESKEY_binary} -P ${OCF_RESKEY_pidfile} -f ${OCF_RESKEY_conffile} $listen_param" + kam_cmd="${OCF_RESKEY_binary} -P ${OCF_RESKEY_pidfile} -f ${OCF_RESKEY_conffile}" + + if [ -n "${listen_param}" ]; then + kam_cmd="${kam_cmd} ${listen_param}" + fi + if [ -n "${OCF_RESKEY_kamuser}" ]; then + kam_cmd="${kam_cmd} -u ${OCF_RESKEY_kamuser}" + fi + if [ -n "${OCF_RESKEY_kamgroup}" ]; then + kam_cmd="${kam_cmd} -g ${OCF_RESKEY_kamgroup}" + fi + if [ -n "${OCF_RESKEY_extra_options}" ]; then + kam_cmd="${kam_cmd} ${OCF_RESKEY_extra_options}" + fi } ### # Gets the PID for the running Kamailio instance. # # Returns: The variable $PID contains the found PID value or an empty string. # Exit Status: Zero if the PID file was found and this process run under # the command line parameters of our instance. # 1) if the PID file is not present and no process running under # our command line options is active. # 2) in all other fatal cases, which we classify in the followig # as OCF_ERR_genering. These are folloing cases: # a) The PID file contains a PID value which does no match to # to our instance # b) The PID contains a empty string in its first line # c) The PID file contains some text and some processeses # from our instance are still active kamailio_get_pid() { if [ -f ${OCF_RESKEY_pidfile} ]; then PID=`head -n 1 $OCF_RESKEY_pidfile` if [ ! -z "$PID" ]; then #Cross check if the PID file really contains a process of our kamailio instance: kamailio_cmd CROSSPID=`pgrep -o -f "${kam_cmd}"` if [ x"$PID" == x"$CROSSPID" ]; then #ocf_log debug "Found kamailio process PID with value: $PID." return 0 fi #ocf_log debug "PID file does not contain a PID of a $OCF_RESKEY_binary process!" return 2 fi #PID file does not contain a valid PID rm -f ${OCF_RESKEY_pidfile} return 2 fi - # No PID file found! # Check if still a process exists even though we don't have the PID any longer: kamailio_cmd pgrep -f "${kam_cmd}" if [ $? -eq 0 ]; then ocf_log info "PID file does not contain a valid PID, but kamailio process is still active" return 2 fi ocf_log info "No PID file found and our kamailio instance is not active" return 1 } kamailio_status() { local not_running_log_level="warn" local errorfile error output if [ "$__OCF_ACTION" = "start" ]; then not_running_log_level="debug" fi kamailio_get_pid >/dev/null RET=$? 
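    # (Editorial note) per the contract documented above kamailio_get_pid:
    # RET=0 -> the PID file matches a running instance of this resource,
    # RET=1 -> neither a PID file nor a matching process exists,
    # RET=2 -> inconsistent state (stale or foreign PID file, or a matching
    #          process running without a usable PID file).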
if [ $RET -ne 0 ]; then if [ $RET -eq 2 ]; then ocf_log $not_running_log_level "PID file does not contain a PID of a ${OCF_RESKEY_binary} process!" return $OCF_ERR_GENERIC fi return $OCF_NOT_RUNNING fi PID=`head -n 1 $OCF_RESKEY_pidfile` isRunning_PID "$PID" RET=$? if [ "$RET" -ne 0 ]; then ocf_log $not_running_log_level "PID from $PID from ${OCF_RESKEY_pidfile} not running" rm -f ${OCF_RESKEY_pidfile} return $OCF_NOT_RUNNING fi rc=0 # In case that OCF_RESKEY_kamctlrc we perfom a health check via "kamctl monitor 1" if [ ! -z ${OCF_RESKEY_kamctlrc} ]; then # PID is running now but it is not save to check via kamctl without care, because # the implementation analysis in the case that we kill all running processes # shows that in case that the fifo cannot be read, then kamctl blocks. This needs # to be avoided. # In order to be on the safe side, we run this check therefore under "timeout" control: rc=1 - timeout 3 ${OCF_RESKEY_kamctl} monitor 1 |grep "Up since" ; rc=$? + timeout 3 ${OCF_RESKEY_kamctl} monitor 1 |grep "since" ; rc=$? fi if [ $rc -ne 0 ]; then ocf_log $not_running_log_level "Kamailio is not up according to kamctl monitor!" return $OCF_NOT_RUNNING fi errorfile=`mktemp` case ${OCF_RESKEY_proto} in udp) output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport udp>/dev/null 2>>$errorfile` result=$? ;; tcp) output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport tcp>/dev/null 2>>$errorfile` result=$? ;; udptcp) output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport tcp>/dev/null 2>>$errorfile` result=$? if [ $result -eq 0 ]; then output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport udp>/dev/null 2>>$errorfile` result=$? fi ;; *) output=`$OCF_RESKEY_sipsak -s sip:monitor@$OCF_RESKEY_monitoring_ip:${OCF_RESKEY_port} -H localhost --transport udp>/dev/null 2>>$errorfile` result=$? ;; esac error=`cat $errorfile` rm -f $errorfile if [ $result -ne 0 ]; then ocf_log $not_running_log_level "Kamailio is running, but not functional as sipsak ${OCF_RESKEY_proto} failed with $(kamailio_format_result $result "$output" "$error")" return $OCF_ERR_GENERIC fi return $OCF_SUCCESS } kamailio_monitor() { kamailio_status } kamailio_start() { local errorfile error output piddir if kamailio_status then ocf_log info "kamailio already running." return $OCF_SUCCESS fi # if pidfile directory does not exist, create it with kamailio process owner piddir=`dirname "${OCF_RESKEY_pidfile}"` if [ ! -d "$piddir" ]; then mkdir -p "$piddir" if [ "$OCF_RESKEY_kamuser" != "" ]; then chown ${OCF_RESKEY_kamuser} "$piddir" fi fi kamailio_cmd if [ "$OCF_RESKEY_kamuser" != "" ]; then kam_cmd="su -s /bin/bash $OCF_RESKEY_kamuser -c \"$kam_cmd\"" fi ocf_log info "start kamailio with $kam_cmd." errorfile=`mktemp` output=$(eval ${kam_cmd} 2>>$errorfile) result=$? error=`cat $errorfile` rm -f $errorfile if [ $result -eq 0 ]; then result=1 while [ $result -ne 0 ]; do sleep 1 kamailio_get_pid >/dev/null result=$? done ocf_log info "kamailio instance PID=$PID started." # check with monitor operation if running correctly result=$OCF_ERR_GENERIC while [ $result -ne $OCF_SUCCESS ]; do sleep 1 kamailio_monitor result=$? ocf_log info "monitor in start returned $result" done ocf_log info "kamailio started successful." 
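    # (Editorial reference) the monitor loop above ultimately performs the same
    # SIP OPTIONS check an administrator could run by hand, e.g. for UDP:
    #   sipsak -s sip:monitor@${OCF_RESKEY_monitoring_ip}:${OCF_RESKEY_port} -H localhost --transport udp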
else ocf_log err "kamailio instance could not be started, $(kamailio_format_result $result "$output" "$error")" result=$OCF_ERR_GENERIC fi return $result } kamailio_stop() { local piddir local TRIES=0 result=$OCF_SUCCESS kamailio_cmd ocf_log info "Stopping kamailio by sending SIGTERM to ${kam_cmd}" pkill -SIGTERM -x -f "${kam_cmd}" if [ $? -eq 1 ]; then # already stopped. no processes found # in case of not specified pidfile, delete the created directory # otherwise only the pidfile itself if [ "${OCF_RESKEY_pidfile}" == "${RESKEY_pidfile_default}" ]; then piddir=`dirname "${OCF_RESKEY_pidfile}"` rm -rf "$piddir" else rm -f "${OCF_RESKEY_pidfile}" fi return $result fi if [ "$OCF_RESKEY_CRM_meta_timeout" != "" ]; then KAMAILIO_STOP_TIMEOUT=$(( ($OCF_RESKEY_CRM_meta_timeout/1000) - 7 )) else KAMAILIO_STOP_TIMEOUT=20 fi while isRunning_cmd "${kam_cmd}" && [ "$TRIES" -lt "${KAMAILIO_STOP_TIMEOUT}" ] do sleep 1 ocf_log info "kamailio ${kam_cmd} is still running after SIGTERM" ((TRIES++)) done isRunning_cmd "${kam_cmd}" RET=$? if [ "$RET" -eq 0 ]; then ocf_log info "Killing ${kam_cmd} with SIGKILL" TRIES=0 pkill -SIGKILL -x -f "${kam_cmd}" > /dev/null 2>&1 while isRunning_cmd "${kam_cmd}" && [ "$TRIES" -lt 3 ] do sleep 1 ocf_log info "kamailio ${kam_cmd} is still running after SIGKILL" ((TRIES++)) done isRunning_cmd "${kam_cmd}" RET=$? if [ "$RET" -eq 0 ]; then ocf_log fatal "kamailio is still running even after SIGKILL" result=$OCF_ERR_GENERIC fi else ocf_log info "${kam_cmd} has stopped." fi # in case of not specified pidfile, delete the created directory # otherwise only the pidfile itself if [ "${OCF_RESKEY_pidfile}" == "${RESKEY_pidfile_default}" ]; then piddir=`dirname "${OCF_RESKEY_pidfile}"` rm -rf "$piddir" else rm -f "${OCF_RESKEY_pidfile}" fi return $result } kamailio_validate_all() { # Check if kamailio configuration is valid before starting the server if [ ! -f $OCF_RESKEY_binary ]; then ocf_log err "File OCF_RESKEY_binary [${OCF_RESKEY_binary}] does not exist!" return $OCF_NOT_INSTALLED fi out=$($OCF_RESKEY_binary -c 2>&1 > /dev/null) retcode=$? if [ "$retcode" -ne '0' ]; then ocf_log info "Not starting kamailio: $OCF_RESKEY_binary does not start!" return $OCF_ERR_CONFIGURED fi case $OCF_RESKEY_monitoring_ip in "") ocf_log err "Required parameter OCF_RESKEY_monitoring_ip is missing!" return $OCF_ERR_CONFIGURED ;; [0-9]*.[0-9]*.[0-9]*.[0-9]*) : OK ;; *) ocf_log err "Parameter OCF_RESKEY_monitoring_ip [$OCF_RESKEY_monitoring_ip] is not an IP address!" return $OCF_ERR_CONFIGURED ;; esac case $OCF_RESKEY_listen_address in "") ocf_log err "Required parameter $OCF_RESKEY_listen_address is missing!" return $OCF_ERR_CONFIGURED ;; [0-9]*.[0-9]*.[0-9]*.[0-9]*) : OK ;; *) ocf_log err "Parameter OCF_RESKEY_listen_address [$OCF_RESKEY_listen_address] not an IP address!" return $OCF_ERR_CONFIGURED ;; esac if [ ! -f ${OCF_RESKEY_sipsak} ]; then ocf_log err "sipsak [${OCF_RESKEY_sipsak}] does not exist!" return $OCF_NOT_INSTALLED fi if [ ! -z ${OCF_RESKEY_kamctlrc} ]; then if [ ! -f ${OCF_RESKEY_kamctlrc} ]; then ocf_log err "kamctlrc file [${kamctlrc}] does not exist!" return $OCF_NOT_INSTALLED fi else ocf_log debug "No monitoring via kamctl monitor because the parameter [kamctlrc] is empty." fi if [ ! -f ${OCF_RESKEY_conffile} ]; then ocf_log err "Kamailio configuration file provided in the parameter conffile [${OCF_RESKEY_conffile}] does not exist!" return $OCF_ERR_CONFIGURED fi case $OCF_RESKEY_proto in "") ocf_log err "Parameter $OCF_RESKEY_proto is empty!" 
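    # (Editorial worked example for kamailio_stop above) with a cluster stop
    # timeout of 60000 ms, KAMAILIO_STOP_TIMEOUT becomes 60000/1000 - 7 = 53
    # seconds of SIGTERM polling before SIGKILL is attempted; without
    # OCF_RESKEY_CRM_meta_timeout the fallback is 20 seconds.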
return $OCF_ERR_CONFIGURED ;; udp|tcp|udptcp) : OK ;; *) ocf_log err "Parameter value $OCF_RESKEY_proto for parameter [proto] not yet supported!" return $OCF_ERR_CONFIGURED ;; esac return $OCF_SUCCESS } if [ $# -ne 1 ]; then usage exit $OCF_ERR_ARGS fi case $__OCF_ACTION in meta-data) meta_data exit $OCF_SUCCESS ;; start|stop|status|monitor) kamailio_${__OCF_ACTION} ;; validate-all) kamailio_validate_all ;; notify) exit $OCF_SUCCESS ;; usage) usage exit $OCF_SUCCESS ;; # reload) #Not supported by Kamailio, but not needed by pacemaker # ;; # recover #Not needed by pacemaker # ;; *) usage exit $OCF_ERR_UNIMPLEMENTED ;; esac exit $? diff --git a/heartbeat/mysql b/heartbeat/mysql index e76213b0d..1b24248e4 100755 --- a/heartbeat/mysql +++ b/heartbeat/mysql @@ -1,1045 +1,1061 @@ #!/bin/sh # # # MySQL # # Description: Manages a MySQL database as Linux-HA resource # # Authors: Alan Robertson: DB2 Script # Jakub Janczak: rewrite as MySQL # Andrew Beekhof: cleanup and import # Sebastian Reitenbach: add OpenBSD defaults, more cleanup # Narayan Newton: add Gentoo/Debian defaults # Marian Marinov, Florian Haas: add replication capability # Yves Trudeau, Baron Schwartz: add VIP support and improve replication # # Support: users@clusterlabs.org # License: GNU General Public License (GPL) # # (c) 2002-2005 International Business Machines, Inc. # 2005-2010 Linux-HA contributors # # An example usage in /etc/ha.d/haresources: # node1 10.0.0.170 mysql # # See usage() function below for more details... # # OCF instance parameters: # OCF_RESKEY_binary # OCF_RESKEY_client_binary # OCF_RESKEY_config # OCF_RESKEY_datadir # OCF_RESKEY_user # OCF_RESKEY_group # OCF_RESKEY_test_table # OCF_RESKEY_test_user # OCF_RESKEY_test_passwd # OCF_RESKEY_enable_creation # OCF_RESKEY_additional_parameters # OCF_RESKEY_log # OCF_RESKEY_pid # OCF_RESKEY_socket # OCF_RESKEY_replication_user # OCF_RESKEY_replication_passwd # OCF_RESKEY_replication_port # OCF_RESKEY_max_slave_lag # OCF_RESKEY_evict_outdated_slaves # OCF_RESKEY_reader_attribute ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs . ${OCF_FUNCTIONS_DIR}/mysql-common.sh ####################################################################### usage() { cat < 1.0 Resource script for MySQL. May manage a standalone MySQL database, a clone set with externally managed replication, or a complete master/slave replication setup. Note, when master/slave replication is in use, the resource must be setup to use notifications. Set 'notify=true' in the metadata attributes when defining a MySQL master/slave instance. While managing replication, the default behavior is to use uname -n values in the change master to command. Other IPs can be specified manually by adding a node attribute \${INSTANCE_ATTR_NAME}_mysql_master_IP giving the IP to use for replication. For example, if the mysql primitive you are using is p_mysql, the attribute to set will be p_mysql_mysql_master_IP. Manages a MySQL database instance Location of the MySQL server binary MySQL server binary Location of the MySQL client binary MySQL client binary Configuration file MySQL config Directory containing databases MySQL datadir User running MySQL daemon MySQL user Group running MySQL daemon (for logfile and directory permissions) MySQL group The logfile to be used for mysqld. MySQL log file The pidfile to be used for mysqld. MySQL pid file The socket to be used for mysqld. 
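A hedged illustration (not from the agent documentation) of the master/slave
setup described above, in crmsh syntax; resource names and credentials are
assumptions made only for the example:

    primitive p_mysql ocf:heartbeat:mysql \
        params replication_user=repl replication_passwd=secret
    ms ms_mysql p_mysql \
        meta notify=true master-max=1 clone-max=2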
MySQL socket Table to be tested in monitor statement (in database.table notation) MySQL test table MySQL test user, must have select privilege on test_table MySQL test user MySQL test user password MySQL test user password If the MySQL database does not exist, it will be created Create the database if it does not exist Additional parameters which are passed to the mysqld on startup. (e.g. --skip-external-locking or --skip-grant-tables) Additional parameters to pass to mysqld MySQL replication user. This user is used for starting and stopping MySQL replication, for setting and resetting the master host, and for setting and unsetting read-only mode. Because of that, this user must have SUPER, REPLICATION SLAVE, REPLICATION CLIENT, PROCESS and RELOAD privileges on all nodes within the cluster. Mandatory if you define a master-slave resource. MySQL replication user MySQL replication password. Used for replication client and slave. Mandatory if you define a master-slave resource. MySQL replication user password The port on which the Master MySQL instance is listening. MySQL replication port The maximum number of seconds a replication slave is allowed to lag behind its master. Do not set this to zero. What the cluster manager does in case a slave exceeds this maximum lag is determined by the evict_outdated_slaves parameter. Maximum time (seconds) a MySQL slave is allowed to lag behind a master If set to true, any slave which is more than max_slave_lag seconds behind the master has its MySQL instance shut down. If this parameter is set to false in a primitive or clone resource, it is simply ignored. If set to false in a master/slave resource, then exceeding the maximum slave lag will merely push down the master preference so the lagging slave is never promoted to the new master. Determines whether to shut down badly lagging slaves An attribute that the RA can manage to specify whether a node can be read from. This node attribute will be 1 if it's fine to read from the node, and 0 otherwise (for example, when a slave has lagged too far behind the master). A typical example for the use of this attribute would be to tie a set of IP addresses to MySQL slaves that can be read from. This parameter is only meaningful in master/slave set configurations. Sets the node attribute that determines whether a node is usable for clients to read from. END } # Convenience functions set_read_only() { # Sets or unsets read-only mode. Accepts one boolean as its # optional argument. If invoked without any arguments, defaults to # enabling read only mode. Should only be set in master/slave # setups. # Returns $OCF_SUCCESS if the operation succeeds, or # $OCF_ERR_GENERIC if it fails. local ro_val if ocf_is_true $1; then ro_val="on" else ro_val="off" fi ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "SET GLOBAL read_only=${ro_val}" } get_read_only() { # Check if read-only is set local read_only_state read_only_state=`$MYSQL $MYSQL_OPTIONS_REPL \ - -e "SHOW VARIABLES" | grep -w read_only | awk '{print $2}'` + --skip-column-names -e "SHOW VARIABLES LIKE 'read_only'" | awk '{print $2}'` if [ "$read_only_state" = "ON" ]; then return 0 else return 1 fi } is_slave() { # Determine whether the machine is currently running as a MySQL # slave, as determined per SHOW SLAVE STATUS. Returns 1 if SHOW # SLAVE STATUS creates an empty result set, 0 otherwise. local rc local tmpfile # Check whether this machine should be slave if ! ocf_is_ms || ! get_read_only; then return 1 fi get_slave_info rc=$? 
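    # (Editorial reference) the read-only probe in get_read_only above boils
    # down to:
    #   mysql --skip-column-names -e "SHOW VARIABLES LIKE 'read_only'" | awk '{print $2}'
    # which prints ON or OFF for the monitored server.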
rm -f $tmpfile if [ $rc -eq 0 ]; then # show slave status is not empty # Is there a master_log_file defined? (master_log_file is deleted # by reset slave if [ "$master_log_file" ]; then return 0 else return 1 fi else # "SHOW SLAVE STATUS" returns an empty set if instance is not a # replication slave return 1 fi } parse_slave_info() { # Extracts field $1 from result of "SHOW SLAVE STATUS\G" from file $2 sed -ne "s/^.* $1: \(.*\)$/\1/p" < $2 } get_slave_info() { # Warning: this sets $tmpfile and LEAVE this file! You must delete it after use! local mysql_options if [ "$master_log_file" -a "$master_host" ]; then # variables are already defined, get_slave_info has been run before return $OCF_SUCCESS else tmpfile=`mktemp ${HA_RSCTMP}/check_slave.${OCF_RESOURCE_INSTANCE}.XXXXXX` $MYSQL $MYSQL_OPTIONS_REPL \ -e 'SHOW SLAVE STATUS\G' > $tmpfile if [ -s $tmpfile ]; then master_host=`parse_slave_info Master_Host $tmpfile` master_user=`parse_slave_info Master_User $tmpfile` master_port=`parse_slave_info Master_Port $tmpfile` master_log_file=`parse_slave_info Master_Log_File $tmpfile` master_log_pos=`parse_slave_info Read_Master_Log_Pos $tmpfile` slave_sql=`parse_slave_info Slave_SQL_Running $tmpfile` slave_io=`parse_slave_info Slave_IO_Running $tmpfile` last_errno=`parse_slave_info Last_Errno $tmpfile` secs_behind=`parse_slave_info Seconds_Behind_Master $tmpfile` ocf_log debug "MySQL instance running as a replication slave" else # Instance produced an empty "SHOW SLAVE STATUS" output -- # instance is not a slave ocf_exit_reason "check_slave invoked on an instance that is not a replication slave." return $OCF_ERR_GENERIC fi return $OCF_SUCCESS fi } check_slave() { # Checks slave status local rc new_master get_slave_info rc=$? if [ $rc -eq 0 ]; then # Did we receive an error other than max_connections? if [ $last_errno -ne 0 -a $last_errno -ne "$MYSQL_TOO_MANY_CONN_ERR" ]; then # Whoa. Replication ran into an error. This slave has # diverged from its master. Make sure this resource # doesn't restart in place. ocf_exit_reason "MySQL instance configured for replication, but replication has failed." ocf_log err "See $tmpfile for details" # Just pull the reader VIP away, killing MySQL here would be pretty evil # on a loaded server set_reader_attr 0 exit $OCF_SUCCESS fi # If we got max_connections, let's remove the vip if [ $last_errno -eq "$MYSQL_TOO_MANY_CONN_ERR" ]; then set_reader_attr 0 exit $OCF_SUCCESS fi if [ "$slave_io" != 'Yes' ]; then # Not necessarily a bad thing. The master may have # temporarily shut down, and the slave may just be # reconnecting. A warning can't hurt, though. ocf_log warn "MySQL Slave IO threads currently not running." # Sanity check, are we at least on the right master new_master=`$CRM_ATTR_REPL_INFO --query -q | cut -d'|' -f1` if [ "$master_host" != "$new_master" ]; then # Not pointing to the right master, not good, removing the VIPs set_reader_attr 0 exit $OCF_SUCCESS fi fi if [ "$slave_sql" != 'Yes' ]; then # We don't have a replication SQL thread running. Not a # good thing. Try to recoved by restarting the SQL thread # and remove reader vip. Prevent MySQL restart. ocf_exit_reason "MySQL Slave SQL threads currently not running." ocf_log err "See $tmpfile for details" # Remove reader vip set_reader_attr 0 # try to restart slave ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "START SLAVE" # Return success to prevent a restart exit $OCF_SUCCESS fi if ocf_is_true $OCF_RESKEY_evict_outdated_slaves; then # We're supposed to bail out if we lag too far # behind. Let's check our lag. 
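            # (Editorial note) Seconds_Behind_Master is typically reported as
            # NULL when the replication threads are not running, so the check
            # below treats NULL the same as exceeding max_slave_lag.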
if [ "$secs_behind" = "NULL" ] || [ $secs_behind -gt $OCF_RESKEY_max_slave_lag ]; then ocf_exit_reason "MySQL Slave is $secs_behind seconds behind master (allowed maximum: $OCF_RESKEY_max_slave_lag)." ocf_log err "See $tmpfile for details" # Remove reader vip set_reader_attr 0 exit $OCF_ERR_INSTALLED fi elif ocf_is_ms; then # Even if we're not set to evict lagging slaves, we can # still use the seconds behind master value to set our # master preference. local master_pref master_pref=$((${OCF_RESKEY_max_slave_lag}-${secs_behind})) if [ $master_pref -lt 0 ]; then # Sanitize a below-zero preference to just zero master_pref=0 fi $CRM_MASTER -v $master_pref fi # is the slave ok to have a VIP on it if [ "$secs_behind" = "NULL" ] || [ $secs_behind -gt $OCF_RESKEY_max_slave_lag ]; then set_reader_attr 0 else set_reader_attr 1 fi ocf_log debug "MySQL instance running as a replication slave" rm -f $tmpfile else # Instance produced an empty "SHOW SLAVE STATUS" output -- # instance is not a slave # TODO: Needs to handle when get_slave_info will return too many connections error rm -f $tmpfile ocf_exit_reason "check_slave invoked on an instance that is not a replication slave." exit $OCF_ERR_GENERIC fi } set_master() { local new_master master_log_file master_log_pos local master_params new_master=`$CRM_ATTR_REPL_INFO --query -q | cut -d'|' -f1` # Keep replication position get_slave_info if [ "$master_log_file" -a "$new_master" = "$master_host" ]; then # master_params=", MASTER_LOG_FILE='$master_log_file', \ # MASTER_LOG_POS=$master_log_pos" ocf_log info "Kept master pos for $master_host : $master_log_file:$master_log_pos" rm -f $tmpfile return else master_log_file=`$CRM_ATTR_REPL_INFO --query -q | cut -d'|' -f2` master_log_pos=`$CRM_ATTR_REPL_INFO --query -q | cut -d'|' -f3` if [ -n "$master_log_file" -a -n "$master_log_pos" ]; then master_params=", MASTER_LOG_FILE='$master_log_file', \ MASTER_LOG_POS=$master_log_pos" ocf_log info "Restored master pos for $new_master : $master_log_file:$master_log_pos" fi fi # Informs the MySQL server of the master to replicate # from. Accepts one mandatory argument which must contain the host # name of the new master host. The master must either be unchanged # from the laste master the slave replicated from, or freshly # reset with RESET MASTER. ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "CHANGE MASTER TO MASTER_HOST='$new_master', \ MASTER_PORT=$OCF_RESKEY_replication_port, \ MASTER_USER='$OCF_RESKEY_replication_user', \ MASTER_PASSWORD='$OCF_RESKEY_replication_passwd' $master_params" rm -f $tmpfile } unset_master(){ # Instructs the MySQL server to stop replicating from a master # host. # If we're currently not configured to be replicating from any # host, then there's nothing to do. But we do log a warning as # no-one but the CRM should be touching the MySQL master/slave # configuration. if ! 
is_slave; then ocf_log warn "Attempted to unset the replication master on an instance that is not configured as a replication slave" return $OCF_SUCCESS fi local tmpfile tmpfile=`mktemp ${HA_RSCTMP}/unset_master.${OCF_RESOURCE_INSTANCE}.XXXXXX` # At this point, the master is read only so there should not be much binlogs to transfer # Let's wait for the last bits while true; do $MYSQL $MYSQL_OPTIONS_REPL \ -e 'SHOW PROCESSLIST\G' > $tmpfile if grep -i 'Waiting for master to send event' $tmpfile >/dev/null; then ocf_log info "MySQL slave has finished reading master binary log" break fi if grep -i 'Reconnecting after a failed master event read' $tmpfile >/dev/null; then ocf_log info "Master is down, no more binary logs to come" break fi if grep -i 'Connecting to master' $tmpfile >/dev/null; then ocf_log info "Master is down, no more binary logs to come" break fi if ! grep 'system user' $tmpfile >/dev/null; then ocf_log info "Slave is not running - not waiting to finish" break fi sleep 1 done # Now, stop the slave I/O thread and wait for relay log # processing to complete ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "STOP SLAVE IO_THREAD" if [ $? -gt 0 ]; then ocf_exit_reason "Error stopping slave IO thread" exit $OCF_ERR_GENERIC fi while true; do $MYSQL $MYSQL_OPTIONS_REPL \ -e 'SHOW PROCESSLIST\G' > $tmpfile if grep -i 'Has read all relay log' $tmpfile >/dev/null; then ocf_log info "MySQL slave has finished processing relay log" break fi if ! grep -q 'system user' $tmpfile; then ocf_log info "Slave not runnig - not waiting to finish" break fi ocf_log info "Waiting for MySQL slave to finish processing relay log" sleep 1 done rm -f $tmpfile # Now, stop all slave activity and unset the master host ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "STOP SLAVE" if [ $? -gt 0 ]; then ocf_exit_reason "Error stopping rest slave threads" exit $OCF_ERR_GENERIC fi ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "RESET SLAVE /*!50516 ALL */;" if [ $? -gt 0 ]; then ocf_exit_reason "Failed to reset slave" exit $OCF_ERR_GENERIC fi } # Start replication as slave start_slave() { ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "START SLAVE" } # Set the attribute controlling the readers VIP set_reader_attr() { local curr_attr_value curr_attr_value=$(get_reader_attr) if [ "$curr_attr_value" -ne "$1" ]; then $CRM_ATTR -l reboot --name ${OCF_RESKEY_reader_attribute} -v $1 fi } # get the attribute controlling the readers VIP get_reader_attr() { local attr_value local rc attr_value=`$CRM_ATTR -l reboot --name ${OCF_RESKEY_reader_attribute} --query -q` rc=$? if [ "$rc" -eq "0" ]; then echo $attr_value else echo -1 fi } # Stores data for MASTER STATUS from MySQL update_data_master_status() { master_status_file="${HA_RSCTMP}/master_status.${OCF_RESOURCE_INSTANCE}" $MYSQL $MYSQL_OPTIONS_REPL -e "SHOW MASTER STATUS\G" > $master_status_file } # Returns the specified value from the stored copy of SHOW MASTER STATUS. # should be call after update_data_master_status for tmpfile # Arguments: # $1 The value to get. get_master_status() { awk -v var="$1" '$1 == var ":" {print substr($0, index($0, ":") + 2)}' "$master_status_file" } # Determines what IP address is attached to the current host. The output of the # crm_attribute command looks like this: # scope=nodes name=IP value=10.2.2.161 # If the ${INSTANCE_ATTR_NAME}_MYSQL_MASTER_IP node attribute is not defined, fallback is to uname -n # The ${INSTANCE_ATTR_NAME}_MYSQL_MASTER_IP is the IP address that will be used for the # change master to command. 
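# (Editorial sketch, not part of the agent) an administrator could set that
# node attribute for a primitive named p_mysql on node node1 with, for example:
#   crm_attribute --node node1 --lifetime forever \
#       --name p_mysql_mysql_master_IP --update 10.2.2.161
# The primitive name, node name and address are placeholders for illustration.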
get_local_ip() { local IP IP=`$CRM_ATTR -l forever -n ${INSTANCE_ATTR_NAME}_mysql_master_IP -q -G` if [ ! $? -eq 0 ]; then uname -n else echo $IP fi } ####################################################################### # Functions invoked by resource manager actions mysql_monitor() { local rc local status_loglevel="err" # Set loglevel to info during probe if ocf_is_probe; then status_loglevel="info" fi mysql_common_status $status_loglevel - rc=$? # TODO: check max connections error # If status returned an error, return that immediately if [ $rc -ne $OCF_SUCCESS ]; then + if ocf_is_ms ; then + # This is a master slave setup but monitored host returned some errors. + # Immediately remove it from the pool of possible masters by erasing its master-mysql key + # When new mysql master election is started and node got no or negative master-mysql attribute the following is logged + # nodename.com pengine: debug: master_color: mysql:0 master score: -1 + # If there are NO nodes with positive vaule election of mysql master will fail with + # nodename.com pengine: info: master_color: ms_mysql: Promoted 0 instances of a possible 1 to master + $CRM_MASTER -D + fi + return $rc fi if [ $OCF_CHECK_LEVEL -gt 0 -a -n "$OCF_RESKEY_test_table" ]; then # Check if this instance is configured as a slave, and if so # check slave status if is_slave; then check_slave fi # Check for test table ocf_run -q $MYSQL $MYSQL_OPTIONS_TEST \ -e "SELECT COUNT(*) FROM $OCF_RESKEY_test_table" rc=$? if [ $rc -ne 0 ]; then + # We are master/slave and test failed. Delete master score for this node as it is considered unhealthy because of this particular failed check. + ocf_is_ms && $CRM_MASTER -D ocf_exit_reason "Failed to select from $test_table"; return $OCF_ERR_GENERIC; fi + else + # In case no exnteded tests are enabled and we are in master/slave mode _always_ set the master score to 1 if we reached this point + ocf_is_ms && $CRM_MASTER -v 1 fi if ocf_is_ms && ! get_read_only; then ocf_log debug "MySQL monitor succeeded (master)"; + # Always set master score for the master + $CRM_MASTER -v 2 return $OCF_RUNNING_MASTER else ocf_log debug "MySQL monitor succeeded"; return $OCF_SUCCESS fi } mysql_start() { local rc if ocf_is_ms; then # Initialize the ReaderVIP attribute, monitor will enable it set_reader_attr 0 fi mysql_common_status info if [ $? = $OCF_SUCCESS ]; then ocf_log info "MySQL already running" return $OCF_SUCCESS fi mysql_common_prepare_dirs # Uncomment to perform permission clensing # - not convinced this should be enabled by default # #chmod 0755 $OCF_RESKEY_datadir #chown -R $OCF_RESKEY_user $OCF_RESKEY_datadir #chgrp -R $OCF_RESKEY_group $OCF_RESKEY_datadir mysql_extra_params= if ocf_is_ms; then mysql_extra_params="--skip-slave-start" fi mysql_common_start $mysql_extra_params rc=$? if [ $rc != $OCF_SUCCESS ]; then return $rc fi if ocf_is_ms; then # We're configured as a stateful resource. We must start as # slave by default. At this point we don't know if the CRM has # already promoted a master. So, we simply start in read only # mode. set_read_only on # Now, let's see whether there is a master. We might be a new # node that is just joining the cluster, and the CRM may have # promoted a master before. master_host=`echo $OCF_RESKEY_CRM_meta_notify_master_uname|tr -d " "` if [ "$master_host" -a "$master_host" != ${NODENAME} ]; then ocf_log info "Changing MySQL configuration to replicate from $master_host." set_master start_slave if [ $? 
-ne 0 ]; then ocf_exit_reason "Failed to start slave" return $OCF_ERR_GENERIC fi else ocf_log info "No MySQL master present - clearing replication state" unset_master fi # We also need to set a master preference, otherwise Pacemaker # won't ever promote us in the absence of any explicit # preference set by the administrator. We choose a low # greater-than-zero preference. $CRM_MASTER -v 1 fi # Initial monitor action if [ -n "$OCF_RESKEY_test_table" -a -n "$OCF_RESKEY_test_user" -a -n "$OCF_RESKEY_test_passwd" ]; then OCF_CHECK_LEVEL=10 fi mysql_monitor rc=$? if [ $rc != $OCF_SUCCESS -a $rc != $OCF_RUNNING_MASTER ]; then ocf_exit_reason "Failed initial monitor action" return $rc fi ocf_log info "MySQL started" return $OCF_SUCCESS } mysql_stop() { if ocf_is_ms; then # clear preference for becoming master $CRM_MASTER -D # Remove VIP capability set_reader_attr 0 fi mysql_common_stop } mysql_promote() { local master_info if ( ! mysql_common_status err ); then return $OCF_NOT_RUNNING fi ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "STOP SLAVE" # Set Master Info in CIB, cluster level attribute update_data_master_status master_info="$(get_local_ip)|$(get_master_status File)|$(get_master_status Position)" ${CRM_ATTR_REPL_INFO} -v "$master_info" rm -f $tmpfile set_read_only off || return $OCF_ERR_GENERIC # Existing master gets a higher-than-default master preference, so # the cluster manager does not shuffle the master role around # unnecessarily $CRM_MASTER -v $((${OCF_RESKEY_max_slave_lag}+1)) # A master can accept reads set_reader_attr 1 return $OCF_SUCCESS } mysql_demote() { if ! mysql_common_status err; then return $OCF_NOT_RUNNING fi # Return master preference to default, so the cluster manager gets # a chance to select a new master $CRM_MASTER -v 1 } mysql_notify() { # If not configured as a Stateful resource, we make no sense of # notifications. if ! ocf_is_ms; then ocf_log info "This agent makes no use of notifications unless running in master/slave mode." return $OCF_SUCCESS fi local type_op type_op="${OCF_RESKEY_CRM_meta_notify_type}-${OCF_RESKEY_CRM_meta_notify_operation}" ocf_log debug "Received $type_op notification." case "$type_op" in 'pre-promote') # Nothing to do now here, new replication info not yet published ;; 'post-promote') # The master has completed its promotion. Now is a good # time to check whether our replication slave is working # correctly. master_host=`echo $OCF_RESKEY_CRM_meta_notify_promote_uname|tr -d " "` if [ "$master_host" = ${NODENAME} ]; then ocf_log info "This will be the new master, ignoring post-promote notification." else ocf_log info "Resetting replication" unset_master if [ $? -ne 0 ]; then return $OCF_ERR_GENERIC fi ocf_log info "Changing MySQL configuration to replicate from $master_host" set_master if [ $? -ne 0 ]; then return $OCF_ERR_GENERIC fi start_slave if [ $? -ne 0 ]; then ocf_exit_reason "Failed to start slave" return $OCF_ERR_GENERIC fi fi return $OCF_SUCCESS ;; 'pre-demote') demote_host=`echo $OCF_RESKEY_CRM_meta_notify_demote_uname|tr -d " "` if [ $demote_host = ${NODENAME} ]; then ocf_log info "post-demote notification for $demote_host" set_read_only on if [ $? 
-ne 0 ]; then ocf_exit_reason "Failed to set read-only"; return $OCF_ERR_GENERIC; fi # Must kill all existing user threads because they are still Read/write # in order for the slaves to complete the read of binlogs local tmpfile tmpfile=`mktemp ${HA_RSCTMP}/threads.${OCF_RESOURCE_INSTANCE}.XXXXXX` $MYSQL $MYSQL_OPTIONS_REPL \ -e "SHOW PROCESSLIST" > $tmpfile for thread in `awk '$0 !~ /Binlog Dump|system user|event_scheduler|SHOW PROCESSLIST/ && $0 ~ /^[0-9]/ {print $1}' $tmpfile` do ocf_run $MYSQL $MYSQL_OPTIONS_REPL \ -e "KILL ${thread}" done else ocf_log info "Ignoring post-demote notification execpt for my own demotion." fi return $OCF_SUCCESS ;; 'post-demote') demote_host=`echo $OCF_RESKEY_CRM_meta_notify_demote_uname|tr -d " "` if [ $demote_host = ${NODENAME} ]; then ocf_log info "Ignoring post-demote notification for my own demotion." return $OCF_SUCCESS fi ocf_log info "post-demote notification for $demote_host." # The former master has just been gracefully demoted. unset_master ;; *) return $OCF_SUCCESS ;; esac } ####################################################################### ########################################################################## # If DEBUG_LOG is set, make this resource agent easy to debug: set up the # debug log and direct all output to it. Otherwise, redirect to /dev/null. # The log directory must be a directory owned by root, with permissions 0700, # and the log must be writable and not a symlink. ########################################################################## DEBUG_LOG="/tmp/mysql.ocf.ra.debug/log" if [ "${DEBUG_LOG}" -a -w "${DEBUG_LOG}" -a ! -L "${DEBUG_LOG}" ]; then DEBUG_LOG_DIR="${DEBUG_LOG%/*}" if [ -d "${DEBUG_LOG_DIR}" ]; then exec 9>>"$DEBUG_LOG" exec 2>&9 date >&9 echo "$*" >&9 env | grep OCF_ | sort >&9 set -x else exec 9>/dev/null fi fi case "$1" in meta-data) meta_data exit $OCF_SUCCESS;; usage|help) usage exit $OCF_SUCCESS;; esac mysql_common_validate rc=$? LSB_STATUS_STOPPED=3 if [ $rc -ne 0 ]; then case "$1" in stop) ;; monitor) mysql_common_status "info" if [ $? -eq $OCF_SUCCESS ]; then # if validatation fails and pid is active, always treat this as an error ocf_exit_reason "environment validation failed, active pid is in unknown state." exit $OCF_ERR_GENERIC fi # validation failed and pid is not active, it's safe to say this instance is inactive. exit $OCF_NOT_RUNNING;; status) exit $LSB_STATUS_STOPPED;; *) exit $rc;; esac fi # What kind of method was invoked? case "$1" in start) mysql_start;; stop) mysql_stop;; status) mysql_common_status err;; monitor) mysql_monitor;; promote) mysql_promote;; demote) mysql_demote;; notify) mysql_notify;; validate-all) exit $OCF_SUCCESS;; *) usage exit $OCF_ERR_UNIMPLEMENTED;; esac # vi:sw=4:ts=4:et: diff --git a/heartbeat/ocf-shellfuncs.in b/heartbeat/ocf-shellfuncs.in index 08a672451..ebc221d5f 100644 --- a/heartbeat/ocf-shellfuncs.in +++ b/heartbeat/ocf-shellfuncs.in @@ -1,922 +1,938 @@ # # # Common helper functions for the OCF Resource Agents supplied by # heartbeat. # # Copyright (c) 2004 SUSE LINUX AG, Lars Marowsky-Brée # All Rights Reserved. # # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. 
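# (Editorial aside on the mysql DEBUG_LOG hook above, not part of this library.)
# Per the conditions stated there, a root user could enable it with, e.g.:
#   mkdir -m 0700 /tmp/mysql.ocf.ra.debug
#   touch /tmp/mysql.ocf.ra.debug/log
# after which each mysql agent invocation appends its arguments, OCF_
# environment and a shell trace to that log.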
# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Build version: $Format:%H$ # TODO: Some of this should probably split out into a generic OCF # library for shell scripts, but for the time being, we'll just use it # ourselves... # # TODO wish-list: # - Generic function for evaluating version numbers # - Generic function(s) to extract stuff from our own meta-data # - Logging function which automatically adds resource identifier etc # prefixes # TODO: Move more common functionality for OCF RAs here. # # This was common throughout all legacy Heartbeat agents unset LC_ALL; export LC_ALL unset LANGUAGE; export LANGUAGE __SCRIPT_NAME=`basename $0` if [ -z "$OCF_ROOT" ]; then : ${OCF_ROOT=@OCF_ROOT_DIR@} fi if [ "$OCF_FUNCTIONS_DIR" = ${OCF_ROOT}/resource.d/heartbeat ]; then # old unset OCF_FUNCTIONS_DIR fi : ${OCF_FUNCTIONS_DIR:=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-binaries . ${OCF_FUNCTIONS_DIR}/ocf-returncodes . ${OCF_FUNCTIONS_DIR}/ocf-directories . ${OCF_FUNCTIONS_DIR}/ocf-rarun . ${OCF_FUNCTIONS_DIR}/ocf-distro # Define OCF_RESKEY_CRM_meta_interval in case it isn't already set, # to make sure that ocf_is_probe() always works : ${OCF_RESKEY_CRM_meta_interval=0} ocf_is_root() { if [ X`id -u` = X0 ]; then true else false fi } ocf_maybe_random() { local rnd="$RANDOM" # Something sane-ish in case a shell doesn't support $RANDOM [ -n "$rnd" ] || rnd=$$ echo $rnd } # Portability comments: # o The following rely on Bourne "sh" pattern-matching, which is usually # that for filename generation (note: not regexp). # o The "*) true ;;" clause is probably unnecessary, but is included # here for completeness. # o The negation in the pattern uses "!". This seems to be common # across many OSes (whereas the alternative "^" fails on some). # o If an OS is encountered where this negation fails, then a possible # alternative would be to replace the function contents by (e.g.): # [ -z "`echo $1 | tr -d '[0-9]'`" ] # ocf_is_decimal() { case "$1" in ""|*[!0-9]*) # empty, or at least one non-decimal false ;; *) true ;; esac } ocf_is_true() { case "$1" in yes|true|1|YES|TRUE|ja|on|ON) true ;; *) false ;; esac } ocf_is_hex() { case "$1" in ""|*[!0-9a-fA-F]*) # empty, or at least one non-hex false ;; *) true ;; esac } ocf_is_octal() { case "$1" in ""|*[!0-7]*) # empty, or at least one non-octal false ;; *) true ;; esac } __ocf_set_defaults() { __OCF_ACTION="$1" # Return to sanity for the agents... unset LANG LC_ALL=C export LC_ALL # TODO: Review whether we really should source this. Or rewrite # to match some emerging helper function syntax...? This imports # things which no OCF RA should be using... # Strip the OCF_RESKEY_ prefix from this particular parameter if [ -z "$OCF_RESKEY_OCF_CHECK_LEVEL" ]; then : ${OCF_CHECK_LEVEL:=0} else : ${OCF_CHECK_LEVEL:=$OCF_RESKEY_OCF_CHECK_LEVEL} fi if [ ! -d "$OCF_ROOT" ]; then ha_log "ERROR: OCF_ROOT points to non-directory $OCF_ROOT." 
exit $OCF_ERR_GENERIC fi if [ -z "$OCF_RESOURCE_TYPE" ]; then : ${OCF_RESOURCE_TYPE:=$__SCRIPT_NAME} fi if [ "x$__OCF_ACTION" = "xmeta-data" ]; then : ${OCF_RESOURCE_INSTANCE:="RESOURCE_ID"} fi if [ -z "$OCF_RA_VERSION_MAJOR" ]; then : We are being invoked as an init script. : Fill in some things with reasonable values. : ${OCF_RESOURCE_INSTANCE:="default"} return 0 fi if [ -z "$OCF_RESOURCE_INSTANCE" ]; then ha_log "ERROR: Need to tell us our resource instance name." exit $OCF_ERR_ARGS fi } hadate() { date "+${HA_DATEFMT}" } set_logtag() { if [ -z "$HA_LOGTAG" ]; then if [ -n "$OCF_RESOURCE_INSTANCE" ]; then HA_LOGTAG="$__SCRIPT_NAME($OCF_RESOURCE_INSTANCE)[$$]" else HA_LOGTAG="$__SCRIPT_NAME[$$]" fi fi } __ha_log() { local ignore_stderr=false local loglevel [ "x$1" = "x--ignore-stderr" ] && ignore_stderr=true && shift [ none = "$HA_LOGFACILITY" ] && HA_LOGFACILITY="" # if we're connected to a tty, then output to stderr if tty >/dev/null; then if [ "x$HA_debug" = "x0" -a "x$loglevel" = xdebug ] ; then return 0 elif [ "$ignore_stderr" = "true" ]; then # something already printed this error to stderr, so ignore return 0 fi if [ "$HA_LOGTAG" ]; then echo "$HA_LOGTAG: $*" else echo "$*" fi >&2 return 0 fi set_logtag if [ "x${HA_LOGD}" = "xyes" ] ; then ha_logger -t "${HA_LOGTAG}" "$@" if [ "$?" -eq "0" ] ; then return 0 fi fi if [ -n "$HA_LOGFACILITY" ] then : logging through syslog # loglevel is unknown, use 'notice' for now loglevel=notice case "${*}" in *ERROR*) loglevel=err;; *WARN*) loglevel=warning;; *INFO*|info) loglevel=info;; esac logger -t "$HA_LOGTAG" -p ${HA_LOGFACILITY}.${loglevel} "${*}" fi if [ -n "$HA_LOGFILE" ] then : appending to $HA_LOGFILE echo `hadate`" $HA_LOGTAG: ${*}" >> $HA_LOGFILE fi if [ -z "$HA_LOGFACILITY" -a -z "$HA_LOGFILE" ] && ! [ "$ignore_stderr" = "true" ] then : appending to stderr echo `hadate`"${*}" >&2 fi if [ -n "$HA_DEBUGLOG" ] then : appending to $HA_DEBUGLOG if [ "$HA_LOGFILE"x != "$HA_DEBUGLOG"x ]; then echo "$HA_LOGTAG: "`hadate`"${*}" >> $HA_DEBUGLOG fi fi } ha_log() { __ha_log "$@" } ha_debug() { if [ "x${HA_debug}" = "x0" ] ; then return 0 fi if tty >/dev/null; then if [ "$HA_LOGTAG" ]; then echo "$HA_LOGTAG: $*" else echo "$*" fi >&2 return 0 fi set_logtag if [ "x${HA_LOGD}" = "xyes" ] ; then ha_logger -t "${HA_LOGTAG}" -D "ha-debug" "$@" if [ "$?" -eq "0" ] ; then return 0 fi fi [ none = "$HA_LOGFACILITY" ] && HA_LOGFACILITY="" if [ -n "$HA_LOGFACILITY" ] then : logging through syslog logger -t "$HA_LOGTAG" -p "${HA_LOGFACILITY}.debug" "${*}" fi if [ -n "$HA_DEBUGLOG" ] then : appending to $HA_DEBUGLOG echo "$HA_LOGTAG: "`hadate`"${*}" >> $HA_DEBUGLOG fi if [ -z "$HA_LOGFACILITY" -a -z "$HA_DEBUGLOG" ] then : appending to stderr echo "$HA_LOGTAG: `hadate`${*}: ${HA_LOGFACILITY}" >&2 fi } ha_parameter() { local VALUE VALUE=`sed -e 's%[ ][ ]*% %' -e 's%^ %%' -e 's%#.*%%' $HA_CF | grep -i "^$1 " | sed 's%[^ ]* %%'` if [ "X$VALUE" = X ] then case $1 in keepalive) VALUE=2;; deadtime) ka=`ha_parameter keepalive` VALUE=`expr $ka '*' 2 '+' 1`;; esac fi echo $VALUE } ocf_log() { # TODO: Revisit and implement internally. if [ $# -lt 2 ] then ocf_log err "Not enough arguments [$#] to ocf_log." 
fi __OCF_PRIO="$1" shift __OCF_MSG="$*" case "${__OCF_PRIO}" in crit) __OCF_PRIO="CRIT";; err) __OCF_PRIO="ERROR";; warn) __OCF_PRIO="WARNING";; info) __OCF_PRIO="INFO";; debug)__OCF_PRIO="DEBUG";; *) __OCF_PRIO=`echo ${__OCF_PRIO}| tr '[a-z]' '[A-Z]'`;; esac if [ "${__OCF_PRIO}" = "DEBUG" ]; then ha_debug "${__OCF_PRIO}: $__OCF_MSG" else ha_log "${__OCF_PRIO}: $__OCF_MSG" fi } # # ocf_exit_reason: print exit error string to stderr # Usage: Allows the OCF script to provide a string # describing why the exit code was returned. # Arguments: reason - required, The string that represents why the error # occured. # ocf_exit_reason() { local cookie="$OCF_EXIT_REASON_PREFIX" local fmt local msg # No argument is likely not intentional. # Just one argument implies a printf format string of just "%s". # "Least surprise" in case some interpolated string from variable # expansion or other contains a percent sign. # More than one argument: first argument is going to be the format string. case $# in 0) ocf_log err "Not enough arguments to ocf_log_exit_msg." ;; 1) fmt="%s" ;; *) fmt=$1 shift case $fmt in *%*) : ;; # ok, does look like a format string *) ocf_log warn "Does not look like format string: [$fmt]" ;; esac ;; esac if [ -z "$cookie" ]; then # use a default prefix cookie="ocf-exit-reason:" fi msg=$(printf "${fmt}" "$@") printf >&2 "%s%s\n" "$cookie" "$msg" __ha_log --ignore-stderr "ERROR: $msg" } # # ocf_deprecated: Log a deprecation warning # Usage: ocf_deprecated [param-name] # Arguments: param-name optional, name of a boolean resource # parameter that can be used to suppress # the warning (default # "ignore_deprecation") ocf_deprecated() { local param param=${1:-ignore_deprecation} # don't use ${!param} here, it's a bashism if ! ocf_is_true $(eval echo \$OCF_RESKEY_$param); then ocf_log warn "This resource agent is deprecated" \ "and may be removed in a future release." \ "See the man page for details." \ "To suppress this warning, set the \"${param}\"" \ "resource parameter to true." fi } # # Ocf_run: Run a script, and log its output. # Usage: ocf_run [-q] [-info|-warn|-err] # -q: don't log the output of the command if it succeeds # -info|-warn|-err: log the output of the command at given # severity if it fails (defaults to err) # ocf_run() { local rc local output local verbose=1 local loglevel=err local var for var in 1 2 do case "$1" in "-q") verbose="" shift 1;; "-info"|"-warn"|"-err") loglevel=`echo $1 | sed -e s/-//g` shift 1;; *) ;; esac done output=`"$@" 2>&1` rc=$? output=`echo "$output" | tr -s ' \t\r\n' ' '` if [ $rc -eq 0 ]; then if [ "$verbose" -a ! -z "$output" ]; then ocf_log info "$output" fi else if [ ! -z "$output" ]; then ocf_log $loglevel "$output" else ocf_log $loglevel "command failed: $*" fi fi return $rc } ocf_pidfile_status() { local pid pidfile=$1 if [ ! -e $pidfile ]; then # Not exists return 2 fi pid=`cat $pidfile` kill -0 $pid 2>&1 > /dev/null if [ $? = 0 ]; then return 0 fi # Stale return 1 } ocf_take_lock() { local lockfile=$1 local rnd=$(ocf_maybe_random) sleep 0.$rnd while ocf_pidfile_status $lockfile do ocf_log info "Sleeping until $lockfile is released..." sleep 0.$rnd done echo $$ > $lockfile } ocf_release_lock_on_exit() { local lockfile=$1 trap "rm -f $lockfile" EXIT } # returns true if the CRM is currently running a probe. A probe is # defined as a monitor operation with a monitoring interval of zero. ocf_is_probe() { [ "$__OCF_ACTION" = "monitor" -a "$OCF_RESKEY_CRM_meta_interval" = 0 ] } # returns true if the resource is configured as a clone. 
This is # defined as a resource where the clone-max meta attribute is present, # and set to greater than zero. ocf_is_clone() { [ ! -z "${OCF_RESKEY_CRM_meta_clone_max}" ] && [ "${OCF_RESKEY_CRM_meta_clone_max}" -gt 0 ] } # returns true if the resource is configured as a multistate # (master/slave) resource. This is defined as a resource where the # master-max meta attribute is present, and set to greater than zero. ocf_is_ms() { [ ! -z "${OCF_RESKEY_CRM_meta_master_max}" ] && [ "${OCF_RESKEY_CRM_meta_master_max}" -gt 0 ] } # version check functions # allow . and - to delimit version numbers # max version number is 999 # letters and such are effectively ignored # ocf_is_ver() { - echo $1 | grep '^[0-9][0-9.-]*[0-9]' >/dev/null 2>&1 + echo $1 | grep '^[0-9][0-9.-]*[0-9]$' >/dev/null 2>&1 } ocf_ver2num() { echo $1 | awk -F'[.-]' ' {for(i=1; i<=NF; i++) s=s*1000+$i; print s} ' } ocf_ver_level(){ echo $1 | awk -F'[.-]' '{print NF}' } ocf_ver_complete_level(){ local ver="$1" local level="$2" local i=0 while [ $i -lt $level ]; do ver=${ver}.0 i=`expr $i + 1` done echo $ver } # usage: ocf_version_cmp VER1 VER2 # version strings can contain digits, dots, and dashes # must start and end with a digit # returns: # 0: VER1 smaller (older) than VER2 # 1: versions equal # 2: VER1 greater (newer) than VER2 # 3: bad format ocf_version_cmp() { ocf_is_ver "$1" || return 3 ocf_is_ver "$2" || return 3 local v1=$1 local v2=$2 local v1_level=`ocf_ver_level $v1` local v2_level=`ocf_ver_level $v2` local level_diff if [ $v1_level -lt $v2_level ]; then level_diff=`expr $v2_level - $v1_level` v1=`ocf_ver_complete_level $v1 $level_diff` elif [ $v1_level -gt $v2_level ]; then level_diff=`expr $v1_level - $v2_level` v2=`ocf_ver_complete_level $v2 $level_diff` fi v1=`ocf_ver2num $v1` v2=`ocf_ver2num $v2` if [ $v1 -eq $v2 ]; then return 1 elif [ $v1 -lt $v2 ]; then return 0 else return 2 # -1 would look funny in shell ;-) fi } ocf_local_nodename() { # use crm_node -n for pacemaker > 1.1.8 which pacemakerd > /dev/null 2>&1 if [ $? -eq 0 ]; then local version=$(pacemakerd -$ | grep "Pacemaker .*" | awk '{ print $2 }') version=$(echo $version | awk -F- '{ print $1 }') ocf_version_cmp "$version" "1.1.8" if [ $? -eq 2 ]; then which crm_node > /dev/null 2>&1 if [ $? -eq 0 ]; then crm_node -n return fi fi fi # otherwise use uname -n uname -n } # usage: dirname DIR dirname() { local a local b [ $# = 1 ] || return 1 a="$1" while [ 1 ]; do b="${a%/}" [ "$a" = "$b" ] && break a="$b" done b=${a%/*} [ -z "$b" -o "$a" = "$b" ] && b="." echo "$b" return 0 } +# usage: systemd_drop_in +systemd_drop_in() +{ + if [ $# -ne 3 ]; then + ocf_log err "Incorrect number of arguments [$#] for systemd_drop_in." 
+ fi + + systemdrundir="/run/systemd/system/resource-agents-deps.target.d" + mkdir "$systemdrundir" + cat > "$systemdrundir/$1.conf" </dev/null # try to leave early, and yet leave processes time to exit sleep 0.2 for i in `seq $wait_time`; do kill -s 0 $pids 2>/dev/null || return 0 sleep 1 done done return 1 } # # create a given status directory # if the directory path doesn't start with $HA_VARRUN, then # we return with error (most of the calls would be with the user # supplied configuration, hence we need to do necessary # protection) # used mostly for PID files # # usage: ocf_mkstatedir owner permissions path # # owner: user.group # permissions: permissions # path: directory path # # example: # ocf_mkstatedir named 755 `dirname $pidfile` # ocf_mkstatedir() { local owner local perms local path owner=$1 perms=$2 path=$3 test -d $path && return 0 [ $(id -u) = 0 ] || return 1 case $path in ${HA_VARRUN%/}/*) : this path is ok ;; *) ocf_log err "cannot create $path (does not start with $HA_VARRUN)" return 1 ;; esac mkdir -p $path && chown $owner $path && chmod $perms $path } # # create a unique status directory in $HA_VARRUN # used mostly for PID files # the directory is by default set to # $HA_VARRUN/$OCF_RESOURCE_INSTANCE # the directory name is printed to stdout # # usage: ocf_unique_rundir owner permissions name # # owner: user.group (default: "root") # permissions: permissions (default: "755") # name: some unique string (default: "$OCF_RESOURCE_INSTANCE") # # to use the default either don't set the parameter or set it to # empty string ("") # example: # # STATEDIR=`ocf_unique_rundir named "" myownstatedir` # ocf_unique_rundir() { local path local owner local perms local name owner=${1:-"root"} perms=${2:-"755"} name=${3:-"$OCF_RESOURCE_INSTANCE"} path=$HA_VARRUN/$name if [ ! -d $path ]; then [ $(id -u) = 0 ] || return 1 mkdir -p $path && chown $owner $path && chmod $perms $path || return 1 fi echo $path } # # RA tracing may be turned on by setting OCF_TRACE_RA # the trace output will be saved to OCF_TRACE_FILE, if set, or # by default to # $HA_VARLIB/trace_ra//.. # e.g. $HA_VARLIB/trace_ra/oracle/db.start.2012-11-27.08:37:08 # # OCF_TRACE_FILE: # - FD (small integer [3-9]) in that case it is up to the callers # to capture output; the FD _must_ be open for writing # - absolute path # # NB: FD 9 may be used for tracing with bash >= v4 in case # OCF_TRACE_FILE is set to a path. # ocf_is_bash4() { echo "$SHELL" | grep bash > /dev/null && [ ${BASH_VERSINFO[0]} = "4" ] } ocf_trace_redirect_to_file() { local dest=$1 if ocf_is_bash4; then exec 9>$dest BASH_XTRACEFD=9 else exec 2>$dest fi } ocf_trace_redirect_to_fd() { local fd=$1 if ocf_is_bash4; then BASH_XTRACEFD=$fd else exec 2>&$fd fi } __ocf_test_trc_dest() { local dest=$1 if ! 
touch $dest; then ocf_log warn "$dest not writable, trace not going to happen" __OCF_TRC_DEST="" __OCF_TRC_MANAGE="" return 1 fi return 0 } ocf_default_trace_dest() { tty >/dev/null && return if [ -n "$OCF_RESOURCE_TYPE" -a \ -n "$OCF_RESOURCE_INSTANCE" -a -n "$__OCF_ACTION" ]; then local ts=`date +%F.%T` __OCF_TRC_DEST=$HA_VARLIB/trace_ra/${OCF_RESOURCE_TYPE}/${OCF_RESOURCE_INSTANCE}.${__OCF_ACTION}.$ts __OCF_TRC_MANAGE="1" fi } ocf_start_trace() { export __OCF_TRC_DEST="" __OCF_TRC_MANAGE="" case "$OCF_TRACE_FILE" in [3-9]) ocf_trace_redirect_to_fd "$OCF_TRACE_FILE" ;; /*/*) __OCF_TRC_DEST=$OCF_TRACE_FILE ;; "") ocf_default_trace_dest ;; *) ocf_log warn "OCF_TRACE_FILE must be set to either FD (open for writing) or absolute file path" ocf_default_trace_dest ;; esac if [ "$__OCF_TRC_DEST" ]; then mkdir -p `dirname $__OCF_TRC_DEST` __ocf_test_trc_dest $__OCF_TRC_DEST || return ocf_trace_redirect_to_file "$__OCF_TRC_DEST" fi if [ -n "$BASH_VERSION" ]; then PS4='+ `date +"%T"`: ${FUNCNAME[0]:+${FUNCNAME[0]}:}${LINENO}: ' fi set -x env=$( echo; printenv | sort ) } ocf_stop_trace() { set +x } __ocf_set_defaults "$@" : ${OCF_TRACE_RA:=$OCF_RESKEY_trace_ra} ocf_is_true "$OCF_TRACE_RA" && ocf_start_trace # pacemaker sets HA_use_logd, some others use HA_LOGD :/ if ocf_is_true "$HA_use_logd"; then : ${HA_LOGD:=yes} fi diff --git a/heartbeat/portblock b/heartbeat/portblock index 776ad17e4..a518f49fe 100755 --- a/heartbeat/portblock +++ b/heartbeat/portblock @@ -1,566 +1,566 @@ #!/bin/sh # # portblock: iptables temporary portblocking control # # Author: Sun Jiang Dong (initial version) # Philipp Reisner (per-IP filtering) # # License: GNU General Public License (GPL) # # Copyright: (C) 2005 International Business Machines # # OCF parameters are as below: # OCF_RESKEY_protocol # OCF_RESKEY_portno # OCF_RESKEY_action # OCF_RESKEY_ip # OCF_RESKEY_tickle_dir # OCF_RESKEY_sync_script ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # Defaults OCF_RESKEY_ip_default="0.0.0.0/0" OCF_RESKEY_reset_local_on_unblock_stop_default="false" : ${OCF_RESKEY_ip=${OCF_RESKEY_ip_default}} : ${OCF_RESKEY_reset_local_on_unblock_stop=${OCF_RESKEY_reset_local_on_unblock_stop_default}} ####################################################################### CMD=`basename $0` TICKLETCP=$HA_BIN/tickle_tcp usage() { cat <&2 usage: $CMD {start|stop|status|monitor|meta-data|validate-all} $CMD is used to temporarily block ports using iptables. It can be used to blackhole a port before bringing up an IP address, and enable it after a service is started. To do that for samba, the following can be used: crm configure < 1.0 Resource script for portblock. It is used to temporarily block ports using iptables. In addition, it may allow for faster TCP reconnects for clients on failover. Use that if there are long lived TCP connections to an HA service. This feature is enabled by setting the tickle_dir parameter and only in concert with action set to unblock. Note that the tickle ACK function is new as of version 3.0.2 and hasn't yet seen widespread use. Block and unblocks access to TCP and UDP ports The protocol used to be blocked/unblocked. protocol The port number used to be blocked/unblocked. portno The action (block/unblock) to be done on the protocol::portno. 
action If for some reason the long lived server side TCP sessions won't be cleaned up by a reconfiguration/flush/stop of whatever services this portblock protects, they would linger in the connection table, even after the IP is gone and services have been switched over to an other node. An example would be the default NFS kernel server. These "known" connections may seriously confuse and delay a later switchback. Enabling this option will cause this agent to try to get rid of these connections by injecting a temporary iptables rule to TCP-reset outgoing packets from the blocked ports, and additionally tickle them locally, just before it starts to DROP incoming packets on "unblock stop". (try to) reset server TCP sessions when unblock stops The IP address used to be blocked/unblocked. ip The shared or local directory (_must_ be absolute path) which stores the established TCP connections. Tickle directory If the tickle_dir is a local directory, then the TCP connection state file has to be replicated to other nodes in the cluster. It can be csync2 (default), some wrapper of rsync, or whatever. It takes the file name as a single argument. For csync2, set it to "csync2 -xv". Connection state file synchronization script END } # # Because this is the normal usage, we consider "block" # resources to be pseudo-resources -- that is, their status can't # be reliably determined through external means. # This is because we expect an "unblock" resource to come along # and disable us -- but we're still in some sense active... # #active_grep_pat {udp|tcp} portno,portno active_grep_pat() { w="[ ][ ]*" any="0\\.0\\.0\\.0/0" echo "^DROP${w}${1}${w}--${w}${any}${w}${3}${w}multiport${w}dports${w}${2}\>" } #chain_isactive {udp|tcp} portno,portno ip chain_isactive() { PAT=`active_grep_pat "$1" "$2" "$3"` $IPTABLES $wait -n -L INPUT | grep "$PAT" >/dev/null } save_tcp_connections() { [ -z "$OCF_RESKEY_tickle_dir" ] && return statefile=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip if [ -z "$OCF_RESKEY_sync_script" ]; then netstat -tn |awk -F '[:[:space:]]+' ' $8 == "ESTABLISHED" && $4 == "'$OCF_RESKEY_ip'" \ {printf "%s:%s\t%s:%s\n", $4,$5, $6,$7}' | - dd of="$statefile".new conv=fsync && + dd of="$statefile".new conv=fsync status=none && mv "$statefile".new "$statefile" else netstat -tn |awk -F '[:[:space:]]+' ' $8 == "ESTABLISHED" && $4 == "'$OCF_RESKEY_ip'" \ {printf "%s:%s\t%s:%s\n", $4,$5, $6,$7}' \ > $statefile $OCF_RESKEY_sync_script $statefile > /dev/null 2>&1 & fi } tickle_remote() { [ -z "$OCF_RESKEY_tickle_dir" ] && return echo 1 > /proc/sys/net/ipv4/tcp_tw_recycle f=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip [ -r $f ] || return $TICKLETCP -n 3 < $f } tickle_local() { [ -z "$OCF_RESKEY_tickle_dir" ] && return f=$OCF_RESKEY_tickle_dir/$OCF_RESKEY_ip [ -r $f ] || return # swap "local" and "remote" address, # so we tickle ourselves. # We set up a REJECT with tcp-reset before we do so, so we get rid of # the no longer wanted potentially long lived "ESTABLISHED" connection # entries on the IP we are going to delet in a sec. These would get in # the way if we switch-over and then switch-back in quick succession. 
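	# Illustration with hypothetical addresses: save_tcp_connections records each
	# established connection as "local:port<TAB>remote:port", e.g.
	#   10.0.0.1:3260	192.0.2.20:51234
	# and the awk below swaps the two columns so the tickles are aimed back at our
	# own (local) end of every recorded connection.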
local i awk '{ print $2, $1; }' $f | $TICKLETCP netstat -tn | grep -Fw $OCF_RESKEY_ip || return for i in 0.1 0.5 1 2 4 ; do sleep $i awk '{ print $2, $1; }' $f | $TICKLETCP netstat -tn | grep -Fw $OCF_RESKEY_ip || break done } SayActive() { echo "$CMD DROP rule for INPUT chain [$*] is running (OK)" } SayConsideredActive() { echo "$CMD DROP rule for INPUT chain [$*] considered to be running (OK)" } SayInactive() { echo "$CMD DROP rule for INPUT chain [$*] is inactive" } #IptablesStatus {udp|tcp} portno,portno ip {block|unblock} IptablesStatus() { local rc rc=$OCF_ERR_GENERIC activewords="$CMD $1 $2 is running (OK)" if chain_isactive "$1" "$2" "$3"; then case $4 in block) SayActive $* rc=$OCF_SUCCESS ;; *) SayInactive $* rc=$OCF_NOT_RUNNING ;; esac else case $4 in block) if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then SayConsideredActive $* rc=$OCF_SUCCESS else SayInactive $* rc=$OCF_NOT_RUNNING fi ;; *) if ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" status; then SayActive $* #This is only run on real monitor events. save_tcp_connections rc=$OCF_SUCCESS else SayInactive $* rc=$OCF_NOT_RUNNING fi ;; esac fi return $rc } #IptablesBLOCK {udp|tcp} portno,portno ip IptablesBLOCK() { local rc=0 local try_reset=false if [ "$1/$4/$__OCF_ACTION" = tcp/unblock/stop ] && ocf_is_true $reset_local_on_unblock_stop then try_reset=true fi if chain_isactive "$1" "$2" "$3" then : OK -- chain already active else if $try_reset ; then $IPTABLES $wait -I OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset tickle_local fi $IPTABLES $wait -I INPUT -p "$1" -d "$3" -m multiport --dports "$2" -j DROP rc=$? if $try_reset ; then $IPTABLES $wait -D OUTPUT -p "$1" -s "$3" -m multiport --sports "$2" -j REJECT --reject-with tcp-reset fi fi return $rc } #IptablesUNBLOCK {udp|tcp} portno,portno ip IptablesUNBLOCK() { if chain_isactive "$1" "$2" "$3" then $IPTABLES $wait -D INPUT -p "$1" -d "$3" -m multiport --dports "$2" -j DROP else : Chain Not active fi return $? } #IptablesStart {udp|tcp} portno,portno ip {block|unblock} IptablesStart() { ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" start case $4 in block) IptablesBLOCK "$@";; unblock) IptablesUNBLOCK "$@" rc=$? tickle_remote #ignore run_tickle_tcp exit code! return $rc ;; *) usage; return 1; esac return $? } #IptablesStop {udp|tcp} portno,portno ip {block|unblock} IptablesStop() { ha_pseudo_resource "${OCF_RESOURCE_INSTANCE}" stop case $4 in block) IptablesUNBLOCK "$@";; unblock) save_tcp_connections IptablesBLOCK "$@" ;; *) usage; return 1;; esac return $? } # # Check if the port is valid, this function code is not decent, but works # CheckPort() { # Examples of valid port: "1080", "1", "0080" # Examples of invalid port: "1080bad", "0", "0000", "" echo $1 |egrep -qx '[0-9]+(:[0-9]+)?(,[0-9]+(:[0-9]+)?)*' } IptablesValidateAll() { check_binary $IPTABLES case $protocol in tcp|udp) ;; *) ocf_log err "Invalid protocol $protocol!" exit $OCF_ERR_CONFIGURED ;; esac if CheckPort "$portno"; then : else ocf_log err "Invalid port number $portno!" exit $OCF_ERR_CONFIGURED fi if [ -n "$OCF_RESKEY_tickle_dir" ]; then if [ x"$action" != x"unblock" ]; then ocf_log err "Tickles are only useful with action=unblock!" exit $OCF_ERR_CONFIGURED fi if [ ! -d "$OCF_RESKEY_tickle_dir" ]; then ocf_log err "The tickle dir doesn't exist!" exit $OCF_ERR_INSTALLED fi fi case $action in block|unblock) ;; *) ocf_log err "Invalid action $action!" 
exit $OCF_ERR_CONFIGURED ;; esac if ocf_is_true $reset_local_on_unblock_stop; then if [ $action != unblock ] ; then ocf_log err "reset_local_on_unblock_stop is only relevant with action=unblock" exit $OCF_ERR_CONFIGURED fi if [ -z $OCF_RESKEY_tickle_dir ] ; then ocf_log warn "reset_local_on_unblock_stop works best with tickle_dir enabled as well" fi fi return $OCF_SUCCESS } if ( [ $# -ne 1 ] ) then usage exit $OCF_ERR_ARGS fi case $1 in meta-data) meta_data exit $OCF_SUCCESS ;; usage) usage exit $OCF_SUCCESS ;; *) ;; esac if [ -z "$OCF_RESKEY_protocol" ]; then ocf_log err "Please set OCF_RESKEY_protocol" exit $OCF_ERR_CONFIGURED fi if [ -z "$OCF_RESKEY_portno" ]; then ocf_log err "Please set OCF_RESKEY_portno" exit $OCF_ERR_CONFIGURED fi if [ -z "$OCF_RESKEY_action" ]; then ocf_log err "Please set OCF_RESKEY_action" exit $OCF_ERR_CONFIGURED fi # iptables v1.4.20+ is required to use -w (wait) version=$(iptables -V | awk -F ' v' '{print $NF}') ocf_version_cmp "$version" "1.4.19.1" if [ "$?" -eq "2" ]; then wait="-w" else wait="" fi protocol=$OCF_RESKEY_protocol portno=$OCF_RESKEY_portno action=$OCF_RESKEY_action ip=$OCF_RESKEY_ip reset_local_on_unblock_stop=$OCF_RESKEY_reset_local_on_unblock_stop case $1 in start) IptablesStart $protocol $portno $ip $action ;; stop) IptablesStop $protocol $portno $ip $action ;; status|monitor) IptablesStatus $protocol $portno $ip $action ;; validate-all) IptablesValidateAll ;; *) usage exit $OCF_ERR_UNIMPLEMENTED ;; esac exit $? diff --git a/heartbeat/rabbitmq-cluster b/heartbeat/rabbitmq-cluster index 30f032066..1e78d9eca 100755 --- a/heartbeat/rabbitmq-cluster +++ b/heartbeat/rabbitmq-cluster @@ -1,549 +1,549 @@ #!/bin/sh # # Copyright (c) 2014 David Vossel # All Rights Reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs ####################################################################### RMQ_SERVER=/usr/sbin/rabbitmq-server RMQ_CTL=/usr/sbin/rabbitmqctl RMQ_DATA_DIR="/var/lib/rabbitmq/mnesia" RMQ_PID_DIR="/var/run/rabbitmq" RMQ_PID_FILE="/var/run/rabbitmq/rmq.pid" RMQ_LOG_DIR="/var/log/rabbitmq" NODENAME=$(ocf_local_nodename) # this attr represents the current active local rmq node name. 
# when rmq stops or the node is fenced, this attr disappears RMQ_CRM_ATTR_COOKIE="rmq-node-attr-${OCF_RESOURCE_INSTANCE}" # this attr represents the last known active local rmq node name # when rmp stops or the node is fenced, the attr stays forever so # we can continue to map an offline pcmk node to it's rmq node name # equivalent. RMQ_CRM_ATTR_COOKIE_LAST_KNOWN="rmq-node-attr-last-known-${OCF_RESOURCE_INSTANCE}" meta_data() { cat < 1.0 Starts cloned rabbitmq cluster instance. NB: note that this RA cannot be spawned across a mix of pacemaker and pacemaker-remote nodes. Only on pacemaker *or* pacemaker-remote nodes exclusively. rabbitmq clustered Policy string to pass to 'rabbitmqctl set_policy' right after bootstrapping the first rabbitmq instance. rabbitmqctl set_policy args END } ####################################################################### rmq_usage() { cat < /dev/null 2>&1 } rmq_local_node() { local node_name=$(rabbitmqctl status 2>&1 | sed -n -e "s/^.*[S|s]tatus of node \(.*\)\s.*$/\1/p" | tr -d "'") if [ -z "$node_name" ]; then node_name=$(cat /etc/rabbitmq/rabbitmq-env.conf 2>/dev/null | grep "\s*RABBITMQ_NODENAME=" | awk -F= '{print $2}') fi echo "$node_name" } rmq_join_list() { local join_list=$(cibadmin -Q --xpath "//node_state[@crmd='online']//nvpair[@name='$RMQ_CRM_ATTR_COOKIE']" | grep "$RMQ_CRM_ATTR_COOKIE" | sed -n -e "s/^.*value=.\(.*\)\".*$/\1/p") # If join_list is empty we want to check if there are any remote nodes # where rabbitmq is allowed to run (i.e. nodes without the crmd=online selector) if [ -z "$join_list" ]; then # Get all the nodes written in the ATTR_COOKIE no matter if # they are online or not. This will be one line per node like # rabbit@overcloud-rabbit-0 # rabbit@overcloud-rabbit-1 # ... local remote_join_list=$(cibadmin -Q --xpath "//node_state//nvpair[@name='$RMQ_CRM_ATTR_COOKIE']" | grep "$RMQ_CRM_ATTR_COOKIE" | sed -n -e "s/^.*value=.\(.*\)\".*$/\1/p") # The following expression prepares a filter like '-e overcloud-rabbit-0 -e overcloud-rabbit-1 -e ...' local filter=$(crm_mon -r --as-xml | xmllint --format --xpath "//nodes//node[@online='true' and @standby='false']/@name" - | xargs -n1 echo | awk -F= '{print "-e "$2}') # export the intersection which gives us only the nodes that # a) wrote their namein the cib attrd # b) run on nodes where pacemaker_remote is enabled join_list="$(echo $remote_join_list | grep $filter)" fi echo $join_list } rmq_write_nodename() { local node_name=$(rmq_local_node) if [ -z "$node_name" ]; then ocf_log err "Failed to determine rabbitmq node name, exiting" exit $OCF_ERR_GENERIC fi # store the pcmknode to rmq node mapping as a transient attribute. This allows # us to retrieve the join list with a simple xpath. ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "$RMQ_CRM_ATTR_COOKIE" -v "$node_name" # the pcmknode to rmq node mapping as a permanent attribute as well. this lets # us continue to map offline nodes to their equivalent rmq node name ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l forever --name "$RMQ_CRM_ATTR_COOKIE_LAST_KNOWN" -v "$node_name" } rmq_delete_nodename() { # remove node-name ${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l reboot --name "$RMQ_CRM_ATTR_COOKIE" -D } prepare_dir () { if [ ! -d ${1} ] ; then mkdir -p ${1} chown -R rabbitmq:rabbitmq ${1} chmod 755 ${1} fi } remove_pid () { rm -f ${RMQ_PID_FILE} > /dev/null 2>&1 } rmq_monitor() { local rc $RMQ_CTL cluster_status > /dev/null 2>&1 rc=$? 
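	# The case below maps the rabbitmqctl exit code: 0 means the local node is up
	# and clustered; 2|68|69|70|75|78 are the codes rabbitmqctl returns when the
	# server is simply not running (most of them sysexits.h values, e.g.
	# 69 EX_UNAVAILABLE, 78 EX_CONFIG); anything else is treated as a hard error.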
case "$rc" in 0) ocf_log debug "RabbitMQ server is running normally" rmq_write_nodename return $OCF_SUCCESS ;; 2|68|69|70|75|78) ocf_log info "RabbitMQ server is not running" rmq_delete_nodename return $OCF_NOT_RUNNING ;; *) ocf_log err "Unexpected return code from '$RMQ_CTL cluster_status' exit code: $rc" rmq_delete_nodename return $OCF_ERR_GENERIC ;; esac } rmq_init_and_wait() { local rc prepare_dir $RMQ_PID_DIR prepare_dir $RMQ_LOG_DIR remove_pid # the server startup script uses this environment variable export RABBITMQ_PID_FILE="$RMQ_PID_FILE" setsid sh -c "$RMQ_SERVER > ${RMQ_LOG_DIR}/startup_log 2> ${RMQ_LOG_DIR}/startup_err" & ocf_log info "Waiting for server to start" $RMQ_CTL wait $RMQ_PID_FILE rc=$? if [ $rc -ne $OCF_SUCCESS ]; then remove_pid ocf_log info "rabbitmq-server start failed: $rc" return $OCF_ERR_GENERIC fi rmq_monitor return $? } rmq_set_policy() { $RMQ_CTL set_policy "$@" > /dev/null 2>&1 } rmq_start_first() { local rc ocf_log info "Bootstrapping rabbitmq cluster" rmq_wipe_data rmq_init_and_wait rc=$? if [ $rc -eq 0 ]; then rc=$OCF_SUCCESS ocf_log info "cluster bootstrapped" if [ -n "$OCF_RESKEY_set_policy" ]; then # do not quote set_policy, we are passing in arguments rmq_set_policy $OCF_RESKEY_set_policy > /dev/null 2>&1 if [ $? -ne 0 ]; then ocf_log err "Failed to set policy: $OCF_RESKEY_set_policy" rc=$OCF_ERR_GENERIC else ocf_log info "Policy set: $OCF_RESKEY_set_policy" fi fi else ocf_log info "failed to bootstrap cluster. Check SELINUX policy" rc=$OCF_ERR_GENERIC fi return $rc } rmq_is_clustered() { $RMQ_CTL eval 'rabbit_mnesia:is_clustered().' | grep -q true } rmq_join_existing() { local join_list="$1" local rc=$OCF_ERR_GENERIC ocf_log info "Joining existing cluster with [ $(echo $join_list | tr '\n' ' ') ] nodes." rmq_init_and_wait if [ $? -ne 0 ]; then return $OCF_ERR_GENERIC fi if rmq_is_clustered; then ocf_log info "Successfully re-joined existing rabbitmq cluster automatically" return $OCF_SUCCESS fi # unconditionally join the cluster $RMQ_CTL stop_app > /dev/null 2>&1 for node in $(echo "$join_list"); do ocf_log info "Attempting to join cluster with target node $node" $RMQ_CTL join_cluster $node if [ $? -eq 0 ]; then ocf_log info "Joined cluster by connecting to node $node, starting app" $RMQ_CTL start_app rc=$? if [ $rc -ne 0 ]; then ocf_log err "'$RMQ_CTL start_app' failed" fi break; fi done if [ "$rc" -ne 0 ]; then ocf_log info "Join process incomplete, shutting down." return $OCF_ERR_GENERIC fi ocf_log info "Successfully joined existing rabbitmq cluster" return $OCF_SUCCESS } rmq_forget_cluster_node_remotely() { local running_cluster_nodes="$1" local node_to_forget="$2" ocf_log info "Forgetting $node_to_forget via nodes [ $(echo $running_cluster_nodes | tr '\n' ' ') ]." for running_cluster_node in $running_cluster_nodes; do rabbitmqctl -n $running_cluster_node forget_cluster_node $node_to_forget if [ $? = 0 ]; then ocf_log info "Succeeded forgetting $node_to_forget via $running_cluster_node." return else ocf_log err "Failed to forget node $node_to_forget via $running_cluster_node." fi done } rmq_notify() { node_list="${OCF_RESKEY_CRM_meta_notify_stop_uname}" mode="${OCF_RESKEY_CRM_meta_notify_type}-${OCF_RESKEY_CRM_meta_notify_operation}" # When notifications are on, this agent is going to "forget" nodes once they # leave the cluster. This is thought to resolve some issues where rabbitmq # blocks trying to sync with an offline node after a fencing action occurs. if ! [ "${mode}" = "post-stop" ]; then return $OCF_SUCCESS fi rmq_monitor if [ $? 
-ne $OCF_SUCCESS ]; then # only run forget when we are for sure active return $OCF_SUCCESS fi # forget each stopped rmq instance in the provided pcmk node in the list. for node in $(echo "$node_list"); do local rmq_node="$(${HA_SBIN_DIR}/crm_attribute -N $node -l forever --query --name $RMQ_CRM_ATTR_COOKIE_LAST_KNOWN -q)" if [ -z "$rmq_node" ]; then ocf_log warn "Unable to map pcmk node $node to a known rmq node." continue fi ocf_log notice "Forgetting stopped node $rmq_node" $RMQ_CTL forget_cluster_node $rmq_node if [ $? -ne 0 ]; then ocf_log warn "Unable to forget offline node $rmq_node." fi done return $OCF_SUCCESS } rmq_start() { local join_list="" local rc rmq_monitor if [ $? -eq $OCF_SUCCESS ]; then return $OCF_SUCCESS fi join_list=$(rmq_join_list) # No join list means no active instances are up. This instance # is the first, so it needs to bootstrap the rest if [ -z "$join_list" ]; then rmq_start_first rc=$? return $rc fi # Try to join existing cluster ocf_log info "wiping data directory before joining" local local_rmq_node="$(${HA_SBIN_DIR}/crm_attribute -N $NODENAME -l forever --query --name $RMQ_CRM_ATTR_COOKIE_LAST_KNOWN -q)" rmq_stop rmq_wipe_data rmq_forget_cluster_node_remotely "$join_list" "$local_rmq_node" rmq_join_existing "$join_list" rc=$? if [ $rc -ne 0 ]; then ocf_log info "node failed to join even after reseting local data. Check SELINUX policy" return $OCF_ERR_GENERIC fi # Restore users, user permissions, and policies (if any) BaseDataDir=`dirname $RMQ_DATA_DIR` rabbitmqctl eval " %% Run only if Mnesia is ready. lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso begin Restore = fun(Table, PostprocessFun, Filename) -> case file:consult(Filename) of {error, _} -> ok; - {ok, [Result]) -> + {ok, [Result]} -> lists:foreach(fun(X) -> mnesia:dirty_write(Table, PostprocessFun(X)) end, Result), file:delete(Filename) end end, %% Restore users Upgrade = fun ({internal_user, A, B, C}) -> {internal_user, A, B, C, rabbit_password_hashing_md5}; ({internal_user, A, B, C, D}) -> {internal_user, A, B, C, D} end, Downgrade = fun ({internal_user, A, B, C}) -> {internal_user, A, B, C}; ({internal_user, A, B, C, rabbit_password_hashing_md5}) -> {internal_user, A, B, C}; %% Incompatible scheme, so we will loose user's password ('B' value) during conversion. %% Unfortunately, this case will require manual intervention - user have to run: %% rabbitmqctl change_password ({internal_user, A, B, C, _}) -> {internal_user, A, B, C} end, %% Check db scheme first [WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]), case WildPattern of %% Version < 3.6.0 {internal_user,'_','_','_'} -> Restore(rabbit_user, Downgrade, \"$BaseDataDir/users.erl\"); %% Version >= 3.6.0 {internal_user,'_','_','_','_'} -> Restore(rabbit_user, Upgrade, \"$BaseDataDir/users.erl\") end, NoOp = fun(X) -> X end, %% Restore user permissions Restore(rabbit_user_permission, NoOp, \"$BaseDataDir/users_perms.erl\"), %% Restore policies Restore(rabbit_runtime_parameters, NoOp, \"$BaseDataDir/policies.erl\") end. " return $OCF_SUCCESS } rmq_stop() { # Backup users, user permissions, and policies BaseDataDir=`dirname $RMQ_DATA_DIR` rabbitmqctl eval " %% Run only if Mnesia is still available. 
lists:any(fun({mnesia,_,_}) -> true; ({_,_,_}) -> false end, application:which_applications()) andalso begin Backup = fun(Table, SelectPattern, Filter, Filename) -> Result = case catch mnesia:dirty_select(Table, [{SelectPattern, [Filter], ['\\\$_']}]) of {'EXIT', _} -> []; Any -> Any end, Result /= [] andalso file:write_file(Filename, io_lib:fwrite(\"~p.~n\", [Result])) end, %% Backup users %% Check db scheme first [WildPattern] = ets:select(mnesia_gvar, [ { {{rabbit_user, wild_pattern}, '\\\$1'}, [], ['\\\$1'] } ]), UsersSelectPattern = case WildPattern of %% Version < 3.6.0 {internal_user,'_','_','_'} -> {internal_user, '\\\$1', '_', '_'}; %% Version >= 3.6.0 {internal_user,'_','_','_','_'} -> {internal_user, '\\\$1', '_', '_', '_'} end, Backup(rabbit_user, UsersSelectPattern, {'/=', '\\\$1', <<\"guest\">>}, \"$BaseDataDir/users.erl\"), %% Backup user permissions Backup(rabbit_user_permission, {'\\\$1', {'\\\$2', '\\\$3','\\\$4'}, '\\\$5'}, {'/=', '\\\$3', <<\"guest\">>}, \"$BaseDataDir/users_perms.erl\"), %% Backup policies Backup(rabbit_runtime_parameters, {runtime_parameters, {'_', '\\\$1', '_'}, '_'}, {'==', '\\\$1', <<\"policy\">>}, \"$BaseDataDir/policies.erl\") end. " rmq_monitor if [ $? -eq $OCF_NOT_RUNNING ]; then return $OCF_SUCCESS fi $RMQ_CTL stop rc=$? if [ $rc -ne 0 ]; then ocf_log err "rabbitmq-server stop command failed: $RMQ_CTL stop, $rc" return $rc fi #TODO add kill logic stop_wait=1 while [ $stop_wait = 1 ]; do rmq_monitor rc=$? if [ "$rc" -eq $OCF_NOT_RUNNING ]; then stop_wait=0 break elif [ "$rc" -ne $OCF_SUCCESS ]; then ocf_log info "rabbitmq-server stop failed: $rc" exit $OCF_ERR_GENERIC fi sleep 1 done remove_pid return $OCF_SUCCESS } rmq_validate() { check_binary $RMQ_SERVER check_binary $RMQ_CTL # This resource only makes sense as a clone right now. at some point # we may want to verify the following. #TODO verify cloned #TODO verify ordered=true # Given that this resource does the cluster join explicitly, # having a cluster_nodes list in the static config file will # likely conflict with this agent. #TODO verify no cluster list in rabbitmq conf #cat /etc/rabbitmq/rabbitmq.config | grep "cluster_nodes" return $OCF_SUCCESS } case $__OCF_ACTION in meta-data) meta_data exit $OCF_SUCCESS ;; start) rmq_start;; stop) rmq_stop;; monitor) rmq_monitor;; validate-all) rmq_validate;; notify) rmq_notify;; usage|help) rmq_usage exit $OCF_SUCCESS ;; *) rmq_usage exit $OCF_ERR_UNIMPLEMENTED ;; esac rc=$? ocf_log debug "${OCF_RESOURCE_INSTANCE} $__OCF_ACTION : $rc" exit $rc diff --git a/heartbeat/sapdb.sh b/heartbeat/sapdb.sh index 836250a4f..66e9854b6 100755 --- a/heartbeat/sapdb.sh +++ b/heartbeat/sapdb.sh @@ -1,367 +1,367 @@ # # sapdb.sh - for systems having SAPHostAgent installed # (sourced by SAPDatabase) # # Description: This code is separated from the SAPDatabase agent to # introduce new functions for systems which having # SAPHostAgent installed. # Someday it might be merged back into SAPDatabase agein. # # Author: Alexander Krauth, September 2010 # Support: linux@sap.com # License: GNU General Public License (GPL) # Copyright: (c) 2010, 2012 Alexander Krauth # # # background_check_saphostexec : Run a request to saphostexec in a separate task, to be able to react to a hanging process # background_check_saphostexec() { timeout=600 count=0 $SAPHOSTCTRL -function ListDatabases >/dev/null 2>&1 & pid=$! 
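	# Poll the backgrounded saphostctrl request in 0.1 second steps; with
	# timeout=600 iterations this allows roughly 60 seconds before the hanging
	# process is killed.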
while kill -0 $pid > /dev/null 2>&1 do sleep 0.1 count=$(( $count + 1 )) if [ $count -ge $timeout ]; then kill -9 $pid >/dev/null 2>&1 ocf_log warn "saphostexec did not respond to the method 'ListDatabases' within 60 seconds" return $OCF_ERR_GENERIC # Timeout fi done # child has already finished, now evaluate its returncode wait $pid } # # cleanup_saphostexec : make sure to cleanup the SAPHostAgent in case of any # misbehavior # cleanup_saphostexec() { pkill -9 -f "$SAPHOSTEXEC" pkill -9 -f "$SAPHOSTSRV" oscolpid=$(pgrep -f "$SAPHOSTOSCOL") # we check saposcol pid, because it # might not run under control of # saphostexec # cleanup saposcol shared memory, otherwise it will not start again if [ -n "$oscolpid" ];then kill -9 $oscolpid oscolipc=$(ipcs -m | grep "4dbe " | awk '{print $2}') if [ -n "$oscolipc" ]; then ipcrm -m $oscolipc fi fi # removing the unix domain socket file as it might have wrong permissions or # ownership - it will be recreated by saphostexec during next start [ -r /tmp/.sapstream1128 ] && rm -f /tmp/.sapstream1128 } # # check_saphostexec : Before using saphostctrl we make sure that the # saphostexec is running on the current node. # check_saphostexec() { chkrc=$OCF_SUCCESS running=$(pgrep -f "$SAPHOSTEXEC" | wc -l) if [ $running -gt 0 ]; then if background_check_saphostexec; then return $OCF_SUCCESS else ocf_log warn "saphostexec did not respond to the method 'ListDatabases' correctly (rc=$?), it will be killed now" running=0 fi fi if [ $running -eq 0 ]; then ocf_log warn "saphostexec is not running on node `hostname`, it will be started now" cleanup_saphostexec output=`$SAPHOSTEXEC -restart 2>&1` # now make sure the daemon has been started and is able to respond srvrc=1 while [ $srvrc -ne 0 ] && [ "$(pgrep -f "$SAPHOSTEXEC" | wc -l)" -gt 0 ] do sleep 1 background_check_saphostexec srvrc=$? done if [ $srvrc -eq 0 ] then ocf_log info "saphostexec on node $(hostname) was restarted !" chkrc=$OCF_SUCCESS else ocf_log error "saphostexec on node $(hostname) could not be started! - $output" chkrc=$OCF_ERR_GENERIC fi fi return $chkrc } # # sapdatabase_start : Start the SAP database # sapdatabase_start() { check_saphostexec rc=$? if [ $rc -eq $OCF_SUCCESS ] then sapuserexit PRE_START_USEREXIT "$OCF_RESKEY_PRE_START_USEREXIT" DBINST="" if [ -n "$OCF_RESKEY_DBINSTANCE" ] then DBINST="-dbinstance $OCF_RESKEY_DBINSTANCE " fi FORCE="" if ocf_is_true $OCF_RESKEY_AUTOMATIC_RECOVER then FORCE="-force" fi DBOSUSER="" if [ -n "$OCF_RESKEY_DBOSUSER" ] then DBOSUSER="-dbuser $OCF_RESKEY_DBOSUSER " fi output=`$SAPHOSTCTRL -function StartDatabase -dbname $SID -dbtype $DBTYPE $DBINST $DBOSUSER $FORCE -service` sapdatabase_monitor 1 rc=$? if [ $rc -eq 0 ] then ocf_log info "SAP database $SID started: $output" rc=$OCF_SUCCESS sapuserexit POST_START_USEREXIT "$OCF_RESKEY_POST_START_USEREXIT" else ocf_log err "SAP database $SID start failed: $output" rc=$OCF_ERR_GENERIC fi fi return $rc } # # sapdatabase_stop: Stop the SAP database # sapdatabase_stop() { check_saphostexec rc=$? if [ $rc -eq $OCF_SUCCESS ] then sapuserexit PRE_STOP_USEREXIT "$OCF_RESKEY_PRE_STOP_USEREXIT" DBINST="" if [ -n "$OCF_RESKEY_DBINSTANCE" ] then DBINST="-dbinstance $OCF_RESKEY_DBINSTANCE " fi DBOSUSER="" if [ -n "$OCF_RESKEY_DBOSUSER" ] then DBOSUSER="-dbuser $OCF_RESKEY_DBOSUSER " fi output=`$SAPHOSTCTRL -function StopDatabase -dbname $SID -dbtype $DBTYPE $DBINST $DBOSUSER -force -service` if [ $? 
-eq 0 ] then ocf_log info "SAP database $SID stopped: $output" rc=$OCF_SUCCESS else ocf_log err "SAP database $SID stop failed: $output" rc=$OCF_ERR_GENERIC fi fi sapuserexit POST_STOP_USEREXIT "$OCF_RESKEY_POST_STOP_USEREXIT" return $rc } # # sapdatabase_monitor: Can the given database instance do anything useful? # sapdatabase_monitor() { strict=$1 rc=$OCF_SUCCESS if ! ocf_is_true $strict then sapdatabase_status rc=$? else check_saphostexec rc=$? if [ $rc -eq $OCF_SUCCESS ] then count=0 DBINST="" if [ -n "$OCF_RESKEY_DBINSTANCE" ] then DBINST="-dbinstance $OCF_RESKEY_DBINSTANCE " fi if [ -n "$OCF_RESKEY_DBOSUSER" ] then DBOSUSER="-dbuser $OCF_RESKEY_DBOSUSER " fi output=`$SAPHOSTCTRL -function GetDatabaseStatus -dbname $SID -dbtype $DBTYPE $DBINST $DBOSUSER` # we have to parse the output, because the returncode doesn't tell anything about the instance status for SERVICE in `echo "$output" | grep -i 'Component[ ]*Name *[:=] [A-Za-z][A-Za-z0-9_]* (' | sed 's/^.*Component[ ]*Name *[:=] *\([A-Za-z][A-Za-z0-9_]*\).*$/\1/i'` do COLOR=`echo "$output" | grep -i "Component[ ]*Name *[:=] *$SERVICE (" | sed 's/^.*Status *[:=] *\([A-Za-z][A-Za-z0-9_]*\).*$/\1/i' | uniq` STATE=0 case $COLOR in Running) STATE=$OCF_SUCCESS;; *) STATE=$OCF_NOT_RUNNING;; esac SEARCH=`echo "$OCF_RESKEY_MONITOR_SERVICES" | sed 's/\+/\\\+/g' | sed 's/\./\\\./g'` if [ `echo "$SERVICE" | egrep -c "$SEARCH"` -eq 1 ] then if [ $STATE -eq $OCF_NOT_RUNNING ] then ocf_log err "SAP database service $SERVICE is not running with status $COLOR !" rc=$STATE fi count=1 fi done if [ $count -eq 0 -a $rc -eq $OCF_SUCCESS ] then ocf_log err "The resource does not run any services which this RA could monitor!" rc=$OCF_ERR_ARGS fi if [ $rc -ne $OCF_SUCCESS ] then ocf_log err "The SAP database $SID is not running: $output" fi fi fi return $rc } # # sapdatabase_status: Are there any database processes on this host ? # sapdatabase_status() { sid=`echo $SID | tr '[:upper:]' '[:lower:]'` SUSER=${OCF_RESKEY_DBOSUSER:-""} case $DBTYPE in ADA) SEARCH="$SID/db/pgm/kernel" [ -z "$SUSER" ] && SUSER=`grep "^SdbOwner" /etc/opt/sdb | awk -F'=' '{print $2}'` SNUM=2 ;; ORA) DBINST=${OCF_RESKEY_DBINSTANCE} DBINST=${OCF_RESKEY_DBINSTANCE:-${SID}} SEARCH="ora_[a-z][a-z][a-z][a-z]_$DBINST" if [ -z "$SUSER" ]; then id "oracle" > /dev/null 2> /dev/null && SUSER="oracle" id "ora${sid}" > /dev/null 2> /dev/null && SUSER="${SUSER:+${SUSER},}ora${sid}" fi SNUM=4 ;; DB6) SEARCH="db2[a-z][a-z][a-z]" [ -z "$SUSER" ] && SUSER="db2${sid}" SNUM=2 ;; SYB) SEARCH="dataserver" [ -z "$SUSER" ] && SUSER="syb${sid}" SNUM=1 ;; HDB) SEARCH="hdb[a-z]*server" [ -z "$SUSER" ] && SUSER="${sid}adm" SNUM=1 ;; esac [ -z "$SUSER" ] && return $OCF_ERR_INSTALLED cnt=`ps -u $SUSER -o args 2> /dev/null | grep -v grep | grep -c $SEARCH` [ $cnt -ge $SNUM ] && return $OCF_SUCCESS return $OCF_NOT_RUNNING } # # sapdatabase_recover: # sapdatabase_recover() { OCF_RESKEY_AUTOMATIC_RECOVER=1 sapdatabase_stop sapdatabase_start } # # sapdatabase_validate: Check the semantics of the input parameters # sapdatabase_validate() { rc=$OCF_SUCCESS if [ `echo "$SID" | grep -c '^[A-Z][A-Z0-9][A-Z0-9]$'` -ne 1 ] then ocf_log err "Parsing parameter SID: '$SID' is not a valid system ID!" rc=$OCF_ERR_ARGS fi case "$DBTYPE" in ORA|ADA|DB6|SYB|HDB) ;; *) ocf_log err "Parsing parameter DBTYPE: '$DBTYPE' is not a supported database type!" 
rc=$OCF_ERR_ARGS ;; esac return $rc } # # sapdatabase_init: initialize global variables at the beginning # sapdatabase_init() { OCF_RESKEY_AUTOMATIC_RECOVER_default=0 : ${OCF_RESKEY_AUTOMATIC_RECOVER=${OCF_RESKEY_AUTOMATIC_RECOVER_default}} if [ -z "$OCF_RESKEY_MONITOR_SERVICES" ] then case $DBTYPE in ORA) export OCF_RESKEY_MONITOR_SERVICES="Instance|Database|Listener" ;; ADA) export OCF_RESKEY_MONITOR_SERVICES="Database" ;; DB6) db2sid="db2`echo $SID | tr '[:upper:]' '[:lower:]'`" export OCF_RESKEY_MONITOR_SERVICES="${SID}|${db2sid}" ;; SYB) export OCF_RESKEY_MONITOR_SERVICES="Server" ;; - HDB) export OCF_RESKEY_MONITOR_SERVICES="hdbindexserver" + HDB) export OCF_RESKEY_MONITOR_SERVICES="hdbindexserver|hdbnameserver" ;; esac fi } diff --git a/heartbeat/sg_persist b/heartbeat/sg_persist index 69866e6e5..78004741f 100755 --- a/heartbeat/sg_persist +++ b/heartbeat/sg_persist @@ -1,674 +1,674 @@ #!/bin/bash # # # OCF Resource Agent compliant PERSISTENT SCSI RESERVATION resource script. # # # Copyright (c) 2011 Evgeny Nifontov and lwang@suse.com All Rights Reserved. # # "Heartbeat drbd OCF Resource Agent: 2007, Lars Marowsky-Bree" was used # as example of multistate OCF Resource Agent. # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. # # This program is distributed in the hope that it would be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # Further, this software is distributed without any warranty that it is # free of the rightful claim of any third person regarding infringement # or the like. Any license provided herein, whether implied or # otherwise, applies only to this software file. Patent licenses, if # any, provided herein do not apply to combinations of this program with # other software, or any other product whatsoever. # # You should have received a copy of the GNU General Public License # along with this program; if not, write the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. # # # OCF instance parameters # OCF_RESKEY_binary # OCF_RESKEY_devs # OCF_RESKEY_required_devs_nof # OCF_RESKEY_reservation_type # OCF_RESKEY_master_score_base # OCF_RESKEY_master_score_dev_factor # OCF_RESKEY_master_score_delay # # TODO # # 1) PROBLEM: devices which were not accessible during 'start' action, will be never registered/reserved # TODO: 'Master' and 'Salve' registers new devs in 'monitor' action # TODO: 'Master' reserves new devs in 'monitor' action ####################################################################### # Initialization: : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat} . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs # set default values : ${sg_persist_binary="sg_persist"} # binary name for the resource : ${devs=""} # device list : ${required_devs_nof=1} # number of required devices : ${reservation_type=1} # reservation type : ${master_score_base=0} # master score base : ${master_score_dev_factor=100} # device factor for master score : ${master_score_delay=30} # delay for master score ####################################################################### meta_data() { cat < 1.1 This resource agent manages SCSI PERSISTENT RESERVATIONS. "sg_persist" from sg3_utils is used, please see its documentation. 
Should be used as multistate (Master/Slave) resource Slave registers its node id ("crm_node -i") as reservation key ( --param-rk ) on each device in the "devs" list. Master reservs all devices from "devs" list with reservation "--prout-type" value from "reservation_type" parameter. Manages SCSI PERSISTENT RESERVATIONS The name of the binary that manages the resource. the binary name of the resource Device list. Multiple devices can be listed with blank space as separator. Shell wildcars are allowed. device list Minimum number of "working" devices from device list 1) existing 2) "sg_persist --read-keys \$device" works (Return code 0) resource actions "start","monitor","promote" and "validate-all" return "\$OCF_ERR_INSTALLED" if the actual number of "working" devices is less then "required_devs_nof". resource actions "stop" and "demote" tries to remove reservations and registration keys from all working devices, but always return "\$OCF_SUCCESS" minimum number of working devices reservation type reservation type master_score_base value "master_score_base" value is used in "master_score" calculation: master_score = \$master_score_base + \$master_score_dev_factor * \$working_devs if set to bigger value in sg_persist resource configuration on some node, this node will be "preferred" for master role. base master_score value Working device factor in master_score calculation each "working" device provides additional value to "master_score", so the node that sees more devices will be preferred for the "Master"-role Setting it to 0 will disable this behavior. working device factor in master_score calculation master/slave decreases/increases its master_score after delay of \$master_score_delay seconds so if some device gets inaccessible, the slave decreases its master_score first and the resource will no be watched and after this device reappears again the master increases its master_score first this can work only if the master_score_delay is bigger then monitor interval on both master and slave Setting it to 0 will disable this behavior. master_score decrease/increase delay time END exit $OCF_SUCCESS } sg_persist_init() { if ! ocf_is_root ; then ocf_log err "You must be root to perform this operation." 
exit $OCF_ERR_PERM fi SG_PERSIST=${OCF_RESKEY_binary:-"$sg_persist_binary"} check_binary $SG_PERSIST ROLE=$OCF_RESKEY_CRM_meta_role NOW=$(date +%s) RESOURCE="${OCF_RESOURCE_INSTANCE}" MASTER_SCORE_VAR_NAME="master-${OCF_RESOURCE_INSTANCE//:/-}" PENDING_VAR_NAME="pending-$MASTER_SCORE_VAR_NAME" #only works with corocync CRM_NODE="${HA_SBIN_DIR}/crm_node" NODE_ID_DEC=$($CRM_NODE -i) NODE=$($CRM_NODE -l | $GREP -w ^$NODE_ID_DEC) NODE=${NODE#$NODE_ID_DEC } NODE=${NODE% *} MASTER_SCORE_ATTRIBUTE="${HA_SBIN_DIR}/crm_attribute --lifetime=reboot --name=$MASTER_SCORE_VAR_NAME --node=$NODE" CRM_MASTER="${HA_SBIN_DIR}/crm_master --lifetime=reboot" PENDING_ATTRIBUTE="${HA_SBIN_DIR}/crm_attribute --lifetime=reboot --name=$PENDING_VAR_NAME --node=$NODE" NODE_ID_HEX=$(printf '0x%x' $NODE_ID_DEC) if [ -z "$NODE_ID_HEX" ]; then ocf_log err "Couldn't get node id with \"$CRM_NODE\"" exit $OCF_ERR_INSTALLED fi ocf_log debug "$RESOURCE: NODE:$NODE, ROLE:$ROLE, NODE_ID DEC:$NODE_ID_DEC HEX:$NODE_ID_HEX" DEVS=${OCF_RESKEY_devs:=$devs} REQUIRED_DEVS_NOF=${OCF_RESKEY_required_devs_nof:=$required_devs_nof} RESERVATION_TYPE=${OCF_RESKEY_reservation_type:=$reservation_type} MASTER_SCORE_BASE=${OCF_RESKEY_master_score_base:=$master_score_base} MASTER_SCORE_DEV_FACTOR=${OCF_RESKEY_master_score_dev_factor:=$master_score_dev_factor} MASTER_SCORE_DELAY=${OCF_RESKEY_master_score_delay:=$master_score_delay} ocf_log debug "$RESOURCE: DEVS=$DEVS" ocf_log debug "$RESOURCE: REQUIRED_DEVS_NOF=$REQUIRED_DEVS_NOF" ocf_log debug "$RESOURCE: RESERVATION_TYPE=$RESERVATION_TYPE" ocf_log debug "$RESOURCE: MASTER_SCORE_BASE=$MASTER_SCORE_BASE" ocf_log debug "$RESOURCE: MASTER_SCORE_DEV_FACTOR=$MASTER_SCORE_DEV_FACTOR" ocf_log debug "$RESOURCE: MASTER_SCORE_DELAY=$MASTER_SCORE_DELAY" #expand path wildcards DEVS=$(echo $DEVS) if [ -z "$DEVS" ]; then ocf_log err "\"devs\" not defined" exit $OCF_ERR_INSTALLED fi sg_persist_check_devs sg_persist_get_status } sg_persist_action_usage() { cat <&1` [ $? -eq 0 ] || continue WORKING_DEVS+=($dev) echo "$READ_KEYS" | $GREP -qw $NODE_ID_HEX\$ [ $? -eq 0 ] || continue REGISTERED_DEVS+=($dev) READ_RESERVATION=`$SG_PERSIST --in --read-reservation $dev 2>&1` [ $? -eq 0 ] || continue echo "$READ_RESERVATION" | $GREP -qw $NODE_ID_HEX\$ if [ $? 
-eq 0 ]; then RESERVED_DEVS+=($dev) fi reservation_key=`echo $READ_RESERVATION | $GREP -o 'Key=0x[0-9a-f]*' | $GREP -o '0x[0-9a-f]*'` if [ -n "$reservation_key" ]; then DEVS_WITH_RESERVATION+=($dev) RESERVATION_KEYS+=($reservation_key) fi done WORKING_DEVS_NOF=${#WORKING_DEVS[*]} ocf_log debug "$RESOURCE: working devices: `sg_persist_echo_array ${WORKING_DEVS[*]}`" ocf_log debug "$RESOURCE: number of working devices: $WORKING_DEVS_NOF" ocf_log debug "$RESOURCE: registered devices: `sg_persist_echo_array ${REGISTERED_DEVS[*]}`" ocf_log debug "$RESOURCE: reserved devices: `sg_persist_echo_array ${RESERVED_DEVS[*]}`" ocf_log debug "$RESOURCE: devices with reservation: `sg_persist_echo_array ${DEVS_WITH_RESERVATION[*]}`" ocf_log debug "$RESOURCE: reservation keys: `sg_persist_echo_array ${RESERVATION_KEYS[*]}`" MASTER_SCORE=$(($MASTER_SCORE_BASE + $MASTER_SCORE_DEV_FACTOR*$WORKING_DEVS_NOF)) ocf_log debug "$RESOURCE: master_score: $MASTER_SCORE_BASE + $MASTER_SCORE_DEV_FACTOR*$WORKING_DEVS_NOF = $MASTER_SCORE" } sg_persist_check_devs() { for dev in $DEVS do if [ -e "$dev" ]; then EXISTING_DEVS+=($dev) fi done EXISTING_DEVS_NOF=${#EXISTING_DEVS[*]} if [ $EXISTING_DEVS_NOF -lt $REQUIRED_DEVS_NOF ]; then ocf_log err "Number of existing devices=$EXISTING_DEVS_NOF less then required_devs_nof=$REQUIRED_DEVS_NOF" exit $OCF_ERR_INSTALLED fi } sg_persist_is_registered() { for registered_dev in ${REGISTERED_DEVS[*]} do if [ "$registered_dev" == "$1" ]; then return 0 fi done return 1 } sg_persist_get_reservation_key() { for array_index in ${!DEVS_WITH_RESERVATION[*]} do if [ "${DEVS_WITH_RESERVATION[$array_index]}" == "$1" ]; then echo ${RESERVATION_KEYS[$array_index]} return 0 fi done echo "" } sg_persist_echo_array() { str_count=0 arr_str="" for str in "$@" do arr_str="$arr_str[$str_count]:$str " str_count=$(($str_count+1)) done echo $arr_str } sg_persist_parse_act_pending() { ACT_PENDING_TS=0 ACT_PENDING_SCORE=0 if [ -n "$ACT_PENDING" ]; then ACT_PENDING_TS=${ACT_PENDING%%_*} ACT_PENDING_SCORE=${ACT_PENDING##*_} fi } sg_persist_clear_pending() { if [ -n "$ACT_PENDING" ]; then DO_PENDING_UPDATE="YES" NEW_PENDING="" fi } sg_persist_new_master_score() { DO_MASTER_SCORE_UPDATE="YES" NEW_MASTER_SCORE=$1 } sg_persist_new_pending() { DO_PENDING_UPDATE="YES" NEW_PENDING=$1 } # Functions invoked by resource manager actions sg_persist_action_start() { ocf_run $MASTER_SCORE_ATTRIBUTE --update=$MASTER_SCORE ocf_run $PENDING_ATTRIBUTE --update="" if [ $WORKING_DEVS_NOF -lt $REQUIRED_DEVS_NOF ]; then ocf_log err "$RESOURCE: Number of working devices=$WORKING_DEVS_NOF less then required_devs_nof=$REQUIRED_DEVS_NOF" exit $OCF_ERR_GENERIC fi for dev in ${WORKING_DEVS[*]} do if sg_persist_is_registered $dev ; then : OK else ocf_run $SG_PERSIST --out --no-inquiry --register --param-rk=0 --param-sark=$NODE_ID_HEX $dev if [ $? 
-ne $OCF_SUCCESS ] then return $OCF_ERR_GENERIC fi fi done return $OCF_SUCCESS } sg_persist_action_stop() { if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then ocf_log debug "$RESOURCE stop: already no registrations" else # Clear preference for becoming master ocf_run $MASTER_SCORE_ATTRIBUTE --delete ocf_run $PENDING_ATTRIBUTE --delete for dev in ${REGISTERED_DEVS[*]} do ocf_run $SG_PERSIST --out --no-inquiry --register --param-rk=$NODE_ID_HEX --param-sark=0 $dev done fi return $OCF_SUCCESS } sg_persist_action_monitor() { - ACT_MASTER_SCORE=`$MASTER_SCORE_ATTRIBUTE --query --quiet 2>&1` + ACT_MASTER_SCORE=`$MASTER_SCORE_ATTRIBUTE --query --quiet 2>/dev/null` ocf_log debug "$RESOURCE monitor: ACT_MASTER_SCORE=$ACT_MASTER_SCORE" - ACT_PENDING=`$PENDING_ATTRIBUTE --query --quiet 2>&1` + ACT_PENDING=`$PENDING_ATTRIBUTE --query --quiet 2>/dev/null` ocf_log debug "$RESOURCE monitor: ACT_PENDING=$ACT_PENDING" sg_persist_parse_act_pending ocf_log debug "$RESOURCE monitor: ACT_PENDING_TS=$ACT_PENDING_TS" ocf_log debug "$RESOURCE monitor: ACT_PENDING_VAL=$ACT_PENDING_SCORE" ocf_log debug "$MASTER_SCORE, $ACT_MASTER_SCORE, $ROLE" DO_MASTER_SCORE_UPDATE="NO" DO_PENDING_UPDATE="NO" if [ -n "$ACT_MASTER_SCORE" ] then if [ $ACT_MASTER_SCORE -eq $MASTER_SCORE ]; then sg_persist_clear_pending else case $ROLE in Master) if [ $MASTER_SCORE -lt $ACT_MASTER_SCORE ]; then if [ -n "$ACT_PENDING" ] then if [ $(($NOW-$ACT_PENDING_TS-$MASTER_SCORE_DELAY)) -ge 0 ]; then sg_persist_new_master_score $MASTER_SCORE sg_persist_clear_pending fi else if [ $MASTER_SCORE_DELAY -eq 0 ]; then sg_persist_new_master_score $MASTER_SCORE sg_persist_clear_pending else sg_persist_new_pending "${NOW}_${MASTER_SCORE}" fi fi else sg_persist_new_master_score $MASTER_SCORE sg_persist_clear_pending fi ;; Slave) if [ $MASTER_SCORE -gt $ACT_MASTER_SCORE ]; then if [ -n "$ACT_PENDING" ]; then if [ $(($NOW-$ACT_PENDING_TS-$MASTER_SCORE_DELAY)) -ge 0 ]; then sg_persist_new_master_score $MASTER_SCORE sg_persist_clear_pending fi else if [ $MASTER_SCORE_DELAY -eq 0 ]; then sg_persist_new_master_score $MASTER_SCORE sg_persist_clear_pending else sg_persist_new_pending "${NOW}_${MASTER_SCORE}" fi fi else sg_persist_new_master_score $MASTER_SCORE sg_persist_clear_pending fi ;; *) ;; esac fi fi if [ $DO_MASTER_SCORE_UPDATE == "YES" ]; then ocf_run $MASTER_SCORE_ATTRIBUTE --update=$NEW_MASTER_SCORE fi if [ $DO_PENDING_UPDATE == "YES" ]; then ocf_run $PENDING_ATTRIBUTE --update=$NEW_PENDING fi if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then ocf_log debug "$RESOURCE monitor: no registrations" return $OCF_NOT_RUNNING fi if [ ${#RESERVED_DEVS[*]} -eq ${#WORKING_DEVS[*]} ]; then return $OCF_RUNNING_MASTER fi if [ ${#REGISTERED_DEVS[*]} -eq ${#WORKING_DEVS[*]} ]; then if [ $RESERVATION_TYPE -eq 7 ] || [ $RESERVATION_TYPE -eq 8 ]; then if [ ${#DEVS_WITH_RESERVATION[*]} -gt 0 ]; then return $OCF_RUNNING_MASTER else return $OCF_SUCCESS fi else return $OCF_SUCCESS fi fi ocf_log err "$RESOURCE monitor: unexpected state" return $OCF_ERR_GENERIC } sg_persist_action_promote() { if [ ${#RESERVED_DEVS[*]} -gt 0 ]; then ocf_log info "$RESOURCE promote: already master" return $OCF_SUCCESS fi for dev in ${WORKING_DEVS[*]} do reservation_key=`sg_persist_get_reservation_key $dev` case $RESERVATION_TYPE in 1|3|5|6) if [ -z "$reservation_key" ]; then ocf_run $SG_PERSIST --out --no-inquiry --reserve --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev if [ $? 
-ne $OCF_SUCCESS ]; then return $OCF_ERR_GENERIC fi else ocf_run $SG_PERSIST --out --no-inquiry --preempt --param-sark=$reservation_key --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev if [ $? -ne $OCF_SUCCESS ]; then return $OCF_ERR_GENERIC fi fi ;; 7|8) if [ -z "$reservation_key" ]; then ocf_run $SG_PERSIST --out --no-inquiry --reserve --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev if [ $? -ne $OCF_SUCCESS ] then return $OCF_ERR_GENERIC fi else ocf_log info "$RESOURCE promote: there already exist an reservation holder, all registrants become reservation holders" return $OCF_SUCCESS fi ;; *) return $OCF_ERR_ARGS ;; esac done return $OCF_SUCCESS } sg_persist_action_demote() { case $RESERVATION_TYPE in 1|3|5|6) if [ ${#RESERVED_DEVS[*]} -eq 0 ]; then ocf_log info "$RESOURCE demote: already slave" return $OCF_SUCCESS fi for dev in ${RESERVED_DEVS[*]} do ocf_run $SG_PERSIST --out --no-inquiry --release --param-rk=$NODE_ID_HEX --prout-type=$RESERVATION_TYPE $dev if [ $? -ne $OCF_SUCCESS ]; then return $OCF_ERR_GENERIC fi done ;; 7|8) #in case of 7/8, --release won't release the reservation unless unregister the key. if [ ${#REGISTERED_DEVS[*]} -eq 0 ]; then ocf_log info "$RESOURCE demote: already slave" return $OCF_SUCCESS fi for dev in ${REGISTERED_DEVS[*]} do ocf_run $SG_PERSIST --out --no-inquiry --register --param-rk=$NODE_ID_HEX --param-sark=0 $dev if [ $? -ne $OCF_SUCCESS ]; then return $OCF_ERR_GENERIC fi done ;; *) return $OCF_ERR_ARGS ;; esac return $OCF_SUCCESS } sg_persist_action_notify() { local n_type="$OCF_RESKEY_CRM_meta_notify_type" local n_op="$OCF_RESKEY_CRM_meta_notify_operation" set -- $OCF_RESKEY_CRM_meta_notify_active_resource local n_active="$#" set -- $OCF_RESKEY_CRM_meta_notify_stop_resource local n_stop="$#" set -- $OCF_RESKEY_CRM_meta_notify_start_resource local n_start="$#" ocf_log debug "$RESOURCE notify: $n_type for $n_op - counts: active $n_active - starting $n_start - stopping $n_stop" return $OCF_SUCCESS } sg_persist_action_validate_all () { if [ "$OCF_RESKEY_CRM_meta_master_max" != "1" ] && [ "$RESERVATION_TYPE" != "7" ] && [ "$RESERVATION_TYPE" != "8" ]; then ocf_log err "Master options misconfigured." exit $OCF_ERR_CONFIGURED fi return $OCF_SUCCESS } if [ $# -ne 1 ]; then echo "Incorrect parameter count." sg_persist_action_usage exit $OCF_ERR_ARGS fi ACTION=$1 case $ACTION in meta-data) meta_data ;; validate-all) sg_persist_init sg_persist_action_validate_all ;; start|promote|monitor|stop|demote) ocf_log debug "$RESOURCE: starting action \"$ACTION\"" sg_persist_init sg_persist_action_$ACTION exit $? ;; notify) sg_persist_action_notify exit $? ;; usage|help) sg_persist_action_usage exit $OCF_SUCCESS ;; *) sg_persist_action_usage exit $OCF_ERR_ARGS ;; esac diff --git a/resource-agents.spec.in b/resource-agents.spec.in index d87364dbc..78021f1db 100644 --- a/resource-agents.spec.in +++ b/resource-agents.spec.in @@ -1,352 +1,356 @@ # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed # upon. The license for this file, and modifications and additions to the # file, is the same license as for the pristine package itself (unless the # license for the pristine package is not an Open Source License, in which # case the license is the MIT License). An "Open Source License" is a # license that conforms to the Open Source Definition (Version 1.9) # published by the Open Source Initiative. 
# %global rcver @rcver@ %global alphatag @alphatag@ %global numcomm @numcomm@ %global dirty @dirty@ # # Since this spec file supports multiple distributions, ensure we # use the correct group for each. # # SSLeay (required by ldirectord) %if 0%{?suse_version} %global SSLeay perl-Net_SSLeay %else %global SSLeay perl-Net-SSLeay %endif # determine the ras-set to process based on configure invokation %bcond_@rgmanager@ rgmanager %bcond_@linux-ha@ linuxha # build with HA_BIN compatibility for the existing Heartbeat stack %bcond_@compat-habindir@ compat_habindir %if %{with compat_habindir} %global _libexecdir %{_libdir} %endif Name: resource-agents Summary: Open Source HA Reusable Cluster Resource Scripts Version: @version@ Release: @specver@%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist} License: GPLv2+ and LGPLv2+ URL: https://github.com/ClusterLabs/resource-agents %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} Group: System Environment/Base %else Group: Productivity/Clustering/HA %endif Source0: %{name}-%{version}%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:-%{alphatag}}%{?dirty:-%{dirty}}.tar.bz2 Obsoletes: heartbeat-resources <= %{version} Provides: heartbeat-resources = %{version} ## Setup/build bits BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) # Build dependencies BuildRequires: automake autoconf pkgconfig BuildRequires: perl python-devel BuildRequires: libxslt glib2-devel BuildRequires: which %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} BuildRequires: cluster-glue-libs-devel BuildRequires: docbook-style-xsl docbook-dtds %if 0%{?rhel} == 0 BuildRequires: libnet-devel %endif %endif %if 0%{?suse_version} %if 0%{?suse_version} >= 1140 BuildRequires: libnet1 %else BuildRequires: libnet %endif BuildRequires: libglue-devel BuildRequires: libxslt docbook_4 docbook-xsl-stylesheets %endif ## Runtime deps # system tools shared by several agents Requires: /bin/bash /bin/grep /bin/sed /bin/gawk Requires: /bin/ps /usr/bin/pkill /bin/hostname /bin/netstat Requires: /sbin/fuser /bin/mount # Filesystem / fs.sh / netfs.sh Requires: /sbin/fsck Requires: /sbin/fsck.ext2 /sbin/fsck.ext3 /sbin/fsck.ext4 Requires: /sbin/fsck.xfs Requires: /sbin/mount.nfs /sbin/mount.nfs4 /sbin/mount.cifs # IPaddr2 Requires: /sbin/ip # LVM / lvm.sh Requires: /sbin/lvm # nfsserver / netfs.sh Requires: /usr/sbin/rpc.nfsd /sbin/rpc.statd /usr/sbin/rpc.mountd # rgmanager %if %{with rgmanager} # ip.sh Requires: /usr/sbin/ethtool Requires: /sbin/rdisc /usr/sbin/arping /bin/ping /bin/ping6 #nfsexport.sh Requires: /sbin/findfs Requires: /sbin/quotaon /sbin/quotacheck %endif %description A set of scripts to interface with several services to operate in a High Availability environment for both Pacemaker and rgmanager service managers. 
%if %{with linuxha}
%package -n ldirectord
License: GPLv2+
Summary: A Monitoring Daemon for Maintaining High Availability Resources
%if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel}
Group: System Environment/Daemons
%else
Group: Productivity/Clustering/HA
%endif
Obsoletes: heartbeat-ldirectord <= %{version}
Provides: heartbeat-ldirectord = %{version}
%if 0%{?fedora} > 18 || 0%{?centos_version} > 6 || 0%{?rhel} > 6
BuildRequires: perl-podlators
%endif
Requires: %{SSLeay} perl-libwww-perl perl-MailTools
Requires: ipvsadm logrotate
%if 0%{?fedora_version}
Requires: perl-Net-IMAP-Simple-SSL
Requires(post): /sbin/chkconfig
Requires(preun): /sbin/chkconfig
%endif
%if %{defined systemd_requires}
BuildRequires: systemd
%{?systemd_requires}
%endif

%description -n ldirectord
The Linux Director Daemon (ldirectord) was written by Jacob Rief.
ldirectord is a stand alone daemon for monitoring the services on
real servers. Currently, HTTP, HTTPS, and FTP services are supported.
ldirectord is simple to install and works with Pacemaker
(http://clusterlabs.org/).

See 'ldirectord -h' and linux-ha/doc/ldirectord for more information.
%endif

%prep
%if 0%{?suse_version} == 0 && 0%{?fedora} == 0 && 0%{?centos_version} == 0 && 0%{?rhel} == 0
%{error:Unable to determine the distribution/version. This is generally caused by missing /etc/rpm/macros.dist. Please install the correct build packages or define the required macros manually.}
exit 1
%endif
%setup -q -n %{name}-%{version}%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:-%{alphatag}}%{?dirty:-%{dirty}}

%build
if [ ! -f configure ]; then
    ./autogen.sh
fi

%if 0%{?fedora} >= 11 || 0%{?centos_version} > 5 || 0%{?rhel} > 5
CFLAGS="$(echo '%{optflags}')"
%global conf_opt_fatal "--enable-fatal-warnings=no"
%else
CFLAGS="${CFLAGS} ${RPM_OPT_FLAGS}"
%global conf_opt_fatal "--enable-fatal-warnings=yes"
%endif

%if %{with rgmanager}
%global rasset rgmanager
%endif
%if %{with linuxha}
%global rasset linux-ha
%endif
%if %{with rgmanager} && %{with linuxha}
%global rasset all
%endif

export CFLAGS

%configure \
    %{?conf_opt_rsctmpdir:%conf_opt_rsctmpdir} \
    %{conf_opt_fatal} \
%if %{defined _unitdir}
    --with-systemdsystemunitdir=%{_unitdir} \
%endif
    --with-pkg-name=%{name} \
    --with-ras-set=%{rasset}

%if %{defined jobs}
JFLAGS="$(echo '-j%{jobs}')"
%else
JFLAGS="$(echo '%{_smp_mflags}')"
%endif

make $JFLAGS

%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot}

## tree fixup
# remove docs (there is only one and they should come from doc sections in files)
rm -rf %{buildroot}/usr/share/doc/resource-agents

%if %{with linuxha}
%if 0%{?suse_version}
test -d %{buildroot}/sbin || mkdir %{buildroot}/sbin
(
    cd %{buildroot}/sbin
%if %{defined _unitdir}
    ln -s /usr/sbin/service rcldirectord
%else
    ln -sf /%{_sysconfdir}/init.d/ldirectord rcldirectord
%endif
) || true
%endif
%endif

%clean
rm -rf %{buildroot}

%files
%defattr(-,root,root)
%doc AUTHORS COPYING COPYING.GPLv3 COPYING.LGPL ChangeLog
%if %{with linuxha}
%doc doc/README.webapps
%doc %{_datadir}/%{name}/ra-api-1.dtd
%doc %{_datadir}/%{name}/metadata.rng
%endif
%if %{with rgmanager}
%{_datadir}/cluster
%{_sbindir}/rhev-check.sh
%endif
%if %{with linuxha}
%dir /usr/lib/ocf
%dir /usr/lib/ocf/resource.d
%dir /usr/lib/ocf/lib
/usr/lib/ocf/lib/heartbeat
/usr/lib/ocf/resource.d/heartbeat
%if %{with rgmanager}
/usr/lib/ocf/resource.d/redhat
%endif
+%if %{defined _unitdir}
+%{_unitdir}/resource-agents-deps.target
+%endif
+
%dir %{_datadir}/%{name}
%dir %{_datadir}/%{name}/ocft
%{_datadir}/%{name}/ocft/configs
%{_datadir}/%{name}/ocft/caselib
%{_datadir}/%{name}/ocft/helpers.sh
%{_datadir}/%{name}/ocft/runocft
%{_datadir}/%{name}/ocft/runocft.prereq
%{_datadir}/%{name}/ocft/README
%{_datadir}/%{name}/ocft/README.zh_CN
%{_sbindir}/ocf-tester
%{_sbindir}/ocft
%{_sbindir}/sfex_init
%{_sbindir}/sfex_stat
%{_includedir}/heartbeat
%dir %attr (1755, root, root) %{_var}/run/resource-agents
%{_mandir}/man7/*.7*
%{_mandir}/man8/ocf-tester.8*
%{_mandir}/man8/sfex_init.8*

# For compatibility with pre-existing agents
%dir %{_sysconfdir}/ha.d
%{_sysconfdir}/ha.d/shellfuncs

%{_libexecdir}/heartbeat

%post -n resource-agents
if [ $1 = 2 ]; then
    if [ -d %{_var}/run/heartbeat/rsctmp ]; then
        cp -fpr %{_var}/run/heartbeat/rsctmp/* %{_var}/run/resource-agents/ 1>/dev/null 2>&1
        rm -fr %{_var}/run/heartbeat/rsctmp
    fi
fi
%if %{with rgmanager}
ccs_update_schema > /dev/null 2>&1 ||:
%endif

%if 0%{?suse_version}
%preun -n ldirectord
%if %{defined _unitdir}
%service_del_preun ldirectord.service
%else
%stop_on_removal ldirectord
%endif

%postun -n ldirectord
%if %{defined _unitdir}
%service_del_postun ldirectord.service
%else
%insserv_cleanup
%endif

%post -n ldirectord
%if %{defined _unitdir}
%service_add_post ldirectord.service
%endif

%pre -n ldirectord
%if %{defined _unitdir}
%service_add_pre ldirectord.service
%endif
%endif

%if 0%{?fedora}
%preun -n ldirectord
/sbin/chkconfig --del ldirectord

%postun -n ldirectord -p /sbin/ldconfig

%post -n ldirectord
/sbin/chkconfig --add ldirectord
%endif

%files -n ldirectord
%defattr(-,root,root)
%{_sbindir}/ldirectord
%doc ldirectord/ldirectord.cf COPYING
%{_mandir}/man8/ldirectord.8*
%config(noreplace) %{_sysconfdir}/logrotate.d/ldirectord
%dir %{_sysconfdir}/ha.d
%dir %{_sysconfdir}/ha.d/resource.d
%{_sysconfdir}/ha.d/resource.d/ldirectord
%if %{defined _unitdir}
%{_unitdir}/ldirectord.service
%exclude %{_sysconfdir}/init.d/ldirectord
%else
%{_sysconfdir}/init.d/ldirectord
%endif
%if 0%{?suse_version}
/sbin/rcldirectord
%endif
%if 0%{?fedora}
/usr/lib/ocf/resource.d/heartbeat/ldirectord
%endif
%endif

%changelog
* @date@ Autotools generated version - @version@-@specver@-@numcomm@.@alphatag@.@dirty@
- Autotools generated version
diff --git a/systemd/Makefile.am b/systemd/Makefile.am
new file mode 100644
index 000000000..fe13de28a
--- /dev/null
+++ b/systemd/Makefile.am
@@ -0,0 +1,23 @@
+#
+# Copyright (C) 2017 Oyvind Albrigtsen
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+
+MAINTAINERCLEANFILES = Makefile.in
+
+if HAVE_SYSTEMD
+dist_systemdsystemunit_DATA = resource-agents-deps.target
+endif
diff --git a/systemd/resource-agents-deps.target b/systemd/resource-agents-deps.target
new file mode 100644
index 000000000..1c4fdffe8
--- /dev/null
+++ b/systemd/resource-agents-deps.target
@@ -0,0 +1,2 @@
+[Unit]
+Description=resource-agents dependencies
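The new resource-agents-deps.target carries only a Description and installs no behaviour of its own; it is a synchronization point that local services can be ordered against so they are up before cluster-managed resources start. A minimal sketch of that usage, assuming a hypothetical local service foo.service and the standard systemd drop-in mechanism (the drop-in path and service name below are illustrative, not part of this patch):

    # /etc/systemd/system/resource-agents-deps.target.d/foo.conf (hypothetical)
    [Unit]
    Requires=foo.service
    After=foo.service

After placing such a drop-in, running "systemctl daemon-reload" lets systemd pull in and order foo.service ahead of resource-agents-deps.target the next time the target is started.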