diff --git a/.gitignore b/.gitignore index 0c259b5cf..e2b7c039c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,155 +1,156 @@ *.swp Makefile.in aclocal.m4 autoconf autoheader autom4te.cache automake autoscan.log compile configure configure.scan config.guess config.log config.sub config.status Makefile depcomp install-sh libtoolize ltmain.sh libtool make/stamp-h1 make/clusterautoconfig.h* missing resource-agents.spec *.pc .deps .libs *.o *.la *.lo *.loT rgmanager/src/resources/* !rgmanager/src/resources/*.in rgmanager/src/resources/Makefile.in rgmanager/src/resources/utils/config-utils.sh resource-agents-* .version # generated by ./autogen.sh && ./configure doc/man/*.7 doc/man/*.xml heartbeat/ocf-binaries heartbeat/ocf-directories heartbeat/ocf-shellfuncs heartbeat/send_ua heartbeat/shellfuncs heartbeat/*.pyc heartbeat/AoEtarget heartbeat/CTDB heartbeat/ManageRAID heartbeat/ManageVE heartbeat/Squid heartbeat/SysInfo heartbeat/aws-vpc-route53 heartbeat/azure-events +heartbeat/azure-events-az heartbeat/clvm heartbeat/conntrackd heartbeat/dnsupdate heartbeat/dummypy heartbeat/eDir88 heartbeat/fio heartbeat/galera heartbeat/gcp-pd-move heartbeat/gcp-vpc-move-ip heartbeat/gcp-vpc-move-route heartbeat/gcp-vpc-move-vip heartbeat/iSCSILogicalUnit heartbeat/iSCSITarget heartbeat/jira heartbeat/kamailio heartbeat/lxc heartbeat/lxd-info heartbeat/machine-info heartbeat/mariadb heartbeat/mpathpersist heartbeat/nfsnotify heartbeat/openstack-info heartbeat/rabbitmq-cluster heartbeat/redis heartbeat/rsyslog heartbeat/sg_persist heartbeat/slapd heartbeat/smb-share heartbeat/storage-mon heartbeat/sybaseASE heartbeat/syslog-ng heartbeat/vsftpd include/agent_config.h include/config.h include/config.h.in include/stamp-h1 include/stamp-h2 ldirectord/ldirectord ldirectord/ldirectord.8 ldirectord/OCF/ldirectord ldirectord/init.d/ldirectord ldirectord/init.d/ldirectord.debian ldirectord/init.d/ldirectord.debian.default ldirectord/systemd/ldirectord.service systemd/resource-agents.conf tools/findif tools/nfsconvert tools/ocf-tester tools/send_arp tools/storage_mon tools/tickle_tcp tools/ocft/README tools/ocft/README.zh_CN tools/ocft/caselib tools/ocft/ocft *.cache *.upgrade.xml py-compile ylwrap __pycache__ # BEAM Entries *.beam parser-messages MISC_ERRORS cscope.files cscope.out patches updates logs # OS and Editor Artifacts .DS_Store .bomb *.rej *.bz2 *.gz *.xz *.sed *.diff *.patch *.gres *~ # Misc HTML TAGS GPATH GRTAGS GSYMS GTAGS .gres.* *.orig .gdb_history *~ \#* .changes pacemaker.tar.gz diff --git a/configure.ac b/configure.ac index eeecfad0e..5716a2be2 100644 --- a/configure.ac +++ b/configure.ac @@ -1,1077 +1,1085 @@ dnl dnl autoconf for Agents dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.63) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services AC_INIT([resource-agents], m4_esyscmd([make/git-version-gen .tarball-version]), [developers@clusterlabs.org]) AC_USE_SYSTEM_EXTENSIONS CRM_DTD_VERSION="1.0" AC_CONFIG_AUX_DIR(.) AC_CONFIG_MACRO_DIR([m4]) AC_CANONICAL_HOST dnl Where #defines go (e.g. 
`AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except lha_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/agent_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/agent_config.h.in to have configure include new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/agent_config.h) ALL_LINGUAS="en fr" AC_ARG_WITH(version, [ --with-version=version Override package version (if you're a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) dnl dnl AM_INIT_AUTOMAKE([1.11.1 foreign dist-bzip2 dist-xz]) dnl AM_INIT_AUTOMAKE([1.10.1 foreign dist-bzip2]) AC_DEFINE_UNQUOTED(AGENTS_VERSION, "$PACKAGE_VERSION", Current agents version) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== # check stolen from gnulib/m4/gnu-make.m4 if ! ${MAKE-make} --version /cannot/make/this >/dev/null 2>&1; then AC_MSG_ERROR([you don't seem to have GNU make; it is required]) fi AC_PROG_CC dnl Can force other with environment variable "CC". AM_PROG_CC_C_O AC_PROG_CC_STDC AC_PROG_AWK AC_PROG_LN_S AC_PROG_INSTALL AC_PROG_MAKE_SET AC_C_STRINGIZE AC_C_INLINE AC_TYPE_SIZE_T AC_TYPE_SSIZE_T AC_TYPE_UID_T AC_TYPE_UINT16_T AC_TYPE_UINT8_T AC_TYPE_UINT32_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CPPFLAGS="$@" AC_MSG_CHECKING(whether $CC supports "$@") AC_PREPROC_IFELSE([AC_LANG_PROGRAM([])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } extract_header_define() { AC_MSG_CHECKING(for $2 in $1) Cfile=$srcdir/extract_define.$2.${$} printf "#include \n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` AC_MSG_RESULT($value) printf $value rm -f ${Cfile}.c ${Cfile} } AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac # ordering is important, PKG_PROG_PKG_CONFIG is to be invoked before any other PKG_* related stuff PKG_PROG_PKG_CONFIG(0.18) # PKG_CHECK_MODULES will fail if systemd is not found by default, so make sure # we set the proper vars and deal with it PKG_CHECK_MODULES([systemd], [systemd], [HAS_SYSTEMD=yes], [HAS_SYSTEMD=no]) if test "x$HAS_SYSTEMD" == "xyes"; then PKG_CHECK_VAR([SYSTEMD_UNIT_DIR], [systemd], [systemdsystemunitdir]) if test "x$SYSTEMD_UNIT_DIR" == "x"; then AC_MSG_ERROR([Unable to detect systemd unit dir automatically]) fi PKG_CHECK_VAR([SYSTEMD_TMPFILES_DIR], [systemd], [tmpfilesdir]) if test "x$SYSTEMD_TMPFILES_DIR" == "x"; 
then AC_MSG_ERROR([Unable to detect systemd tmpfiles directory automatically]) fi # sanitize systed vars when using non standard prefix if test "$prefix" != "/usr"; then SYSTEMD_UNIT_DIR="$prefix/$SYSTEMD_UNIT_DIR" AC_SUBST([SYSTEMD_UNIT_DIR]) SYSTEMD_TMPFILES_DIR="$prefix/$SYSTEMD_TMPFILES_DIR" AC_SUBST([SYSTEMD_TMPFILES_DIR]) fi fi AM_CONDITIONAL(HAVE_SYSTEMD, [test "x$HAS_SYSTEMD" == xyes ]) dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi force GCC to compile to ANSI/ANSI standard for older compilers. [default=yes]]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]]) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]], [ INITDIR="$withval" ]) OCF_ROOT_DIR="${prefix}/lib/ocf" AC_ARG_WITH(ocf-root, [ --with-ocf-root=DIR directory for OCF scripts [${OCF_ROOT_DIR}]], [ OCF_ROOT_DIR="$withval" ]) HA_RSCTMPDIR=${localstatedir}/run/resource-agents AC_ARG_WITH(rsctmpdir, [ --with-rsctmpdir=DIR directory for resource agents state files [${HA_RSCTMPDIR}]], [ HA_RSCTMPDIR="$withval" ]) AC_ARG_ENABLE([libnet], [ --enable-libnet Use libnet for ARP based functionality, [default=try]], [enable_libnet="$enableval"], [enable_libnet=try]) BUILD_RGMANAGER=0 BUILD_LINUX_HA=0 RASSET=linux-ha AC_ARG_WITH(ras-set, [ --with-ras-set=SET build/install only linux-ha, rgmanager or all resource-agents [default: linux-ha]], [ RASSET="$withval" ]) if test x$RASSET = xyes || test x$RASSET = xall ; then BUILD_RGMANAGER=1 BUILD_LINUX_HA=1 fi if test x$RASSET = xlinux-ha; then BUILD_LINUX_HA=1 fi if test x$RASSET = xrgmanager; then BUILD_RGMANAGER=1 fi if test $BUILD_LINUX_HA -eq 0 && test $BUILD_RGMANAGER -eq 0; then AC_MSG_ERROR([Are you really sure you want this package?]) exit 1 fi AM_CONDITIONAL(BUILD_LINUX_HA, test $BUILD_LINUX_HA -eq 1) AM_CONDITIONAL(BUILD_RGMANAGER, test $BUILD_RGMANAGER -eq 1) AC_ARG_WITH(compat-habindir, [ --with-compat-habindir use HA_BIN directory with compatibility for the Heartbeat stack [${libexecdir}]], [], [with_compat_habindir=no]) AM_CONDITIONAL(WITH_COMPAT_HABINDIR, test "x$with_compat_habindir" != "xno") dnl =============================================== dnl General Processing dnl =============================================== echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Heartbeat, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done if test -z $INITDIR then INITDIR=${sysconfdir}/init.d fi AC_MSG_RESULT($INITDIR);; esac AC_SUBST(INITDIR) if test "${prefix}" = "/usr"; then INITDIRPREFIX="$INITDIR" else INITDIRPREFIX="${prefix}/$INITDIR" fi AC_SUBST(INITDIRPREFIX) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in dnl For consistency with Heartbeat, map NONE->$prefix *prefix*|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib 
do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac if test "x$with_compat_habindir" != "xno" ; then libexecdir=${libdir} fi dnl Expand autoconf variables so that we dont end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl docdir is a recent addition to autotools eval docdir="`eval echo ${docdir}`" if test "x$docdir" = "x"; then docdir="`eval echo ${datadir}/doc`" fi AC_SUBST(docdir) dnl Home-grown variables eval INITDIR="${INITDIR}" for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". REBOOT_OPTIONS="-f" POWEROFF_OPTIONS="-f" case "$host_os" in *bsd*) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" ;; *solaris*) REBOOT_OPTIONS="-n" POWEROFF_OPTIONS="-n" LDFLAGS+=" -lssp -lssp_nonshared" ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) POWEROFF_OPTIONS="-nf" REBOOT_OPTIONS="-nf" ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_DEFINE_UNQUOTED(HA_LOG_FACILITY, LOG_DAEMON, Default logging facility) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) case "$host_cpu" in s390x)U64T="%lu";; *64*) U64T="%lu";; *) U64T="%llu";; esac AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl Variables needed for substitution AC_CHECK_HEADERS(heartbeat/glue_config.h) if test "$ac_cv_header_heartbeat_glue_config_h" != "yes"; then enable_libnet=no fi AC_DEFINE_UNQUOTED(OCF_ROOT_DIR,"$OCF_ROOT_DIR", OCF root directory - specified by the OCF standard) AC_SUBST(OCF_ROOT_DIR) GLUE_STATE_DIR=${localstatedir}/run AC_DEFINE_UNQUOTED(GLUE_STATE_DIR,"$GLUE_STATE_DIR", Where to keep state files and sockets) AC_SUBST(GLUE_STATE_DIR) AC_DEFINE_UNQUOTED(HA_VARRUNDIR,"$GLUE_STATE_DIR", Where Heartbeat keeps state files and sockets - old name) HA_VARRUNDIR="$GLUE_STATE_DIR" AC_SUBST(HA_VARRUNDIR) # Expand $prefix eval HA_RSCTMPDIR="`eval echo ${HA_RSCTMPDIR}`" AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resource agents keep state files) AC_SUBST(HA_RSCTMPDIR) dnl Eventually move out of the 
heartbeat dir tree and create symlinks when needed HA_VARLIBHBDIR=${localstatedir}/lib/heartbeat AC_DEFINE_UNQUOTED(HA_VARLIBHBDIR,"$HA_VARLIBHBDIR", Whatever this used to mean) AC_SUBST(HA_VARLIBHBDIR) OCF_RA_DIR="${OCF_ROOT_DIR}/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) OCF_RA_DIR_PREFIX="$OCF_RA_DIR" AC_SUBST(OCF_RA_DIR_PREFIX) OCF_LIB_DIR="${OCF_ROOT_DIR}/lib" AC_DEFINE_UNQUOTED(OCF_LIB_DIR,"$OCF_LIB_DIR", Location for shared code for OCF RAs) AC_SUBST(OCF_LIB_DIR) OCF_LIB_DIR_PREFIX="$OCF_LIB_DIR" AC_SUBST(OCF_LIB_DIR_PREFIX) dnl =============================================== dnl rgmanager ras bits dnl =============================================== LOGDIR=${localstatedir}/log/cluster CLUSTERDATA=${datadir}/cluster AC_SUBST([LOGDIR]) AC_SUBST([CLUSTERDATA]) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH AC_CHECK_PROGS(MAKE, gmake make) AC_CHECK_PROGS(SHELLCHECK, shellcheck) AM_CONDITIONAL(CI_CHECKS, test "x$SHELLCHECK" != "x" ) AC_PATH_PROGS(BASH_SHELL, bash) if test x"${BASH_SHELL}" = x""; then AC_MSG_ERROR(You need bash installed in order to build ${PACKAGE}) fi AC_PATH_PROGS(XSLTPROC, xsltproc) AM_CONDITIONAL(BUILD_DOC, test "x$XSLTPROC" != "x" ) if test "x$XSLTPROC" = "x"; then AC_MSG_WARN([xsltproc not installed, unable to (re-)build manual pages]) fi AC_SUBST(XSLTPROC) AC_PATH_PROGS(XMLCATALOG, xmlcatalog) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PING, ping, /bin/ping) AC_PATH_PROGS(IFCONFIG, ifconfig, /sbin/ifconfig) AC_PATH_PROGS(MAILCMD, mailx mail, mail) AC_PATH_PROGS(EGREP, egrep) AC_PATH_PROGS(RM, rm) AC_SUBST(BASH_SHELL) AC_SUBST(MAILCMD) AC_SUBST(EGREP) AC_SUBST(SHELL) AC_SUBST(PING) AC_SUBST(RM) AC_SUBST(TEST) dnl Ensure PYTHON is an absolute path AC_PATH_PROG([PYTHON], [$PYTHON]) AM_PATH_PYTHON if test -z "$PYTHON"; then echo "*** Essential program python not found" 1>&2 fi AC_PYTHON_MODULE(googleapiclient) AC_PYTHON_MODULE(json) AC_PYTHON_MODULE(pyroute2) AS_VERSION_COMPARE([$PYTHON_VERSION], [2.7], [BUILD_OCF_PY=0], [BUILD_OCF_PY=1], [BUILD_OCF_PY=1]) BUILD_AZURE_EVENTS=1 if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then BUILD_AZURE_EVENTS=0 AC_MSG_WARN("Not building azure-events") fi AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1) +BUILD_AZURE_EVENTS_AZ=1 +if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then + BUILD_AZURE_EVENTS_AZ=0 + AC_MSG_WARN("Not building azure-events-az") +fi +AM_CONDITIONAL(BUILD_AZURE_EVENTS_AZ, test $BUILD_AZURE_EVENTS_AZ -eq 1) + BUILD_GCP_PD_MOVE=1 if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then BUILD_GCP_PD_MOVE=0 AC_MSG_WARN("Not building gcp-pd-move") fi AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1) BUILD_GCP_VPC_MOVE_ROUTE=1 if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || \ test "x${HAVE_PYMOD_PYROUTE2}" != xyes || test $BUILD_OCF_PY -eq 0; then BUILD_GCP_VPC_MOVE_ROUTE=0 AC_MSG_WARN("Not building gcp-vpc-move-route") fi AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1) BUILD_GCP_VPC_MOVE_VIP=1 if test -z "$PYTHON" || test "x${HAVE_PYMOD_GOOGLEAPICLIENT}" != xyes || test $BUILD_OCF_PY -eq 0; then BUILD_GCP_VPC_MOVE_VIP=0 AC_MSG_WARN("Not building 
gcp-vpc-move-vip") fi AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1) AC_PATH_PROGS(ROUTE, route) AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command) AC_MSG_CHECKING(ifconfig option to list interfaces) for IFCONFIG_A_OPT in "-A" "-a" "" do $IFCONFIG $IFCONFIG_A_OPT > /dev/null 2>&1 if test "$?" = 0 then AC_DEFINE_UNQUOTED(IFCONFIG_A_OPT, "$IFCONFIG_A_OPT", option for ifconfig command) AC_MSG_RESULT($IFCONFIG_A_OPT) break fi done AC_SUBST(IFCONFIG_A_OPT) if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi STYLESHEET_PREFIX="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' STYLESHEET_PREFIX=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI} \ | sed -n 's|^file://||p;q') if test x"${STYLESHEET_PREFIX}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) if test x"${DIRS}" = x""; then # when datadir is not standard OS path, we cannot find docbook.xsl # use standard OS path as backup DIRS=$(find "/usr/share" "/usr/local/share" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) fi XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then STYLESHEET_PREFIX=$(echo "${d}" | sed 's/\/manpages//') break fi done fi if test x"${STYLESHEET_PREFIX}" = x""; then AC_MSG_ERROR(You need docbook-style-xsl installed in order to build ${PACKAGE}) fi fi AC_MSG_RESULT($STYLESHEET_PREFIX) AC_SUBST(STYLESHEET_PREFIX) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) AC_CHECK_LIB(gnugetopt, getopt_long) dnl if available if test "x${enable_thread_safe}" = "xyes"; then GPKGNAME="gthread-2.0" else GPKGNAME="glib-2.0" fi PKG_CHECK_MODULES([GLIB], [$GPKGNAME]) CPPFLAGS="$CPPFLAGS $GLIB_CFLAGS" LIBS="$LIBS $GLIB_LIBS" dnl ======================================================================== dnl Headers dnl ======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS([arpa/inet.h]) AC_CHECK_HEADERS([fcntl.h]) AC_CHECK_HEADERS([limits.h]) AC_CHECK_HEADERS([malloc.h]) AC_CHECK_HEADERS([netdb.h]) AC_CHECK_HEADERS([netinet/in.h]) AC_CHECK_HEADERS([sys/file.h]) AC_CHECK_HEADERS([sys/ioctl.h]) AC_CHECK_HEADERS([sys/param.h]) AC_CHECK_HEADERS([sys/time.h]) AC_CHECK_HEADERS([syslog.h]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_FUNC_FORK AC_FUNC_STRNLEN AC_CHECK_FUNCS([alarm gettimeofday inet_ntoa memset mkdir socket uname]) AC_CHECK_FUNCS([strcasecmp strchr strdup strerror strrchr strspn strstr strtol strtoul]) AC_PATH_PROGS(REBOOT, reboot, /sbin/reboot) AC_SUBST(REBOOT) AC_SUBST(REBOOT_OPTIONS) AC_DEFINE_UNQUOTED(REBOOT, "$REBOOT", path to the reboot command) AC_DEFINE_UNQUOTED(REBOOT_OPTIONS, "$REBOOT_OPTIONS", reboot options) AC_PATH_PROGS(POWEROFF_CMD, poweroff, /sbin/poweroff) AC_SUBST(POWEROFF_CMD) AC_SUBST(POWEROFF_OPTIONS) AC_DEFINE_UNQUOTED(POWEROFF_CMD, "$POWEROFF_CMD", path to the poweroff command) 
AC_DEFINE_UNQUOTED(POWEROFF_OPTIONS, "$POWEROFF_OPTIONS", poweroff options) AC_PATH_PROGS(POD2MAN, pod2man) AM_CONDITIONAL(BUILD_POD_DOC, test "x$POD2MAN" != "x" ) if test "x$POD2MAN" = "x"; then AC_MSG_WARN([pod2man not installed, unable to (re-)build ldirector manual page]) fi AC_SUBST(POD2MAN) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) dnl ======================================================================== dnl sfex dnl ======================================================================== build_sfex=no case $host_os in *Linux*|*linux*) if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then build_sfex=yes fi ;; esac AM_CONDITIONAL(BUILD_SFEX, test "$build_sfex" = "yes" ) dnl ======================================================================== dnl tickle (needs port to BSD platforms) dnl ======================================================================== AC_CHECK_MEMBERS([struct iphdr.saddr],,,[[#include ]]) AM_CONDITIONAL(BUILD_TICKLE, test "$ac_cv_member_struct_iphdr_saddr" = "yes" ) dnl ======================================================================== dnl libnet dnl ======================================================================== libnet="" libnet_version="none" LIBNETLIBS="" LIBNETDEFINES="" AC_MSG_CHECKING(if libnet is required) libnet_fatal=$enable_libnet case $enable_libnet in no) ;; yes|libnet10|libnet11|10|11) libnet_fatal=yes;; try) case $host_os in *Linux*|*linux*) libnet_fatal=no;; *) libnet_fatal=yes;; dnl legacy behavior esac ;; *) libnet_fatal=yes; enable_libnet=try;; esac AC_MSG_RESULT($libnet_fatal) if test "x$enable_libnet" != "xno"; then AC_PATH_PROGS(LIBNETCONFIG, libnet-config) AC_CHECK_LIB(nsl, t_open) dnl -lnsl AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(net, libnet_get_hwaddr, LIBNETLIBS=" -lnet", []) fi AC_MSG_CHECKING(for libnet) if test "x$LIBNETLIBS" != "x" -o "x$enable_libnet" = "xlibnet11"; then LIBNETDEFINES="" if test "$ac_cv_lib_nsl_t_open" = yes; then LIBNETLIBS="-lnsl $LIBNETLIBS" fi if test "$ac_cv_lib_socket_socket" = yes; then LIBNETLIBS="-lsocket $LIBNETLIBS" fi libnet=net libnet_version="libnet1.1" fi if test "x$enable_libnet" = "xtry" -o "x$enable_libnet" = "xlibnet10"; then if test "x$LIBNETLIBS" = x -a "x${LIBNETCONFIG}" != "x" ; then LIBNETDEFINES="`$LIBNETCONFIG --defines` `$LIBNETCONFIG --cflags`"; LIBNETLIBS="`$LIBNETCONFIG --libs`"; libnet_version="libnet1.0 (old)" case $LIBNETLIBS in *-l*) libnet=`echo $LIBNETLIBS | sed 's%.*-l%%'`;; *) libnet_version=none;; esac CPPFLAGS="$CPPFLAGS $LIBNETDEFINES" AC_CHECK_HEADERS(libnet.h) if test "$ac_cv_header_libnet_h" = no; then libnet_version=none fi fi fi AC_MSG_RESULT(found $libnet_version) if test "$libnet_version" = none; then LIBNETLIBS="" LIBNETDEFINES="" if test $libnet_fatal = yes; then AC_MSG_ERROR(libnet not found) fi else AC_CHECK_LIB($libnet,libnet_init, [new_libnet=yes; AC_DEFINE(HAVE_LIBNET_1_1_API, 1, Libnet 1.1 API)], [new_libnet=no; AC_DEFINE(HAVE_LIBNET_1_0_API, 1, Libnet 1.0 API)],$LIBNETLIBS) AC_SUBST(LIBNETLIBS) fi if test "$new_libnet" = yes; then AC_MSG_CHECKING(for libnet API 1.1.4: ) save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -fgnu89-inline -Wall -Werror" AC_COMPILE_IFELSE([ AC_LANG_SOURCE(#include int main(){libnet_t *l=NULL; libnet_pblock_record_ip_offset(l, l->total_size); return(0); })], [AC_MSG_RESULT(no)], 
[AC_DEFINE(HAVE_LIBNET_1_1_4_API, 1, Libnet 1.1.4 API) AC_MSG_RESULT(yes)]) CFLAGS="$save_CFLAGS" fi sendarp_linux=0 case $host_os in *Linux*|*linux*) sendarp_linux=1;; esac redhat_based=0 AC_CHECK_FILE(/etc/redhat-release, [redhat_based=1]) AC_SUBST(LIBNETLIBS) AC_SUBST(LIBNETDEFINES) AM_CONDITIONAL(SENDARP_LINUX, test $sendarp_linux = 1 ) AM_CONDITIONAL(USE_LIBNET, test "x$libnet_version" != "xnone" ) AM_CONDITIONAL(NFSCONVERT, test $redhat_based = 1 ) dnl ************************************************************************ dnl * Check for netinet/icmp6.h to enable the IPv6addr resource agent AC_CHECK_HEADERS(netinet/icmp6.h,[],[],[#include ]) AM_CONDITIONAL(USE_IPV6ADDR_AGENT, test "$ac_cv_header_netinet_icmp6_h" = yes && test "$ac_cv_header_heartbeat_glue_config_h" = yes) AM_CONDITIONAL(IPV6ADDR_COMPATIBLE, test "$ac_cv_header_netinet_icmp6_h" = yes) dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. CC_ERRORS="" CC_EXTRAS="" if export -p | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" enable_fatal_warnings=no else CFLAGS="$CFLAGS -ggdb3" # We had to eliminate -Wnested-externs because of libtool changes # Also remove -Waggregate-return because we use one libnet # call which returns a struct EXTRA_FLAGS="-fgnu89-inline -fstack-protector-all -Wall -Wbad-function-cast -Wcast-qual -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat=2 -Wformat-security -Wformat-nonliteral -Winline -Wmissing-prototypes -Wmissing-declarations -Wmissing-format-attribute -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wunsigned-char -Wwrite-strings -Wno-maybe-uninitialized" # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $j then CC_EXTRAS="$CC_EXTRAS $j" fi done dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'` AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4) dnl System specific options case "$host_os" in *linux*|*bsd*) if test "${enable_fatal_warnings}" = "unknown"; then enable_fatal_warnings=yes fi ;; esac if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then enable_fatal_warnings=yes else enable_fatal_warnings=no fi if test "x${enable_ansi}" != xno && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LOCALE) AC_SUBST(CC) AC_SUBST(MAKE) dnl The Makefiles and shell scripts we output AC_CONFIG_FILES(Makefile \ resource-agents.pc \ include/Makefile \ heartbeat/Makefile \ heartbeat/ocf-binaries \ heartbeat/ocf-directories \ heartbeat/ocf-shellfuncs \ heartbeat/shellfuncs \ systemd/Makefile \ systemd/resource-agents.conf \ tools/Makefile \ tools/nfsconvert \ tools/ocf-tester \ tools/ocft/Makefile \ tools/ocft/ocft \ tools/ocft/caselib \ tools/ocft/README \ tools/ocft/README.zh_CN \ ldirectord/Makefile \ ldirectord/ldirectord \ ldirectord/init.d/Makefile \ ldirectord/init.d/ldirectord \ ldirectord/init.d/ldirectord.debian \ ldirectord/init.d/ldirectord.debian.default \ ldirectord/systemd/Makefile \ ldirectord/systemd/ldirectord.service \ ldirectord/logrotate.d/Makefile \ ldirectord/OCF/Makefile \ ldirectord/OCF/ldirectord \ doc/Makefile \ doc/man/Makefile \ rgmanager/Makefile \ rgmanager/src/Makefile \ rgmanager/src/resources/Makefile \ rgmanager/src/resources/ocf-shellfuncs \ rgmanager/src/resources/svclib_nfslock \ rgmanager/src/resources/lvm_by_lv.sh \ rgmanager/src/resources/lvm_by_vg.sh \ rgmanager/src/resources/utils/Makefile \ rgmanager/src/resources/utils/fs-lib.sh \ rgmanager/src/resources/utils/messages.sh \ rgmanager/src/resources/utils/config-utils.sh \ rgmanager/src/resources/utils/member_util.sh \ rgmanager/src/resources/utils/ra-skelet.sh \ ) dnl Files we output that need to be executable AC_CONFIG_FILES([heartbeat/azure-events], [chmod +x heartbeat/azure-events]) +AC_CONFIG_FILES([heartbeat/azure-events-az], [chmod +x heartbeat/azure-events-az]) AC_CONFIG_FILES([heartbeat/AoEtarget], [chmod +x heartbeat/AoEtarget]) AC_CONFIG_FILES([heartbeat/ManageRAID], [chmod +x heartbeat/ManageRAID]) AC_CONFIG_FILES([heartbeat/ManageVE], [chmod +x heartbeat/ManageVE]) AC_CONFIG_FILES([heartbeat/Squid], [chmod +x heartbeat/Squid]) AC_CONFIG_FILES([heartbeat/SysInfo], [chmod +x heartbeat/SysInfo]) AC_CONFIG_FILES([heartbeat/aws-vpc-route53], [chmod +x heartbeat/aws-vpc-route53]) AC_CONFIG_FILES([heartbeat/clvm], [chmod +x heartbeat/clvm]) AC_CONFIG_FILES([heartbeat/conntrackd], [chmod +x heartbeat/conntrackd]) AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate]) AC_CONFIG_FILES([heartbeat/dummypy], [chmod +x heartbeat/dummypy]) AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) AC_CONFIG_FILES([heartbeat/galera], [chmod +x heartbeat/galera]) AC_CONFIG_FILES([heartbeat/gcp-pd-move], [chmod +x heartbeat/gcp-pd-move]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-route], [chmod +x heartbeat/gcp-vpc-move-route]) AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit]) AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget]) AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira]) AC_CONFIG_FILES([heartbeat/kamailio], [chmod +x heartbeat/kamailio]) AC_CONFIG_FILES([heartbeat/lxc], [chmod +x heartbeat/lxc]) AC_CONFIG_FILES([heartbeat/lxd-info], [chmod +x heartbeat/lxd-info]) AC_CONFIG_FILES([heartbeat/machine-info], [chmod +x 
heartbeat/machine-info]) AC_CONFIG_FILES([heartbeat/mariadb], [chmod +x heartbeat/mariadb]) AC_CONFIG_FILES([heartbeat/mpathpersist], [chmod +x heartbeat/mpathpersist]) AC_CONFIG_FILES([heartbeat/nfsnotify], [chmod +x heartbeat/nfsnotify]) AC_CONFIG_FILES([heartbeat/openstack-info], [chmod +x heartbeat/openstack-info]) AC_CONFIG_FILES([heartbeat/rabbitmq-cluster], [chmod +x heartbeat/rabbitmq-cluster]) AC_CONFIG_FILES([heartbeat/redis], [chmod +x heartbeat/redis]) AC_CONFIG_FILES([heartbeat/rsyslog], [chmod +x heartbeat/rsyslog]) AC_CONFIG_FILES([heartbeat/smb-share], [chmod +x heartbeat/smb-share]) AC_CONFIG_FILES([heartbeat/sg_persist], [chmod +x heartbeat/sg_persist]) AC_CONFIG_FILES([heartbeat/slapd], [chmod +x heartbeat/slapd]) AC_CONFIG_FILES([heartbeat/storage-mon], [chmod +x heartbeat/storage-mon]) AC_CONFIG_FILES([heartbeat/sybaseASE], [chmod +x heartbeat/sybaseASE]) AC_CONFIG_FILES([heartbeat/syslog-ng], [chmod +x heartbeat/syslog-ng]) AC_CONFIG_FILES([heartbeat/vsftpd], [chmod +x heartbeat/vsftpd]) AC_CONFIG_FILES([heartbeat/CTDB], [chmod +x heartbeat/CTDB]) AC_CONFIG_FILES([rgmanager/src/resources/ASEHAagent.sh], [chmod +x rgmanager/src/resources/ASEHAagent.sh]) AC_CONFIG_FILES([rgmanager/src/resources/apache.sh], [chmod +x rgmanager/src/resources/apache.sh]) AC_CONFIG_FILES([rgmanager/src/resources/bind-mount.sh], [chmod +x rgmanager/src/resources/bind-mount.sh]) AC_CONFIG_FILES([rgmanager/src/resources/clusterfs.sh], [chmod +x rgmanager/src/resources/clusterfs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/db2.sh], [chmod +x rgmanager/src/resources/db2.sh]) AC_CONFIG_FILES([rgmanager/src/resources/drbd.sh], [chmod +x rgmanager/src/resources/drbd.sh]) AC_CONFIG_FILES([rgmanager/src/resources/fs.sh], [chmod +x rgmanager/src/resources/fs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/ip.sh], [chmod +x rgmanager/src/resources/ip.sh]) AC_CONFIG_FILES([rgmanager/src/resources/lvm.sh], [chmod +x rgmanager/src/resources/lvm.sh]) AC_CONFIG_FILES([rgmanager/src/resources/mysql.sh], [chmod +x rgmanager/src/resources/mysql.sh]) AC_CONFIG_FILES([rgmanager/src/resources/named.sh], [chmod +x rgmanager/src/resources/named.sh]) AC_CONFIG_FILES([rgmanager/src/resources/netfs.sh], [chmod +x rgmanager/src/resources/netfs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/nfsclient.sh], [chmod +x rgmanager/src/resources/nfsclient.sh]) AC_CONFIG_FILES([rgmanager/src/resources/nfsexport.sh], [chmod +x rgmanager/src/resources/nfsexport.sh]) AC_CONFIG_FILES([rgmanager/src/resources/nfsserver.sh], [chmod +x rgmanager/src/resources/nfsserver.sh]) AC_CONFIG_FILES([rgmanager/src/resources/openldap.sh], [chmod +x rgmanager/src/resources/openldap.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oracledb.sh], [chmod +x rgmanager/src/resources/oracledb.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oradg.sh], [chmod +x rgmanager/src/resources/oradg.sh]) AC_CONFIG_FILES([rgmanager/src/resources/orainstance.sh], [chmod +x rgmanager/src/resources/orainstance.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oralistener.sh], [chmod +x rgmanager/src/resources/oralistener.sh]) AC_CONFIG_FILES([rgmanager/src/resources/postgres-8.sh], [chmod +x rgmanager/src/resources/postgres-8.sh]) AC_CONFIG_FILES([rgmanager/src/resources/samba.sh], [chmod +x rgmanager/src/resources/samba.sh]) AC_CONFIG_FILES([rgmanager/src/resources/script.sh], [chmod +x rgmanager/src/resources/script.sh]) AC_CONFIG_FILES([rgmanager/src/resources/service.sh], [chmod +x rgmanager/src/resources/service.sh]) AC_CONFIG_FILES([rgmanager/src/resources/smb.sh], [chmod 
+x rgmanager/src/resources/smb.sh]) AC_CONFIG_FILES([rgmanager/src/resources/tomcat-5.sh], [chmod +x rgmanager/src/resources/tomcat-5.sh]) AC_CONFIG_FILES([rgmanager/src/resources/tomcat-6.sh], [chmod +x rgmanager/src/resources/tomcat-6.sh]) AC_CONFIG_FILES([rgmanager/src/resources/vm.sh], [chmod +x rgmanager/src/resources/vm.sh]) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION}]) AC_MSG_RESULT([ Build Version = $Format:%H$]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ Documentation = ${docdir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([ HA_BIN directory prefix = ${libexecdir}]) AC_MSG_RESULT([ RA state files = ${HA_RSCTMPDIR}]) AC_MSG_RESULT([ AIS Plugins = ${LCRSODIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CPPFLAGS = ${CPPFLAGS}]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am index cd8fd16bf..658c700ac 100644 --- a/doc/man/Makefile.am +++ b/doc/man/Makefile.am @@ -1,258 +1,262 @@ # # doc: Linux-HA resource agents # # Copyright (C) 2009 Florian Haas # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # MAINTAINERCLEANFILES = Makefile.in EXTRA_DIST = $(doc_DATA) $(REFENTRY_STYLESHEET) \ mkappendix.sh ralist.sh CLEANFILES = $(man_MANS) $(xmlfiles) metadata-*.xml STYLESHEET_PREFIX ?= http://docbook.sourceforge.net/release/xsl/current MANPAGES_STYLESHEET ?= $(STYLESHEET_PREFIX)/manpages/docbook.xsl HTML_STYLESHEET ?= $(STYLESHEET_PREFIX)/xhtml/docbook.xsl FO_STYLESHEET ?= $(STYLESHEET_PREFIX)/fo/docbook.xsl REFENTRY_STYLESHEET ?= ra2refentry.xsl XSLTPROC_OPTIONS ?= --xinclude XSLTPROC_MANPAGES_OPTIONS ?= $(XSLTPROC_OPTIONS) XSLTPROC_HTML_OPTIONS ?= $(XSLTPROC_OPTIONS) XSLTPROC_FO_OPTIONS ?= $(XSLTPROC_OPTIONS) radir = $(abs_top_builddir)/heartbeat # required for out-of-tree build symlinkstargets = \ ocf-distro ocf.py ocf-rarun ocf-returncodes \ findif.sh apache-conf.sh http-mon.sh mysql-common.sh \ nfsserver-redhat.sh openstack-common.sh ora-common.sh preptree: for i in $(symlinkstargets); do \ if [ ! -f $(radir)/$$i ]; then \ rm -rf $(radir)/$$i; \ ln -sf $(abs_top_srcdir)/heartbeat/$$i $(radir)/$$i; \ fi; \ done $(radir)/%: $(abs_top_srcdir)/heartbeat/% if [ ! -f $@ ]; then \ ln -sf $< $@; \ fi # OCF_ROOT=. 
is necessary due to a sanity check in ocf-shellfuncs # (which tests whether $OCF_ROOT points to a directory metadata-%.xml: $(radir)/% preptree OCF_ROOT=. OCF_FUNCTIONS_DIR=$(radir) $< meta-data > $@ metadata-IPv6addr.xml: $(radir)/IPv6addr OCF_ROOT=. OCF_FUNCTIONS_DIR=$(radir) $< meta-data > $@ clean-local: find $(radir) -type l -exec rm -rf {} \; # Please note: we can't name the man pages # ocf:heartbeat:. Believe me, I've tried. It looks like it # works, but then it doesn't. While make can deal correctly with # colons in target names (when properly escaped), it royally messes up # when it is deals with _dependencies_ that contain colons. See Bug # 12126 on savannah.gnu.org. But, maybe it gets fixed soon, it was # first reported in 1995 and added to Savannah in in 2005... if BUILD_DOC man_MANS = ocf_heartbeat_AoEtarget.7 \ ocf_heartbeat_AudibleAlarm.7 \ ocf_heartbeat_ClusterMon.7 \ ocf_heartbeat_CTDB.7 \ ocf_heartbeat_Delay.7 \ ocf_heartbeat_Dummy.7 \ ocf_heartbeat_EvmsSCC.7 \ ocf_heartbeat_Evmsd.7 \ ocf_heartbeat_Filesystem.7 \ ocf_heartbeat_ICP.7 \ ocf_heartbeat_IPaddr.7 \ ocf_heartbeat_IPaddr2.7 \ ocf_heartbeat_IPsrcaddr.7 \ ocf_heartbeat_LVM.7 \ ocf_heartbeat_LVM-activate.7 \ ocf_heartbeat_LinuxSCSI.7 \ ocf_heartbeat_MailTo.7 \ ocf_heartbeat_ManageRAID.7 \ ocf_heartbeat_ManageVE.7 \ ocf_heartbeat_NodeUtilization.7 \ ocf_heartbeat_Pure-FTPd.7 \ ocf_heartbeat_Raid1.7 \ ocf_heartbeat_Route.7 \ ocf_heartbeat_SAPDatabase.7 \ ocf_heartbeat_SAPInstance.7 \ ocf_heartbeat_SendArp.7 \ ocf_heartbeat_ServeRAID.7 \ ocf_heartbeat_SphinxSearchDaemon.7 \ ocf_heartbeat_Squid.7 \ ocf_heartbeat_Stateful.7 \ ocf_heartbeat_SysInfo.7 \ ocf_heartbeat_VIPArip.7 \ ocf_heartbeat_VirtualDomain.7 \ ocf_heartbeat_WAS.7 \ ocf_heartbeat_WAS6.7 \ ocf_heartbeat_WinPopup.7 \ ocf_heartbeat_Xen.7 \ ocf_heartbeat_Xinetd.7 \ ocf_heartbeat_ZFS.7 \ ocf_heartbeat_aliyun-vpc-move-ip.7 \ ocf_heartbeat_anything.7 \ ocf_heartbeat_apache.7 \ ocf_heartbeat_asterisk.7 \ ocf_heartbeat_aws-vpc-move-ip.7 \ ocf_heartbeat_aws-vpc-route53.7 \ ocf_heartbeat_awseip.7 \ ocf_heartbeat_awsvip.7 \ ocf_heartbeat_azure-lb.7 \ ocf_heartbeat_clvm.7 \ ocf_heartbeat_conntrackd.7 \ ocf_heartbeat_corosync-qnetd.7 \ ocf_heartbeat_crypt.7 \ ocf_heartbeat_db2.7 \ ocf_heartbeat_dhcpd.7 \ ocf_heartbeat_docker.7 \ ocf_heartbeat_docker-compose.7 \ ocf_heartbeat_dovecot.7 \ ocf_heartbeat_dnsupdate.7 \ ocf_heartbeat_dummypy.7 \ ocf_heartbeat_eDir88.7 \ ocf_heartbeat_ethmonitor.7 \ ocf_heartbeat_exportfs.7 \ ocf_heartbeat_fio.7 \ ocf_heartbeat_galera.7 \ ocf_heartbeat_garbd.7 \ ocf_heartbeat_gcp-ilb.7 \ ocf_heartbeat_gcp-vpc-move-ip.7 \ ocf_heartbeat_iSCSILogicalUnit.7 \ ocf_heartbeat_iSCSITarget.7 \ ocf_heartbeat_iface-bridge.7 \ ocf_heartbeat_iface-vlan.7 \ ocf_heartbeat_ipsec.7 \ ocf_heartbeat_ids.7 \ ocf_heartbeat_iscsi.7 \ ocf_heartbeat_jboss.7 \ ocf_heartbeat_jira.7 \ ocf_heartbeat_kamailio.7 \ ocf_heartbeat_lvmlockd.7 \ ocf_heartbeat_lxc.7 \ ocf_heartbeat_lxd-info.7 \ ocf_heartbeat_machine-info.7 \ ocf_heartbeat_mariadb.7 \ ocf_heartbeat_mdraid.7 \ ocf_heartbeat_minio.7 \ ocf_heartbeat_mpathpersist.7 \ ocf_heartbeat_mysql.7 \ ocf_heartbeat_mysql-proxy.7 \ ocf_heartbeat_nagios.7 \ ocf_heartbeat_named.7 \ ocf_heartbeat_nfsnotify.7 \ ocf_heartbeat_nfsserver.7 \ ocf_heartbeat_nginx.7 \ ocf_heartbeat_nvmet-subsystem.7 \ ocf_heartbeat_nvmet-namespace.7 \ ocf_heartbeat_nvmet-port.7 \ ocf_heartbeat_openstack-info.7 \ ocf_heartbeat_ocivip.7 \ ocf_heartbeat_openstack-cinder-volume.7 \ ocf_heartbeat_openstack-floating-ip.7 \ ocf_heartbeat_openstack-virtual-ip.7 \ 
ocf_heartbeat_oraasm.7 \ ocf_heartbeat_oracle.7 \ ocf_heartbeat_oralsnr.7 \ ocf_heartbeat_ovsmonitor.7 \ ocf_heartbeat_pgagent.7 \ ocf_heartbeat_pgsql.7 \ ocf_heartbeat_pingd.7 \ ocf_heartbeat_podman.7 \ ocf_heartbeat_portblock.7 \ ocf_heartbeat_postfix.7 \ ocf_heartbeat_pound.7 \ ocf_heartbeat_proftpd.7 \ ocf_heartbeat_rabbitmq-cluster.7 \ ocf_heartbeat_rabbitmq-server-ha.7 \ ocf_heartbeat_redis.7 \ ocf_heartbeat_rkt.7 \ ocf_heartbeat_rsyncd.7 \ ocf_heartbeat_rsyslog.7 \ ocf_heartbeat_scsi2reservation.7 \ ocf_heartbeat_sfex.7 \ ocf_heartbeat_slapd.7 \ ocf_heartbeat_smb-share.7 \ ocf_heartbeat_sybaseASE.7 \ ocf_heartbeat_sg_persist.7 \ ocf_heartbeat_storage-mon.7 \ ocf_heartbeat_symlink.7 \ ocf_heartbeat_syslog-ng.7 \ ocf_heartbeat_tomcat.7 \ ocf_heartbeat_varnish.7 \ ocf_heartbeat_vdo-vol.7 \ ocf_heartbeat_vmware.7 \ ocf_heartbeat_vsftpd.7 \ ocf_heartbeat_zabbixserver.7 if USE_IPV6ADDR_AGENT man_MANS += ocf_heartbeat_IPv6addr.7 endif if BUILD_AZURE_EVENTS man_MANS += ocf_heartbeat_azure-events.7 endif +if BUILD_AZURE_EVENTS_AZ +man_MANS += ocf_heartbeat_azure-events-az.7 +endif + if BUILD_GCP_PD_MOVE man_MANS += ocf_heartbeat_gcp-pd-move.7 endif if BUILD_GCP_VPC_MOVE_ROUTE man_MANS += ocf_heartbeat_gcp-vpc-move-route.7 endif if BUILD_GCP_VPC_MOVE_VIP man_MANS += ocf_heartbeat_gcp-vpc-move-vip.7 endif xmlfiles = $(man_MANS:.7=.xml) %.1 %.5 %.7 %.8: %.xml $(XSLTPROC) \ $(XSLTPROC_MANPAGES_OPTIONS) \ $(MANPAGES_STYLESHEET) $< ocf_heartbeat_%.xml: metadata-%.xml $(srcdir)/$(REFENTRY_STYLESHEET) $(XSLTPROC) --novalid \ --stringparam package $(PACKAGE_NAME) \ --stringparam version $(VERSION) \ --output $@ \ $(srcdir)/$(REFENTRY_STYLESHEET) $< ocf_resource_agents.xml: $(xmlfiles) mkappendix.sh ./mkappendix.sh $(xmlfiles) > $@ %.html: %.xml $(XSLTPROC) \ $(XSLTPROC_HTML_OPTIONS) \ --output $@ \ $(HTML_STYLESHEET) $< xml: ocf_resource_agents.xml endif diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am index 20d41e36a..1133dc13e 100644 --- a/heartbeat/Makefile.am +++ b/heartbeat/Makefile.am @@ -1,244 +1,248 @@ # Makefile.am for OCF RAs # # Author: Sun Jing Dong # Copyright (C) 2004 IBM # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# MAINTAINERCLEANFILES = Makefile.in EXTRA_DIST = $(ocf_SCRIPTS) $(ocfcommon_DATA) \ $(common_DATA) $(hb_DATA) $(dtd_DATA) \ README README.galera AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/linux-ha halibdir = $(libexecdir)/heartbeat ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat dtddir = $(datadir)/$(PACKAGE_NAME) dtd_DATA = ra-api-1.dtd metadata.rng ocf_PROGRAMS = if USE_IPV6ADDR_AGENT ocf_PROGRAMS += IPv6addr endif halib_PROGRAMS = if IPV6ADDR_COMPATIBLE halib_PROGRAMS += send_ua endif IPv6addr_SOURCES = IPv6addr.c IPv6addr_utils.c IPv6addr_LDADD = -lplumb $(LIBNETLIBS) send_ua_SOURCES = send_ua.c IPv6addr_utils.c send_ua_LDADD = $(LIBNETLIBS) ocf_SCRIPTS = AoEtarget \ AudibleAlarm \ ClusterMon \ CTDB \ Delay \ Dummy \ EvmsSCC \ Evmsd \ Filesystem \ ICP \ IPaddr \ IPaddr2 \ IPsrcaddr \ LVM \ LinuxSCSI \ lvmlockd \ LVM-activate \ MailTo \ ManageRAID \ ManageVE \ NodeUtilization \ Pure-FTPd \ Raid1 \ Route \ SAPDatabase \ SAPInstance \ SendArp \ ServeRAID \ SphinxSearchDaemon \ Squid \ Stateful \ SysInfo \ VIPArip \ VirtualDomain \ WAS \ WAS6 \ WinPopup \ Xen \ Xinetd \ ZFS \ aliyun-vpc-move-ip \ anything \ apache \ asterisk \ aws-vpc-move-ip \ aws-vpc-route53 \ awseip \ awsvip \ azure-lb \ clvm \ conntrackd \ corosync-qnetd \ crypt \ db2 \ dhcpd \ dnsupdate \ dummypy \ docker \ docker-compose \ dovecot \ eDir88 \ ethmonitor \ exportfs \ fio \ galera \ garbd \ gcp-ilb \ gcp-vpc-move-ip \ iSCSILogicalUnit \ iSCSITarget \ ids \ iface-bridge \ iface-vlan \ ipsec \ iscsi \ jboss \ jira \ kamailio \ lxc \ lxd-info \ machine-info \ mariadb \ mdraid \ minio \ mysql \ mysql-proxy \ nagios \ named \ nfsnotify \ nfsserver \ nginx \ nvmet-subsystem \ nvmet-namespace \ nvmet-port \ ocivip \ openstack-cinder-volume \ openstack-floating-ip \ openstack-info \ openstack-virtual-ip \ oraasm \ oracle \ oralsnr \ ovsmonitor \ pgagent \ pgsql \ pingd \ podman \ portblock \ postfix \ pound \ proftpd \ rabbitmq-cluster \ rabbitmq-server-ha \ redis \ rkt \ rsyncd \ rsyslog \ scsi2reservation \ sfex \ sg_persist \ mpathpersist \ slapd \ smb-share \ storage-mon \ sybaseASE \ symlink \ syslog-ng \ tomcat \ varnish \ vdo-vol \ vmware \ vsftpd \ zabbixserver if BUILD_AZURE_EVENTS ocf_SCRIPTS += azure-events endif +if BUILD_AZURE_EVENTS_AZ +ocf_SCRIPTS += azure-events-az +endif + if BUILD_GCP_PD_MOVE ocf_SCRIPTS += gcp-pd-move endif if BUILD_GCP_VPC_MOVE_ROUTE ocf_SCRIPTS += gcp-vpc-move-route endif if BUILD_GCP_VPC_MOVE_VIP ocf_SCRIPTS += gcp-vpc-move-vip endif ocfcommondir = $(OCF_LIB_DIR_PREFIX)/heartbeat ocfcommon_DATA = ocf-shellfuncs \ ocf-binaries \ ocf-directories \ ocf-returncodes \ ocf-rarun \ ocf-distro \ apache-conf.sh \ http-mon.sh \ sapdb-nosha.sh \ sapdb.sh \ lvm-clvm.sh \ lvm-plain.sh \ lvm-tag.sh \ openstack-common.sh \ ora-common.sh \ mysql-common.sh \ nfsserver-redhat.sh \ findif.sh \ ocf.py # Legacy locations hbdir = $(sysconfdir)/ha.d hb_DATA = shellfuncs check: $(ocf_SCRIPTS:=.check) %.check: % OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) ./$< meta-data | xmllint --path $(abs_srcdir) --noout --relaxng $(abs_srcdir)/metadata.rng - do_spellcheck = printf '[%s]\n' "$(agent)"; \ OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) \ ./$(agent) meta-data 2>/dev/null \ | xsltproc $(top_srcdir)/make/extract_text.xsl - \ | aspell pipe list -d en_US --ignore-case \ --home-dir=$(top_srcdir)/make -p spellcheck-ignore \ | sed -n 's|^&\([^:]*\):.*|\1|p'; spellcheck: @$(foreach agent,$(ocf_SCRIPTS), $(do_spellcheck)) clean-local: rm -rf __pycache__ *.pyc diff --git 
a/heartbeat/azure-events-az.in b/heartbeat/azure-events-az.in new file mode 100644 index 000000000..59d095306 --- /dev/null +++ b/heartbeat/azure-events-az.in @@ -0,0 +1,771 @@ +#!@PYTHON@ -tt +# +# Resource agent for monitoring Azure Scheduled Events +# +# License: GNU General Public License (GPL) +# (c) 2018 Tobias Niekamp, Microsoft Corp. +# and Linux-HA contributors + +import os +import sys +import time +import subprocess +import json +try: + import urllib2 + from urllib2 import URLError +except ImportError: + import urllib.request as urllib2 + from urllib.error import URLError +import socket +from collections import defaultdict + +OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT")) +sys.path.append(OCF_FUNCTIONS_DIR) +import ocf + +############################################################################## + + +VERSION = "0.10" +USER_AGENT = "Pacemaker-ResourceAgent/%s %s" % (VERSION, ocf.distro()) + +attr_globalPullState = "azure-events-az_globalPullState" +attr_lastDocVersion = "azure-events-az_lastDocVersion" +attr_curNodeState = "azure-events-az_curNodeState" +attr_pendingEventIDs = "azure-events-az_pendingEventIDs" +attr_healthstate = "#health-azure" + +default_loglevel = ocf.logging.INFO +default_relevantEventTypes = set(["Reboot", "Redeploy"]) + +global_pullMaxAttempts = 3 +global_pullDelaySecs = 1 + +############################################################################## + +class attrDict(defaultdict): + """ + A wrapper for accessing dict keys like an attribute + """ + def __init__(self, data): + super(attrDict, self).__init__(attrDict) + for d in data.keys(): + self.__setattr__(d, data[d]) + + def __getattr__(self, key): + try: + return self[key] + except KeyError: + raise AttributeError(key) + + def __setattr__(self, key, value): + self[key] = value + +############################################################################## + +class azHelper: + """ + Helper class for Azure's metadata API (including Scheduled Events) + """ + metadata_host = "http://169.254.169.254/metadata" + instance_api = "instance" + events_api = "scheduledevents" + api_version = "2019-08-01" + + @staticmethod + def _sendMetadataRequest(endpoint, postData=None): + """ + Send a request to Azure's Azure Metadata Service API + """ + url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) + data = "" + ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) + ocf.logger.debug("_sendMetadataRequest: url = %s" % url) + + if postData and type(postData) != bytes: + postData = postData.encode() + + req = urllib2.Request(url, postData) + req.add_header("Metadata", "true") + req.add_header("User-Agent", USER_AGENT) + try: + resp = urllib2.urlopen(req) + except URLError as e: + if hasattr(e, 'reason'): + ocf.logger.warning("Failed to reach the server: %s" % e.reason) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + elif hasattr(e, 'code'): + ocf.logger.warning("The server couldn\'t fulfill the request. 
Error code: %s" % e.code) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + else: + data = resp.read() + ocf.logger.debug("_sendMetadataRequest: response = %s" % data) + + if data: + data = json.loads(data) + + ocf.logger.debug("_sendMetadataRequest: finished") + return data + + @staticmethod + def getInstanceInfo(): + """ + Fetch details about the current VM from Azure's Azure Metadata Service API + """ + ocf.logger.debug("getInstanceInfo: begin") + + jsondata = azHelper._sendMetadataRequest(azHelper.instance_api) + ocf.logger.debug("getInstanceInfo: json = %s" % jsondata) + + if jsondata: + ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) + return attrDict(jsondata["compute"]) + else: + ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info") + sys.exit(ocf.OCF_ERR_GENERIC) + + @staticmethod + def pullScheduledEvents(): + """ + Retrieve all currently scheduled events via Azure Metadata Service API + """ + ocf.logger.debug("pullScheduledEvents: begin") + + jsondata = azHelper._sendMetadataRequest(azHelper.events_api) + ocf.logger.debug("pullScheduledEvents: json = %s" % jsondata) + + ocf.logger.debug("pullScheduledEvents: finished") + return attrDict(jsondata) + + @staticmethod + def forceEvents(eventIDs): + """ + Force a set of events to start immediately + """ + ocf.logger.debug("forceEvents: begin") + + events = [] + for e in eventIDs: + events.append({ + "EventId": e, + }) + postData = { + "StartRequests" : events + } + ocf.logger.info("forceEvents: postData = %s" % postData) + resp = azHelper._sendMetadataRequest(azHelper.events_api, postData=json.dumps(postData)) + + ocf.logger.debug("forceEvents: finished") + return + +############################################################################## + +class clusterHelper: + """ + Helper functions for Pacemaker control via crm + """ + @staticmethod + def _getLocation(node): + """ + Helper function to retrieve local/global attributes + """ + if node: + return ["--node", node] + else: + return ["--type", "crm_config"] + + @staticmethod + def _exec(command, *args): + """ + Helper function to execute a UNIX command + """ + args = list(args) + ocf.logger.debug("_exec: begin; command = %s, args = %s" % (command, str(args))) + + def flatten(*n): + return (str(e) for a in n + for e in (flatten(*a) if isinstance(a, (tuple, list)) else (str(a),))) + command = list(flatten([command] + args)) + ocf.logger.debug("_exec: cmd = %s" % " ".join(command)) + try: + ret = subprocess.check_output(command) + if type(ret) != str: + ret = ret.decode() + ocf.logger.debug("_exec: return = %s" % ret) + return ret.rstrip() + except Exception as err: + ocf.logger.exception(err) + return None + + @staticmethod + def setAttr(key, value, node=None): + """ + Set the value of a specific global/local attribute in the Pacemaker cluster + """ + ocf.logger.debug("setAttr: begin; key = %s, value = %s, node = %s" % (key, value, node)) + + if value: + ret = clusterHelper._exec("crm_attribute", + "--name", key, + "--update", value, + clusterHelper._getLocation(node)) + else: + ret = clusterHelper._exec("crm_attribute", + "--name", key, + "--delete", + clusterHelper._getLocation(node)) + + ocf.logger.debug("setAttr: finished") + return len(ret) == 0 + + @staticmethod + def getAttr(key, node=None): + """ + Retrieve a global/local attribute from the Pacemaker cluster + """ + ocf.logger.debug("getAttr: begin; key = %s, node = %s" % (key, node)) + + val = clusterHelper._exec("crm_attribute", + "--name", key, + "--query", 
"--quiet", + "--default", "", + clusterHelper._getLocation(node)) + ocf.logger.debug("getAttr: finished") + if not val: + return None + return val if not val.isdigit() else int(val) + + @staticmethod + def getAllNodes(): + """ + Get a list of hostnames for all nodes in the Pacemaker cluster + """ + ocf.logger.debug("getAllNodes: begin") + + nodes = [] + nodeList = clusterHelper._exec("crm_node", "--list") + for n in nodeList.split("\n"): + nodes.append(n.split()[1]) + ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes)) + + return nodes + + @staticmethod + def getHostNameFromAzName(azName): + """ + Helper function to get the actual host name from an Azure node name + """ + return clusterHelper.getAttr("hostName_%s" % azName) + + @staticmethod + def removeHoldFromNodes(): + """ + Remove the ON_HOLD state from all nodes in the Pacemaker cluster + """ + ocf.logger.debug("removeHoldFromNodes: begin") + + for n in clusterHelper.getAllNodes(): + if clusterHelper.getAttr(attr_curNodeState, node=n) == "ON_HOLD": + clusterHelper.setAttr(attr_curNodeState, "AVAILABLE", node=n) + ocf.logger.info("removeHoldFromNodes: removed ON_HOLD from node %s" % n) + + ocf.logger.debug("removeHoldFromNodes: finished") + return False + + @staticmethod + def otherNodesAvailable(exceptNode): + """ + Check if there are any nodes (except a given node) in the Pacemaker cluster that have state AVAILABLE + """ + ocf.logger.debug("otherNodesAvailable: begin; exceptNode = %s" % exceptNode) + + for n in clusterHelper.getAllNodes(): + state = clusterHelper.getAttr(attr_curNodeState, node=n) + state = stringToNodeState(state) if state else AVAILABLE + if state == AVAILABLE and n != exceptNode.hostName: + ocf.logger.info("otherNodesAvailable: at least %s is available" % n) + ocf.logger.debug("otherNodesAvailable: finished") + return True + ocf.logger.info("otherNodesAvailable: no other nodes are available") + ocf.logger.debug("otherNodesAvailable: finished") + + return False + + @staticmethod + def transitionSummary(): + """ + Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby) + """ + # Is a global crm_simulate "too much"? Or would it be sufficient it there are no planned transitions for a particular node? 
+        # # crm_simulate -Ls
+        # Transition Summary:
+        #  * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1)
+        #  * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0)
+        #  * Move rsc_ip_HN1_HDB03 (Started hsr3-db0 -> hsr3-db1)
+        #  * Start rsc_nc_HN1_HDB03 (hsr3-db1)
+        # # Expected result when there are no pending actions:
+        # Transition Summary:
+        ocf.logger.debug("transitionSummary: begin")
+
+        summary = clusterHelper._exec("crm_simulate", "-Ls")
+        if not summary:
+            ocf.logger.warning("transitionSummary: could not load transition summary")
+            return False
+        if summary.find("Transition Summary:") < 0:
+            ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary)
+            return False
+        summary = summary.split("Transition Summary:")[1]
+        ret = summary.split("\n").pop(0)
+
+        ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret))
+        return ret
+
+    @staticmethod
+    def listOperationsOnNode(node):
+        """
+        Get a list of all current operations for a given node (used to check if any resources are pending)
+        """
+        # hsr3-db1:/home/tniek # crm_resource --list-operations -N hsr3-db0
+        # rsc_azure-events-az (ocf::heartbeat:azure-events-az): Started: rsc_azure-events-az_start_0 (node=hsr3-db0, call=91, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=115ms): complete
+        # rsc_azure-events-az (ocf::heartbeat:azure-events-az): Started: rsc_azure-events-az_monitor_10000 (node=hsr3-db0, call=93, rc=0, last-rc-change=Fri Jun 8 22:37:47 2018, exec=197ms): complete
+        # rsc_SAPHana_HN1_HDB03 (ocf::suse:SAPHana): Master: rsc_SAPHana_HN1_HDB03_start_0 (node=hsr3-db0, call=-1, rc=193, last-rc-change=Fri Jun 8 22:37:46 2018, exec=0ms): pending
+        # rsc_SAPHanaTopology_HN1_HDB03 (ocf::suse:SAPHanaTopology): Started: rsc_SAPHanaTopology_HN1_HDB03_start_0 (node=hsr3-db0, call=90, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=3214ms): complete
+        ocf.logger.debug("listOperationsOnNode: begin; node = %s" % node)
+
+        resources = clusterHelper._exec("crm_resource", "--list-operations", "-N", node)
+        if len(resources) == 0:
+            ret = []
+        else:
+            ret = resources.split("\n")
+
+        ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret))
+        return ret
+
+    @staticmethod
+    def noPendingResourcesOnNode(node):
+        """
+        Check that there are no pending resources on a given node
+        """
+        ocf.logger.debug("noPendingResourcesOnNode: begin; node = %s" % node)
+
+        for r in clusterHelper.listOperationsOnNode(node):
+            ocf.logger.debug("noPendingResourcesOnNode: * %s" % r)
+            resource = r.split()[-1]
+            if resource == "pending":
+                ocf.logger.info("noPendingResourcesOnNode: found resource %s that is still pending" % resource)
+                ocf.logger.debug("noPendingResourcesOnNode: finished; return = False")
+                return False
+        ocf.logger.info("noPendingResourcesOnNode: no pending resources on node %s" % node)
+        ocf.logger.debug("noPendingResourcesOnNode: finished; return = True")
+
+        return True
+
+    @staticmethod
+    def allResourcesStoppedOnNode(node):
+        """
+        Check that all resources on a given node are stopped
+        """
+        ocf.logger.debug("allResourcesStoppedOnNode: begin; node = %s" % node)
+
+        if clusterHelper.noPendingResourcesOnNode(node):
+            if len(clusterHelper.transitionSummary()) == 0:
+                ocf.logger.info("allResourcesStoppedOnNode: no pending resources on node %s and empty transition summary" % node)
+                ocf.logger.debug("allResourcesStoppedOnNode: finished; return = True")
+                return True
+            ocf.logger.info("allResourcesStoppedOnNode: transition summary is not empty")
ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False") + return False + + ocf.logger.info("allResourcesStoppedOnNode: still pending resources on node %s" % node) + ocf.logger.debug("allResourcesStoppedOnNode: finished; return = False") + return False + +############################################################################## + +AVAILABLE = 0 # Node is online and ready to handle events +STOPPING = 1 # Standby has been triggered, but some resources are still running +IN_EVENT = 2 # All resources are stopped, and event has been initiated via Azure Metadata Service +ON_HOLD = 3 # Node has a pending event that cannot be started there are no other nodes available + +def stringToNodeState(name): + if type(name) == int: return name + if name == "STOPPING": return STOPPING + if name == "IN_EVENT": return IN_EVENT + if name == "ON_HOLD": return ON_HOLD + return AVAILABLE + +def nodeStateToString(state): + if state == STOPPING: return "STOPPING" + if state == IN_EVENT: return "IN_EVENT" + if state == ON_HOLD: return "ON_HOLD" + return "AVAILABLE" + +############################################################################## + +class Node: + """ + Core class implementing logic for a cluster node + """ + def __init__(self, ra): + self.raOwner = ra + self.azInfo = azHelper.getInstanceInfo() + self.azName = self.azInfo.name + self.hostName = socket.gethostname() + self.setAttr("azName", self.azName) + clusterHelper.setAttr("hostName_%s" % self.azName, self.hostName) + + def getAttr(self, key): + """ + Get a local attribute + """ + return clusterHelper.getAttr(key, node=self.hostName) + + def setAttr(self, key, value): + """ + Set a local attribute + """ + return clusterHelper.setAttr(key, value, node=self.hostName) + + def selfOrOtherNode(self, node): + """ + Helper function to distinguish self/other node + """ + return node if node else self.hostName + + def setState(self, state, node=None): + """ + Set the state for a given node (or self) + """ + node = self.selfOrOtherNode(node) + ocf.logger.debug("setState: begin; node = %s, state = %s" % (node, nodeStateToString(state))) + + clusterHelper.setAttr(attr_curNodeState, nodeStateToString(state), node=node) + + ocf.logger.debug("setState: finished") + + def getState(self, node=None): + """ + Get the state for a given node (or self) + """ + node = self.selfOrOtherNode(node) + ocf.logger.debug("getState: begin; node = %s" % node) + + state = clusterHelper.getAttr(attr_curNodeState, node=node) + ocf.logger.debug("getState: state = %s" % state) + ocf.logger.debug("getState: finished") + if not state: + return AVAILABLE + return stringToNodeState(state) + + def setEventIDs(self, eventIDs, node=None): + """ + Set pending EventIDs for a given node (or self) + """ + node = self.selfOrOtherNode(node) + ocf.logger.debug("setEventIDs: begin; node = %s, eventIDs = %s" % (node, str(eventIDs))) + + if eventIDs: + eventIDStr = ",".join(eventIDs) + else: + eventIDStr = None + clusterHelper.setAttr(attr_pendingEventIDs, eventIDStr, node=node) + + ocf.logger.debug("setEventIDs: finished") + return + + def getEventIDs(self, node=None): + """ + Get pending EventIDs for a given node (or self) + """ + node = self.selfOrOtherNode(node) + ocf.logger.debug("getEventIDs: begin; node = %s" % node) + + eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node) + if eventIDStr: + eventIDs = eventIDStr.split(",") + else: + eventIDs = None + + ocf.logger.debug("getEventIDs: finished; eventIDs = %s" % str(eventIDs)) + return eventIDs + + def 
+    def updateNodeStateAndEvents(self, state, eventIDs, node=None):
+        """
+        Set the state and pending EventIDs for a given node (or self)
+        """
+        ocf.logger.debug("updateNodeStateAndEvents: begin; node = %s, state = %s, eventIDs = %s" % (node, nodeStateToString(state), str(eventIDs)))
+
+        self.setState(state, node=node)
+        self.setEventIDs(eventIDs, node=node)
+
+        ocf.logger.debug("updateNodeStateAndEvents: finished")
+        return state
+
+    def putNodeStandby(self, node=None):
+        """
+        Put self to standby
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("putNodeStandby: begin; node = %s" % node)
+
+        clusterHelper._exec("crm_attribute",
+                            "--node", node,
+                            "--name", attr_healthstate,
+                            "--update", "-1000000",
+                            "--lifetime=forever")
+
+        ocf.logger.debug("putNodeStandby: finished")
+
+    def isNodeInStandby(self, node=None):
+        """
+        Check if node is in standby
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("isNodeInStandby: begin; node = %s" % node)
+        isInStandby = False
+
+        healthAttributeStr = clusterHelper.getAttr(attr_healthstate, node)
+        if healthAttributeStr is not None:
+            try:
+                healthAttribute = int(healthAttributeStr)
+                isInStandby = healthAttribute < 0
+            except ValueError:
+                # Handle the exception
+                ocf.logger.warn("Health attribute %s on node %s cannot be converted to an integer value" % (healthAttributeStr, node))
+
+        ocf.logger.debug("isNodeInStandby: finished - result %s" % isInStandby)
+        return isInStandby
+
+    def putNodeOnline(self, node=None):
+        """
+        Put self back online
+        """
+        node = self.selfOrOtherNode(node)
+        ocf.logger.debug("putNodeOnline: begin; node = %s" % node)
+
+        clusterHelper._exec("crm_attribute",
+                            "--node", node,
+                            "--name", "#health-azure",
+                            "--update", "0",
+                            "--lifetime=forever")
+
+        ocf.logger.debug("putNodeOnline: finished")
+
+    def separateEvents(self, events):
+        """
+        Split own/other nodes' events
+        """
+        ocf.logger.debug("separateEvents: begin; events = %s" % str(events))
+
+        localEvents = []
+        remoteEvents = []
+        for e in events:
+            e = attrDict(e)
+            if e.EventType not in self.raOwner.relevantEventTypes:
+                continue
+            if self.azName in e.Resources:
+                localEvents.append(e)
+            else:
+                remoteEvents.append(e)
+        ocf.logger.debug("separateEvents: finished; localEvents = %s, remoteEvents = %s" % (str(localEvents), str(remoteEvents)))
+        return (localEvents, remoteEvents)
+
+##############################################################################
+
+class raAzEvents:
+    """
+    Main class for resource agent
+    """
+    def __init__(self, relevantEventTypes):
+        self.node = Node(self)
+        self.relevantEventTypes = relevantEventTypes
+
+    def monitor(self):
+        ocf.logger.debug("monitor: begin")
+
+        events = azHelper.pullScheduledEvents()
+
+        # get current document version
+        curDocVersion = events.DocumentIncarnation
+        lastDocVersion = self.node.getAttr(attr_lastDocVersion)
+        ocf.logger.debug("monitor: lastDocVersion = %s; curDocVersion = %s" % (lastDocVersion, curDocVersion))
+
+        # split events local/remote
+        (localEvents, remoteEvents) = self.node.separateEvents(events.Events)
+
+        # ensure local events are only executed once
+        if curDocVersion == lastDocVersion:
+            ocf.logger.info("monitor: already handled curDocVersion, skip")
+            return ocf.OCF_SUCCESS
+
+        localAzEventIDs = set()
+        for e in localEvents:
+            localAzEventIDs.add(e.EventId)
+
+        curState = self.node.getState()
+        clusterEventIDs = self.node.getEventIDs()
+
+        ocf.logger.debug("monitor: curDocVersion has not been handled yet")
+
+        if clusterEventIDs:
+            # there are pending events set, so our state must be STOPPING or IN_EVENT
+            i = 0; touchedEventIDs = False
+            while i < len(clusterEventIDs):
+                # clean up pending events that are already finished according to AZ
+                if clusterEventIDs[i] not in localAzEventIDs:
+                    ocf.logger.info("monitor: remove finished local clusterEvent %s" % (clusterEventIDs[i]))
+                    clusterEventIDs.pop(i)
+                    touchedEventIDs = True
+                else:
+                    i += 1
+            if len(clusterEventIDs) > 0:
+                # there are still pending events (either because we're still stopping, or because the event is still in place)
+                # either way, we need to wait
+                if touchedEventIDs:
+                    ocf.logger.info("monitor: added new local clusterEvent %s" % str(clusterEventIDs))
+                    self.node.setEventIDs(clusterEventIDs)
+                else:
+                    ocf.logger.info("monitor: no local clusterEvents were updated")
+            else:
+                # there are no more pending events left after cleanup
+                if clusterHelper.noPendingResourcesOnNode(self.node.hostName):
+                    # and no pending resources on the node -> set it back online
+                    ocf.logger.info("monitor: all local events finished -> clean up, put node online and AVAILABLE")
+                    curState = self.node.updateNodeStateAndEvents(AVAILABLE, None)
+                    self.node.putNodeOnline()
+                    clusterHelper.removeHoldFromNodes()
+                    # If Azure Scheduled Events are not used for 24 hours (e.g. because the cluster was asleep), it will be disabled for a VM.
+                    # When the cluster wakes up and starts using it again, the DocumentIncarnation is reset.
+                    # We need to remove it during cleanup, otherwise azure-events-az will not process the event after wakeup
+                    self.node.setAttr(attr_lastDocVersion, None)
+                else:
+                    ocf.logger.info("monitor: all local events finished, but some resources have not completed startup yet -> wait")
+        else:
+            if curState == AVAILABLE:
+                if len(localAzEventIDs) > 0:
+                    if clusterHelper.otherNodesAvailable(self.node):
+                        ocf.logger.info("monitor: can handle local events %s -> set state STOPPING" % (str(localAzEventIDs)))
+                        curState = self.node.updateNodeStateAndEvents(STOPPING, localAzEventIDs)
+                    else:
+                        ocf.logger.info("monitor: cannot handle azEvents %s (only node available) -> set state ON_HOLD" % str(localAzEventIDs))
+                        self.node.setState(ON_HOLD)
+                else:
+                    ocf.logger.debug("monitor: no local azEvents to handle")
+
+            if curState == STOPPING:
+                eventIDsForNode = {}
+                if clusterHelper.noPendingResourcesOnNode(self.node.hostName):
+                    if not self.node.isNodeInStandby():
+                        ocf.logger.info("monitor: all local resources are started properly -> put node standby and exit")
+                        self.node.putNodeStandby()
+                        return ocf.OCF_SUCCESS
+
+                    for e in localEvents:
+                        ocf.logger.info("monitor: handling remote event %s (%s; nodes = %s)" % (e.EventId, e.EventType, str(e.Resources)))
+                        # before we can force an event to start, we need to ensure all nodes involved have stopped their resources
+                        if e.EventStatus == "Scheduled":
+                            allNodesStopped = True
+                            for azName in e.Resources:
+                                hostName = clusterHelper.getHostNameFromAzName(azName)
+                                state = self.node.getState(node=hostName)
+                                if state == STOPPING:
+                                    # the only way we can continue is when node state is STOPPING, but all resources have been stopped
+                                    if not clusterHelper.allResourcesStoppedOnNode(hostName):
+                                        ocf.logger.info("monitor: (at least) node %s has still resources running -> wait" % hostName)
+                                        allNodesStopped = False
+                                        break
+                                elif state in (AVAILABLE, IN_EVENT, ON_HOLD):
+                                    ocf.logger.info("monitor: node %s is still %s -> remote event needs to be picked up locally" % (hostName, nodeStateToString(state)))
+                                    allNodesStopped = False
+                                    break
+                            if allNodesStopped:
+                                ocf.logger.info("monitor: nodes %s are stopped -> add remote event %s to force list" % (str(e.Resources), e.EventId))
+                                for n in e.Resources:
+                                    hostName = clusterHelper.getHostNameFromAzName(n)
+                                    if hostName in eventIDsForNode:
+                                        eventIDsForNode[hostName].append(e.EventId)
+                                    else:
+                                        eventIDsForNode[hostName] = [e.EventId]
+                        elif e.EventStatus == "Started":
+                            ocf.logger.info("monitor: remote event already started")
+
+                    # force the start of all events whose nodes are ready (i.e. have no more resources running)
+                    if len(eventIDsForNode.keys()) > 0:
+                        eventIDsToForce = set([item for sublist in eventIDsForNode.values() for item in sublist])
+                        ocf.logger.info("monitor: set nodes %s to IN_EVENT; force remote events %s" % (str(eventIDsForNode.keys()), str(eventIDsToForce)))
+                        for node, eventId in eventIDsForNode.items():
+                            self.node.updateNodeStateAndEvents(IN_EVENT, eventId, node=node)
+                        azHelper.forceEvents(eventIDsToForce)
+                        self.node.setAttr(attr_lastDocVersion, curDocVersion)
+                else:
+                    ocf.logger.info("monitor: some local resources are not clean yet -> wait")
+
+        ocf.logger.debug("monitor: finished")
+        return ocf.OCF_SUCCESS
+
+##############################################################################
+
+def setLoglevel(verbose):
+    # set up writing into syslog
+    loglevel = default_loglevel
+    if verbose:
+        opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1))
+        urllib2.install_opener(opener)
+        loglevel = ocf.logging.DEBUG
+    ocf.log.setLevel(loglevel)
+
+description = (
+    "Microsoft Azure Scheduled Events monitoring agent",
+    """This resource agent implements a monitor for scheduled
+(maintenance) events for a Microsoft Azure VM.
+
+If any relevant events are found, it moves all Pacemaker resources
+away from the affected node to allow for a graceful shutdown.
+
+    Deployment:
+    crm configure primitive rsc_azure-events-az ocf:heartbeat:azure-events-az \
+        op monitor interval=10s
+    crm configure clone cln_azure-events-az rsc_azure-events-az
+
+For further information on Microsoft Azure Scheduled Events, please
+refer to the following documentation:
+https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events
+""")
+
+def monitor_action(eventTypes):
+    relevantEventTypes = set(eventTypes.split(",") if eventTypes else [])
+    ra = raAzEvents(relevantEventTypes)
+    return ra.monitor()
+
+def validate_action(eventTypes):
+    if eventTypes:
+        for event in eventTypes.split(","):
+            if event not in ("Freeze", "Reboot", "Redeploy"):
+                ocf.ocf_exit_reason("Event type not one of Freeze, Reboot, Redeploy: " + eventTypes)
+                return ocf.OCF_ERR_CONFIGURED
+    return ocf.OCF_SUCCESS
+
+def main():
+    agent = ocf.Agent("azure-events-az", shortdesc=description[0], longdesc=description[1])
+    agent.add_parameter(
+        "eventTypes",
+        shortdesc="List of event types to be considered",
+        longdesc="A comma-separated list of event types that will be handled by this resource agent. (Possible values: Freeze,Reboot,Redeploy)",
+        content_type="string",
+        default="Reboot,Redeploy")
+    agent.add_parameter(
+        "verbose",
+        shortdesc="Enable verbose agent logging",
+        longdesc="Set to true to enable verbose logging",
+        content_type="boolean",
+        default="false")
+    agent.add_action("start", timeout=10, handler=lambda: ocf.OCF_SUCCESS)
+    agent.add_action("stop", timeout=10, handler=lambda: ocf.OCF_SUCCESS)
+    agent.add_action("validate-all", timeout=20, handler=validate_action)
+    agent.add_action("monitor", timeout=240, interval=10, handler=monitor_action)
+    setLoglevel(ocf.is_true(ocf.get_parameter("verbose", "false")))
+    agent.run()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
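
Note: the agent only works if the Scheduled Events endpoint is reachable from every cluster node, so a quick standalone check is useful before configuring the clone. The following is a minimal sketch for that check, not part of the patch; it assumes the documented IMDS address 169.254.169.254 and the 2020-07-01 api-version, whereas the agent itself builds its URL from the metadata_host/api_version constants defined earlier in the file. Run it on the VM itself.

#!/usr/bin/env python3
# Minimal sketch: poll the Azure Scheduled Events endpoint that azure-events-az monitors.
# Assumes the documented IMDS address and api-version; adjust if your agent uses different constants.
import json
import urllib.request

IMDS_EVENTS = "http://169.254.169.254/metadata/scheduledevents?api-version=2020-07-01"

def fetch_scheduled_events():
    # The "Metadata: true" header is mandatory for all IMDS requests.
    req = urllib.request.Request(IMDS_EVENTS, headers={"Metadata": "true"})
    with urllib.request.urlopen(req, timeout=10) as resp:
        return json.loads(resp.read().decode())

if __name__ == "__main__":
    doc = fetch_scheduled_events()
    print("DocumentIncarnation:", doc.get("DocumentIncarnation"))
    for event in doc.get("Events", []):
        print(event.get("EventId"), event.get("EventType"), event.get("Resources"))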
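
For readers tracing monitor(): the in-place while/pop loop reconciles the node's pending event IDs (stored as a cluster attribute) with the events IMDS still reports, dropping any that have finished. The sketch below restates that step as a pure function; prune_finished_events is a hypothetical helper used only for illustration, the agent keeps the loop inline.

def prune_finished_events(cluster_event_ids, local_az_event_ids):
    # Keep only pending cluster events that Azure still reports; the rest have finished.
    remaining = [e for e in cluster_event_ids if e in local_az_event_ids]
    touched = len(remaining) != len(cluster_event_ids)
    return remaining, touched

# Example: event "B" is no longer reported by IMDS, so it is pruned.
print(prune_finished_events(["A", "B"], {"A"}))  # (['A'], True)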