diff --git a/configure.ac b/configure.ac index 9ba44468e..d5c57d06f 100644 --- a/configure.ac +++ b/configure.ac @@ -1,1044 +1,1044 @@ dnl dnl autoconf for Agents dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.63) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services AC_INIT([resource-agents], m4_esyscmd([make/git-version-gen .tarball-version]), [to_be_defined@foobar.org]) AC_USE_SYSTEM_EXTENSIONS CRM_DTD_VERSION="1.0" PKG_FEATURES="" AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except lha_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/agent_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/agent_config.h.in to have configure include new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/agent_config.h) ALL_LINGUAS="en fr" AC_ARG_WITH(version, [ --with-version=version Override package version (if you're a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) AC_PATH_PROGS(PKGCONFIG, pkg-config) if test x"${PKGCONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi AC_ARG_WITH([systemdsystemunitdir], 
[AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [Directory for systemd service files])],, [with_systemdsystemunitdir=auto]) AS_IF([test "x$with_systemdsystemunitdir" = "xyes" -o "x$with_systemdsystemunitdir" = "xauto"], [ def_systemdsystemunitdir=$($PKGCONFIG --variable=systemdsystemunitdir systemd) AS_IF([test "x$def_systemdsystemunitdir" = "x"], [AS_IF([test "x$with_systemdsystemunitdir" = "xyes"], [AC_MSG_ERROR([systemd support requested but pkg-config unable to query systemd package])]) with_systemdsystemunitdir=no], [with_systemdsystemunitdir="$def_systemdsystemunitdir"])]) AS_IF([test "x$with_systemdsystemunitdir" != "xno"], [AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])]) AM_CONDITIONAL([HAVE_SYSTEMD], [test "x$with_systemdsystemunitdir" != "xno"]) AC_ARG_WITH([systemdtmpfilesdir], AS_HELP_STRING([--with-systemdtmpfilesdir=DIR], [Directory for systemd tmp files]), [], [with_systemdtmpfilesdir=$($PKGCONFIG --variable=tmpfilesdir systemd)]) if test "x$with_systemdtmpfilesdir" != xno; then AC_SUBST([systemdtmpfilesdir], [$with_systemdtmpfilesdir]) fi AM_CONDITIONAL(HAVE_SYSTEMD, [test -n "$with_systemdtmpfilesdir" -a "x$with_systemdtmpfilesdir" != xno ]) dnl dnl AM_INIT_AUTOMAKE([1.11.1 foreign dist-bzip2 dist-xz]) dnl AM_INIT_AUTOMAKE([1.10.1 foreign dist-bzip2]) AC_DEFINE_UNQUOTED(AGENTS_VERSION, "$PACKAGE_VERSION", Current agents version) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== # check stolen from gnulib/m4/gnu-make.m4 if ! ${MAKE-make} --version /cannot/make/this >/dev/null 2>&1; then AC_MSG_ERROR([you don't seem to have GNU make; it is required]) fi AC_PROG_CC dnl Can force other with environment variable "CC". 
AM_PROG_CC_C_O AC_PROG_CC_STDC AC_PROG_AWK AC_PROG_LN_S AC_PROG_INSTALL AC_PROG_MAKE_SET AC_C_STRINGIZE AC_C_INLINE AC_TYPE_SIZE_T AC_TYPE_SSIZE_T AC_TYPE_UID_T AC_TYPE_UINT16_T AC_TYPE_UINT8_T AC_TYPE_UINT32_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CPPFLAGS="$@" AC_MSG_CHECKING(whether $CC supports "$@") AC_PREPROC_IFELSE([AC_LANG_PROGRAM([])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } extract_header_define() { AC_MSG_CHECKING(for $2 in $1) Cfile=$srcdir/extract_define.$2.${$} printf "#include \n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` AC_MSG_RESULT($value) printf $value rm -f ${Cfile}.c ${Cfile} } AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi force GCC to compile to ANSI/ANSI standard for older compilers. 
[default=yes]]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]]) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]], [ INITDIR="$withval" ]) OCF_ROOT_DIR="${prefix}/lib/ocf" AC_ARG_WITH(ocf-root, [ --with-ocf-root=DIR directory for OCF scripts [${OCF_ROOT_DIR}]], [ OCF_ROOT_DIR="$withval" ]) HA_RSCTMPDIR=${localstatedir}/run/resource-agents AC_ARG_WITH(rsctmpdir, [ --with-rsctmpdir=DIR directory for resource agents state files [${HA_RSCTMPDIR}]], [ HA_RSCTMPDIR="$withval" ]) AC_ARG_ENABLE([libnet], [ --enable-libnet Use libnet for ARP based funcationality, [default=try]], [enable_libnet="$enableval"], [enable_libnet=try]) BUILD_RGMANAGER=0 BUILD_LINUX_HA=0 RASSET=all AC_ARG_WITH(ras-set, [ --with-ras-set=SET build/install only linux-ha or rgmanager resource-agents [default: all]], [ RASSET="$withval" ]) if test x$RASSET = xyes || test x$RASSET = xall ; then BUILD_RGMANAGER=1 BUILD_LINUX_HA=1 fi if test x$RASSET = xlinux-ha; then BUILD_LINUX_HA=1 fi if test x$RASSET = xrgmanager; then BUILD_RGMANAGER=1 fi if test $BUILD_LINUX_HA -eq 0 && test $BUILD_RGMANAGER -eq 0; then AC_MSG_ERROR([Are you really sure you want this package?]) exit 1 fi AM_CONDITIONAL(BUILD_LINUX_HA, test $BUILD_LINUX_HA -eq 1) AM_CONDITIONAL(BUILD_RGMANAGER, test $BUILD_RGMANAGER -eq 1) AC_ARG_WITH(compat-habindir, [ --with-compat-habindir use HA_BIN directory with compatibility for the Heartbeat stack [${libexecdir}]], [], [with_compat_habindir=no]) AM_CONDITIONAL(WITH_COMPAT_HABINDIR, test "x$with_compat_habindir" != "xno") dnl =============================================== dnl General Processing dnl =============================================== echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Heartbeat, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac 
AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done if test -z $INITDIR then INITDIR=${sysconfdir}/init.d fi AC_MSG_RESULT($INITDIR);; esac AC_SUBST(INITDIR) if test "${prefix}" = "/usr"; then INITDIRPREFIX="$INITDIR" else INITDIRPREFIX="${prefix}/$INITDIR" fi AC_SUBST(INITDIRPREFIX) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in dnl For consistency with Heartbeat, map NONE->$prefix *prefix*|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac if test "x$with_compat_habindir" != "xno" ; then libexecdir=${libdir} fi dnl Expand autoconf variables so that we dont end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl docdir is a recent addition to autotools eval docdir="`eval echo ${docdir}`" if test "x$docdir" = "x"; then docdir="`eval echo ${datadir}/doc`" fi AC_SUBST(docdir) dnl Home-grown variables eval INITDIR="${INITDIR}" for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
REBOOT_OPTIONS="-f" POWEROFF_OPTIONS="-f" case "$host_os" in *bsd*) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" ;; *solaris*) REBOOT_OPTIONS="-n" POWEROFF_OPTIONS="-n" LDFLAGS+=" -lssp -lssp_nonshared" ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) POWEROFF_OPTIONS="-nf" REBOOT_OPTIONS="-nf" ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_DEFINE_UNQUOTED(HA_LOG_FACILITY, LOG_DAEMON, Default logging facility) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) case "$host_cpu" in s390x)U64T="%lu";; *64*) U64T="%lu";; *) U64T="%llu";; esac AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl Variables needed for substitution AC_CHECK_HEADERS(heartbeat/glue_config.h) if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then OCF_ROOT_DIR=`extract_header_define heartbeat/glue_config.h OCF_ROOT_DIR` else enable_libnet=no fi AC_DEFINE_UNQUOTED(OCF_ROOT_DIR,"$OCF_ROOT_DIR", OCF root directory - specified by the OCF standard) AC_SUBST(OCF_ROOT_DIR) GLUE_STATE_DIR=${localstatedir}/run AC_DEFINE_UNQUOTED(GLUE_STATE_DIR,"$GLUE_STATE_DIR", Where to keep state files and sockets) AC_SUBST(GLUE_STATE_DIR) AC_DEFINE_UNQUOTED(HA_VARRUNDIR,"$GLUE_STATE_DIR", Where Heartbeat keeps state files and sockets - old name) HA_VARRUNDIR="$GLUE_STATE_DIR" AC_SUBST(HA_VARRUNDIR) # Expand $prefix eval HA_RSCTMPDIR="`eval echo ${HA_RSCTMPDIR}`" AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resource agents keep state files) AC_SUBST(HA_RSCTMPDIR) dnl Eventually move out of the heartbeat dir tree and create symlinks when needed HA_VARLIBHBDIR=${localstatedir}/lib/heartbeat 
AC_DEFINE_UNQUOTED(HA_VARLIBHBDIR,"$HA_VARLIBHBDIR", Whatever this used to mean) AC_SUBST(HA_VARLIBHBDIR) OCF_RA_DIR="${OCF_ROOT_DIR}/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) OCF_RA_DIR_PREFIX="$OCF_RA_DIR" AC_SUBST(OCF_RA_DIR_PREFIX) OCF_LIB_DIR="${OCF_ROOT_DIR}/lib" AC_DEFINE_UNQUOTED(OCF_LIB_DIR,"$OCF_LIB_DIR", Location for shared code for OCF RAs) AC_SUBST(OCF_LIB_DIR) OCF_LIB_DIR_PREFIX="$OCF_LIB_DIR" AC_SUBST(OCF_LIB_DIR_PREFIX) dnl =============================================== dnl rgmanager ras bits dnl =============================================== LOGDIR=${localstatedir}/log/cluster CLUSTERDATA=${datadir}/cluster AC_SUBST([LOGDIR]) AC_SUBST([CLUSTERDATA]) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH AC_CHECK_PROGS(MAKE, gmake make) AC_PATH_PROGS(BASH_SHELL, bash) if test x"${BASH_SHELL}" = x""; then AC_MSG_ERROR(You need bash installed in order to build ${PACKAGE}) fi AC_PATH_PROGS(XSLTPROC, xsltproc) AM_CONDITIONAL(BUILD_DOC, test "x$XSLTPROC" != "x" ) if test "x$XSLTPROC" = "x"; then AC_MSG_WARN([xsltproc not installed, unable to (re-)build manual pages]) fi AC_SUBST(XSLTPROC) AC_PATH_PROGS(XMLCATALOG, xmlcatalog) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PING, ping, /bin/ping) AC_PATH_PROGS(IFCONFIG, ifconfig, /sbin/ifconfig) AC_PATH_PROGS(MAILCMD, mailx mail, mail) AC_PATH_PROGS(EGREP, egrep) AC_PATH_PROGS(RM, rm) AC_SUBST(BASH_SHELL) AC_SUBST(MAILCMD) AC_SUBST(EGREP) AC_SUBST(SHELL) AC_SUBST(PING) AC_SUBST(RM) AC_SUBST(TEST) AM_PATH_PYTHON if test -z "$PYTHON"; then echo "*** Essential program python not found" 1>&2 exit 1 fi AC_PATH_PROGS(ROUTE, route) AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command) 
AC_MSG_CHECKING(ifconfig option to list interfaces) for IFCONFIG_A_OPT in "-A" "-a" "" do $IFCONFIG $IFCONFIG_A_OPT > /dev/null 2>&1 if test "$?" = 0 then AC_DEFINE_UNQUOTED(IFCONFIG_A_OPT, "$IFCONFIG_A_OPT", option for ifconfig command) AC_MSG_RESULT($IFCONFIG_A_OPT) break fi done AC_SUBST(IFCONFIG_A_OPT) if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi STYLESHEET_PREFIX="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' STYLESHEET_PREFIX=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI} \ | sed -n 's|^file://||p;q') if test x"${STYLESHEET_PREFIX}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then STYLESHEET_PREFIX=$(echo "${d}" | sed 's/\/manpages//') break fi done fi if test x"${STYLESHEET_PREFIX}" = x""; then AC_MSG_ERROR(You need docbook-style-xsl installed in order to build ${PACKAGE}) fi fi AC_MSG_RESULT($STYLESHEET_PREFIX) AC_SUBST(STYLESHEET_PREFIX) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) AC_CHECK_LIB(gnugetopt, getopt_long) dnl if available if test "x${enable_thread_safe}" = "xyes"; then GPKGNAME="gthread-2.0" else GPKGNAME="glib-2.0" fi if $PKGCONFIG --exists $GPKGNAME then GLIBCONFIG="$PKGCONFIG $GPKGNAME" else set -x echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH $PKGCONFIG --exists $GPKGNAME; echo $? $PKGCONFIG --cflags $GPKGNAME; echo $? $PKGCONFIG $GPKGNAME; echo $? 
set +x AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE}) fi AC_MSG_RESULT(using $GLIBCONFIG) if test "X$GLIBCONFIG" != X; then AC_MSG_CHECKING(for special glib includes: ) GLIBHEAD=`$GLIBCONFIG --cflags` AC_MSG_RESULT($GLIBHEAD) CPPFLAGS="$CPPFLAGS $GLIBHEAD" AC_MSG_CHECKING(for glib library flags) GLIBLIB=`$GLIBCONFIG --libs` AC_MSG_RESULT($GLIBLIB) LIBS="$LIBS $GLIBLIB" fi dnl ======================================================================== dnl Headers dnl ======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS([arpa/inet.h]) AC_CHECK_HEADERS([fcntl.h]) AC_CHECK_HEADERS([limits.h]) AC_CHECK_HEADERS([malloc.h]) AC_CHECK_HEADERS([netdb.h]) AC_CHECK_HEADERS([netinet/in.h]) AC_CHECK_HEADERS([sys/file.h]) AC_CHECK_HEADERS([sys/ioctl.h]) AC_CHECK_HEADERS([sys/param.h]) AC_CHECK_HEADERS([sys/time.h]) AC_CHECK_HEADERS([syslog.h]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_FUNC_FORK AC_FUNC_STRNLEN AC_CHECK_FUNCS([alarm gettimeofday inet_ntoa memset mkdir socket uname]) AC_CHECK_FUNCS([strcasecmp strchr strdup strerror strrchr strspn strstr strtol strtoul]) AC_PATH_PROGS(REBOOT, reboot, /sbin/reboot) AC_SUBST(REBOOT) AC_SUBST(REBOOT_OPTIONS) AC_DEFINE_UNQUOTED(REBOOT, "$REBOOT", path to the reboot command) AC_DEFINE_UNQUOTED(REBOOT_OPTIONS, "$REBOOT_OPTIONS", reboot options) AC_PATH_PROGS(POWEROFF_CMD, poweroff, /sbin/poweroff) AC_SUBST(POWEROFF_CMD) AC_SUBST(POWEROFF_OPTIONS) AC_DEFINE_UNQUOTED(POWEROFF_CMD, "$POWEROFF_CMD", path to the poweroff command) AC_DEFINE_UNQUOTED(POWEROFF_OPTIONS, "$POWEROFF_OPTIONS", poweroff options) AC_PATH_PROGS(POD2MAN, pod2man) AM_CONDITIONAL(BUILD_POD_DOC, test "x$POD2MAN" != "x" ) if test "x$POD2MAN" = "x"; then AC_MSG_WARN([pod2man not installed, unable to (re-)build 
ldirector manual page]) fi AC_SUBST(POD2MAN) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) dnl ======================================================================== dnl sfex dnl ======================================================================== build_sfex=no case $host_os in *Linux*|*linux*) if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then build_sfex=yes fi ;; esac AM_CONDITIONAL(BUILD_SFEX, test "$build_sfex" = "yes" ) dnl ======================================================================== dnl tickle (needs port to BSD platforms) dnl ======================================================================== AC_CHECK_MEMBERS([struct iphdr.saddr],,,[[#include ]]) AM_CONDITIONAL(BUILD_TICKLE, test "$ac_cv_member_struct_iphdr_saddr" = "yes" ) dnl ======================================================================== dnl libnet dnl ======================================================================== libnet="" libnet_version="none" LIBNETLIBS="" LIBNETDEFINES="" AC_MSG_CHECKING(if libnet is required) libnet_fatal=$enable_libnet case $enable_libnet in no) ;; yes|libnet10|libnet11|10|11) libnet_fatal=yes;; try) case $host_os in *Linux*|*linux*) libnet_fatal=no;; *) libnet_fatal=yes;; dnl legacy behavior esac ;; *) libnet_fatal=yes; enable_libnet=try;; esac AC_MSG_RESULT($libnet_fatal) if test "x$enable_libnet" != "xno"; then AC_PATH_PROGS(LIBNETCONFIG, libnet-config) AC_CHECK_LIB(nsl, t_open) dnl -lnsl AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(net, libnet_get_hwaddr, LIBNETLIBS=" -lnet", []) fi AC_MSG_CHECKING(for libnet) if test "x$LIBNETLIBS" != "x" -o "x$enable_libnet" = "xlibnet11"; then LIBNETDEFINES="" if test "$ac_cv_lib_nsl_t_open" = yes; then LIBNETLIBS="-lnsl $LIBNETLIBS" fi if test "$ac_cv_lib_socket_socket" = yes; then 
LIBNETLIBS="-lsocket $LIBNETLIBS" fi libnet=net libnet_version="libnet1.1" fi if test "x$enable_libnet" = "xtry" -o "x$enable_libnet" = "xlibnet10"; then if test "x$LIBNETLIBS" = x -a "x${LIBNETCONFIG}" != "x" ; then LIBNETDEFINES="`$LIBNETCONFIG --defines` `$LIBNETCONFIG --cflags`"; LIBNETLIBS="`$LIBNETCONFIG --libs`"; libnet_version="libnet1.0 (old)" case $LIBNETLIBS in *-l*) libnet=`echo $LIBNETLIBS | sed 's%.*-l%%'`;; *) libnet_version=none;; esac CPPFLAGS="$CPPFLAGS $LIBNETDEFINES" AC_CHECK_HEADERS(libnet.h) if test "$ac_cv_header_libnet_h" = no; then libnet_version=none fi fi fi AC_MSG_RESULT(found $libnet_version) if test "$libnet_version" = none; then LIBNETLIBS="" LIBNETDEFINES="" if test $libnet_fatal = yes; then AC_MSG_ERROR(libnet not found) fi else AC_CHECK_LIB($libnet,libnet_init, [new_libnet=yes; AC_DEFINE(HAVE_LIBNET_1_1_API, 1, Libnet 1.1 API)], [new_libnet=no; AC_DEFINE(HAVE_LIBNET_1_0_API, 1, Libnet 1.0 API)],$LIBNETLIBS) AC_SUBST(LIBNETLIBS) fi if test "$new_libnet" = yes; then AC_MSG_CHECKING(for libnet API 1.1.4: ) save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -fgnu89-inline -Wall -Werror" AC_COMPILE_IFELSE([ AC_LANG_SOURCE(#include int main(){libnet_t *l=NULL; libnet_pblock_record_ip_offset(l, l->total_size); return(0); })], [AC_MSG_RESULT(no)], [AC_DEFINE(HAVE_LIBNET_1_1_4_API, 1, Libnet 1.1.4 API) AC_MSG_RESULT(yes)]) CFLAGS="$save_CFLAGS" fi sendarp_linux=0 case $host_os in *Linux*|*linux*) sendarp_linux=1;; esac AC_SUBST(LIBNETLIBS) AC_SUBST(LIBNETDEFINES) AM_CONDITIONAL(SENDARP_LINUX, test $sendarp_linux = 1 ) AM_CONDITIONAL(USE_LIBNET, test "x$libnet_version" != "xnone" ) dnl ************************************************************************ dnl * Check for netinet/icmp6.h to enable the IPv6addr resource agent AC_CHECK_HEADERS(netinet/icmp6.h,[],[],[#include ]) AM_CONDITIONAL(USE_IPV6ADDR_AGENT, test "$ac_cv_header_netinet_icmp6_h" = yes && test "$ac_cv_header_heartbeat_glue_config_h" = yes) AM_CONDITIONAL(IPV6ADDR_COMPATIBLE, test 
"$ac_cv_header_netinet_icmp6_h" = yes) dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. CC_ERRORS="" CC_EXTRAS="" if export -p | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" enable_fatal_warnings=no else CFLAGS="$CFLAGS -ggdb3" # We had to eliminate -Wnested-externs because of libtool changes # Also remove -Waggregate-return because we use one libnet # call which returns a struct EXTRA_FLAGS="-fgnu89-inline -fstack-protector-all -Wall -Wbad-function-cast -Wcast-qual -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat=2 -Wformat-security -Wformat-nonliteral -Winline -Wmissing-prototypes -Wmissing-declarations -Wmissing-format-attribute -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wunsigned-char -Wwrite-strings" # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $j then CC_EXTRAS="$CC_EXTRAS $j" fi done dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'` AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4) dnl System specific options case "$host_os" in *linux*|*bsd*) if test "${enable_fatal_warnings}" = "unknown"; then enable_fatal_warnings=yes fi ;; esac if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then enable_fatal_warnings=yes else enable_fatal_warnings=no fi if test "x${enable_ansi}" != xno && cc_supports_flag 
-std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LOCALE) AC_SUBST(CC) AC_SUBST(MAKE) dnl The Makefiles and shell scripts we output AC_CONFIG_FILES(Makefile \ include/Makefile \ heartbeat/Makefile \ heartbeat/ocf-binaries \ heartbeat/ocf-directories \ heartbeat/ocf-shellfuncs \ heartbeat/shellfuncs \ systemd/Makefile \ systemd/resource-agents.conf \ tools/Makefile \ tools/ocf-tester \ tools/ocft/Makefile \ tools/ocft/ocft \ tools/ocft/caselib \ tools/ocft/README \ tools/ocft/README.zh_CN \ ldirectord/Makefile \ ldirectord/ldirectord \ ldirectord/init.d/Makefile \ ldirectord/init.d/ldirectord \ ldirectord/init.d/ldirectord.debian \ ldirectord/init.d/ldirectord.debian.default \ ldirectord/systemd/Makefile \ ldirectord/systemd/ldirectord.service \ ldirectord/logrotate.d/Makefile \ ldirectord/OCF/Makefile \ ldirectord/OCF/ldirectord \ doc/Makefile \ doc/man/Makefile \ rgmanager/Makefile \ rgmanager/src/Makefile \ rgmanager/src/resources/Makefile \ rgmanager/src/resources/ocf-shellfuncs \ rgmanager/src/resources/svclib_nfslock \ rgmanager/src/resources/lvm_by_lv.sh \ rgmanager/src/resources/lvm_by_vg.sh \ rgmanager/src/resources/utils/Makefile \ rgmanager/src/resources/utils/fs-lib.sh \ rgmanager/src/resources/utils/messages.sh \ 
rgmanager/src/resources/utils/config-utils.sh \ rgmanager/src/resources/utils/member_util.sh \ rgmanager/src/resources/utils/ra-skelet.sh \ ) dnl Files we output that need to be executable -AC_CONFIG_FILES([heartbeat/AzEvents], [chmod +x heartbeat/AzEvents]) +AC_CONFIG_FILES([heartbeat/azure-events], [chmod +x heartbeat/azure-events]) AC_CONFIG_FILES([heartbeat/AoEtarget], [chmod +x heartbeat/AoEtarget]) AC_CONFIG_FILES([heartbeat/ManageRAID], [chmod +x heartbeat/ManageRAID]) AC_CONFIG_FILES([heartbeat/ManageVE], [chmod +x heartbeat/ManageVE]) AC_CONFIG_FILES([heartbeat/Squid], [chmod +x heartbeat/Squid]) AC_CONFIG_FILES([heartbeat/SysInfo], [chmod +x heartbeat/SysInfo]) AC_CONFIG_FILES([heartbeat/aws-vpc-route53], [chmod +x heartbeat/aws-vpc-route53]) AC_CONFIG_FILES([heartbeat/clvm], [chmod +x heartbeat/clvm]) AC_CONFIG_FILES([heartbeat/conntrackd], [chmod +x heartbeat/conntrackd]) AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate]) AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) AC_CONFIG_FILES([heartbeat/gcp-pd-move], [chmod +x heartbeat/gcp-pd-move]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-route], [chmod +x heartbeat/gcp-vpc-move-route]) AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit]) AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget]) AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira]) AC_CONFIG_FILES([heartbeat/kamailio], [chmod +x heartbeat/kamailio]) AC_CONFIG_FILES([heartbeat/lxc], [chmod +x heartbeat/lxc]) AC_CONFIG_FILES([heartbeat/lxd-info], [chmod +x heartbeat/lxd-info]) AC_CONFIG_FILES([heartbeat/machine-info], [chmod +x heartbeat/machine-info]) AC_CONFIG_FILES([heartbeat/mariadb], [chmod +x heartbeat/mariadb]) 
AC_CONFIG_FILES([heartbeat/mpathpersist], [chmod +x heartbeat/mpathpersist]) AC_CONFIG_FILES([heartbeat/nfsnotify], [chmod +x heartbeat/nfsnotify]) AC_CONFIG_FILES([heartbeat/redis], [chmod +x heartbeat/redis]) AC_CONFIG_FILES([heartbeat/rsyslog], [chmod +x heartbeat/rsyslog]) AC_CONFIG_FILES([heartbeat/sg_persist], [chmod +x heartbeat/sg_persist]) AC_CONFIG_FILES([heartbeat/slapd], [chmod +x heartbeat/slapd]) AC_CONFIG_FILES([heartbeat/sybaseASE], [chmod +x heartbeat/sybaseASE]) AC_CONFIG_FILES([heartbeat/syslog-ng], [chmod +x heartbeat/syslog-ng]) AC_CONFIG_FILES([heartbeat/vsftpd], [chmod +x heartbeat/vsftpd]) AC_CONFIG_FILES([heartbeat/CTDB], [chmod +x heartbeat/CTDB]) AC_CONFIG_FILES([rgmanager/src/resources/ASEHAagent.sh], [chmod +x rgmanager/src/resources/ASEHAagent.sh]) AC_CONFIG_FILES([rgmanager/src/resources/apache.sh], [chmod +x rgmanager/src/resources/apache.sh]) AC_CONFIG_FILES([rgmanager/src/resources/bind-mount.sh], [chmod +x rgmanager/src/resources/bind-mount.sh]) AC_CONFIG_FILES([rgmanager/src/resources/clusterfs.sh], [chmod +x rgmanager/src/resources/clusterfs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/db2.sh], [chmod +x rgmanager/src/resources/db2.sh]) AC_CONFIG_FILES([rgmanager/src/resources/drbd.sh], [chmod +x rgmanager/src/resources/drbd.sh]) AC_CONFIG_FILES([rgmanager/src/resources/fs.sh], [chmod +x rgmanager/src/resources/fs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/ip.sh], [chmod +x rgmanager/src/resources/ip.sh]) AC_CONFIG_FILES([rgmanager/src/resources/lvm.sh], [chmod +x rgmanager/src/resources/lvm.sh]) AC_CONFIG_FILES([rgmanager/src/resources/mysql.sh], [chmod +x rgmanager/src/resources/mysql.sh]) AC_CONFIG_FILES([rgmanager/src/resources/named.sh], [chmod +x rgmanager/src/resources/named.sh]) AC_CONFIG_FILES([rgmanager/src/resources/netfs.sh], [chmod +x rgmanager/src/resources/netfs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/nfsclient.sh], [chmod +x rgmanager/src/resources/nfsclient.sh]) 
AC_CONFIG_FILES([rgmanager/src/resources/nfsexport.sh], [chmod +x rgmanager/src/resources/nfsexport.sh]) AC_CONFIG_FILES([rgmanager/src/resources/nfsserver.sh], [chmod +x rgmanager/src/resources/nfsserver.sh]) AC_CONFIG_FILES([rgmanager/src/resources/openldap.sh], [chmod +x rgmanager/src/resources/openldap.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oracledb.sh], [chmod +x rgmanager/src/resources/oracledb.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oradg.sh], [chmod +x rgmanager/src/resources/oradg.sh]) AC_CONFIG_FILES([rgmanager/src/resources/orainstance.sh], [chmod +x rgmanager/src/resources/orainstance.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oralistener.sh], [chmod +x rgmanager/src/resources/oralistener.sh]) AC_CONFIG_FILES([rgmanager/src/resources/postgres-8.sh], [chmod +x rgmanager/src/resources/postgres-8.sh]) AC_CONFIG_FILES([rgmanager/src/resources/samba.sh], [chmod +x rgmanager/src/resources/samba.sh]) AC_CONFIG_FILES([rgmanager/src/resources/script.sh], [chmod +x rgmanager/src/resources/script.sh]) AC_CONFIG_FILES([rgmanager/src/resources/service.sh], [chmod +x rgmanager/src/resources/service.sh]) AC_CONFIG_FILES([rgmanager/src/resources/smb.sh], [chmod +x rgmanager/src/resources/smb.sh]) AC_CONFIG_FILES([rgmanager/src/resources/tomcat-5.sh], [chmod +x rgmanager/src/resources/tomcat-5.sh]) AC_CONFIG_FILES([rgmanager/src/resources/tomcat-6.sh], [chmod +x rgmanager/src/resources/tomcat-6.sh]) AC_CONFIG_FILES([rgmanager/src/resources/vm.sh], [chmod +x rgmanager/src/resources/vm.sh]) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION}]) AC_MSG_RESULT([ Build Version = $Format:%H$]) AC_MSG_RESULT([ Features =${PKG_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man 
pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ Documentation = ${docdir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([ HA_BIN directory prefix = ${libexecdir}]) AC_MSG_RESULT([ RA state files = ${HA_RSCTMPDIR}]) AC_MSG_RESULT([ AIS Plugins = ${LCRSODIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am index 0235c9af6..b8258bccd 100644 --- a/doc/man/Makefile.am +++ b/doc/man/Makefile.am @@ -1,207 +1,208 @@ # # doc: Linux-HA resource agents # # Copyright (C) 2009 Florian Haas # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# MAINTAINERCLEANFILES = Makefile.in EXTRA_DIST = $(doc_DATA) $(REFENTRY_STYLESHEET) \ mkappendix.sh ralist.sh CLEANFILES = $(man_MANS) $(xmlfiles) metadata-*.xml STYLESHEET_PREFIX ?= http://docbook.sourceforge.net/release/xsl/current MANPAGES_STYLESHEET ?= $(STYLESHEET_PREFIX)/manpages/docbook.xsl HTML_STYLESHEET ?= $(STYLESHEET_PREFIX)/xhtml/docbook.xsl FO_STYLESHEET ?= $(STYLESHEET_PREFIX)/fo/docbook.xsl REFENTRY_STYLESHEET ?= ra2refentry.xsl XSLTPROC_OPTIONS ?= --xinclude XSLTPROC_MANPAGES_OPTIONS ?= $(XSLTPROC_OPTIONS) XSLTPROC_HTML_OPTIONS ?= $(XSLTPROC_OPTIONS) XSLTPROC_FO_OPTIONS ?= $(XSLTPROC_OPTIONS) radir = $(top_srcdir)/heartbeat # OCF_ROOT=. is necessary due to a sanity check in ocf-shellfuncs # (which tests whether $OCF_ROOT points to a directory metadata-%.xml: $(radir)/% OCF_ROOT=. OCF_FUNCTIONS_DIR=$(radir) $< meta-data > $@ metadata-IPv6addr.xml: ../../heartbeat/IPv6addr OCF_ROOT=. OCF_FUNCTIONS_DIR=$(radir) $< meta-data > $@ # Please note: we can't name the man pages # ocf:heartbeat:. Believe me, I've tried. It looks like it # works, but then it doesn't. While make can deal correctly with # colons in target names (when properly escaped), it royally messes up # when it is deals with _dependencies_ that contain colons. See Bug # 12126 on savannah.gnu.org. But, maybe it gets fixed soon, it was # first reported in 1995 and added to Savannah in in 2005... 
if BUILD_DOC -man_MANS = ocf_heartbeat_AoEtarget.7 \ +man_MANS = ocf_heartbeat_azure-events.7 \ + ocf_heartbeat_AoEtarget.7 \ ocf_heartbeat_AudibleAlarm.7 \ ocf_heartbeat_ClusterMon.7 \ ocf_heartbeat_CTDB.7 \ ocf_heartbeat_Delay.7 \ ocf_heartbeat_Dummy.7 \ ocf_heartbeat_EvmsSCC.7 \ ocf_heartbeat_Evmsd.7 \ ocf_heartbeat_Filesystem.7 \ ocf_heartbeat_ICP.7 \ ocf_heartbeat_IPaddr.7 \ ocf_heartbeat_IPaddr2.7 \ ocf_heartbeat_IPsrcaddr.7 \ ocf_heartbeat_LVM.7 \ ocf_heartbeat_LVM-activate.7 \ ocf_heartbeat_LinuxSCSI.7 \ ocf_heartbeat_MailTo.7 \ ocf_heartbeat_ManageRAID.7 \ ocf_heartbeat_ManageVE.7 \ ocf_heartbeat_NodeUtilization.7 \ ocf_heartbeat_Pure-FTPd.7 \ ocf_heartbeat_Raid1.7 \ ocf_heartbeat_Route.7 \ ocf_heartbeat_SAPDatabase.7 \ ocf_heartbeat_SAPInstance.7 \ ocf_heartbeat_SendArp.7 \ ocf_heartbeat_ServeRAID.7 \ ocf_heartbeat_SphinxSearchDaemon.7 \ ocf_heartbeat_Squid.7 \ ocf_heartbeat_Stateful.7 \ ocf_heartbeat_SysInfo.7 \ ocf_heartbeat_VIPArip.7 \ ocf_heartbeat_VirtualDomain.7 \ ocf_heartbeat_WAS.7 \ ocf_heartbeat_WAS6.7 \ ocf_heartbeat_WinPopup.7 \ ocf_heartbeat_Xen.7 \ ocf_heartbeat_Xinetd.7 \ ocf_heartbeat_ZFS.7 \ ocf_heartbeat_aliyun-vpc-move-ip.7 \ ocf_heartbeat_anything.7 \ ocf_heartbeat_apache.7 \ ocf_heartbeat_asterisk.7 \ ocf_heartbeat_aws-vpc-move-ip.7 \ ocf_heartbeat_aws-vpc-route53.7 \ ocf_heartbeat_awseip.7 \ ocf_heartbeat_awsvip.7 \ ocf_heartbeat_azure-lb.7 \ ocf_heartbeat_clvm.7 \ ocf_heartbeat_conntrackd.7 \ ocf_heartbeat_db2.7 \ ocf_heartbeat_dhcpd.7 \ ocf_heartbeat_docker.7 \ ocf_heartbeat_dnsupdate.7 \ ocf_heartbeat_eDir88.7 \ ocf_heartbeat_ethmonitor.7 \ ocf_heartbeat_exportfs.7 \ ocf_heartbeat_fio.7 \ ocf_heartbeat_galera.7 \ ocf_heartbeat_garbd.7 \ ocf_heartbeat_gcp-pd-move.7 \ ocf_heartbeat_gcp-vpc-move-ip.7 \ ocf_heartbeat_gcp-vpc-move-vip.7 \ ocf_heartbeat_gcp-vpc-move-route.7 \ ocf_heartbeat_iSCSILogicalUnit.7 \ ocf_heartbeat_iSCSITarget.7 \ ocf_heartbeat_iface-bridge.7 \ ocf_heartbeat_iface-vlan.7 \ ocf_heartbeat_ipsec.7 \ 
ocf_heartbeat_ids.7 \ ocf_heartbeat_iscsi.7 \ ocf_heartbeat_jboss.7 \ ocf_heartbeat_jira.7 \ ocf_heartbeat_kamailio.7 \ ocf_heartbeat_lvmlockd.7 \ ocf_heartbeat_lxc.7 \ ocf_heartbeat_lxd-info.7 \ ocf_heartbeat_machine-info.7 \ ocf_heartbeat_mariadb.7 \ ocf_heartbeat_minio.7 \ ocf_heartbeat_mysql.7 \ ocf_heartbeat_mysql-proxy.7 \ ocf_heartbeat_nagios.7 \ ocf_heartbeat_named.7 \ ocf_heartbeat_nfsnotify.7 \ ocf_heartbeat_nfsserver.7 \ ocf_heartbeat_nginx.7 \ ocf_heartbeat_openstack-info.7 \ ocf_heartbeat_openstack-cinder-volume.7 \ ocf_heartbeat_openstack-floating-ip.7 \ ocf_heartbeat_oraasm.7 \ ocf_heartbeat_oracle.7 \ ocf_heartbeat_oralsnr.7 \ ocf_heartbeat_ovsmonitor.7 \ ocf_heartbeat_pgagent.7 \ ocf_heartbeat_pgsql.7 \ ocf_heartbeat_pingd.7 \ ocf_heartbeat_podman.7 \ ocf_heartbeat_portblock.7 \ ocf_heartbeat_postfix.7 \ ocf_heartbeat_pound.7 \ ocf_heartbeat_proftpd.7 \ ocf_heartbeat_rabbitmq-cluster.7 \ ocf_heartbeat_redis.7 \ ocf_heartbeat_rkt.7 \ ocf_heartbeat_rsyncd.7 \ ocf_heartbeat_rsyslog.7 \ ocf_heartbeat_scsi2reservation.7 \ ocf_heartbeat_sfex.7 \ ocf_heartbeat_slapd.7 \ ocf_heartbeat_sybaseASE.7 \ ocf_heartbeat_sg_persist.7 \ ocf_heartbeat_mpathpersist.7 \ ocf_heartbeat_symlink.7 \ ocf_heartbeat_syslog-ng.7 \ ocf_heartbeat_tomcat.7 \ ocf_heartbeat_varnish.7 \ ocf_heartbeat_vmware.7 \ ocf_heartbeat_vsftpd.7 \ ocf_heartbeat_zabbixserver.7 if USE_IPV6ADDR_AGENT man_MANS += ocf_heartbeat_IPv6addr.7 endif xmlfiles = $(man_MANS:.7=.xml) %.1 %.5 %.7 %.8: %.xml $(XSLTPROC) \ $(XSLTPROC_MANPAGES_OPTIONS) \ $(MANPAGES_STYLESHEET) $< ocf_heartbeat_%.xml: metadata-%.xml $(srcdir)/$(REFENTRY_STYLESHEET) $(XSLTPROC) --novalid \ --stringparam package $(PACKAGE_NAME) \ --stringparam version $(VERSION) \ --output $@ \ $(srcdir)/$(REFENTRY_STYLESHEET) $< ocf_resource_agents.xml: $(xmlfiles) mkappendix.sh ./mkappendix.sh $(xmlfiles) > $@ %.html: %.xml $(XSLTPROC) \ $(XSLTPROC_HTML_OPTIONS) \ --output $@ \ $(HTML_STYLESHEET) $< xml: ocf_resource_agents.xml endif diff --git 
a/heartbeat/Makefile.am b/heartbeat/Makefile.am index 843186c98..8f7d2841a 100644 --- a/heartbeat/Makefile.am +++ b/heartbeat/Makefile.am @@ -1,201 +1,202 @@ # Makefile.am for OCF RAs # # Author: Sun Jing Dong # Copyright (C) 2004 IBM # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # MAINTAINERCLEANFILES = Makefile.in EXTRA_DIST = $(ocf_SCRIPTS) $(ocfcommon_DATA) \ $(common_DATA) $(hb_DATA) $(dtd_DATA) \ README AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/linux-ha halibdir = $(libexecdir)/heartbeat ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat dtddir = $(datadir)/$(PACKAGE_NAME) dtd_DATA = ra-api-1.dtd metadata.rng if USE_IPV6ADDR_AGENT ocf_PROGRAMS = IPv6addr else ocf_PROGRAMS = endif if IPV6ADDR_COMPATIBLE halib_PROGRAMS = send_ua else halib_PROGRAMS = endif IPv6addr_SOURCES = IPv6addr.c IPv6addr_utils.c send_ua_SOURCES = send_ua.c IPv6addr_utils.c IPv6addr_LDADD = -lplumb $(LIBNETLIBS) send_ua_LDADD = $(LIBNETLIBS) -ocf_SCRIPTS = AoEtarget \ +ocf_SCRIPTS = azure-events \ + AoEtarget \ AudibleAlarm \ ClusterMon \ CTDB \ Delay \ Dummy \ EvmsSCC \ Evmsd \ Filesystem \ ICP \ IPaddr \ IPaddr2 \ IPsrcaddr \ LVM \ LinuxSCSI \ lvmlockd \ LVM-activate \ MailTo \ ManageRAID \ ManageVE \ NodeUtilization \ Pure-FTPd \ Raid1 \ Route \ SAPDatabase \ SAPInstance \ SendArp \ ServeRAID \ SphinxSearchDaemon \ Squid \ Stateful \ SysInfo 
\ VIPArip \ VirtualDomain \ WAS \ WAS6 \ WinPopup \ Xen \ Xinetd \ ZFS \ aliyun-vpc-move-ip \ anything \ apache \ asterisk \ aws-vpc-move-ip \ aws-vpc-route53 \ awseip \ awsvip \ azure-lb \ clvm \ conntrackd \ db2 \ dhcpd \ dnsupdate \ docker \ eDir88 \ ethmonitor \ exportfs \ fio \ galera \ garbd \ gcp-pd-move \ gcp-vpc-move-ip \ gcp-vpc-move-vip \ gcp-vpc-move-route \ iSCSILogicalUnit \ iSCSITarget \ ids \ iface-bridge \ iface-vlan \ ipsec \ iscsi \ jboss \ jira \ kamailio \ lxc \ lxd-info \ machine-info \ mariadb \ minio \ mysql \ mysql-proxy \ nagios \ named \ nfsnotify \ nfsserver \ nginx \ openstack-cinder-volume \ openstack-floating-ip \ openstack-info \ oraasm \ oracle \ oralsnr \ ovsmonitor \ pgagent \ pgsql \ pingd \ podman \ portblock \ postfix \ pound \ proftpd \ rabbitmq-cluster \ redis \ rkt \ rsyncd \ rsyslog \ scsi2reservation \ sfex \ sg_persist \ mpathpersist \ slapd \ sybaseASE \ symlink \ syslog-ng \ tomcat \ varnish \ vmware \ vsftpd \ zabbixserver ocfcommondir = $(OCF_LIB_DIR_PREFIX)/heartbeat ocfcommon_DATA = ocf-shellfuncs \ ocf-binaries \ ocf-directories \ ocf-returncodes \ ocf-rarun \ ocf-distro \ apache-conf.sh \ http-mon.sh \ sapdb-nosha.sh \ sapdb.sh \ lvm-clvm.sh \ lvm-plain.sh \ lvm-tag.sh \ ora-common.sh \ mysql-common.sh \ nfsserver-redhat.sh \ findif.sh \ ocf.py # Legacy locations hbdir = $(sysconfdir)/ha.d hb_DATA = shellfuncs check: $(ocf_SCRIPTS:=.check) %.check: % OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) ./$< meta-data | xmllint --path $(abs_srcdir) --noout --relaxng $(abs_srcdir)/metadata.rng - diff --git a/heartbeat/AzEvents.in b/heartbeat/azure-events.in similarity index 96% rename from heartbeat/AzEvents.in rename to heartbeat/azure-events.in index 18f9ba6f6..09611ca07 100644 --- a/heartbeat/AzEvents.in +++ b/heartbeat/azure-events.in @@ -1,893 +1,893 @@ #!@PYTHON@ -tt # # Resource agent for monitoring Azure Scheduled Events # # License: GNU General Public License (GPL) # (c) 2018 Tobias Niekamp, Microsoft 
Corp. # and Linux-HA contributors import os, sys, time, subprocess import json import urllib, urllib2, socket import logging, syslog from enum import Enum from collections import defaultdict ############################################################################## VERSION = "0.10" OCF_SUCCESS = 0 OCF_ERR_GENERIC = 1 OCF_ERR_UNIMPLEMENTED = 3 OCF_ERR_CONFIGURED = 6 OCF_NOT_RUNNING = 7 -attr_globalPullState = "AzEvents_globalPullState" -attr_lastDocVersion = "AzEvents_lastDocVersion" -attr_curNodeState = "AzEvents_curNodeState" -attr_pendingEventIDs = "AzEvents_pendingEventIDs" +attr_globalPullState = "azure-events_globalPullState" +attr_lastDocVersion = "azure-events_lastDocVersion" +attr_curNodeState = "azure-events_curNodeState" +attr_pendingEventIDs = "azure-events_pendingEventIDs" default_loglevel = logging.INFO default_relevantEventTypes = set(["Reboot", "Redeploy"]) global_pullMaxAttempts = 3 global_pullDelaySecs = 1 ############################################################################## class SyslogLibHandler(logging.StreamHandler): """ A handler class that correctly push messages into syslog """ def emit(self, record): syslog_level = { logging.CRITICAL: syslog.LOG_CRIT, logging.ERROR: syslog.LOG_ERR, logging.WARNING: syslog.LOG_WARNING, logging.INFO: syslog.LOG_INFO, logging.DEBUG: syslog.LOG_DEBUG, logging.NOTSET: syslog.LOG_DEBUG, }[record.levelno] msg = self.format(record) # take care of \x00 character syslog.syslog(syslog_level, msg.replace("\x00", "\n")) return ############################################################################## class attrDict(defaultdict): """ A wrapper for accessing dict keys like an attribute """ def __init__(self, data): super(attrDict, self).__init__(attrDict) for d in data.keys(): self.__setattr__(d, data[d]) def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(key) def __setattr__(self, key, value): self[key] = value 
############################################################################## class azHelper: """ Helper class for Azure's metadata API (including Scheduled Events) """ metadata_host = "http://169.254.169.254/metadata" instance_api = "instance" events_api = "scheduledevents" api_version = "2017-08-01" @staticmethod def _sendMetadataRequest(endpoint, postData=None): """ Send a request to Azure's Azure Metadata Service API """ url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) logging.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) logging.debug("_sendMetadataRequest: url = %s" % url) req = urllib2.Request(url, postData) req.add_header("Metadata", "true") resp = urllib2.urlopen(req) data = resp.read() logging.debug("_sendMetadataRequest: response = %s" % data) if len(data) > 0: data = json.loads(data) logging.debug("_sendMetadataRequest: finished") return data @staticmethod def getInstanceInfo(): """ Fetch details about the current VM from Azure's Azure Metadata Service API """ logging.debug("getInstanceInfo: begin") json = azHelper._sendMetadataRequest(azHelper.instance_api) logging.info("getInstanceInfo: json = %s" % json) logging.debug("getInstanceInfo: finished") return attrDict(json["compute"]) @staticmethod def pullScheduledEvents(): """ Retrieve all currently scheduled events via Azure Metadata Service API """ logging.debug("pullScheduledEvents: begin") json = azHelper._sendMetadataRequest(azHelper.events_api) logging.info("pullScheduledEvents: json = %s" % json) logging.debug("pullScheduledEvents: finished") return attrDict(json) @staticmethod def forceEvents(eventIDs): """ Force a set of events to start immediately """ logging.debug("forceEvents: begin") events = [] for e in eventIDs: events.append({ "EventId": e, }) postData = { "StartRequests" : events } logging.info("forceEvents: postData = %s" % postData) resp = azHelper._sendMetadataRequest(azHelper.events_api, 
postData=json.dumps(postData)) logging.debug("forceEvents: finished") return ############################################################################## class pcsHelper: """ Helper functions for Pacemaker control via crm """ @staticmethod def _getLocation(node): """ Helper function to retrieve local/global attributes """ if node: return ["--node", node] else: return ["--type", "crm_config"] @staticmethod def _exec(command, args): """ Helper function to execute a UNIX command """ logging.debug("_exec: begin; command = %s, args = %s" % (command, str(args))) flatten = lambda *n: (str(e) for a in n for e in (flatten(*a) if isinstance(a, (tuple, list)) else (str(a),))) command = list(flatten([command] + args)) logging.debug("_exec: cmd = %s" % " ".join(command)) try: ret = subprocess.check_output(command) logging.debug("_exec: return = %s" % ret) return ret.rstrip() except Exception: logging.warning("_exec: %s" % sys.exc_info()[0]) return None @staticmethod def setAttr(key, value, node=None): """ Set the value of a specific global/local attribute in the Pacemaker cluster """ logging.debug("setAttr: begin; key = %s, value = %s, node = %s" % (key, value, node)) if value: ret = pcsHelper._exec( "crm_attribute", ["--name", key, "--update", value, pcsHelper._getLocation(node)]) else: ret = pcsHelper._exec( "crm_attribute", ["--name", key, "--delete", pcsHelper._getLocation(node)]) logging.debug("setAttr: finished") return len(ret) == 0 @staticmethod def getAttr(key, node=None): """ Retrieve a global/local attribute from the Pacemaker cluster """ logging.debug("getAttr: begin; key = %s, node = %s" % (key, node)) val = pcsHelper._exec( "crm_attribute", ["--name", key, "--query", "--quiet", pcsHelper._getLocation(node)]) if not val: ret = None else: ret = val if not val.isdigit() else int(val) logging.debug("getAttr: finished") return ret @staticmethod def getAllNodes(): """ Get a list of hostnames for all nodes in the Pacemaker cluster """ logging.debug("getAllNodes: 
begin") nodes = [] nodeList = pcsHelper._exec( "crm_node", ["--list"]) for n in nodeList.split("\n"): nodes.append(n.split()[1]) logging.debug("getAllNodes: finished; return %s" % str(nodes)) return nodes @staticmethod def getHostNameFromAzName(azName): """ Helper function to get the actual host name from an Azure node name """ return pcsHelper.getAttr("hostName_%s" % azName) @staticmethod def removeHoldFromNodes(): """ Remove the ON_HOLD state from all nodes in the Pacemaker cluster """ logging.debug("removeHoldFromNodes: begin") for n in pcsHelper.getAllNodes(): if pcsHelper.getAttr(attr_curNodeState, node=n) == pcsNodeState.ON_HOLD.name: pcsHelper.setAttr(attr_curNodeState, pcsNodeState.AVAILABLE.name, node=n) logging.info("removeHoldFromNodes: removed ON_HOLD from node %s" % n) logging.debug("removeHoldFromNodes: finished") return False @staticmethod def otherNodesAvailable(exceptNode): """ Check if there are any nodes (except a given node) in the Pacemaker cluster that have state AVAILABLE """ logging.debug("otherNodesAvailable: begin; exceptNode = %s" % exceptNode) for n in pcsHelper.getAllNodes(): state = pcsHelper.getAttr(attr_curNodeState, node=n) if state: state = pcsNodeState[state] else: state = pcsNodeState.AVAILABLE if state == pcsNodeState.AVAILABLE and n != exceptNode.hostName: logging.info("otherNodesAvailable: at least %s is available" % n) logging.debug("otherNodesAvailable: finished") return True logging.info("otherNodesAvailable: no other nodes are available") logging.debug("otherNodesAvailable: finished") return False @staticmethod def transitionSummary(): """ Get the current Pacemaker transition summary (used to check if all resources are stopped when putting a node standby) """ # Is a global crm_simulate "too much"? Or would it be sufficient it there are no planned transitions for a particular node? 
# # crm_simulate -Ls # Transition Summary: # * Promote rsc_SAPHana_HN1_HDB03:0 (Slave -> Master hsr3-db1) # * Stop rsc_SAPHana_HN1_HDB03:1 (hsr3-db0) # * Move rsc_ip_HN1_HDB03 (Started hsr3-db0 -> hsr3-db1) # * Start rsc_nc_HN1_HDB03 (hsr3-db1) # # Excepted result when there are no pending actions: # Transition Summary: logging.debug("transitionSummary: begin") summary = pcsHelper._exec( "crm_simulate", ["-Ls"] ) if not summary: logging.warning("transitionSummary: could not load transition summary") return False if summary.find("Transition Summary:") < 0: logging.warning("transitionSummary: received unexpected transition summary: %s" % summary) return False summary = summary.split("Transition Summary:")[1] ret = summary.split("\n").pop(0) logging.debug("transitionSummary: finished; return = %s" % str(ret)) return ret @staticmethod def listOperationsOnNode(node): """ Get a list of all current operations for a given node (used to check if any resources are pending) """ # hsr3-db1:/home/tniek # crm_resource --list-operations -N hsr3-db0 - # rsc_AzEvents (ocf::heartbeat:AzEvents): Started: rsc_AzEvents_start_0 (node=hsr3-db0, call=91, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=115ms): complete - # rsc_AzEvents (ocf::heartbeat:AzEvents): Started: rsc_AzEvents_monitor_10000 (node=hsr3-db0, call=93, rc=0, last-rc-change=Fri Jun 8 22:37:47 2018, exec=197ms): complete + # rsc_azure-events (ocf::heartbeat:azure-events): Started: rsc_azure-events_start_0 (node=hsr3-db0, call=91, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=115ms): complete + # rsc_azure-events (ocf::heartbeat:azure-events): Started: rsc_azure-events_monitor_10000 (node=hsr3-db0, call=93, rc=0, last-rc-change=Fri Jun 8 22:37:47 2018, exec=197ms): complete # rsc_SAPHana_HN1_HDB03 (ocf::suse:SAPHana): Master: rsc_SAPHana_HN1_HDB03_start_0 (node=hsr3-db0, call=-1, rc=193, last-rc-change=Fri Jun 8 22:37:46 2018, exec=0ms): pending # rsc_SAPHanaTopology_HN1_HDB03 (ocf::suse:SAPHanaTopology): Started: 
rsc_SAPHanaTopology_HN1_HDB03_start_0 (node=hsr3-db0, call=90, rc=0, last-rc-change=Fri Jun 8 22:37:46 2018, exec=3214ms): complete logging.debug("listOperationsOnNode: begin; node = %s" % node) resources = pcsHelper._exec( "crm_resource", ["--list-operations", "-N", node] ) if len(resources) == 0: ret = [] else: ret = resources.split("\n") logging.debug("listOperationsOnNode: finished; return = %s" % str(ret)) return ret @staticmethod def noPendingResourcesOnNode(node): """ Check that there are no pending resources on a given node """ logging.debug("noPendingResourcesOnNode: begin; node = %s" % node) for r in pcsHelper.listOperationsOnNode(node): logging.debug("noPendingResourcesOnNode: * %s" % r) resource = r.split()[-1] if resource == "pending": logging.info("noPendingResourcesOnNode: found resource %s that is still pending" % resource) logging.debug("noPendingResourcesOnNode: finished; return = False") return False logging.info("noPendingResourcesOnNode: no pending resources on node %s" % node) logging.debug("noPendingResourcesOnNode: finished; return = True") return True @staticmethod def allResourcesStoppedOnNode(node): """ Check that all resources on a given node are stopped """ logging.debug("allResourcesStoppedOnNode: begin; node = %s" % node) if pcsHelper.noPendingResourcesOnNode(node): if len(pcsHelper.transitionSummary()) == 0: logging.info("allResourcesStoppedOnNode: no pending resources on node %s and empty transition summary" % node) logging.debug("allResourcesStoppedOnNode: finished; return = True") return True else: logging.info("allResourcesStoppedOnNode: transition summary is not empty") logging.debug("allResourcesStoppedOnNode: finished; return = False") return False logging.info("allResourcesStoppedOnNode: still pending resources on node %s" % node) logging.debug("allResourcesStoppedOnNode: finished; return = False") return False ############################################################################## class pcsNodeState(Enum): AVAILABLE = 
0 # Node is online and ready to handle events STOPPING = 1 # Standby has been triggered, but some resources are still running IN_EVENT = 2 # All resources are stopped, and event has been initiated via Azure Metadata Service ON_HOLD = 3 # Node has a pending event that cannot be started there are no other nodes available ############################################################################## class pcsNode: """ Core class implementing logic for a cluster node """ def __init__(self, ra): self.raOwner = ra self.azInfo = azHelper.getInstanceInfo() self.azName = self.azInfo.name self.hostName = socket.gethostname() self.setAttr("azName", self.azName) pcsHelper.setAttr("hostName_%s" % self.azName, self.hostName) def getAttr(self, key): """ Get a local attribute """ return pcsHelper.getAttr(key, node=self.hostName) def setAttr(self, key, value): """ Set a local attribute """ return pcsHelper.setAttr(key, value, node=self.hostName) def selfOrOtherNode(self, node): """ Helper function to distinguish self/other node """ if not node: return self.hostName else: return node def setState(self, state, node=None): """ Set the state for a given node (or self) """ node = self.selfOrOtherNode(node) logging.debug("setState: begin; node = %s, state = %s" % (node, state.name)) pcsHelper.setAttr(attr_curNodeState, state.name, node=node) logging.debug("setState: finished") return def getState(self, node=None): """ Get the state for a given node (or self) """ node = self.selfOrOtherNode(node) logging.debug("getState: begin; node = %s" % node) state = pcsHelper.getAttr(attr_curNodeState, node=node) logging.debug("getState: state = %s" % state) logging.debug("getState: finished") if not state: return pcsNodeState(pcsNodeState.AVAILABLE) else: return pcsNodeState[state] def setEventIDs(self, eventIDs, node=None): """ Set pending EventIDs for a given node (or self) """ node = self.selfOrOtherNode(node) logging.debug("setEventIDs: begin; node = %s, eventIDs = %s" % (node, str(eventIDs))) 
if eventIDs: eventIDStr = ",".join(eventIDs) else: eventIDStr = None pcsHelper.setAttr(attr_pendingEventIDs, eventIDStr, node=node) logging.debug("setEventIDs: finished") return def getEventIDs(self, node=None): """ Get pending EventIDs for a given node (or self) """ node = self.selfOrOtherNode(node) logging.debug("getEventIDs: begin; node = %s" % node) eventIDStr = pcsHelper.getAttr(attr_pendingEventIDs, node=node) if eventIDStr: eventIDs = eventIDStr.split(",") else: eventIDs = None logging.debug("getEventIDs: finished; eventIDs = %s" % str(eventIDs)) return eventIDs def updateNodeStateAndEvents(self, state, eventIDs, node=None): """ Set the state and pending EventIDs for a given node (or self) """ logging.debug("updateNodeStateAndEvents: begin; node = %s, state = %s, eventIDs = %s" % (node, state.name, str(eventIDs))) self.setState(state, node=node) self.setEventIDs(eventIDs, node=node) logging.debug("updateNodeStateAndEvents: finished") return state def putNodeStandby(self, node=None): """ Put self to standby """ node = self.selfOrOtherNode(node) logging.debug("putNodeStandby: begin; node = %s" % node) pcsHelper._exec( "crm", ["node", "standby", node] ) logging.debug("putNodeStandby: finished") return def putNodeOnline(self, node=None): """ Put self back online """ node = self.selfOrOtherNode(node) logging.debug("putNodeOnline: begin; node = %s" % node) pcsHelper._exec( "crm", ["node", "online", node] ) logging.debug("putNodeOnline: finished") return def separateEvents(self, events): """ Split own/other nodes' events """ logging.debug("separateEvents: begin; events = %s" % str(events)) localEvents = [] remoteEvents = [] for e in events: e = attrDict(e) if e.EventType not in self.raOwner.config.relevantEventTypes: continue if self.azName in e.Resources: localEvents.append(e) else: remoteEvents.append(e) logging.debug("separateEvents: finished; localEvents = %s, remoteEvents = %s" % (str(localEvents), str(remoteEvents))) return (localEvents, remoteEvents) def 
removeOrphanedEvents(self, azEvents): """ Remove remote events that are already finished """ logging.debug("removeOrphanedEvents: begin; azEvents = %s" % str(azEvents)) azEventIDs = set() for e in azEvents: azEventIDs.add(e.EventId) # for all nodes except self ... for n in pcsHelper.getAllNodes(): if n == self.hostName: continue curState = self.getState(node=n) # ... that still show in an event or shutting down resources ... if curState in (pcsNodeState.STOPPING, pcsNodeState.IN_EVENT): logging.info("removeOrphanedEvents: node %s has state %s" % (n, curState)) pcsEventIDs = self.getEventIDs(node=n) stillActive = False # ... but don't have any more events running according to Azure, ... for p in pcsEventIDs: if p in azEventIDs: logging.info("removeOrphanedEvents: (at least) event %s on node %s has not yet finished" % (str(p), n)) stillActive = True break if not stillActive: # ... put them back online. logging.info("removeOrphanedEvents: pcsEvents %s on node %s are not in azEvents %s -> bring node back online" % (str(pcsEventIDs), n, str(azEventIDs))) self.putNodeOnline(node=n) logging.debug("removeOrphanedEvents: finished") return def handleRemoteEvents(self, azEvents): """ Handle a list of events (as provided by Azure Metadata Service) for other nodes """ logging.debug("handleRemoteEvents: begin; hostName = %s, events = %s" % (self.hostName, str(azEvents))) if len(azEvents) == 0: logging.info("handleRemoteEvents: no remote events to handle") logging.debug("handleRemoteEvents: finished") return eventIDsForNode = {} # iterate through all current events as per Azure for e in azEvents: logging.info("handleRemoteEvents: handling remote event %s (%s; nodes = %s)" % (e.EventId, e.EventType, str(e.Resources))) # before we can force an event to start, we need to ensure all nodes involved have stopped their resources if e.EventStatus == "Scheduled": allNodesStopped = True for azName in e.Resources: hostName = pcsHelper.getHostNameFromAzName(azName) state = 
self.getState(node=hostName) if state == pcsNodeState.STOPPING: # the only way we can continue is when node state is STOPPING, but all resources have been stopped if not pcsHelper.allResourcesStoppedOnNode(hostName): logging.info("handleRemoteEvents: (at least) node %s has still resources running -> wait" % hostName) allNodesStopped = False break elif state in (pcsNodeState.AVAILABLE, pcsNodeState.IN_EVENT, pcsNodeState.ON_HOLD): logging.info("handleRemoteEvents: node %s is still %s -> remote event needs to be picked up locally" % (hostName, state.name)) allNodesStopped = False break if allNodesStopped: logging.info("handleRemoteEvents: nodes %s are stopped -> add remote event %s to force list" % (str(e.Resources), e.EventId)) for n in e.Resources: hostName = pcsHelper.getHostNameFromAzName(n) if eventIDsForNode.has_key(hostName): eventIDsForNode[hostName].append(e.EventId) else: eventIDsForNode[hostName] = [e.EventId] elif e.EventStatus == "Started": logging.info("handleRemoteEvents: remote event already started") # force the start of all events whose nodes are ready (i.e. 
have no more resources running) if len(eventIDsForNode.keys()) > 0: eventIDsToForce = set([item for sublist in eventIDsForNode.values() for item in sublist]) logging.info("handleRemoteEvents: set nodes %s to IN_EVENT; force remote events %s" % (str(eventIDsForNode.keys()), str(eventIDsToForce))) for n in eventIDsForNode.keys(): self.updateNodeStateAndEvents(pcsNodeState.IN_EVENT, eventIDsForNode[n], node=n) azHelper.forceEvents(eventIDsToForce) logging.debug("handleRemoteEvents: finished") return def handleLocalEvents(self, azEvents): """ Handle a list of own events (as provided by Azure Metadata Service) """ logging.debug("handleLocalEvents: begin; hostName = %s, azEvents = %s" % (self.hostName, str(azEvents))) azEventIDs = set() for e in azEvents: azEventIDs.add(e.EventId) curState = self.getState() pcsEventIDs = self.getEventIDs() mayUpdateDocVersion = False logging.info("handleLocalEvents: current state = %s; pending local pcsEvents = %s" % (curState.name, str(pcsEventIDs))) # check if there are currently/still events set for the node if pcsEventIDs: # there are pending events set, so our state must be STOPPING or IN_EVENT i = 0; touchedEventIDs = False while i < len(pcsEventIDs): # clean up pending events that are already finished according to AZ if pcsEventIDs[i] not in azEventIDs: logging.info("handleLocalEvents: remove finished local pcsEvent %s" % (pcsEventIDs[i])) pcsEventIDs.pop(i) touchedEventIDs = True else: i += 1 if len(pcsEventIDs) > 0: # there are still pending events (either because we're still stopping, or because the event is still in place) # either way, we need to wait if touchedEventIDs: logging.info("handleLocalEvents: added new local pcsEvent %s" % str(pcsEventIDs)) self.setEventIDs(pcsEventIDs) else: logging.info("handleLocalEvents: no local pcsEvents were updated") else: # there are no more pending events left after cleanup if pcsHelper.noPendingResourcesOnNode(self.hostName): # and no pending resources on the node -> set it back online 
logging.info("handleLocalEvents: all local events finished -> clean up, put node online and AVAILABLE") curState = self.updateNodeStateAndEvents(pcsNodeState.AVAILABLE, None) self.putNodeOnline() pcsHelper.removeHoldFromNodes() # repeat handleLocalEvents() since we changed status to AVAILABLE else: logging.info("handleLocalEvents: all local events finished, but some resources have not completed startup yet -> wait") else: # there are no pending events set for us (yet) if curState == pcsNodeState.AVAILABLE: if len(azEventIDs) > 0: if pcsHelper.otherNodesAvailable(self): logging.info("handleLocalEvents: can handle local events %s -> set state STOPPING" % (str(azEventIDs))) # this will also set mayUpdateDocVersion = True curState = self.updateNodeStateAndEvents(pcsNodeState.STOPPING, azEventIDs) else: logging.info("handleLocalEvents: cannot handle azEvents %s (only node available) -> set state ON_HOLD" % str(azEventIDs)) self.setState(pcsNodeState.ON_HOLD) else: logging.info("handleLocalEvents: no local azEvents to handle") if curState == pcsNodeState.STOPPING: if pcsHelper.noPendingResourcesOnNode(self.hostName): logging.info("handleLocalEvents: all local resources are started properly -> put node standby") self.putNodeStandby() mayUpdateDocVersion = True else: logging.info("handleLocalEvents: some local resources are not clean yet -> wait") logging.debug("handleLocalEvents: finished; mayUpdateDocVersion = %s" % str(mayUpdateDocVersion)) return mayUpdateDocVersion ############################################################################## class raGlobalPullState(Enum): """ - Pull state to avoid two AzEvents resource agents pulling from Azure Metadata Service API concurrently + Pull state to avoid two azure-events resource agents pulling from Azure Metadata Service API concurrently """ IDLE = 0 PULLING = 1 ############################################################################## class raConfig: verbose = None relevantEventTypes = default_relevantEventTypes 
##############################################################################

class raAzEvents:
    """
    Main class for resource agent
    """
    def __init__(self, config):
        # pcsNode wraps this node's Pacemaker state/attribute handling
        self.node = pcsNode(self)
        self.config = config

    def monitor(self):
        """
        OCF monitor action: pull Scheduled Events from Azure Metadata Service
        (guarded by a cluster-wide pull-state attribute so only one node pulls
        at a time), then dispatch local and remote events.

        Always returns OCF_SUCCESS, even when giving up after too many
        pull attempts -- the agent is "healthy", it just could not pull.
        """
        logging.debug("monitor: begin")
        # NOTE(review): "Attemps" typo kept as-is (local name only)
        pullFailedAttemps = 0
        while True:
            # check if another node is pulling at the same time;
            # this should only be a concern for the first pull, as setting up Scheduled Events may take up to 2 minutes.
            if pcsHelper.getAttr(attr_globalPullState) == raGlobalPullState.PULLING.name:
                pullFailedAttemps += 1
                if pullFailedAttemps == global_pullMaxAttempts:
                    logging.warning("monitor: exceeded maximum number of attempts (%d) to pull events" % global_pullMaxAttempts)
                    logging.debug("monitor: finished")
                    return OCF_SUCCESS
                else:
                    logging.info("monitor: another node is pulling; retry in %d seconds" % global_pullDelaySecs)
                    time.sleep(global_pullDelaySecs)
                    continue

            # we can pull safely from Azure Metadata Service
            pcsHelper.setAttr(attr_globalPullState, raGlobalPullState.PULLING.name)
            events = azHelper.pullScheduledEvents()
            pcsHelper.setAttr(attr_globalPullState, raGlobalPullState.IDLE.name)

            # get current document version
            curDocVersion = events.DocumentIncarnation
            lastDocVersion = self.node.getAttr(attr_lastDocVersion)
            logging.info("monitor: lastDocVersion = %s; curDocVersion = %s" % (lastDocVersion, curDocVersion))

            # split events local/remote
            (localEvents, remoteEvents) = self.node.separateEvents(events.Events)

            # ensure local events are only executing once
            if curDocVersion != lastDocVersion:
                logging.info("monitor: curDocVersion has not been handled yet")
                # handleLocalEvents() returns True if mayUpdateDocVersion is True;
                # this is only the case if we can ensure there are no pending events
                if self.node.handleLocalEvents(localEvents):
                    logging.info("monitor: handleLocalEvents completed successfully -> update curDocVersion")
                    self.node.setAttr(attr_lastDocVersion, curDocVersion)
                else:
                    logging.info("monitor: handleLocalEvents still waiting -> keep curDocVersion")
            else:
                logging.info("monitor: already handled curDocVersion, skip")

            # remove orphaned remote events and then handle the remaining remote events
            self.node.removeOrphanedEvents(remoteEvents)
            self.node.handleRemoteEvents(remoteEvents)
            break
        logging.debug("monitor: finished")
        return OCF_SUCCESS

##############################################################################
##############################################################################

def help():
    # NOTE(review): shadows the builtin help(); kept since the agent's action
    # dispatch in main() calls it by this name
    print("""This resource agent implements a monitor for scheduled (maintenance) events for a Microsoft Azure VM.

If any relevant events are found, it moves all Pacemaker resources away from the affected node to allow for a graceful shutdown.

	Usage:
		azure-events [eventTypes=] [verbose=]

		action (required): Supported values: monitor, help, meta-data
		eventTypes (optional): List of event types to be considered relevant by the resource agent (comma-separated).
				Supported values: Freeze,Reboot,Redeploy
				Default = Reboot,Redeploy
		verbose (optional): If set to true, displays debug info.
				Default = false

	Deployment:
		crm configure primitive rsc_azure-events ocf:heartbeat:azure-events \
			op monitor interval=10s
		crm configure clone cln_azure-events rsc_azure-events

For further information on Microsoft Azure Scheduled Events, please refer to the following documentation:
https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events""")

def metadata():
    # Print the OCF meta-data for this agent.
    # NOTE(review): the XML markup of this meta-data string appears to have been
    # stripped/garbled in this copy (only the text content survived) -- restore
    # the full <resource-agent> XML from upstream before shipping.
    print("""
%s
Resource agent to handle Microsoft Azure Scheduled Events

The azure-events resource agent is to be used nodes inside a Pacemaker cluster that run Microsoft Azure. It periodically checks if maintenance events (for example, reboots or redploys) are scheduled and takes preemptive action by moving all resources away from the affected node.

A comma-separated list of event types that will be handled by this resource agent. (Possible values: Freeze,Reboot,Redeploy; Default = Reboot,Redeploy)

List of resources to be considered
""" % VERSION)

def getConfig():
    # get resource agent config via env variables
    config = raConfig()
    verbose = os.environ.get("OCF_RESKEY_verbose")
    if verbose and verbose.lower() == "true":
        config.verbose = True
    relevantEventTypes = os.environ.get("OCF_RESKEY_eventTypes")
    if relevantEventTypes:
        config.relevantEventTypes = set(relevantEventTypes.split(","))
    return config

def setLoglevel(verbose):
    # set up writing into syslog
    # verbose also enables HTTP-level debug output for Metadata Service requests
    if verbose:
        opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel = 1))
        urllib2.install_opener(opener)
        loglevel = logging.DEBUG
    else:
        loglevel = default_loglevel
    logging.getLogger().setLevel(loglevel)
    logging.getLogger().addHandler(SyslogLibHandler())
    logging.getLogger().addHandler(logging.StreamHandler(sys.stderr))
    return

def main():
    # Entry point: parse config/action, dispatch, and exit with the OCF result code.
    config = getConfig()
    setLoglevel(config.verbose)
    result = OCF_ERR_UNIMPLEMENTED
    action = sys.argv[1].lower() if len(sys.argv) > 1 else None
    logging.debug("main: begin; action = %s" % action)
    if action == "meta-data":
        # NOTE(review): metadata() has no return statement, so result becomes
        # None here; sys.exit(None) still exits with status 0 -- confirm intended
        result = metadata()
    elif action == "help":
        help()
    elif action:
        ra = raAzEvents(config)
        if action == "monitor":
            result = ra.monitor()
        elif action in ("start", "stop"):
            # start/stop are no-ops for this agent
            result = OCF_SUCCESS
        else:
            logging.error("main: Unsupported action %s" % action)
    logging.debug("main: finished; result = %s" % result)
    sys.exit(result)

if __name__ == '__main__':
    main()