diff --git a/configure.ac b/configure.ac
index f85d2638ef..364f339be8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,1811 +1,1811 @@
 dnl
 dnl autoconf for Pacemaker
 dnl
 dnl Copyright 2009-2018 Andrew Beekhof <andrew@beekhof.net>
 dnl
 dnl This source code is licensed under the GNU General Public License version 2
 dnl or later (GPLv2+) WITHOUT ANY WARRANTY.
 
 dnl ===============================================
 dnl Bootstrap
 dnl ===============================================
 AC_PREREQ(2.64)
 
 AC_CONFIG_MACRO_DIR([m4])
 AC_DEFUN([AC_DATAROOTDIR_CHECKED])
 
 dnl Suggested structure:
 dnl     information on the package
 dnl     checks for programs
 dnl     checks for libraries
 dnl     checks for header files
 dnl     checks for types
 dnl     checks for structures
 dnl     checks for compiler characteristics
 dnl     checks for library functions
 dnl     checks for system services
 
 m4_include([version.m4])
 AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker],
         PCMK_URL)
 
 PCMK_FEATURES=""
 
 AC_CONFIG_AUX_DIR(.)
 AC_CANONICAL_HOST
 
 dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below)
 dnl
 dnl Internal header: include/config.h
 dnl   - Contains ALL defines
 dnl   - include/config.h.in is generated automatically by autoheader
 dnl   - NOT to be included in any header files except crm_internal.h
 dnl     (which is also not to be included in any other header files)
 dnl
 dnl External header: include/crm_config.h
 dnl   - Contains a subset of defines checked here
 dnl   - Manually edit include/crm_config.h.in to have configure include
 dnl     new defines
 dnl   - Should not include HAVE_* defines
 dnl   - Safe to include anywhere
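 dnl
 dnl As a quick illustration (hypothetical snippets, not taken from the tree):
 dnl a source file needing the full set of defines should only ever use
 dnl     #include <crm_internal.h>    /* pulls in include/config.h */
 dnl while all other code, including installed headers, can safely use
 dnl     #include <crm_config.h>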
 AC_CONFIG_HEADERS([include/config.h include/crm_config.h])
 
 AC_ARG_WITH(version,
     [  --with-version=version   Override package version (if you are a packager needing to pretend) ],
     [ PACKAGE_VERSION="$withval" ])
 
 AC_ARG_WITH(pkg-name,
     [  --with-pkg-name=name     Override package name (if you are a packager needing to pretend) ],
     [ PACKAGE_NAME="$withval" ])
 
 dnl 1.11:           minimum automake version required
 dnl foreign:        don't require GNU-standard top-level files
 dnl silent-rules:   allow "--enable-silent-rules" (no-op in 1.13+)
 dnl subdir-objects: keep .o's with their .c's (no-op in 2.0+)
 AM_INIT_AUTOMAKE([1.11 foreign silent-rules subdir-objects])
 
 dnl Example 2.4. Silent Custom Rule to Generate a File
 dnl %-bar.pc: %.pc
 dnl	$(AM_V_GEN)$(LN_S) $(notdir $^) $@
 
 AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION",
                    [Current pacemaker version])
 
 dnl Versioned attributes implementation is not yet production-ready
 AC_DEFINE_UNQUOTED(ENABLE_VERSIONED_ATTRS, 0, [Enable versioned attributes])
 
 PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'`
 AC_SUBST(PACKAGE_SERIES)
 AC_SUBST(PACKAGE_VERSION)
 
 CC_IN_CONFIGURE=yes
 export CC_IN_CONFIGURE
 
 LDD=ldd
 
 dnl ========================================================================
 dnl Compiler characteristics
 dnl ========================================================================
 
 AC_PROG_CC dnl Can force other with environment variable "CC".
 AC_PROG_CC_STDC
 gl_EARLY
 gl_INIT
 
 LT_INIT([dlopen])
 LTDL_INIT([convenience])
 
 AC_TYPE_SIZE_T
 AC_CHECK_SIZEOF(char)
 AC_CHECK_SIZEOF(short)
 AC_CHECK_SIZEOF(int)
 AC_CHECK_SIZEOF(long)
 AC_CHECK_SIZEOF(long long)
 
 dnl ===============================================
 dnl Helpers
 dnl ===============================================
 cc_supports_flag() {
     local CFLAGS="-Werror $@"
     AC_MSG_CHECKING(whether $CC supports "$@")
     AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])],
                       [RC=0; AC_MSG_RESULT(yes)],
                       [RC=1; AC_MSG_RESULT(no)])
     return $RC
 }
 
 # Some tests need to use their own CFLAGS
 
 cc_temp_flags() {
     ac_save_CFLAGS="$CFLAGS"
     CFLAGS="$*"
 }
 
 cc_restore_flags() {
     CFLAGS=$ac_save_CFLAGS
 }
 
 dnl ===============================================
 dnl Configure Options
 dnl ===============================================
 
 dnl Some systems, like Solaris, require a custom package name
 AC_ARG_WITH(pkgname,
     [  --with-pkgname=name     name for pkg (typically for Solaris) ],
     [ PKGNAME="$withval" ],
     [ PKGNAME="LXHAhb" ],
   )
 AC_SUBST(PKGNAME)
 
 AC_ARG_ENABLE([ansi],
     [  --enable-ansi  Force GCC to compile to ANSI standard for older compilers. @<:@no@:>@])
 
 AC_ARG_ENABLE([fatal-warnings],
     [  --enable-fatal-warnings  Enable pedantic and fatal warnings for gcc @<:@yes@:>@])
 
 AC_ARG_ENABLE([quiet],
     [  --enable-quiet  Suppress make output unless there is an error @<:@no@:>@])
 
 AC_ARG_ENABLE([no-stack],
     [  --enable-no-stack  Build only the scheduler and its requirements @<:@no@:>@])
 
 AC_ARG_ENABLE([upstart],
     [  --enable-upstart  Enable support for managing resources via Upstart @<:@try@:>@ ],
     [],
     [enable_upstart=try],
 )
 
 AC_ARG_ENABLE([systemd],
     [  --enable-systemd  Enable support for managing resources via systemd @<:@try@:>@],
     [],
     [enable_systemd=try],
 )
 
 AC_ARG_ENABLE(hardening,
     [  --enable-hardening  Harden the resulting executables/libraries @<:@try@:>@],
     [ HARDENING="${enableval}" ],
     [ HARDENING=try ],
 )
 
 # By default, we add symlinks at the pre-2.0.0 daemon name locations, so that:
 # (1) tools that directly invoke those names for metadata etc. will still work
 # (2) this installation can be used in a bundle container image used with
 #     cluster hosts running Pacemaker 1.1.17+
 # If you know your target systems will not have any need for it, you can
 # disable this option. Once the above use cases are no longer in wide use, we
 # can disable this option by default, and once we no longer want to support
 # them at all, we can drop the option altogether.
 AC_ARG_ENABLE(legacy-links,
     [  --enable-legacy-links  Add symlinks for old daemon names @<:@yes@:>@],
     [ LEGACY_LINKS="${enableval}" ],
     [ LEGACY_LINKS=yes ],
 )
 AM_CONDITIONAL(BUILD_LEGACY_LINKS, test "x${LEGACY_LINKS}" = "xyes")
 
 AC_ARG_WITH(corosync,
     [  --with-corosync  Support the Corosync messaging and membership layer ],
     [ SUPPORT_CS=$withval ],
     [ SUPPORT_CS=try ],
 )
 
 AC_ARG_WITH(nagios,
     [  --with-nagios  Support nagios remote monitoring ],
     [ SUPPORT_NAGIOS=$withval ],
     [ SUPPORT_NAGIOS=try ],
 )
 
 AC_ARG_WITH(nagios-plugin-dir,
     [  --with-nagios-plugin-dir=DIR  Directory for nagios plugins @<:@LIBEXECDIR/nagios/plugins@:>@],
     [ NAGIOS_PLUGIN_DIR="$withval" ]
 )
 
 AC_ARG_WITH(nagios-metadata-dir,
     [  --with-nagios-metadata-dir=DIR  Directory for nagios plugins metadata @<:@DATADIR/nagios/plugins-metadata@:>@],
     [ NAGIOS_METADATA_DIR="$withval" ]
 )
 
 AC_ARG_WITH(acl,
     [  --with-acl  Support CIB ACL ],
     [ SUPPORT_ACL=$withval ],
     [ SUPPORT_ACL=yes ],
 )
 
 AC_ARG_WITH(cibsecrets,
     [  --with-cibsecrets  Support separate file for CIB secrets ],
     [ SUPPORT_CIBSECRETS=$withval ],
     [ SUPPORT_CIBSECRETS=no ],
 )
 
 PCMK_GNUTLS_PRIORITIES="NORMAL"
 AC_ARG_WITH(gnutls-priorities,
     [  --with-gnutls-priorities  GnuTLS cipher priorities @<:@NORMAL@:>@ ],
     [ test x"$withval" = x"no" || PCMK_GNUTLS_PRIORITIES="$withval" ])
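 dnl For example (hypothetical invocation), a stricter priority string could be
 dnl selected at configure time with something like:
 dnl     ./configure --with-gnutls-priorities="SECURE128:-VERS-TLS1.0"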
 
 INITDIR=""
 AC_ARG_WITH(initdir,
     [  --with-initdir=DIR  Directory for init (rc) scripts],
     [ INITDIR="$withval" ])
 
 SUPPORT_PROFILING=0
 AC_ARG_WITH(profiling,
     [  --with-profiling  Disable optimizations for effective profiling ],
     [ SUPPORT_PROFILING=$withval ])
 
 AC_ARG_WITH(coverage,
     [  --with-coverage   Disable optimizations for effective test coverage ],
     [ SUPPORT_COVERAGE=$withval ])
 
 PUBLICAN_BRAND="common"
 AC_ARG_WITH(brand,
     [  --with-brand=brand  Brand to use for generated documentation (set empty for no docs) @<:@common@:>@],
     [ test x"$withval" = x"no" || PUBLICAN_BRAND="$withval" ])
 AC_SUBST(PUBLICAN_BRAND)
 
 CONFIGDIR=""
 AC_ARG_WITH(configdir,
     [  --with-configdir=DIR  Directory for Pacemaker configuration file @<:@SYSCONFDIR/sysconfig@:>@],
     [ CONFIGDIR="$withval" ]
 )
 
 CRM_LOG_DIR=""
 AC_ARG_WITH(logdir,
     [  --with-logdir=DIR  Directory for Pacemaker log file @<:@LOCALSTATEDIR/log/pacemaker@:>@ ],
     [ CRM_LOG_DIR="$withval" ]
 )
 
 CRM_BUNDLE_DIR=""
 AC_ARG_WITH(bundledir,
     [  --with-bundledir=DIR  Directory for Pacemaker bundle logs @<:@LOCALSTATEDIR/log/pacemaker/bundles@:>@ ],
     [ CRM_BUNDLE_DIR="$withval" ]
 )
 
 dnl ===============================================
 dnl General Processing
 dnl ===============================================
 
 AC_PROG_LN_S
 AC_PROG_MKDIR_P
 
 if cc_supports_flag -Werror; then
     WERROR="-Werror"
 else
     WERROR=""
 fi
 
 # Normalize enable_fatal_warnings (defaulting to yes when the compiler supports it)
 if test "x${enable_fatal_warnings}" != "xno" ; then
     if test "$GCC" = "yes" && test "x${WERROR}" != "x" ; then
         enable_fatal_warnings=yes
     else
         AC_MSG_NOTICE(Compiler does not support fatal warnings)
         enable_fatal_warnings=no
     fi
 fi
 
 INIT_EXT=""
 echo Our Host OS: $host_os/$host
 
 AC_MSG_NOTICE(Sanitizing prefix: ${prefix})
 case $prefix in
     NONE)
         prefix=/usr
         dnl Fix default variables - "prefix" variable if not specified
         if test "$localstatedir" = "\${prefix}/var"; then
             localstatedir="/var"
         fi
         if test "$sysconfdir" = "\${prefix}/etc"; then
             sysconfdir="/etc"
         fi
         ;;
 esac
 
 AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix})
 case $exec_prefix in
     prefix|NONE)
         exec_prefix=$prefix
         ;;
 esac
 
 AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR})
 case $INITDIR in
     prefix) INITDIR=$prefix;;
     "")
         AC_MSG_CHECKING(which init (rc) directory to use)
         for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \
             /usr/local/etc/rc.d /etc/rc.d
         do
             if
                 test -d $initdir
             then
                 INITDIR=$initdir
                 break
             fi
         done
         AC_MSG_RESULT($INITDIR)
         ;;
 esac
 AC_SUBST(INITDIR)
 
 AC_MSG_NOTICE(Sanitizing libdir: ${libdir})
 case $libdir in
     prefix|NONE)
         AC_MSG_CHECKING(which lib directory to use)
         for aDir in lib64 lib
         do
             trydir="${exec_prefix}/${aDir}"
             if
                 test -d ${trydir}
             then
                 libdir=${trydir}
                 break
             fi
         done
         AC_MSG_RESULT($libdir);
         ;;
 esac
 
 dnl Expand autoconf variables so that we don't end up with '${prefix}'
 dnl in #defines and python scripts
 dnl NOTE: Autoconf deliberately leaves them unexpanded to allow
 dnl    make exec_prefix=/foo install
 dnl No longer being able to do this seems like no great loss to me...
 
 eval prefix="`eval echo ${prefix}`"
 eval exec_prefix="`eval echo ${exec_prefix}`"
 eval bindir="`eval echo ${bindir}`"
 eval sbindir="`eval echo ${sbindir}`"
 eval libexecdir="`eval echo ${libexecdir}`"
 eval datadir="`eval echo ${datadir}`"
 eval sysconfdir="`eval echo ${sysconfdir}`"
 eval sharedstatedir="`eval echo ${sharedstatedir}`"
 eval localstatedir="`eval echo ${localstatedir}`"
 eval libdir="`eval echo ${libdir}`"
 eval includedir="`eval echo ${includedir}`"
 eval oldincludedir="`eval echo ${oldincludedir}`"
 eval infodir="`eval echo ${infodir}`"
 eval mandir="`eval echo ${mandir}`"
 
 dnl Home-grown variables
 eval INITDIR="${INITDIR}"
 eval docdir="`eval echo ${docdir}`"
 if test x"${docdir}" = x""; then
     docdir=${datadir}/doc/${PACKAGE}-${VERSION}
 fi
 AC_SUBST(docdir)
 if test x"${CONFIGDIR}" = x""; then
     CONFIGDIR="${sysconfdir}/sysconfig"
 fi
 AC_SUBST(CONFIGDIR)
 
 if test x"${CRM_LOG_DIR}" = x""; then
     CRM_LOG_DIR="${localstatedir}/log/pacemaker"
 fi
 AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file)
 AC_SUBST(CRM_LOG_DIR)
 
 if test x"${CRM_BUNDLE_DIR}" = x""; then
     CRM_BUNDLE_DIR="${localstatedir}/log/pacemaker/bundles"
 fi
 AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs)
 AC_SUBST(CRM_BUNDLE_DIR)
 
 
 if test x"${PCMK_GNUTLS_PRIORITIES}" = x""; then
     AC_MSG_ERROR([Empty string not applicable with --with-gnutls-priorities])
 fi
 AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"],
                    [GnuTLS cipher priorities])
 
 for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
     sharedstatedir localstatedir libdir includedir oldincludedir infodir \
     mandir INITDIR docdir CONFIGDIR
 do
     dirname=`eval echo '${'${j}'}'`
     if
         test ! -d "$dirname"
     then
         AC_MSG_WARN([$j directory ($dirname) does not exist!])
     fi
 done
 
 dnl This OS-based decision-making is poor autotools practice;
 dnl feature-based mechanisms are strongly preferred.
 dnl
 dnl So keep this section to a bare minimum; regard as a "necessary evil".
 
 case "$host_os" in
     *bsd*)
         AC_DEFINE_UNQUOTED(ON_BSD, 1, Compiling for BSD platform)
         LIBS="-L/usr/local/lib"
         CPPFLAGS="$CPPFLAGS -I/usr/local/include"
         INIT_EXT=".sh"
         ;;
     *solaris*)
         AC_DEFINE_UNQUOTED(ON_SOLARIS, 1, Compiling for Solaris platform)
         ;;
     *linux*)
         AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform)
         ;;
     darwin*)
         AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform)
         LIBS="$LIBS -L${prefix}/lib"
         CFLAGS="$CFLAGS -I${prefix}/include"
         ;;
 esac
 
 AC_SUBST(INIT_EXT)
 AC_MSG_NOTICE(Host CPU: $host_cpu)
 
 case "$host_cpu" in
     ppc64|powerpc64)
         case $CFLAGS in
          *powerpc64*)
              ;;
          *)
              if test "$GCC" = yes; then
                  CFLAGS="$CFLAGS -m64"
              fi
              ;;
         esac
         ;;
 esac
 
 AC_MSG_CHECKING(which format is needed to print uint64_t)
 
 cc_temp_flags "-Wall $WERROR"
 
 AC_COMPILE_IFELSE(
     [AC_LANG_PROGRAM(
         [
 #include <stdio.h>
 #include <stdint.h>
 #include <stdlib.h>
         ],
         [
 int max = 512;
 uint64_t bignum = 42;
 char *buffer = malloc(max);
 const char *random = "random";
 snprintf(buffer, max-1, "<quorum id=%lu quorate=%s/>", bignum, random);
 fprintf(stderr, "Result: %s\n", buffer);
         ]
     )],
     [U64T="%lu"],
     [U64T="%llu"]
 )
 cc_restore_flags
 
 AC_MSG_RESULT($U64T)
 AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t)
 
 dnl ===============================================
 dnl Program Paths
 dnl ===============================================
 
 PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
 export PATH
 
 dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROGS because LIBTOOL
 dnl was NOT being expanded all the time, thus causing things to fail.
 AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13)
 
 dnl Pacemaker's executable python scripts will invoke the python specified by
 dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a
 dnl built-in list with (unversioned) "python" having precedence. To configure
 dnl Pacemaker to use a specific python interpreter version, define PYTHON
 dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6
 
 dnl Ensure PYTHON is an absolute path
 AC_PATH_PROG([PYTHON], [$PYTHON])
 
 case "x$PYTHON" in
     x*python3*|x*platform-python*)
         dnl When used with Python 3, Pacemaker requires a minimum of 3.2
         AM_PATH_PYTHON([3.2])
         ;;
     *)
         dnl Otherwise, Pacemaker requires a minimum of 2.7
         AM_PATH_PYTHON([2.7])
         ;;
 esac
 
 AC_CHECK_PROGS(MAKE, gmake make)
 AC_PATH_PROGS(HTML2TXT, lynx w3m)
 AC_PATH_PROGS(HELP2MAN, help2man)
 AC_PATH_PROGS(POD2MAN, pod2man, pod2man)
 AC_PATH_PROGS(ASCIIDOC, asciidoc)
 AC_PATH_PROGS(PUBLICAN, publican)
 AC_PATH_PROGS(INKSCAPE, inkscape)
 AC_PATH_PROGS(XSLTPROC, xsltproc)
 AC_PATH_PROGS(XMLCATALOG, xmlcatalog)
 AC_PATH_PROGS(FOP, fop)
 AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh)
 AC_PATH_PROGS(SCP, scp, /usr/bin/scp)
 AC_PATH_PROGS(TAR, tar)
 AC_PATH_PROGS(MD5, md5)
 dnl BASH is already an environment variable, so use something else
 AC_PATH_PROG([BASH_PATH], [bash])
 AC_PATH_PROGS(TEST, test)
 PKG_PROG_PKG_CONFIG
 AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
 AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
 
 if test x"${LIBTOOL}" = x""; then
     AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE})
 fi
 if test x"${MAKE}" = x""; then
     AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE})
 fi
 
 dnl Bash is needed for building man pages and running regression tests
 if test x"${BASH_PATH}" = x""; then
     AC_MSG_ERROR(bash must be installed in order to build ${PACKAGE})
 fi
 
 AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"")
 if test x"${HELP2MAN}" != x""; then
     PCMK_FEATURES="$PCMK_FEATURES generated-manpages"
 fi
 
 MANPAGE_XSLT=""
 if test x"${XSLTPROC}" != x""; then
     AC_MSG_CHECKING(docbook to manpage transform)
     # first try to figure out correct template using xmlcatalog query,
     # resort to extensive (semi-deterministic) file search if that fails
     DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current'
     DOCBOOK_XSL_PATH='manpages/docbook.xsl'
     MANPAGE_XSLT=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI}/${DOCBOOK_XSL_PATH} \
                    | sed -n 's|^file://||p;q')
     if test x"${MANPAGE_XSLT}" = x""; then
         DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \
                -type d | LC_ALL=C sort)
         XSLT=$(basename ${DOCBOOK_XSL_PATH})
         for d in ${DIRS}; do
             if test -f "${d}/${XSLT}"; then
                  MANPAGE_XSLT="${d}/${XSLT}"
                  break
             fi
         done
     fi
 fi
 AC_MSG_RESULT($MANPAGE_XSLT)
 AC_SUBST(MANPAGE_XSLT)
 
 AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"")
 if test x"${MANPAGE_XSLT}" != x""; then
     PCMK_FEATURES="$PCMK_FEATURES agent-manpages"
 fi
 
 AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"")
 if test x"${ASCIIDOC}" != x""; then
     PCMK_FEATURES="$PCMK_FEATURES ascii-docs"
 fi
 
 publican_intree_brand=no
 if test x"${PUBLICAN_BRAND}" != x"" \
    && test x"${PUBLICAN}" != x"" \
    && test x"${INKSCAPE}" != x""; then
 
     dnl special handling for clusterlabs brand (possibly in-tree version used)
     test "${PUBLICAN_BRAND}" != "clusterlabs" \
         || test -d /usr/share/publican/Common_Content/clusterlabs
     if test $? -ne 0; then
         dnl i.e. "Unknown option: brand_dir" vs. "Option brand_dir requires an argument"
         if ${PUBLICAN} build --brand_dir 2>&1 | grep -Eq 'brand_dir$'; then
             AC_MSG_WARN([Cannot use in-tree clusterlabs brand, resorting to common])
             PUBLICAN_BRAND=common
         else
             publican_intree_brand=yes
         fi
     fi
     AC_MSG_NOTICE([Enabling Publican-generated documentation using ${PUBLICAN_BRAND} brand])
     PCMK_FEATURES="$PCMK_FEATURES publican-docs"
 fi
 AM_CONDITIONAL([BUILD_DOCBOOK],
                [test x"${PUBLICAN_BRAND}" != x"" \
                 && test x"${PUBLICAN}" != x"" \
                 && test x"${INKSCAPE}" != x""])
 AM_CONDITIONAL([PUBLICAN_INTREE_BRAND],
                [test x"${publican_intree_brand}" = x"yes"])
 
 dnl Pacemaker's shell scripts (and thus man page builders) rely on GNU getopt
 AC_MSG_CHECKING([for GNU-compatible getopt])
 IFS_orig=$IFS
 IFS=:
 for PATH_DIR in $PATH; do
     IFS=$IFS_orig
     GETOPT_PATH="${PATH_DIR}/getopt"
     if test -f "$GETOPT_PATH" && test -x "$GETOPT_PATH" ; then
         $GETOPT_PATH -T >/dev/null 2>/dev/null
         if test $? -eq 4; then
             break
         fi
     fi
     GETOPT_PATH=""
 done
 IFS=$IFS_orig
 if test -n "$GETOPT_PATH"; then
   AC_MSG_RESULT([$GETOPT_PATH])
 else
   AC_MSG_RESULT([no])
   AC_MSG_ERROR(Pacemaker build requires a GNU-compatible getopt)
 fi
 AC_SUBST([GETOPT_PATH])
 
 dnl ========================================================================
 dnl checks for library functions to replace them
 dnl
 dnl     NoSuchFunctionName:
 dnl             is a dummy function which no system supplies.  It is here to make
 dnl             the system compile semi-correctly on OpenBSD which doesn't know
 dnl             how to create an empty archive
 dnl
 dnl     scandir: Only on BSD.
 dnl             System-V systems may have it, but hidden and/or deprecated.
 dnl             A replacement function is supplied for it.
 dnl
 dnl     setenv: is some bsdish function that should also be avoided (use
 dnl             putenv instead)
 dnl             On the other hand, putenv doesn't provide the right API for the
 dnl             code and has memory leaks designed in (sigh...).  Fortunately,
 dnl             a replacement function is supplied for it.
 dnl
 dnl     strerror: returns a string that corresponds to an errno.
 dnl             A replacement function is supplied for it.
 dnl
 dnl     strnlen: is a gnu function similar to strlen, but safer.
 dnl            We wrote a tolerably-fast replacement function for it.
 dnl
 dnl     strndup: is a gnu function similar to strdup, but safer.
 dnl            We wrote a tolerably-fast replacement function for it.
 
 AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup)
 
 dnl ===============================================
 dnl Libraries
 dnl ===============================================
 AC_CHECK_LIB(socket, socket)                    dnl -lsocket
 AC_CHECK_LIB(c, dlopen)                         dnl if dlopen is in libc...
 AC_CHECK_LIB(dl, dlopen)                        dnl -ldl (for Linux)
 AC_CHECK_LIB(rt, sched_getscheduler)            dnl -lrt (for Tru64)
 AC_CHECK_LIB(gnugetopt, getopt_long)            dnl -lgnugetopt ( if available )
 AC_CHECK_LIB(pam, pam_start)                    dnl -lpam (if available)
 
 AC_CHECK_FUNCS([sched_setscheduler])
 
 AC_CHECK_LIB(uuid, uuid_parse)                  dnl load the library if necessary
 AC_CHECK_FUNCS(uuid_unparse)                    dnl OSX ships uuid_* as standard functions
 
 AC_CHECK_HEADERS(uuid/uuid.h)
 
 if test "x$ac_cv_func_uuid_unparse" != xyes; then
     AC_MSG_ERROR(You do not have the libuuid development package installed)
 fi
 
 if test x"${PKG_CONFIG}" = x""; then
     AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE})
 fi
 
 # Require glib 2.16.0 (2008-03) or later for g_hash_table_iter_init() etc.
 PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.16.0],
                   [CPPFLAGS="${CPPFLAGS} ${GLIB_CFLAGS}"
                    LIBS="${LIBS} ${GLIB_LIBS}"])
 
 #
 # Where is dlopen?
 #
 if test "$ac_cv_lib_c_dlopen" = yes; then
     LIBADD_DL=""
 elif test "$ac_cv_lib_dl_dlopen" = yes; then
     LIBADD_DL=-ldl
 else
     LIBADD_DL=${lt_cv_dlopen_libs}
 fi
 
 dnl FreeBSD needs -lcompat for ftime() used by lrmd.c
 AC_CHECK_LIB([compat], [ftime], [COMPAT_LIBS='-lcompat'])
 AC_SUBST(COMPAT_LIBS)
 
 dnl ========================================================================
 dnl Headers
 dnl ========================================================================
 
 dnl Some distributions insert #warnings into deprecated headers such as
 dnl timeb.h. If we are going to enable fatal warnings for the build, then enable
 dnl them for the header checks as well; otherwise the build could fail
 dnl even though the header check succeeds. (We should probably be doing
 dnl this in more places.)
 if test "x${enable_fatal_warnings}" = xyes ; then
     cc_temp_flags "$CFLAGS $WERROR"
 fi
 AC_CHECK_HEADERS(arpa/inet.h)
 AC_CHECK_HEADERS(ctype.h)
 AC_CHECK_HEADERS(dirent.h)
 AC_CHECK_HEADERS(errno.h)
 AC_CHECK_HEADERS(getopt.h)
 AC_CHECK_HEADERS(glib.h)
 AC_CHECK_HEADERS(grp.h)
 AC_CHECK_HEADERS(limits.h)
 AC_CHECK_HEADERS(linux/swab.h)
 AC_CHECK_HEADERS(malloc.h)
 AC_CHECK_HEADERS(netdb.h)
 AC_CHECK_HEADERS(netinet/in.h)
 AC_CHECK_HEADERS(netinet/ip.h)
 AC_CHECK_HEADERS(pwd.h)
 AC_CHECK_HEADERS(sgtty.h)
 AC_CHECK_HEADERS(signal.h)
 AC_CHECK_HEADERS(stdarg.h)
 AC_CHECK_HEADERS(stddef.h)
 AC_CHECK_HEADERS(stdio.h)
 AC_CHECK_HEADERS(stdlib.h)
 AC_CHECK_HEADERS(string.h)
 AC_CHECK_HEADERS(strings.h)
 AC_CHECK_HEADERS(sys/dir.h)
 AC_CHECK_HEADERS(sys/ioctl.h)
 AC_CHECK_HEADERS(sys/param.h)
 AC_CHECK_HEADERS(sys/reboot.h)
 AC_CHECK_HEADERS(sys/resource.h)
 AC_CHECK_HEADERS(sys/socket.h)
 AC_CHECK_HEADERS(sys/signalfd.h)
 AC_CHECK_HEADERS(sys/sockio.h)
 AC_CHECK_HEADERS(sys/stat.h)
 AC_CHECK_HEADERS(sys/time.h)
 AC_CHECK_HEADERS(sys/timeb.h)
 AC_CHECK_HEADERS(sys/types.h)
 AC_CHECK_HEADERS(sys/utsname.h)
 AC_CHECK_HEADERS(sys/wait.h)
 AC_CHECK_HEADERS(time.h)
 AC_CHECK_HEADERS(unistd.h)
 if test "x${enable_fatal_warnings}" = xyes ; then
     cc_restore_flags
 fi
 
 dnl These headers need prerequisites before the tests will pass
 dnl AC_CHECK_HEADERS(net/if.h)
 
 PKG_CHECK_MODULES(LIBXML2, [libxml-2.0],
                   [CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}"
                    LIBS="${LIBS} ${LIBXML2_LIBS}"])
 AC_CHECK_HEADERS(libxml/xpath.h)
 if test "$ac_cv_header_libxml_xpath_h" != "yes"; then
     AC_MSG_ERROR(libxml development headers not found)
 fi
 
 AC_CHECK_LIB(xslt, xsltApplyStylesheet, [],
              AC_MSG_ERROR(Unsupported libxslt library version))
 AC_CHECK_HEADERS(libxslt/xslt.h)
 if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then
     AC_MSG_ERROR(libxslt development headers not found)
 fi
 
 AC_CACHE_CHECK(whether __progname and __progname_full are available,
                pf_cv_var_progname,
                AC_TRY_LINK([extern char *__progname, *__progname_full;],
                            [__progname = "foo"; __progname_full = "foo bar";],
                            pf_cv_var_progname="yes", pf_cv_var_progname="no"))
 
 if test "$pf_cv_var_progname" = "yes"; then
     AC_DEFINE(HAVE___PROGNAME,1,[ ])
 fi
 
 dnl ========================================================================
 dnl Structures
 dnl ========================================================================
 
 AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
 AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include <lrm/lrm_api.h>]])
 AC_CHECK_MEMBER([struct dirent.d_type],
     AC_DEFINE(HAVE_STRUCT_DIRENT_D_TYPE,1,[Define this if struct dirent has d_type]),,
     [#include <dirent.h>])
 
 dnl ========================================================================
 dnl Functions
 dnl ========================================================================
 
 AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT,  1, [Have getopt function]))
 AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP,  1, [Have nanosleep function]))
 
 dnl ========================================================================
 dnl   bzip2
 dnl ========================================================================
 AC_CHECK_HEADERS(bzlib.h)
 AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress)
 
 if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then
     AC_MSG_ERROR(BZ2 libraries not found)
 fi
 
 if test x$ac_cv_header_bzlib_h != xyes; then
     AC_MSG_ERROR(BZ2 Development headers not found)
 fi
 
 dnl ========================================================================
 dnl sighandler_t is missing from Illumos, Solaris11 systems
 dnl ========================================================================
 
 AC_MSG_CHECKING([for sighandler_t])
 AC_TRY_COMPILE([#include <signal.h>],[sighandler_t *f;],
 has_sighandler_t=yes,has_sighandler_t=no)
 AC_MSG_RESULT($has_sighandler_t)
 if test "$has_sighandler_t" = "yes" ; then
     AC_DEFINE( HAVE_SIGHANDLER_T, 1, [Define if sighandler_t available] )
 fi
 
 dnl ========================================================================
 dnl   ncurses
 dnl ========================================================================
 dnl
 dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
 dnl Many non-Linux deliver "curses"; sites may add "ncurses".
 dnl
 dnl However, the source-code recommendation for both is to #include "curses.h"
 dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
 dnl
 dnl ncurses takes precedence.
 dnl
 AC_CHECK_HEADERS(curses.h)
 AC_CHECK_HEADERS(curses/curses.h)
 AC_CHECK_HEADERS(ncurses.h)
 AC_CHECK_HEADERS(ncurses/ncurses.h)
 
 dnl Although n-library is preferred, only look for it if the n-header was found.
 CURSESLIBS=''
 if test "$ac_cv_header_ncurses_h" = "yes"; then
     AC_CHECK_LIB(ncurses, printw,
                  [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
     CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
 fi
 
 if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then
     AC_CHECK_LIB(ncurses, printw,
                  [AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)])
     CURSESLIBS=`$PKG_CONFIG --libs ncurses` || CURSESLIBS='-lncurses'
 fi
 
 dnl Only look for non-n-library if there was no n-library.
 if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then
     AC_CHECK_LIB(curses, printw,
                  [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
 fi
 
 dnl Only look for non-n-library if there was no n-library.
 if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then
     AC_CHECK_LIB(curses, printw,
                  [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)])
 fi
 
 if test "x$CURSESLIBS" != "x"; then
     PCMK_FEATURES="$PCMK_FEATURES ncurses"
 fi
 
 dnl Check for printw() prototype compatibility
 if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual; then
     ac_save_LIBS=$LIBS
     LIBS="$CURSESLIBS"
     cc_temp_flags "-Wcast-qual $WERROR"
     # avoid broken test because of hardened build environment in Fedora 23+
     # - https://fedoraproject.org/wiki/Changes/Harden_All_Packages
     # - https://bugzilla.redhat.com/1297985
     if cc_supports_flag -fPIC; then
         CFLAGS="$CFLAGS -fPIC"
     fi
 
     AC_MSG_CHECKING(whether printw() requires argument of "const char *")
     AC_LINK_IFELSE(
         [AC_LANG_PROGRAM([
 #if defined(HAVE_NCURSES_H)
 #  include <ncurses.h>
 #elif defined(HAVE_NCURSES_NCURSES_H)
 #  include <ncurses/ncurses.h>
 #elif defined(HAVE_CURSES_H)
 #  include <curses.h>
 #endif
                          ],
                          [printw((const char *)"Test");]
         )],
         [ac_cv_compatible_printw=yes],
         [ac_cv_compatible_printw=no]
     )
 
     LIBS=$ac_save_LIBS
     cc_restore_flags
 
     AC_MSG_RESULT([$ac_cv_compatible_printw])
 
     if test "$ac_cv_compatible_printw" = no; then
         AC_MSG_WARN([The printw() function of your ncurses or curses library is old; we will disable usage of the library. If you want to use this library anyway, please update to a newer version of the library (ncurses 5.4 or later is recommended). You can get the library from http://www.gnu.org/software/ncurses/.])
         AC_MSG_NOTICE([Disabling curses])
         AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?])
     fi
 fi
 
 AC_SUBST(CURSESLIBS)
 
 dnl ========================================================================
 dnl    Profiling and GProf
 dnl ========================================================================
 
 AC_MSG_NOTICE(Old CFLAGS: $CFLAGS)
 case $SUPPORT_COVERAGE in
     1|yes|true)
         SUPPORT_PROFILING=1
         PCMK_FEATURES="$PCMK_FEATURES coverage"
         CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage"
         dnl During linking, make sure to specify -lgcov or -coverage
         ;;
 esac
 
 case $SUPPORT_PROFILING in
     1|yes|true)
         SUPPORT_PROFILING=1
 
         dnl Disable various compiler optimizations
         CFLAGS="$CFLAGS -fno-omit-frame-pointer -fno-inline -fno-builtin "
         dnl CFLAGS="$CFLAGS -fno-inline-functions -fno-default-inline -fno-inline-functions-called-once -fno-optimize-sibling-calls"
 
         dnl Turn off optimization so tools can get accurate line numbers
         CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g' -e 's/-D_FORTIFY_SOURCE=.\ //g'`
         CFLAGS="$CFLAGS -O0 -g3 -gdwarf-2"
 
         dnl Update features
         PCMK_FEATURES="$PCMK_FEATURES profile"
         ;;
      *)
         SUPPORT_PROFILING=0
         ;;
 esac
 AC_MSG_NOTICE(New CFLAGS: $CFLAGS)
 AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for profiling)
 
 dnl ========================================================================
 dnl    Cluster infrastructure - LibQB
 dnl ========================================================================
 
 if test x${enable_no_stack} = xyes; then
     SUPPORT_CS=no
 fi
 
 PKG_CHECK_MODULES(libqb, libqb >= 0.13)
 CPPFLAGS="$libqb_CFLAGS $CPPFLAGS"
 LIBS="$libqb_LIBS $LIBS"
 
 dnl libqb 0.14.0+ (2012-06)
 AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set)
 
 PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc"
 
 dnl libqb 0.17.0+ (2014-02)
 AC_CHECK_FUNCS(qb_ipcs_connection_get_buffer_size,
                AC_DEFINE(HAVE_IPCS_GET_BUFFER_SIZE, 1,
                           [Have qb_ipcs_connection_get_buffer_size function]))
 
 dnl libqb not yet released (as of 2018-05)
 CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_MAX_LINE_LEN])
 CHECK_ENUM_VALUE([qb/qblog.h],[qb_log_conf],[QB_LOG_CONF_ELLIPSIS])
 
 dnl Support Linux-HA fence agents if available
 if test "$cross_compiling" != "yes"; then
     CPPFLAGS="$CPPFLAGS -I${prefix}/include/heartbeat"
 fi
 AC_CHECK_HEADERS(stonith/stonith.h)
 if test "$ac_cv_header_stonith_stonith_h" = "yes";  then
     dnl On Debian, AC_CHECK_LIB fails if a library has any unresolved symbols
     dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb
     AC_CHECK_LIB(pils, PILLoadPlugin)
     AC_CHECK_LIB(plumb, G_main_add_IPC_Channel)
     PCMK_FEATURES="$PCMK_FEATURES lha-fencing"
 fi
 AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test "$ac_cv_header_stonith_stonith_h" = "yes"])
 
 dnl ===============================================
 dnl Variables needed for substitution
 dnl ===============================================
 CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker"
 AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
 AC_SUBST(CRM_SCHEMA_DIRECTORY)
 
 CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores"
 AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons)
 AC_SUBST(CRM_CORE_DIR)
 
 CRM_DAEMON_USER="hacluster"
 AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
 AC_SUBST(CRM_DAEMON_USER)
 
 CRM_DAEMON_GROUP="haclient"
 AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
 AC_SUBST(CRM_DAEMON_GROUP)
 
 CRM_STATE_DIR=${localstatedir}/run/crm
 AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets)
 AC_SUBST(CRM_STATE_DIR)
 
 CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker
 AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store data produced by Pacemaker daemons)
 AC_SUBST(CRM_PACEMAKER_DIR)
 
 CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
 AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps)
 AC_SUBST(CRM_BLACKBOX_DIR)
 
 PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
 AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs)
 AC_SUBST(PE_STATE_DIR)
 
 CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
 AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files)
 AC_SUBST(CRM_CONFIG_DIR)
 
 CRM_CONFIG_CTS="${localstatedir}/lib/pacemaker/cts"
 AC_DEFINE_UNQUOTED(CRM_CONFIG_CTS,"$CRM_CONFIG_CTS", Where to keep cts stateful data)
 AC_SUBST(CRM_CONFIG_CTS)
 
 CRM_DAEMON_DIR="${libexecdir}/pacemaker"
 AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
 AC_SUBST(CRM_DAEMON_DIR)
 
 HA_STATE_DIR="${localstatedir}/run"
 AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where sbd keeps its PID file)
 AC_SUBST(HA_STATE_DIR)
 
 CRM_RSCTMP_DIR="${localstatedir}/run/resource-agents"
 AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files)
 AC_SUBST(CRM_RSCTMP_DIR)
 
 PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker"
 AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey)
 AC_SUBST(PACEMAKER_CONFIG_DIR)
 
 OCF_ROOT_DIR="/usr/lib/ocf"
 if test "X$OCF_ROOT_DIR" = X; then
     AC_MSG_ERROR(Could not locate OCF directory)
 fi
 AC_SUBST(OCF_ROOT_DIR)
 
 OCF_RA_DIR="$OCF_ROOT_DIR/resource.d"
 AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs)
 AC_SUBST(OCF_RA_DIR)
 
 RH_STONITH_DIR="$sbindir"
 AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents)
 AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir", Location for system binaries)
 
 RH_STONITH_PREFIX="fence_"
 AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents)
 
 AC_PATH_PROGS(GIT, git false)
 AC_MSG_CHECKING(build version)
 
 BUILD_VERSION=$Format:%h$
 if test $BUILD_VERSION != ":%h$"; then
     AC_MSG_RESULT(archive hash: $BUILD_VERSION)
 elif test -x $GIT -a -d .git; then
     BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
     AC_MSG_RESULT(git hash: $BUILD_VERSION)
 else
     # The current directory name makes a reasonable default
     # Most generated archives will include the hash or tag
     BASE=`basename $PWD`
     BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
     AC_MSG_RESULT(directory based hash: $BUILD_VERSION)
 fi
 
 AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
 AC_SUBST(BUILD_VERSION)
 
 HAVE_dbus=1
 PKG_CHECK_MODULES([DBUS], [dbus-1],
 		  [CPPFLAGS="${CPPFLAGS} ${DBUS_CFLAGS}"],
 		  [HAVE_dbus=0])
 AC_DEFINE_UNQUOTED(SUPPORT_DBUS, $HAVE_dbus, Support dbus)
 AM_CONDITIONAL(BUILD_DBUS, test $HAVE_dbus = 1)
 AC_CHECK_TYPES([DBusBasicValue],,,[[#include <dbus/dbus.h>]])
 
 if test "x${enable_systemd}" != xno; then
     if test $HAVE_dbus = 0; then
         if test "x${enable_systemd}" = xyes; then
             AC_MSG_FAILURE([cannot enable systemd without DBus])
         else
             enable_systemd=no
         fi
     fi
     if test "x${enable_systemd}" = xtry; then
         AC_MSG_CHECKING([for systemd version query result via dbus-send])
         ret=$({ dbus-send --system --print-reply \
                     --dest=org.freedesktop.systemd1 \
                     /org/freedesktop/systemd1 \
                     org.freedesktop.DBus.Properties.Get \
                     string:org.freedesktop.systemd1.Manager \
                     string:Version 2>/dev/null \
                 || echo "this borked"; } | tail -n1)
         # sanitize output a bit (we are interested just in the value, not the type);
         # ret is intentionally unquoted so as to normalize whitespace
         ret=$(echo ${ret} | cut -d' ' -f2-)
         AC_MSG_RESULT([${ret}])
         if test "x${ret}" != xborked \
            || systemctl --version 2>/dev/null | grep -q systemd; then
             enable_systemd=yes
         else
             enable_systemd=no
         fi
     fi
 fi
 
 AC_MSG_CHECKING([whether to enable support for managing resources via systemd])
 AC_MSG_RESULT([${enable_systemd}])
 HAVE_systemd=0
 if test "x${enable_systemd}" = xyes; then
     HAVE_systemd=1
     PCMK_FEATURES="$PCMK_FEATURES systemd"
 
     AC_MSG_CHECKING([for systemd path for system unit files])
     systemdunitdir="${systemdunitdir-}"
     PKG_CHECK_VAR([systemdunitdir], [systemd],
                   [systemdsystemunitdir], [], [systemdunitdir=no])
     AC_MSG_RESULT([${systemdunitdir}])
     if test "x${systemdunitdir}" = xno; then
         AC_MSG_FAILURE([cannot enable systemd when systemdunitdir unresolved])
     fi
 fi
 AC_SUBST(systemdunitdir)
 
 AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services)
 AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1)
 AC_SUBST(SUPPORT_SYSTEMD)
 
 if test "x${enable_upstart}" != xno; then
     if test $HAVE_dbus = 0; then
         if test "x${enable_upstart}" = xyes; then
             AC_MSG_FAILURE([cannot enable Upstart without DBus])
         else
             enable_upstart=no
         fi
     fi
     if test "x${enable_upstart}" = xtry; then
         AC_MSG_CHECKING([for Upstart version query result via dbus-send])
         ret=$({ dbus-send --system --print-reply --dest=com.ubuntu.Upstart \
                     /com/ubuntu/Upstart org.freedesktop.DBus.Properties.Get \
                     string:com.ubuntu.Upstart0_6 string:version 2>/dev/null \
                 || echo "this borked"; } | tail -n1)
         # sanitize output a bit (we are interested just in the value, not the type);
         # ret is intentionally unquoted so as to normalize whitespace
         ret=$(echo ${ret} | cut -d' ' -f2-)
         AC_MSG_RESULT([${ret}])
         if test "x${ret}" != xborked \
            || initctl --version 2>/dev/null | grep -q upstart; then
             enable_upstart=yes
         else
             enable_upstart=no
         fi
     fi
 fi
 AC_MSG_CHECKING([whether to enable support for managing resources via Upstart])
 AC_MSG_RESULT([${enable_upstart}])
 HAVE_upstart=0
 if test "x${enable_upstart}" = xyes; then
     HAVE_upstart=1
     PCMK_FEATURES="$PCMK_FEATURES upstart"
 fi
 
 AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services)
 AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1)
 AC_SUBST(SUPPORT_UPSTART)
 
 case $SUPPORT_NAGIOS in
     1|yes|true|try)
         SUPPORT_NAGIOS=1
         ;;
     *)
         SUPPORT_NAGIOS=0
         ;;
 esac
 
 if test $SUPPORT_NAGIOS = 1; then
     PCMK_FEATURES="$PCMK_FEATURES nagios"
 fi
 
 AC_DEFINE_UNQUOTED(SUPPORT_NAGIOS, $SUPPORT_NAGIOS, Support nagios plugins)
 AM_CONDITIONAL(BUILD_NAGIOS, test $SUPPORT_NAGIOS = 1)
 
 if test x"$NAGIOS_PLUGIN_DIR" = x""; then
     NAGIOS_PLUGIN_DIR="${libexecdir}/nagios/plugins"
 fi
 
 AC_DEFINE_UNQUOTED(NAGIOS_PLUGIN_DIR, "$NAGIOS_PLUGIN_DIR", Directory for nagios plugins)
 AC_SUBST(NAGIOS_PLUGIN_DIR)
 
 if test x"$NAGIOS_METADATA_DIR" = x""; then
     NAGIOS_METADATA_DIR="${datadir}/nagios/plugins-metadata"
 fi
 
 AC_DEFINE_UNQUOTED(NAGIOS_METADATA_DIR, "$NAGIOS_METADATA_DIR", Directory for nagios plugins metadata)
 AC_SUBST(NAGIOS_METADATA_DIR)
 
 STACKS=""
 CLUSTERLIBS=""
 
 dnl ========================================================================
 dnl    Cluster stack - Corosync
 dnl ========================================================================
 
 dnl Normalize the values
 case $SUPPORT_CS in
     1|yes|true)
         SUPPORT_CS=yes
         missingisfatal=1
         ;;
     try)
         missingisfatal=0
         ;;
     *)
         SUPPORT_CS=no
         ;;
 esac
 
 AC_MSG_CHECKING(for native corosync)
 COROSYNC_LIBS=""
 
 if test $SUPPORT_CS = no; then
     AC_MSG_RESULT(no (disabled))
     SUPPORT_CS=0
 else
     AC_MSG_RESULT($SUPPORT_CS)
     SUPPORT_CS=1
     PKG_CHECK_MODULES(cpg,    libcpg) dnl Fatal
     PKG_CHECK_MODULES(cfg,    libcfg) dnl Fatal
     PKG_CHECK_MODULES(cmap,   libcmap) dnl Fatal
     PKG_CHECK_MODULES(quorum, libquorum) dnl Fatal
     PKG_CHECK_MODULES(libcorosync_common, libcorosync_common) dnl Fatal
 
     CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS $cmap_CFLAGS $quorum_CFLAGS $libcorosync_common_CFLAGS"
     COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS $cmap_LIBS $quorum_LIBS $libcorosync_common_LIBS"
     CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS"
     STACKS="$STACKS corosync-native"
 fi
 
 AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS,    Support the Corosync messaging and membership layer)
 AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1)
 AC_SUBST(SUPPORT_COROSYNC)
 
 dnl
 dnl    Cluster stack - Sanity
 dnl
 
 if test x${enable_no_stack} = xyes; then
     AC_MSG_NOTICE(No cluster stack supported, building only the scheduler)
     PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack"
 else
     AC_MSG_CHECKING(for supported stacks)
     if test x"$STACKS" = x; then
         AC_MSG_FAILURE(You must support at least one cluster stack)
     fi
     AC_MSG_RESULT($STACKS)
     PCMK_FEATURES="$PCMK_FEATURES $STACKS"
 fi
 
 PCMK_FEATURES="$PCMK_FEATURES atomic-attrd"
 AC_SUBST(CLUSTERLIBS)
 
 dnl ========================================================================
 dnl    ACL
 dnl ========================================================================
 
 case $SUPPORT_ACL in
     1|yes|true)
         missingisfatal=1
         ;;
     try)
         missingisfatal=0
         ;;
     *)
         SUPPORT_ACL=no
         ;;
 esac
 
 AC_MSG_CHECKING(for acl support)
 if test $SUPPORT_ACL = no; then
     AC_MSG_RESULT(no (disabled))
     SUPPORT_ACL=0
 else
     AC_MSG_RESULT($SUPPORT_ACL)
 
     SUPPORT_ACL=1
     AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set)
     if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then
         SUPPORT_ACL=0
     fi
 
     if test $SUPPORT_ACL = 0; then
         if test $missingisfatal = 0; then
             AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0)
         else
             AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0)
         fi
     fi
 fi
 
 if test $SUPPORT_ACL = 1; then
     PCMK_FEATURES="$PCMK_FEATURES acls"
 fi
 
 AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1")
 AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL)
 
 dnl ========================================================================
 dnl    CIB secrets
 dnl ========================================================================
 
 case $SUPPORT_CIBSECRETS in
     1|yes|true|try)
         SUPPORT_CIBSECRETS=1
         ;;
     *)
         SUPPORT_CIBSECRETS=0
         ;;
 esac
 
 AC_DEFINE_UNQUOTED(SUPPORT_CIBSECRETS, $SUPPORT_CIBSECRETS, Support CIB secrets)
 AM_CONDITIONAL(BUILD_CIBSECRETS, test $SUPPORT_CIBSECRETS = 1)
 
 if test $SUPPORT_CIBSECRETS = 1; then
     PCMK_FEATURES="$PCMK_FEATURES cibsecrets"
 
     LRM_CIBSECRETS_DIR="${localstatedir}/lib/pacemaker/lrm/secrets"
     AC_DEFINE_UNQUOTED(LRM_CIBSECRETS_DIR,"$LRM_CIBSECRETS_DIR", Location for CIB secrets)
     AC_SUBST(LRM_CIBSECRETS_DIR)
 fi
 
 dnl ========================================================================
 dnl    GnuTLS
 dnl ========================================================================
 
 dnl gnutls_priority_set_direct available since 2.1.7 (released 2007-11-29)
 AC_CHECK_LIB(gnutls, gnutls_priority_set_direct)
 if test "$ac_cv_lib_gnutls_gnutls_priority_set_direct" != ""; then
     AC_CHECK_HEADERS(gnutls/gnutls.h)
 fi
 
 dnl ========================================================================
 dnl    PAM
 dnl ========================================================================
 
 AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h)
 
 dnl ========================================================================
 dnl    System Health
 dnl ========================================================================
 
 dnl Check if servicelog development package is installed
 SERVICELOG=servicelog-1
 SERVICELOG_EXISTS="no"
 AC_MSG_CHECKING(for $SERVICELOG packages)
 if
     $PKG_CONFIG --exists $SERVICELOG
 then
     PKG_CHECK_MODULES([SERVICELOG], [servicelog-1])
     SERVICELOG_EXISTS="yes"
 fi
 AC_MSG_RESULT($SERVICELOG_EXISTS)
 AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")
 
 dnl Check if OpenIPMI packages and servicelog are installed
 OPENIPMI="OpenIPMI OpenIPMIposix"
 OPENIPMI_SERVICELOG_EXISTS="no"
 AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages)
 if
     $PKG_CONFIG --exists $OPENIPMI $SERVICELOG
 then
     PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix])
     OPENIPMI_SERVICELOG_EXISTS="yes"
 fi
 AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
 AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")
 
 dnl ========================================================================
 dnl Compiler flags
 dnl ========================================================================
 
 dnl Make sure that CFLAGS is not exported. If the user did
 dnl not have CFLAGS in their environment then this should have
 dnl no effect. However if CFLAGS was exported from the user's
 dnl environment, then the new CFLAGS will also be exported
 dnl to sub processes.
 if export | fgrep " CFLAGS=" > /dev/null; then
     SAVED_CFLAGS="$CFLAGS"
     unset CFLAGS
     CFLAGS="$SAVED_CFLAGS"
     unset SAVED_CFLAGS
 fi
 
 AC_ARG_VAR([CFLAGS_HARDENED_LIB], [extra C compiler flags for hardened libraries])
 AC_ARG_VAR([LDFLAGS_HARDENED_LIB], [extra linker flags for hardened libraries])
 
 AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executables])
 AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])
 
 CC_EXTRAS=""
 
 if test "$GCC" != yes; then
     CFLAGS="$CFLAGS -g"
 else
     CFLAGS="$CFLAGS -ggdb"
 
 dnl When we don't have diagnostic push / pull, we can't explicitly disable
 dnl checking for nonliteral formats in the places where they occur on purpose,
 dnl so we disable nonliteral format checking globally, as we are aborting
 dnl on warnings.
 dnl What makes things really ugly is that nonliteral format checking is
 dnl available as a separate switch in very modern gcc, but for older gcc
 dnl it is part of -Wformat=2.
 dnl So: if we have push/pull, we can enable -Wformat=2 -Wformat-nonliteral;
 dnl if we have -Wformat-nonliteral but not push/pull, we can enable only
 dnl -Wformat=2; otherwise neither.
 
     gcc_diagnostic_push_pull=no
     cc_temp_flags "$CFLAGS $WERROR"
     AC_MSG_CHECKING([for gcc diagnostic push / pull])
     AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
 #pragma GCC diagnostic push
 #pragma GCC diagnostic pop
                       ]])],
                       [
                           AC_MSG_RESULT([yes])
                           gcc_diagnostic_push_pull=yes
                       ], AC_MSG_RESULT([no]))
     cc_restore_flags
 
     if cc_supports_flag "-Wformat-nonliteral"; then
         gcc_format_nonliteral=yes
     else
         gcc_format_nonliteral=no
     fi
         
     # We had to eliminate -Wnested-externs because of libtool changes
     # Make sure to order options so that the former stand for prerequisites
     # of the latter (e.g., -Wformat-nonliteral requires -Wformat).
     EXTRA_FLAGS="-fgnu89-inline
                 -Wall
                 -Waggregate-return
                 -Wbad-function-cast
                 -Wcast-align
                 -Wdeclaration-after-statement
                 -Wendif-labels
                 -Wfloat-equal
                 -Wformat-security
                 -Wmissing-prototypes
                 -Wmissing-declarations
                 -Wnested-externs
                 -Wno-long-long
                 -Wno-strict-aliasing
                 -Wpointer-arith
                 -Wstrict-prototypes
                 -Wwrite-strings
                 -Wunused-but-set-variable
                 -Wunsigned-char"
 
     if test "x$gcc_diagnostic_push_pull" = "xyes"; then
         AC_DEFINE([GCC_FORMAT_NONLITERAL_CHECKING_ENABLED], [],
                   [gcc can complain about nonliterals in format])
         EXTRA_FLAGS="$EXTRA_FLAGS
                     -Wformat=2 
                     -Wformat-nonliteral"
     else
         if test "x$gcc_format_nonliteral" = "xyes"; then
             EXTRA_FLAGS="$EXTRA_FLAGS -Wformat=2"
         fi
     fi
 
 # Additional warnings it might be nice to enable one day
 #                -Wshadow
 #                -Wunreachable-code
     for j in $EXTRA_FLAGS
     do
         if
             cc_supports_flag $CC_EXTRAS $j
         then
             CC_EXTRAS="$CC_EXTRAS $j"
         fi
     done
 
     if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then
         AC_MSG_NOTICE(Enabling ANSI Compatibility)
         CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY"
     fi
 
     AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS})
 fi
 
 dnl
 dnl Hardening flags
 dnl
 dnl The prime control of whether to apply (targeted) hardening build flags, and
 dnl which ones, is the --{enable,disable}-hardening option passed to ./configure:
 dnl
 dnl --enable-hardening=try (default):
 dnl     depending on whether any of CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE,
 dnl     CFLAGS_HARDENED_LIB or LDFLAGS_HARDENED_LIB environment variables
 dnl     (see below) is set and non-null, all these custom flags (even if not
 dnl     set) are used as they are; otherwise, a best effort is made to offer
 dnl     reasonably strong hardening in several categories (RELRO, PIE,
 dnl     "bind now", stack protector) according to what the selected toolchain
 dnl     can offer
 dnl
 dnl --enable-hardening:
 dnl     same effect as --enable-hardening=try when the environment variables
 dnl     in question are suppressed
 dnl
 dnl --disable-hardening:
 dnl     do not apply any targeted hardening measures at all
 dnl
 dnl The user-injected environment variables that regulate the hardening in
 dnl default case are as follows:
 dnl
 dnl * CFLAGS_HARDENED_EXE, LDFLAGS_HARDENED_EXE
 dnl    compiler and linker flags (respectively) for daemon programs
 dnl    (pacemakerd, pacemaker-attrd, pacemaker-controld, pacemaker-execd,
 dnl    cib, stonithd, pacemaker-remoted, pacemaker-schedulerd)
 dnl
 dnl * CFLAGS_HARDENED_LIB, LDFLAGS_HARDENED_LIB
 dnl    compiler and linker flags (respectively) for libraries linked
 dnl    with the daemon programs
 dnl
 dnl Note that these are deliberately targeted variables (addressing particular
 dnl targets all over the scattered Makefiles) and have no effect outside of
 dnl that intended scope (e.g., on CLI utilities).  For a global reach,
 dnl use CFLAGS, LDFLAGS, etc. as usual.
 dnl
 dnl For guidance on the suitable flags consult, for instance:
 dnl https://fedoraproject.org/wiki/Changes/Harden_All_Packages#Detailed_Harden_Flags_Description
 dnl https://owasp.org/index.php/C-Based_Toolchain_Hardening#GCC.2FBinutils
 dnl
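 dnl
 dnl As a sketch (hypothetical invocation), the automatic detection can be
 dnl bypassed entirely by supplying the targeted variables by hand, e.g.:
 dnl     ./configure CFLAGS_HARDENED_EXE="-fPIE" \
 dnl                 LDFLAGS_HARDENED_EXE="-pie -Wl,-z,relro -Wl,-z,now"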
 
 if test "x${HARDENING}" != "xtry"; then
     unset CFLAGS_HARDENED_EXE
     unset CFLAGS_HARDENED_LIB
     unset LDFLAGS_HARDENED_EXE
     unset LDFLAGS_HARDENED_LIB
 fi
 if test "x${HARDENING}" = "xno"; then
     AC_MSG_NOTICE([Hardening: explicitly disabled])
 elif test "x${HARDENING}" = "xyes" \
      || test "$(env | grep -Ec '^(C|LD)FLAGS_HARDENED_(EXE|LIB)=.')" = 0; then
     dnl We'll figure out on our own...
     CFLAGS_HARDENED_EXE=
     CFLAGS_HARDENED_LIB=
     LDFLAGS_HARDENED_EXE=
     LDFLAGS_HARDENED_LIB=
     relro=0
     pie=0
     bindnow=0
     # daemons incl. libs: partial RELRO
     flag="-Wl,-z,relro"
     CC_CHECK_LDFLAGS(["${flag}"],
                      [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
                       LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}";
                       relro=1])
     # daemons: PIE for both CFLAGS and LDFLAGS
     if cc_supports_flag -fPIE; then
         flag="-pie"
         CC_CHECK_LDFLAGS(["${flag}"],
                          [CFLAGS_HARDENED_EXE="${CFLAGS_HARDENED_EXE} -fPIE";
                           LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
                           pie=1])
     fi
     # daemons incl. libs: full RELRO if sensible + as-needed linking
     #                     so as to possibly mitigate startup performance
     #                     hit caused by excessive linking with unneeded
     #                     libraries
     if test "${relro}" = 1 && test "${pie}" = 1; then
         flag="-Wl,-z,now"
         CC_CHECK_LDFLAGS(["${flag}"],
                          [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
                           LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}";
                           bindnow=1])
     fi
     if test "${bindnow}" = 1; then
         flag="-Wl,--as-needed"
         CC_CHECK_LDFLAGS(["${flag}"],
                          [LDFLAGS_HARDENED_EXE="${LDFLAGS_HARDENED_EXE} ${flag}";
                           LDFLAGS_HARDENED_LIB="${LDFLAGS_HARDENED_LIB} ${flag}"])
     fi
     # universal: prefer strong > all > default stack protector if possible
     flag=
     if cc_supports_flag -fstack-protector-strong; then
         flag="-fstack-protector-strong"
     elif cc_supports_flag -fstack-protector-all; then
         flag="-fstack-protector-all"
     elif cc_supports_flag -fstack-protector; then
         flag="-fstack-protector"
     fi
     if test -n "${flag}"; then
         CC_EXTRAS="${CC_EXTRAS} ${flag}"
         stackprot=1
     fi
     if test "${relro}" = 1 \
     || test "${pie}" = 1 \
     || test "${stackprot}" = 1; then
         AC_MSG_NOTICE([Hardening: relro=${relro} pie=${pie} bindnow=${bindnow} stackprot=${flag}])
     else
         AC_MSG_WARN([Hardening: no suitable features in the toolchain detected])
     fi
 else
     AC_MSG_NOTICE([Hardening: using custom flags])
 fi
 
 CFLAGS="$CFLAGS $CC_EXTRAS"
 
 NON_FATAL_CFLAGS="$CFLAGS"
 AC_SUBST(NON_FATAL_CFLAGS)
 
 dnl
 dnl We reset CFLAGS to include our warnings *after* all function
 dnl checking is done, so that our warning flags don't keep the
 dnl AC_*FUNCS() calls above from working.  In particular, -Werror will
 dnl *always* cause us trouble if we set it before this point.
 dnl
 dnl
 if test "x${enable_fatal_warnings}" = xyes ; then
     AC_MSG_NOTICE(Enabling Fatal Warnings)
     CFLAGS="$CFLAGS $WERROR"
 fi
 AC_SUBST(CFLAGS)
 
 dnl This is useful in Makefiles that need to remove one specific flag
 CFLAGS_COPY="$CFLAGS"
 AC_SUBST(CFLAGS_COPY)
 
 AC_SUBST(LIBADD_DL)        dnl extra flags for dynamic linking libraries
 AC_SUBST(LIBADD_INTL)        dnl extra flags for GNU gettext stuff...
 
 AC_SUBST(LOCALE)
 
 dnl Options for cleaning up the compiler output
 QUIET_LIBTOOL_OPTS=""
 QUIET_MAKE_OPTS=""
 if test "x${enable_quiet}" = "xyes"; then
     QUIET_LIBTOOL_OPTS="--quiet"
     QUIET_MAKE_OPTS="--quiet"
 fi
 
-AC_MSG_RESULT(Supress make details: ${enable_quiet})
+AC_MSG_RESULT(Suppress make details: ${enable_quiet})
 
 dnl Put the above variables to use
 LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
 MAKE="${MAKE} \$(QUIET_MAKE_OPTS)"
 
 AC_SUBST(CC)
 AC_SUBST(MAKE)
 AC_SUBST(LIBTOOL)
 AC_SUBST(QUIET_MAKE_OPTS)
 AC_SUBST(QUIET_LIBTOOL_OPTS)
 AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features)
 AC_SUBST(PCMK_FEATURES)
 
 dnl Files we output that need to be executable
 AC_CONFIG_FILES([cts/CTSlab.py], [chmod +x cts/CTSlab.py])
 AC_CONFIG_FILES([cts/LSBDummy], [chmod +x cts/LSBDummy])
 AC_CONFIG_FILES([cts/OCFIPraTest.py], [chmod +x cts/OCFIPraTest.py])
 AC_CONFIG_FILES([cts/cluster_test], [chmod +x cts/cluster_test])
 AC_CONFIG_FILES([cts/cts], [chmod +x cts/cts])
 AC_CONFIG_FILES([cts/cts-cli], [chmod +x cts/cts-cli])
 AC_CONFIG_FILES([cts/cts-coverage], [chmod +x cts/cts-coverage])
 AC_CONFIG_FILES([cts/cts-exec], [chmod +x cts/cts-exec])
 AC_CONFIG_FILES([cts/cts-fencing], [chmod +x cts/cts-fencing])
 AC_CONFIG_FILES([cts/cts-log-watcher], [chmod +x cts/cts-log-watcher])
 AC_CONFIG_FILES([cts/cts-regression], [chmod +x cts/cts-regression])
 AC_CONFIG_FILES([cts/cts-scheduler], [chmod +x cts/cts-scheduler])
 AC_CONFIG_FILES([cts/cts-support], [chmod +x cts/cts-support])
 AC_CONFIG_FILES([cts/lxc_autogen.sh], [chmod +x cts/lxc_autogen.sh])
 AC_CONFIG_FILES([cts/benchmark/clubench], [chmod +x cts/benchmark/clubench])
 AC_CONFIG_FILES([cts/fence_dummy], [chmod +x cts/fence_dummy])
 AC_CONFIG_FILES([cts/pacemaker-cts-dummyd], [chmod +x cts/pacemaker-cts-dummyd])
 AC_CONFIG_FILES([daemons/fenced/fence_legacy], [chmod +x daemons/fenced/fence_legacy])
 AC_CONFIG_FILES([extra/resources/ClusterMon],  [chmod +x extra/resources/ClusterMon])
 AC_CONFIG_FILES([extra/resources/HealthSMART], [chmod +x extra/resources/HealthSMART])
 AC_CONFIG_FILES([extra/resources/SysInfo],     [chmod +x extra/resources/SysInfo])
 AC_CONFIG_FILES([extra/resources/ifspeed],     [chmod +x extra/resources/ifspeed])
 AC_CONFIG_FILES([extra/resources/o2cb],        [chmod +x extra/resources/o2cb])
 AC_CONFIG_FILES([tools/crm_failcount], [chmod +x tools/crm_failcount])
 AC_CONFIG_FILES([tools/crm_master], [chmod +x tools/crm_master])
 AC_CONFIG_FILES([tools/crm_report], [chmod +x tools/crm_report])
 AC_CONFIG_FILES([tools/crm_standby], [chmod +x tools/crm_standby])
 AC_CONFIG_FILES([tools/cibsecret], [chmod +x tools/cibsecret])
 
 dnl Other files we output
 AC_CONFIG_FILES(Makefile                                            \
                 Doxyfile                                            \
                 cts/Makefile                                        \
                 cts/CTS.py                                          \
                 cts/CTSvars.py                                      \
                 cts/benchmark/Makefile                              \
                 cts/pacemaker-cts-dummyd@.service                   \
                 daemons/Makefile                                    \
                 daemons/attrd/Makefile                              \
                 daemons/based/Makefile                              \
                 daemons/controld/Makefile                           \
                 daemons/execd/Makefile                              \
                 daemons/execd/pacemaker_remote                      \
                 daemons/execd/pacemaker_remote.service              \
                 daemons/fenced/Makefile                             \
                 daemons/pacemakerd/Makefile                         \
                 daemons/pacemakerd/pacemaker                        \
                 daemons/pacemakerd/pacemaker.service                \
                 daemons/pacemakerd/pacemaker.upstart                \
                 daemons/pacemakerd/pacemaker.combined.upstart       \
                 daemons/schedulerd/Makefile                         \
                 doc/Makefile                                        \
                 doc/Clusters_from_Scratch/publican.cfg              \
                 doc/Pacemaker_Administration/publican.cfg           \
                 doc/Pacemaker_Development/publican.cfg              \
                 doc/Pacemaker_Explained/publican.cfg                \
                 doc/Pacemaker_Remote/publican.cfg                   \
                 extra/Makefile                                      \
                 extra/alerts/Makefile                               \
                 extra/resources/Makefile                            \
                 extra/logrotate/Makefile                            \
                 extra/logrotate/pacemaker                           \
                 include/Makefile                                    \
                 include/crm/Makefile                                \
                 include/crm/cib/Makefile                            \
                 include/crm/common/Makefile                         \
                 include/crm/cluster/Makefile                        \
                 include/crm/fencing/Makefile                        \
                 include/crm/pengine/Makefile                        \
                 replace/Makefile                                    \
                 lib/Makefile                                        \
                 lib/pacemaker.pc                                    \
                 lib/pacemaker-cib.pc                                \
                 lib/pacemaker-lrmd.pc                               \
                 lib/pacemaker-service.pc                            \
                 lib/pacemaker-pengine.pc                            \
                 lib/pacemaker-fencing.pc                            \
                 lib/pacemaker-cluster.pc                            \
                 lib/common/Makefile                                 \
                 lib/cluster/Makefile                                \
                 lib/cib/Makefile                                    \
                 lib/gnu/Makefile                                    \
                 lib/pengine/Makefile                                \
                 lib/transition/Makefile                             \
                 lib/fencing/Makefile                                \
                 lib/lrmd/Makefile                                   \
                 lib/services/Makefile                               \
                 tools/Makefile                                      \
                 tools/report.collector                              \
                 tools/report.common                                 \
                 tools/crm_mon.service                               \
                 tools/crm_mon.upstart                               \
                 xml/Makefile                                        \
 )
 
 dnl Now process the entire list of files added by previous
 dnl  calls to AC_CONFIG_FILES()
 AC_OUTPUT()
 
 dnl *****************
 dnl Configure summary
 dnl *****************
 
 AC_MSG_RESULT([])
 AC_MSG_RESULT([$PACKAGE configuration:])
 AC_MSG_RESULT([  Version                  = ${VERSION} (Build: $BUILD_VERSION)])
 AC_MSG_RESULT([  Features                 =${PCMK_FEATURES}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([  Prefix                   = ${prefix}])
 AC_MSG_RESULT([  Executables              = ${sbindir}])
 AC_MSG_RESULT([  Man pages                = ${mandir}])
 AC_MSG_RESULT([  Libraries                = ${libdir}])
 AC_MSG_RESULT([  Header files             = ${includedir}])
 AC_MSG_RESULT([  Arch-independent files   = ${datadir}])
 AC_MSG_RESULT([  State information        = ${localstatedir}])
 AC_MSG_RESULT([  System configuration     = ${sysconfdir}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([  HA group name            = ${CRM_DAEMON_GROUP}])
 AC_MSG_RESULT([  HA user name             = ${CRM_DAEMON_USER}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([  CFLAGS                   = ${CFLAGS}])
 AC_MSG_RESULT([  CFLAGS_HARDENED_EXE      = ${CFLAGS_HARDENED_EXE}])
 AC_MSG_RESULT([  CFLAGS_HARDENED_LIB      = ${CFLAGS_HARDENED_LIB}])
 AC_MSG_RESULT([  LDFLAGS_HARDENED_EXE     = ${LDFLAGS_HARDENED_EXE}])
 AC_MSG_RESULT([  LDFLAGS_HARDENED_LIB     = ${LDFLAGS_HARDENED_LIB}])
 AC_MSG_RESULT([  Libraries                = ${LIBS}])
 AC_MSG_RESULT([  Stack Libraries          = ${CLUSTERLIBS}])
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index d2c31c5793..253675db68 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1268 +1,1269 @@
 /*
  * Copyright 2013-2018 David Vossel <davidvossel@gmail.com>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 
 #include <pacemaker-controld.h>
 #include <controld_fsa.h>
 #include <controld_messages.h>
 #include <controld_callbacks.h>
 #include <controld_lrm.h>
 #include <crm/lrmd.h>
 #include <crm/services.h>
 
 #define REMOTE_LRMD_RA "remote"
 
 /* The max start timeout before cmd retry */
 #define MAX_START_TIMEOUT_MS 10000
 
 typedef struct remote_ra_cmd_s {
     /*! the local node the cmd is issued from */
     char *owner;
     /*! the remote node the cmd is executed on */
     char *rsc_id;
     /*! the action to execute */
     char *action;
     /*! some string the client wants us to give it back */
     char *userdata;
     char *exit_reason;          // descriptive text on error
     /*! start delay in ms */
     int start_delay;
     /*! timer id used for start delay. */
     int delay_id;
     /*! timeout in ms for cmd */
     int timeout;
     int remaining_timeout;
     /*! recurring interval in ms */
     guint interval_ms;
     /*! interval timer id */
     int interval_id;
     int reported_success;
     int monitor_timeout_id;
     int takeover_timeout_id;
     /*! action parameters */
     lrmd_key_value_t *params;
     /*! executed rc */
     int rc;
     int op_status;
     int call_id;
     time_t start_time;
     gboolean cancel;
 } remote_ra_cmd_t;
 
 enum remote_migration_status {
     expect_takeover = 1,
     takeover_complete,
 };
 
 typedef struct remote_ra_data_s {
     crm_trigger_t *work;
     remote_ra_cmd_t *cur_cmd;
     GList *cmds;
     GList *recurring_cmds;
 
     enum remote_migration_status migrate_status;
 
     gboolean active;
 
     /* Maintenance mode is difficult to determine from the controller's context,
      * so we have it signalled back with the transition from the scheduler.
      */
     gboolean is_maintenance;
 } remote_ra_data_t;
 
 static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
 static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
 static GList *fail_all_monitor_cmds(GList * list);
 
 static void
 free_cmd(gpointer user_data)
 {
     remote_ra_cmd_t *cmd = user_data;
 
     if (!cmd) {
         return;
     }
     if (cmd->delay_id) {
         g_source_remove(cmd->delay_id);
     }
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
     }
     if (cmd->monitor_timeout_id) {
         g_source_remove(cmd->monitor_timeout_id);
     }
     if (cmd->takeover_timeout_id) {
         g_source_remove(cmd->takeover_timeout_id);
     }
     free(cmd->owner);
     free(cmd->rsc_id);
     free(cmd->action);
     free(cmd->userdata);
     free(cmd->exit_reason);
     lrmd_key_value_freeall(cmd->params);
     free(cmd);
 }
 
 static int
 generate_callid(void)
 {
     static int remote_ra_callid = 0;
 
     remote_ra_callid++;
     if (remote_ra_callid <= 0) {
         remote_ra_callid = 1;
     }
 
     return remote_ra_callid;
 }
 
 static gboolean
 recurring_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->interval_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
 
         ra_data->cmds = g_list_append(ra_data->cmds, cmd);
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 static gboolean
 start_delay_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->delay_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node joining
  *
  * \param[in] node_name  Name of newly integrated pacemaker_remote node
  */
 static void
 remote_node_up(const char *node_name)
 {
     int call_opt, call_id = 0;
     xmlNode *update, *state;
     crm_node_t *node;
 
     CRM_CHECK(node_name != NULL, return);
     crm_info("Announcing pacemaker_remote node %s", node_name);
 
     /* Clear node's operation history. The node's transient attributes should
      * and normally will be cleared when the node leaves, but since remote node
      * state has a number of corner cases, clear them here as well, to be sure.
      */
     call_opt = crmd_cib_smart_opt();
     erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt);
     erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt);
 
     /* Clear node's probed attribute */
     update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
 
     /* Ensure node is in the remote peer cache with member status */
     node = crm_remote_peer_get(node_name);
     CRM_CHECK(node != NULL, return);
     crm_update_peer_state(__FUNCTION__, node, CRM_NODE_MEMBER, 0);
 
     /* pacemaker_remote nodes don't participate in the membership layer,
      * so cluster nodes don't automatically get notified when they come and go.
      * We send a cluster message to the DC, and update the CIB node state entry,
      * so the DC will get it sooner (via message) or later (via CIB refresh),
      * and any other interested parties can query the CIB.
      */
     send_remote_state_message(node_name, TRUE);
 
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     state = create_node_state_update(node, node_update_cluster, update,
                                      __FUNCTION__);
 
     /* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever
      * needs to be fenced, this flag will allow various actions to determine
      * whether the fencing has happened yet.
      */
     crm_xml_add(state, XML_NODE_IS_FENCED, "0");
 
     /* TODO: If the remote connection drops, and this (async) CIB update either
      * failed or has not yet completed, later actions could mistakenly think the
      * node has already been fenced (if the XML_NODE_IS_FENCED attribute was
      * previously set, because it won't have been cleared). This could prevent
      * actual fencing or allow recurring monitor failures to be cleared too
      * soon. Ideally, we wouldn't rely on the CIB for the fenced status.
      */
     fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
     if (call_id < 0) {
         crm_perror(LOG_WARNING, "%s CIB node state setup", node_name);
     }
     free_xml(update);
 }
 
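 /* Whether remote_node_down() should keep or also erase the node's resource
  * (LRM operation) history from the CIB
  */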
 enum down_opts {
     DOWN_KEEP_LRM,
     DOWN_ERASE_LRM
 };
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node leaving
  *
  * \param[in] node_name  Name of lost node
  * \param[in] opts       Whether to keep or erase LRM history
  */
 static void
 remote_node_down(const char *node_name, const enum down_opts opts)
 {
     xmlNode *update;
     int call_id = 0;
     int call_opt = crmd_cib_smart_opt();
     crm_node_t *node;
 
     /* Purge node from attrd's memory */
     update_attrd_remote_node_removed(node_name, NULL);
 
     /* Purge node's transient attributes */
     erase_status_tag(node_name, XML_TAG_TRANSIENT_NODEATTRS, call_opt);
 
     /* Normally, the LRM operation history should be kept until the node comes
      * back up. However, after a successful fence, we want to clear it, so we
      * don't think resources are still running on the node.
      */
     if (opts == DOWN_ERASE_LRM) {
         erase_status_tag(node_name, XML_CIB_TAG_LRM, call_opt);
     }
 
     /* Ensure node is in the remote peer cache with lost state */
     node = crm_remote_peer_get(node_name);
     CRM_CHECK(node != NULL, return);
     crm_update_peer_state(__FUNCTION__, node, CRM_NODE_LOST, 0);
 
     /* Notify DC */
     send_remote_state_message(node_name, FALSE);
 
     /* Update CIB node state */
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     create_node_state_update(node, node_update_cluster, update, __FUNCTION__);
     fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
     if (call_id < 0) {
         crm_perror(LOG_ERR, "%s CIB node state update", node_name);
     }
     free_xml(update);
 }
 
 /*!
  * \internal
  * \brief Handle effects of a remote RA command on node state
  *
  * \param[in] cmd  Completed remote RA command
  */
 static void
 check_remote_node_state(remote_ra_cmd_t *cmd)
 {
     /* Only successful actions can change node state */
     if (cmd->rc != PCMK_OCF_OK) {
         return;
     }
 
     if (safe_str_eq(cmd->action, "start")) {
         remote_node_up(cmd->rsc_id);
 
     } else if (safe_str_eq(cmd->action, "migrate_from")) {
         /* After a successful migration, we don't need to do remote_node_up()
          * because the DC already knows the node is up, and we don't want to
          * clear LRM history etc. We do need to add the remote node to this
          * host's remote peer cache, because (unless it happens to be DC)
          * it hasn't been tracking the remote node, and other code relies on
          * the cache to distinguish remote nodes from unseen cluster nodes.
          */
         crm_node_t *node = crm_remote_peer_get(cmd->rsc_id);
 
         CRM_CHECK(node != NULL, return);
         crm_update_peer_state(__FUNCTION__, node, CRM_NODE_MEMBER, 0);
 
     } else if (safe_str_eq(cmd->action, "stop")) {
         lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
         remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
 
         if (ra_data) {
             if (ra_data->migrate_status != takeover_complete) {
                 /* Stop means down if we didn't successfully migrate elsewhere */
                 remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
             } else if (AM_I_DC == FALSE) {
                 /* Only the connection host and DC track node state,
                  * so if the connection migrated elsewhere and we aren't DC,
                  * un-cache the node, so we don't have stale info
                  */
                 crm_remote_peer_cache_remove(cmd->rsc_id);
             }
         }
     }
 
     /* We don't do anything for successful monitors, which is correct for
      * routine recurring monitors, and for monitors on nodes where the
      * connection isn't supposed to be (the cluster will stop the connection in
      * that case). However, if the initial probe finds the connection already
      * active on the node where we want it, we probably should do
      * remote_node_up(). Unfortunately, we can't distinguish that case here.
      * Given that connections have to be initiated by the cluster, the chance of
      * that should be close to zero.
      */
 }
 
 static void
 report_remote_ra_result(remote_ra_cmd_t * cmd)
 {
     lrmd_event_data_t op = { 0, };
 
     check_remote_node_state(cmd);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = cmd->rsc_id;
     op.op_type = cmd->action;
     op.user_data = cmd->userdata;
     op.exit_reason = cmd->exit_reason;
     op.timeout = cmd->timeout;
     op.interval_ms = cmd->interval_ms;
     op.rc = cmd->rc;
     op.op_status = cmd->op_status;
     op.t_run = cmd->start_time;
     op.t_rcchange = cmd->start_time;
     if (cmd->reported_success && cmd->rc != PCMK_OCF_OK) {
         op.t_rcchange = time(NULL);
         /* This edge case will probably never occur, but if it does, the
          * result is that a failure will not be processed correctly. It is only
          * remotely possible because we can detect that a connection resource's
          * TCP connection has failed at any moment after start has completed.
          * The actual recurring operation is just a connectivity ping.
          *
          * Basically, we are not guaranteed that the first successful monitor op
          * and a subsequent failed monitor op will not occur at the same
          * timestamp, so we have to make it look like the operations occurred
          * at separate times. */
         if (op.t_rcchange == op.t_run) {
             op.t_rcchange++;
         }
     }
 
     if (cmd->params) {
         lrmd_key_value_t *tmp;
 
         op.params = crm_str_table_new();
         for (tmp = cmd->params; tmp; tmp = tmp->next) {
             g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value));
         }
 
     }
     op.call_id = cmd->call_id;
     op.remote_nodename = cmd->owner;
 
     lrm_op_callback(&op);
 
     if (op.params) {
         g_hash_table_destroy(op.params);
     }
 }
 
 static void
 update_remaining_timeout(remote_ra_cmd_t * cmd)
 {
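     /* For example (hypothetical numbers), a command with a 20000ms timeout
      * started 5 seconds ago has (20 - 5) * 1000 = 15000ms remaining; any
      * sub-second part of the original timeout is dropped by the integer
      * division.
      */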
     cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
 }
 
 static gboolean
 retry_start_cmd_cb(gpointer data)
 {
     lrm_state_t *lrm_state = data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd = NULL;
     int rc = -1;
 
     if (!ra_data || !ra_data->cur_cmd) {
         return FALSE;
     }
     cmd = ra_data->cur_cmd;
     if (safe_str_neq(cmd->action, "start") && safe_str_neq(cmd->action, "migrate_from")) {
         return FALSE;
     }
     update_remaining_timeout(cmd);
 
     if (cmd->remaining_timeout > 0) {
         rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
     }
 
     if (rc != 0) {
         cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
         cmd->op_status = PCMK_LRM_OP_ERROR;
         report_remote_ra_result(cmd);
 
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         ra_data->cur_cmd = NULL;
         free_cmd(cmd);
     } else {
         /* wait for connection event */
     }
 
     return FALSE;
 }
 
 
 static gboolean
 connection_takeover_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     crm_info("takeover event timed out for node %s", cmd->rsc_id);
     cmd->takeover_timeout_id = 0;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     handle_remote_ra_stop(lrm_state, cmd);
     free_cmd(cmd);
 
     return FALSE;
 }
 
 static gboolean
 monitor_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     crm_info("Timed out waiting for remote poke response from %s%s",
              cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
     cmd->monitor_timeout_id = 0;
     cmd->op_status = PCMK_LRM_OP_TIMEOUT;
     cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
 
     if (lrm_state && lrm_state->remote_ra_data) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         if (ra_data->cur_cmd == cmd) {
             ra_data->cur_cmd = NULL;
         }
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
     }
 
     report_remote_ra_result(cmd);
     free_cmd(cmd);
 
     if(lrm_state) {
         lrm_state_disconnect(lrm_state);
     }
     return FALSE;
 }
 
 static void
 synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
 {
     lrmd_event_data_t op = { 0, };
 
     if (lrm_state == NULL) {
         /* if lrm_state not given assume local */
         lrm_state = lrm_state_find(fsa_our_uname);
     }
     CRM_ASSERT(lrm_state != NULL);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = rsc_id;
     op.op_type = op_type;
     op.rc = PCMK_OCF_OK;
     op.op_status = PCMK_LRM_OP_DONE;
     op.t_run = time(NULL);
     op.t_rcchange = op.t_run;
     op.call_id = generate_callid();
     process_lrm_event(lrm_state, &op, NULL);
 }
 
 void
 remote_lrm_op_callback(lrmd_event_data_t * op)
 {
     gboolean cmd_handled = FALSE;
     lrm_state_t *lrm_state = NULL;
     remote_ra_data_t *ra_data = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
     crm_debug("remote connection event - event_type:%s node:%s action:%s rc:%s op_status:%s",
               lrmd_event_type2str(op->type),
               op->remote_nodename,
               op->op_type ? op->op_type : "none",
               services_ocf_exitcode_str(op->rc), services_lrm_status_str(op->op_status));
 
     lrm_state = lrm_state_find(op->remote_nodename);
     if (!lrm_state || !lrm_state->remote_ra_data) {
         crm_debug("No state information found for remote connection event");
         return;
     }
     ra_data = lrm_state->remote_ra_data;
 
     /* Another client has connected to the remote daemon;
      * determine whether this is expected. */
     if (op->type == lrmd_event_new_client) {
         /* great, we knew this was coming */
         if (ra_data->migrate_status == expect_takeover) {
             ra_data->migrate_status = takeover_complete;
         } else {
             crm_err("Unexpected pacemaker_remote client takeover for %s. Disconnecting", op->remote_nodename);
             /* In this case, lrmd_tls_connection_destroy() will be called under the control of mainloop. */
             /* Do not free lrm_state->conn yet. */
             /* It'll be freed in the following stop action. */
             lrm_state_disconnect_only(lrm_state);
         }
         return;
     }
 
     /* filter all EXEC events up */
     if (op->type == lrmd_event_exec_complete) {
         if (ra_data->migrate_status == takeover_complete) {
             crm_debug("ignoring event, this connection is taken over by another node");
         } else {
             lrm_op_callback(op);
         }
         return;
     }
 
     if ((op->type == lrmd_event_disconnect) &&
         (ra_data->cur_cmd == NULL) &&
         (ra_data->active == TRUE)) {
 
         if (!remote_ra_is_in_maintenance(lrm_state)) {
             crm_err("Lost connection to Pacemaker Remote node %s",
                     lrm_state->node_name);
             ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
             ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
         } else {
             crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
                        lrm_state->node_name);
             /* Do roughly what a 'stop' on the remote-resource would do */
             handle_remote_ra_stop(lrm_state, NULL);
             remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
             /* now fake the reply of a successful 'stop' */
             synthesize_lrmd_success(NULL, lrm_state->node_name, "stop");
         }
         return;
     }
 
     if (!ra_data->cur_cmd) {
         crm_debug("no event to match");
         return;
     }
 
     cmd = ra_data->cur_cmd;
 
     /* Start actions and migrate_from actions complete after the connection
      * comes back to us. */
     if (op->type == lrmd_event_connect && (safe_str_eq(cmd->action, "start") ||
                                            safe_str_eq(cmd->action, "migrate_from"))) {
 
         if (op->connection_rc < 0) {
             update_remaining_timeout(cmd);
 
             if (op->connection_rc == -ENOKEY) {
                 // Hard error, don't retry
                 cmd->op_status = PCMK_LRM_OP_ERROR;
                 cmd->rc = PCMK_OCF_INVALID_PARAM;
                 cmd->exit_reason = strdup("Authentication key not readable");
 
             } else if (cmd->remaining_timeout > 3000) {
                 crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
                 g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
                 return;
 
             } else {
                 crm_trace("can't reschedule start, remaining timeout too small %d",
                           cmd->remaining_timeout);
                 cmd->op_status = PCMK_LRM_OP_TIMEOUT;
                 cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
             }
 
         } else {
             lrm_state_reset_tables(lrm_state, TRUE);
             cmd->rc = PCMK_OCF_OK;
             cmd->op_status = PCMK_LRM_OP_DONE;
             ra_data->active = TRUE;
         }
 
         crm_debug("Remote connection event matched %s action", cmd->action);
         report_remote_ra_result(cmd);
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_poke && safe_str_eq(cmd->action, "monitor")) {
 
         if (cmd->monitor_timeout_id) {
             g_source_remove(cmd->monitor_timeout_id);
             cmd->monitor_timeout_id = 0;
         }
 
         /* Only report success the first time; after that, only worry about
          * failures. For this function, getting the poke response back is
          * always a success. Pokes only fail if the send fails or the response
          * times out. */
         if (!cmd->reported_success) {
             cmd->rc = PCMK_OCF_OK;
             cmd->op_status = PCMK_LRM_OP_DONE;
             report_remote_ra_result(cmd);
             cmd->reported_success = 1;
         }
 
         crm_debug("Remote poke event matched %s action", cmd->action);
 
         /* success, keep rescheduling if interval is present. */
         if (cmd->interval_ms && (cmd->cancel == FALSE)) {
             ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
             cmd->interval_id = g_timeout_add(cmd->interval_ms,
                                              recurring_helper, cmd);
             cmd = NULL;         /* prevent free */
         }
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_disconnect && safe_str_eq(cmd->action, "monitor")) {
         if (ra_data->active == TRUE && (cmd->cancel == FALSE)) {
             cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
             cmd->op_status = PCMK_LRM_OP_ERROR;
             report_remote_ra_result(cmd);
-            crm_err("remote-node %s unexpectedly disconneced during monitor operation", lrm_state->node_name);
+            crm_err("Remote connection to %s unexpectedly dropped during monitor",
+                    lrm_state->node_name);
         }
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_new_client && safe_str_eq(cmd->action, "stop")) {
 
         handle_remote_ra_stop(lrm_state, cmd);
         cmd_handled = TRUE;
 
     } else {
         crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
     }
 
     if (cmd_handled) {
         ra_data->cur_cmd = NULL;
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         free_cmd(cmd);
     }
 }
 
 static void
 handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
 {
     remote_ra_data_t *ra_data = NULL;
 
     CRM_ASSERT(lrm_state);
     ra_data = lrm_state->remote_ra_data;
 
     if (ra_data->migrate_status != takeover_complete) {
         /* delete pending ops whenever the remote connection is intentionally stopped */
         g_hash_table_remove_all(lrm_state->pending_ops);
     } else {
         /* we no longer hold the history if this connection has been migrated;
          * however, we keep the metadata cache for future use */
         lrm_state_reset_tables(lrm_state, FALSE);
     }
 
     ra_data->active = FALSE;
     lrm_state_disconnect(lrm_state);
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     ra_data->cmds = NULL;
     ra_data->recurring_cmds = NULL;
     ra_data->cur_cmd = NULL;
 
     if (cmd) {
         cmd->rc = PCMK_OCF_OK;
         cmd->op_status = PCMK_LRM_OP_DONE;
 
         report_remote_ra_result(cmd);
     }
 }
 
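 /*!
  * \internal
  * \brief Initiate an asynchronous connection for a remote RA start action
  *
  * \param[in] lrm_state   Connection state of the remote node
  * \param[in] cmd         Start (or migrate_from) command being executed
  * \param[in] timeout_ms  Requested timeout, capped at MAX_START_TIMEOUT_MS
  *                        for this connection attempt
  *
  * \return Result of lrm_state_remote_connect_async()
  */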
 static int
 handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
 {
     const char *server = NULL;
     lrmd_key_value_t *tmp = NULL;
     int port = 0;
     int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
 
     for (tmp = cmd->params; tmp; tmp = tmp->next) {
         if (safe_str_eq(tmp->key, "addr") || safe_str_eq(tmp->key, "server")) {
             server = tmp->value;
         }
         if (safe_str_eq(tmp->key, "port")) {
             port = atoi(tmp->value);
         }
     }
 
     return lrm_state_remote_connect_async(lrm_state, server, port, timeout_used);
 }
 
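 /*!
  * \internal
  * \brief Mainloop trigger callback that dispatches queued remote RA commands
  *
  * \param[in] user_data  Connection state (lrm_state_t *) whose queued
  *                       commands should be processed
  *
  * \return Always TRUE
  */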
 static gboolean
 handle_remote_ra_exec(gpointer user_data)
 {
     int rc = 0;
     lrm_state_t *lrm_state = user_data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd;
     GList *first = NULL;
 
     if (ra_data->cur_cmd) {
         /* still waiting on previous cmd */
         return TRUE;
     }
 
     while (ra_data->cmds) {
         first = ra_data->cmds;
         cmd = first->data;
         if (cmd->delay_id) {
             /* still waiting for start delay timer to trip */
             return TRUE;
         }
 
         ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
         g_list_free_1(first);
 
         if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) {
             ra_data->migrate_status = 0;
             rc = handle_remote_ra_start(lrm_state, cmd, cmd->timeout);
             if (rc == 0) {
                 /* take care of this later when we get async connection result */
                 crm_debug("Remote connection started, waiting for connect event");
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             } else {
                 crm_debug("connect failed, not expecting to match any connection event later");
                 cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
                 cmd->op_status = PCMK_LRM_OP_ERROR;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, "monitor")) {
 
             if (lrm_state_is_connected(lrm_state) == TRUE) {
                 rc = lrm_state_poke_connection(lrm_state);
                 if (rc < 0) {
                     cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
                     cmd->op_status = PCMK_LRM_OP_ERROR;
                 }
             } else {
                 rc = -1;
                 cmd->op_status = PCMK_LRM_OP_DONE;
                 cmd->rc = PCMK_OCF_NOT_RUNNING;
             }
 
             if (rc == 0) {
                 crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
                           cmd->rsc_id);
                 ra_data->cur_cmd = cmd;
                 cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
                 return TRUE;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, "stop")) {
 
             if (ra_data->migrate_status == expect_takeover) {
                 /* Briefly wait on stop for the takeover event to occur. If the
                  * takeover event does not occur during the wait period, that's
                  * fine; it just means the remote node's lrm_status section will
                  * be cleared, requiring all resources running on the remote
                  * node to be explicitly re-detected via probe actions. If the
                  * takeover does occur successfully, we can leave the status
                  * section intact. */
                 cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             }
 
             handle_remote_ra_stop(lrm_state, cmd);
 
         } else if (!strcmp(cmd->action, "migrate_to")) {
             ra_data->migrate_status = expect_takeover;
             cmd->rc = PCMK_OCF_OK;
             cmd->op_status = PCMK_LRM_OP_DONE;
             report_remote_ra_result(cmd);
         } else if (!strcmp(cmd->action, "reload")) {
             /* reloads are a no-op right now; add logic here when they become important */
             cmd->rc = PCMK_OCF_OK;
             cmd->op_status = PCMK_LRM_OP_DONE;
             report_remote_ra_result(cmd);
         }
 
         free_cmd(cmd);
     }
 
     return TRUE;
 }
 
 static void
 remote_ra_data_init(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = NULL;
 
     if (lrm_state->remote_ra_data) {
         return;
     }
 
     ra_data = calloc(1, sizeof(remote_ra_data_t));
     ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
     lrm_state->remote_ra_data = ra_data;
 }
 
 void
 remote_ra_cleanup(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
     if (!ra_data) {
         return;
     }
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
 
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     mainloop_destroy_trigger(ra_data->work);
     free(ra_data);
     lrm_state->remote_ra_data = NULL;
 }
 
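 /*!
  * \internal
  * \brief Check whether a resource is the pacemaker_remote connection agent
  *
  * \return TRUE if the agent is "remote" with provider "pacemaker", or if the
  *         resource ID matches a known lrm_state entry other than the local
  *         node, otherwise FALSE
  */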
 gboolean
 is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
 {
     if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
         return TRUE;
     }
     if (id && lrm_state_find(id) && safe_str_neq(id, fsa_our_uname)) {
         return TRUE;
     }
 
     return FALSE;
 }
 
 lrmd_rsc_info_t *
 remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
 {
     lrmd_rsc_info_t *info = NULL;
 
     if ((lrm_state_find(rsc_id))) {
         info = calloc(1, sizeof(lrmd_rsc_info_t));
 
         info->id = strdup(rsc_id);
         info->type = strdup(REMOTE_LRMD_RA);
         info->standard = strdup(PCMK_RESOURCE_CLASS_OCF);
         info->provider = strdup("pacemaker");
     }
 
     return info;
 }
 
 static gboolean
 is_remote_ra_supported_action(const char *action)
 {
     if (!action) {
         return FALSE;
     } else if (strcmp(action, "start") &&
                strcmp(action, "stop") &&
                strcmp(action, "reload") &&
                strcmp(action, "migrate_to") &&
                strcmp(action, "migrate_from") && strcmp(action, "monitor")) {
         return FALSE;
     }
 
     return TRUE;
 }
 
 static GList *
 fail_all_monitor_cmds(GList * list)
 {
     GList *rm_list = NULL;
     remote_ra_cmd_t *cmd = NULL;
     GListPtr gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms > 0) && safe_str_eq(cmd->action, "monitor")) {
             rm_list = g_list_append(rm_list, cmd);
         }
     }
 
     for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
 
         cmd->rc = PCMK_OCF_UNKNOWN_ERROR;
         cmd->op_status = PCMK_LRM_OP_ERROR;
         crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
                   cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
         report_remote_ra_result(cmd);
 
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
 
     /* frees only the list data, not the cmds */
     g_list_free(rm_list);
     return list;
 }
 
 static GList *
 remove_cmd(GList * list, const char *action, guint interval_ms)
 {
     remote_ra_cmd_t *cmd = NULL;
     GListPtr gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && safe_str_eq(cmd->action, action)) {
             break;
         }
         cmd = NULL;
     }
     if (cmd) {
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
     return list;
 }
 
 int
 remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
                  const char *action, guint interval_ms)
 {
     lrm_state_t *connection_rsc = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     connection_rsc = lrm_state_find(rsc_id);
     if (!connection_rsc || !connection_rsc->remote_ra_data) {
         return -EINVAL;
     }
 
     ra_data = connection_rsc->remote_ra_data;
     ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
     ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
                                          interval_ms);
     if (ra_data->cur_cmd &&
         (ra_data->cur_cmd->interval_ms == interval_ms) &&
         (safe_str_eq(ra_data->cur_cmd->action, action))) {
 
         ra_data->cur_cmd->cancel = TRUE;
     }
 
     return 0;
 }
 
 static remote_ra_cmd_t *
 handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
                    const char *userdata)
 {
     GList *gIter = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
     /* There are three places a potential duplicate monitor operation
      * could exist:
      * 1. the recurring_cmds list, where the op is waiting for its next interval
      * 2. the cmds list, where the op is queued to be executed immediately
      * 3. cur_cmd, which means the monitor op is in flight right now
      */
     if (interval_ms == 0) {
         return NULL;
     }
 
     if (ra_data->cur_cmd &&
         ra_data->cur_cmd->cancel == FALSE &&
         (ra_data->cur_cmd->interval_ms == interval_ms) &&
         safe_str_eq(ra_data->cur_cmd->action, "monitor")) {
 
         cmd = ra_data->cur_cmd;
         goto handle_dup;
     }
 
     for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && safe_str_eq(cmd->action, "monitor")) {
             goto handle_dup;
         }
     }
 
     for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && safe_str_eq(cmd->action, "monitor")) {
             goto handle_dup;
         }
     }
 
     return NULL;
 
 handle_dup:
 
     crm_trace("merging duplicate monitor cmd " CRM_OP_FMT,
               cmd->rsc_id, "monitor", interval_ms);
 
     /* update the userdata */
     if (userdata) {
        free(cmd->userdata);
        cmd->userdata = strdup(userdata);
     }
 
     /* if we've already reported success, generate a new call id */
     if (cmd->reported_success) {
         cmd->start_time = time(NULL);
         cmd->call_id = generate_callid();
         cmd->reported_success = 0;
     }
 
     /* if we have an interval_id set, that means we are in the process of
      * waiting for this cmd's next interval. Instead of waiting, cancel
      * the timer and execute the action immediately */
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
         cmd->interval_id = 0;
         recurring_helper(cmd);
     }
 
     return cmd;
 }
 
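 /*!
  * \internal
  * \brief Request that an action be executed for the remote connection resource
  *
  * Duplicate recurring monitors are merged with the already-queued command;
  * otherwise a new command is queued and the dispatcher is triggered.
  *
  * \return Call ID of the new or merged command, or -EINVAL on error
  */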
 int
 remote_ra_exec(lrm_state_t *lrm_state, const char *rsc_id, const char *action,
                const char *userdata, guint interval_ms,
                int timeout,     /* ms */
                int start_delay, /* ms */
                lrmd_key_value_t * params)
 {
     int rc = 0;
     lrm_state_t *connection_rsc = NULL;
     remote_ra_cmd_t *cmd = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     if (is_remote_ra_supported_action(action) == FALSE) {
         rc = -EINVAL;
         goto exec_done;
     }
 
     connection_rsc = lrm_state_find(rsc_id);
     if (!connection_rsc) {
         rc = -EINVAL;
         goto exec_done;
     }
 
     remote_ra_data_init(connection_rsc);
     ra_data = connection_rsc->remote_ra_data;
 
     cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
     if (cmd) {
        return cmd->call_id;
     }
 
     cmd = calloc(1, sizeof(remote_ra_cmd_t));
     cmd->owner = strdup(lrm_state->node_name);
     cmd->rsc_id = strdup(rsc_id);
     cmd->action = strdup(action);
     cmd->userdata = strdup(userdata);
     cmd->interval_ms = interval_ms;
     cmd->timeout = timeout;
     cmd->start_delay = start_delay;
     cmd->params = params;
     cmd->start_time = time(NULL);
 
     cmd->call_id = generate_callid();
 
     if (cmd->start_delay) {
         cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
     }
 
     ra_data->cmds = g_list_append(ra_data->cmds, cmd);
     mainloop_set_trigger(ra_data->work);
 
     return cmd->call_id;
   exec_done:
 
     lrmd_key_value_freeall(params);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Immediately fail all monitors of a remote node, if proxied here
  *
  * \param[in] node_name  Name of pacemaker_remote node
  */
 void
 remote_ra_fail(const char *node_name)
 {
     lrm_state_t *lrm_state = lrm_state_find(node_name);
 
     if (lrm_state && lrm_state_is_connected(lrm_state)) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         crm_info("Failing monitors on pacemaker_remote node %s", node_name);
         ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
         ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
     }
 }
 
 /* A guest node fencing implied by host fencing looks like:
  *
  *  <pseudo_event id="103" operation="stonith" operation_key="stonith-lxc1-off"
  *                on_node="lxc1" on_node_uuid="lxc1">
  *     <attributes CRM_meta_master_lxc_ms="10" CRM_meta_on_node="lxc1"
  *                 CRM_meta_on_node_uuid="lxc1" CRM_meta_stonith_action="off"
  *                 crm_feature_set="3.0.12"/>
  *     <downed>
  *       <node id="lxc1"/>
  *     </downed>
  *  </pseudo_event>
  */
 #define XPATH_PSEUDO_FENCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
     "[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \
     "/" XML_CIB_TAG_NODE
 
 /*!
  * \internal
  * \brief Check a pseudo-action for Pacemaker Remote node side effects
  *
  * \param[in] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_pseudo(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *result = getXpathResult(search, 0);
 
         /* Normally, we handle the necessary side effects of a guest node stop
          * action when reporting the remote agent's result. However, if the stop
          * is implied due to fencing, it will be a fencing pseudo-event, and
          * there won't be a result to report. Handle that case here.
          *
          * This will result in a duplicate call to remote_node_down() if the
          * guest stop was real instead of implied, but that shouldn't hurt.
          *
          * There is still one corner case that isn't handled: if a guest node
          * isn't running any resources when its host is fenced, it will appear
          * to be cleanly stopped, so there will be no pseudo-fence, and our
          * peer cache state will be incorrect unless and until the guest is
          * recovered.
          */
         if (result) {
             const char *remote = ID(result);
 
             if (remote) {
                 remote_node_down(remote, DOWN_ERASE_LRM);
             }
         }
     }
     freeXpathObject(search);
 }
 
 static void
 remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     xmlNode *update, *state;
     int call_opt, call_id = 0;
     crm_node_t *node;
 
     call_opt = crmd_cib_smart_opt();
     node = crm_remote_peer_get(lrm_state->node_name);
     CRM_CHECK(node != NULL, return);
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     state = create_node_state_update(node, node_update_none, update,
                                      __FUNCTION__);
     crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0");
     fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, call_id, NULL);
     if (call_id < 0) {
         crm_perror(LOG_WARNING, "%s CIB node state update failed", lrm_state->node_name);
     } else {
         /* TODO: still not 100% sure that async update will succeed ... */
         ra_data->is_maintenance = maintenance;
     }
     free_xml(update);
 }
 
 #define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
     "[@" XML_LRM_ATTR_TASK "='" CRM_OP_MAINTENANCE_NODES "']/" \
     XML_GRAPH_TAG_MAINTENANCE
 
 /*!
  * \internal
  * \brief Check a pseudo-action holding updates for maintenance state
  *
  * \param[in] xml  XML of pseudo-action to check
  */
 
 void
 remote_ra_process_maintenance_nodes(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *node;
         int cnt = 0, cnt_remote = 0;
 
         for (node =
                 first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE);
             node; node = __xml_next(node)) {
             lrm_state_t *lrm_state = lrm_state_find(ID(node));
 
             cnt++;
             if (lrm_state && lrm_state->remote_ra_data &&
                 ((remote_ra_data_t *) lrm_state->remote_ra_data)->active) {
                 cnt_remote++;
                 remote_ra_maintenance(lrm_state,
                                         crm_atoi(crm_element_value(node,
                                             XML_NODE_IS_MAINTENANCE), "0"));
 
             }
         }
         crm_trace("Action holds %d nodes (%d remotes found) "
                     "adjusting maintenance-mode", cnt, cnt_remote);
     }
     freeXpathObject(search);
 }
 
 gboolean
 remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
     return ra_data->is_maintenance;
 }
diff --git a/daemons/controld/controld_te_actions.c b/daemons/controld/controld_te_actions.c
index 35b20dec5b..c95c6c7e90 100644
--- a/daemons/controld/controld_te_actions.c
+++ b/daemons/controld/controld_te_actions.c
@@ -1,748 +1,748 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 
 #include <crm/common/xml.h>
 #include <controld_transition.h>
 
 #include <controld_fsa.h>
 #include <controld_lrm.h>
 #include <controld_messages.h>
 #include <crm/cluster.h>
 #include <controld_throttle.h>
 
 char *te_uuid = NULL;
 GHashTable *te_targets = NULL;
 void send_rsc_command(crm_action_t * action);
 static void te_update_job_count(crm_action_t * action, int offset);
 
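 /*!
  * \internal
  * \brief Start the completion timer for a transition graph action
  *
  * The timer fires after the action's own timeout plus the graph's
  * network_delay, invoking action_timer_callback().
  */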
 static void
 te_start_action_timer(crm_graph_t * graph, crm_action_t * action)
 {
     action->timer = calloc(1, sizeof(crm_action_timer_t));
     action->timer->timeout = action->timeout;
     action->timer->action = action;
     action->timer->source_id = g_timeout_add(action->timer->timeout + graph->network_delay,
                                              action_timer_callback, (void *)action->timer);
 
     CRM_ASSERT(action->timer->source_id != 0);
 }
 
 static gboolean
 te_pseudo_action(crm_graph_t * graph, crm_action_t * pseudo)
 {
     const char *task = crm_element_value(pseudo->xml, XML_LRM_ATTR_TASK);
 
     /* send to peers as well? */
     if (safe_str_eq(task, CRM_OP_MAINTENANCE_NODES)) {
         GHashTableIter iter;
         crm_node_t *node = NULL;
 
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             xmlNode *cmd = NULL;
 
             if (safe_str_eq(fsa_our_uname, node->uname)) {
                 continue;
             }
 
             cmd = create_request(task, pseudo->xml, node->uname,
                                  CRM_SYSTEM_CRMD, CRM_SYSTEM_TENGINE, NULL);
             send_cluster_message(node, crm_msg_crmd, cmd, FALSE);
             free_xml(cmd);
         }
 
         remote_ra_process_maintenance_nodes(pseudo->xml);
     } else {
         /* Check action for Pacemaker Remote node side effects */
         remote_ra_process_pseudo(pseudo->xml);
     }
 
     crm_debug("Pseudo-action %d (%s) fired and confirmed", pseudo->id,
               crm_element_value(pseudo->xml, XML_LRM_ATTR_TASK_KEY));
     te_action_confirmed(pseudo);
     update_graph(graph, pseudo);
     trigger_graph();
     return TRUE;
 }
 
 void
 send_stonith_update(crm_action_t * action, const char *target, const char *uuid)
 {
     int rc = pcmk_ok;
     crm_node_t *peer = NULL;
 
     /* We (usually) rely on the membership layer to do node_update_cluster,
      * and the peer status callback to do node_update_peer, because the node
      * might have already rejoined before we get the stonith result here.
      */
     int flags = node_update_join | node_update_expected;
 
     /* zero out the node-status & remove all LRM status info */
     xmlNode *node_state = NULL;
 
     CRM_CHECK(target != NULL, return);
     CRM_CHECK(uuid != NULL, return);
 
     /* Make sure the membership and join caches are accurate */
     peer = crm_get_peer_full(0, target, CRM_GET_PEER_ANY);
 
     CRM_CHECK(peer != NULL, return);
 
     if (peer->state == NULL) {
         /* Usually, we rely on the membership layer to update the cluster state
          * in the CIB. However, if the node has never been seen, do it here, so
          * the node is not considered unclean.
          */
         flags |= node_update_cluster;
     }
 
     if (peer->uuid == NULL) {
         crm_info("Recording uuid '%s' for node '%s'", uuid, target);
         peer->uuid = strdup(uuid);
     }
 
     crmd_peer_down(peer, TRUE);
 
     /* Generate a node state update for the CIB */
     node_state = create_node_state_update(peer, flags, NULL, __FUNCTION__);
 
     /* we have to mark whether or not remote nodes have already been fenced */
     if (peer->flags & crm_remote_node) {
         time_t now = time(NULL);
         char *now_s = crm_itoa(now);
         crm_xml_add(node_state, XML_NODE_IS_FENCED, now_s);
         free(now_s);
     }
 
     /* Force our known ID */
     crm_xml_add(node_state, XML_ATTR_UUID, uuid);
 
     rc = fsa_cib_conn->cmds->update(fsa_cib_conn, XML_CIB_TAG_STATUS, node_state,
                                     cib_quorum_override | cib_scope_local | cib_can_create);
 
     /* Delay processing the trigger until the update completes */
     crm_debug("Sending fencing update %d for %s", rc, target);
     fsa_register_cib_callback(rc, FALSE, strdup(target), cib_fencing_updated);
 
     /* Make sure it sticks */
     /* fsa_cib_conn->cmds->bump_epoch(fsa_cib_conn, cib_quorum_override|cib_scope_local);    */
 
     erase_status_tag(peer->uname, XML_CIB_TAG_LRM, cib_scope_local);
     erase_status_tag(peer->uname, XML_TAG_TRANSIENT_NODEATTRS, cib_scope_local);
 
     free_xml(node_state);
     return;
 }
 
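 /* Graph action handler: request fencing of the action's target node via the
  * fencer API, registering tengine_stonith_callback() for the result.
  */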
 static gboolean
 te_fence_node(crm_graph_t * graph, crm_action_t * action)
 {
     int rc = 0;
     const char *id = NULL;
     const char *uuid = NULL;
     const char *target = NULL;
     const char *type = NULL;
     gboolean invalid_action = FALSE;
     enum stonith_call_options options = st_opt_none;
 
     id = ID(action->xml);
     target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
     uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID);
     type = crm_meta_value(action->params, "stonith_action");
 
     CRM_CHECK(id != NULL, invalid_action = TRUE);
     CRM_CHECK(uuid != NULL, invalid_action = TRUE);
     CRM_CHECK(type != NULL, invalid_action = TRUE);
     CRM_CHECK(target != NULL, invalid_action = TRUE);
 
     if (invalid_action) {
         crm_log_xml_warn(action->xml, "BadAction");
         return FALSE;
     }
 
     crm_notice("Requesting fencing (%s) of node %s "
                CRM_XS " action=%s timeout=%d",
                type, target, id, transition_graph->stonith_timeout);
 
     /* Passing NULL means block until we can connect... */
     te_connect_stonith(NULL);
 
     if (crmd_join_phase_count(crm_join_confirmed) == 1) {
         options |= st_opt_allow_suicide;
     }
 
     rc = stonith_api->cmds->fence(stonith_api, options, target, type,
                                   transition_graph->stonith_timeout / 1000, 0);
 
     stonith_api->cmds->register_callback(stonith_api, rc, transition_graph->stonith_timeout / 1000,
                                          st_opt_timeout_updates,
                                          generate_transition_key(transition_graph->id, action->id,
                                                                  0, te_uuid),
                                          "tengine_stonith_callback", tengine_stonith_callback);
 
     return TRUE;
 }
 
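 /* Get the expected result code recorded in an action's meta-attributes
  * (defaulting to 0 if none is set).
  */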
 static int
 get_target_rc(crm_action_t * action)
 {
     const char *target_rc_s = crm_meta_value(action->params, XML_ATTR_TE_TARGET_RC);
 
     if (target_rc_s != NULL) {
         return crm_parse_int(target_rc_s, "0");
     }
     return 0;
 }
 
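 /* Graph action handler: send a CRM-level operation (such as a shutdown
  * request) to the controller on the appropriate node.
  */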
 static gboolean
 te_crm_command(crm_graph_t * graph, crm_action_t * action)
 {
     char *counter = NULL;
     xmlNode *cmd = NULL;
     gboolean is_local = FALSE;
 
     const char *id = NULL;
     const char *task = NULL;
     const char *value = NULL;
     const char *on_node = NULL;
     const char *router_node = NULL;
 
     gboolean rc = TRUE;
     gboolean no_wait = FALSE;
 
     id = ID(action->xml);
     task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
     on_node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
     router_node = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE);
 
     if (!router_node) {
         router_node = on_node;
     }
 
     CRM_CHECK(on_node != NULL && strlen(on_node) != 0,
               crm_err("Corrupted command (id=%s) %s: no node", crm_str(id), crm_str(task));
               return FALSE);
 
-    crm_info("Executing crm-event (%s): %s on %s%s%s",
-             crm_str(id), crm_str(task), on_node,
-             is_local ? " (local)" : "", no_wait ? " - no waiting" : "");
-
     if (safe_str_eq(router_node, fsa_our_uname)) {
         is_local = TRUE;
     }
 
     value = crm_meta_value(action->params, XML_ATTR_TE_NOWAIT);
     if (crm_is_true(value)) {
         no_wait = TRUE;
     }
 
+    crm_info("Executing crm-event (%s)%s%s: %s on %s",
+             crm_str(id), (is_local? " locally" : ""),
+             (no_wait? " without waiting" : ""), crm_str(task), on_node);
+
     if (is_local && safe_str_eq(task, CRM_OP_SHUTDOWN)) {
         /* defer until everything else completes */
         crm_info("crm-event (%s) is a local shutdown", crm_str(id));
         graph->completion_action = tg_shutdown;
         graph->abort_reason = "local shutdown";
         te_action_confirmed(action);
         update_graph(graph, action);
         trigger_graph();
         return TRUE;
 
     } else if (safe_str_eq(task, CRM_OP_SHUTDOWN)) {
         crm_node_t *peer = crm_get_peer(0, router_node);
         crm_update_peer_expected(__FUNCTION__, peer, CRMD_JOINSTATE_DOWN);
     }
 
     cmd = create_request(task, action->xml, router_node, CRM_SYSTEM_CRMD, CRM_SYSTEM_TENGINE, NULL);
 
     counter =
         generate_transition_key(transition_graph->id, action->id, get_target_rc(action), te_uuid);
     crm_xml_add(cmd, XML_ATTR_TRANSITION_KEY, counter);
 
     rc = send_cluster_message(crm_get_peer(0, router_node), crm_msg_crmd, cmd, TRUE);
     free(counter);
     free_xml(cmd);
 
     if (rc == FALSE) {
         crm_err("Action %d failed: send", action->id);
         return FALSE;
 
     } else if (no_wait) {
         te_action_confirmed(action);
         update_graph(graph, action);
         trigger_graph();
 
     } else {
         if (action->timeout <= 0) {
             crm_err("Action %d: %s on %s had an invalid timeout (%dms).  Using %dms instead",
                     action->id, task, on_node, action->timeout, graph->network_delay);
             action->timeout = graph->network_delay;
         }
         te_start_action_timer(graph, action);
     }
 
     return TRUE;
 }
 
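 /* Record a timed-out action as a failed operation result in the CIB status
  * section, since no result will arrive from the executor.
  */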
 void
 controld_record_action_timeout(crm_action_t *action)
 {
     lrmd_event_data_t *op = NULL;
     xmlNode *state = NULL;
     xmlNode *rsc = NULL;
     xmlNode *xml_op = NULL;
     xmlNode *action_rsc = NULL;
 
     int rc = pcmk_ok;
 
     const char *rsc_id = NULL;
     const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
     const char *task_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY);
     const char *target_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TARGET_UUID);
 
     int call_options = cib_quorum_override | cib_scope_local;
     int target_rc = get_target_rc(action);
 
     crm_warn("%s %d: %s on %s timed out",
              crm_element_name(action->xml), action->id, task_uuid, target);
 
     action_rsc = find_xml_node(action->xml, XML_CIB_TAG_RESOURCE, TRUE);
     if (action_rsc == NULL) {
         return;
     }
 
     rsc_id = ID(action_rsc);
     CRM_CHECK(rsc_id != NULL,
               crm_log_xml_err(action->xml, "Bad:action"); return);
 
 /*
   update the CIB
 
 <node_state id="hadev">
       <lrm>
         <lrm_resources>
           <lrm_resource id="rsc2" last_op="start" op_code="0" target="hadev"/>
 */
 
     state = create_xml_node(NULL, XML_CIB_TAG_STATE);
 
     crm_xml_add(state, XML_ATTR_UUID, target_uuid);
     crm_xml_add(state, XML_ATTR_UNAME, target);
 
     rsc = create_xml_node(state, XML_CIB_TAG_LRM);
     crm_xml_add(rsc, XML_ATTR_ID, target_uuid);
 
     rsc = create_xml_node(rsc, XML_LRM_TAG_RESOURCES);
     rsc = create_xml_node(rsc, XML_LRM_TAG_RESOURCE);
     crm_xml_add(rsc, XML_ATTR_ID, rsc_id);
 
 
     crm_copy_xml_element(action_rsc, rsc, XML_ATTR_TYPE);
     crm_copy_xml_element(action_rsc, rsc, XML_AGENT_ATTR_CLASS);
     crm_copy_xml_element(action_rsc, rsc, XML_AGENT_ATTR_PROVIDER);
 
     /* If the executor gets a timeout while waiting for the action to complete,
      * that will be reported via the usual callback. This timeout means that we
      * didn't hear from the executor or the controller that relayed the action
      * to the executor.
      *
      * @TODO Using PCMK_OCF_UNKNOWN_ERROR instead of PCMK_OCF_TIMEOUT is one way
      * to distinguish those situations, but perhaps PCMK_OCF_TIMEOUT would be
      * preferable anyway.
      */
     op = convert_graph_action(NULL, action, PCMK_LRM_OP_TIMEOUT,
                               PCMK_OCF_UNKNOWN_ERROR);
     op->call_id = -1;
     op->user_data = generate_transition_key(transition_graph->id, action->id, target_rc, te_uuid);
 
     xml_op = create_operation_update(rsc, op, CRM_FEATURE_SET, target_rc, target, __FUNCTION__, LOG_INFO);
     lrmd_free_event(op);
 
     crm_log_xml_trace(xml_op, "Action timeout");
 
     rc = fsa_cib_conn->cmds->update(fsa_cib_conn, XML_CIB_TAG_STATUS, state, call_options);
     fsa_register_cib_callback(rc, FALSE, NULL, cib_action_updated);
     free_xml(state);
 
     crm_trace("Sent CIB update (call ID %d) for timeout of action %d (%s on %s)",
               rc, action->id, task_uuid, target);
     action->sent_update = TRUE;
 }
 
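 /* Graph action handler: initiate a resource operation, invoking the local
  * executor path directly when the router node is this node, or relaying the
  * request as a cluster message otherwise.
  */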
 static gboolean
 te_rsc_command(crm_graph_t * graph, crm_action_t * action)
 {
     /* never overwrite stop actions in the CIB with
      *   anything other than completed results
      *
      * Writing pending stops makes it look like the
      *   resource is running again
      */
     xmlNode *cmd = NULL;
     xmlNode *rsc_op = NULL;
 
     gboolean rc = TRUE;
     gboolean no_wait = FALSE;
     gboolean is_local = FALSE;
 
     char *counter = NULL;
     const char *task = NULL;
     const char *value = NULL;
     const char *on_node = NULL;
     const char *router_node = NULL;
     const char *task_uuid = NULL;
 
     CRM_ASSERT(action != NULL);
     CRM_ASSERT(action->xml != NULL);
 
     action->executed = FALSE;
     on_node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
 
     CRM_CHECK(on_node != NULL && strlen(on_node) != 0,
               crm_err("Corrupted command (id=%s) %s: no node", ID(action->xml),
                       crm_str(crm_element_value(action->xml, XML_LRM_ATTR_TASK)));
               return FALSE);
 
     rsc_op = action->xml;
     task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
     task_uuid = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY);
     router_node = crm_element_value(rsc_op, XML_LRM_ATTR_ROUTER_NODE);
 
     if (!router_node) {
         router_node = on_node;
     }
 
     counter =
         generate_transition_key(transition_graph->id, action->id, get_target_rc(action), te_uuid);
     crm_xml_add(rsc_op, XML_ATTR_TRANSITION_KEY, counter);
 
     if (safe_str_eq(router_node, fsa_our_uname)) {
         is_local = TRUE;
     }
 
     value = crm_meta_value(action->params, XML_ATTR_TE_NOWAIT);
     if (crm_is_true(value)) {
         no_wait = TRUE;
     }
 
     crm_notice("Initiating %s operation %s%s on %s%s "CRM_XS" action %d",
                task, task_uuid, (is_local? " locally" : ""), on_node,
                (no_wait? " without waiting" : ""), action->id);
 
     cmd = create_request(CRM_OP_INVOKE_LRM, rsc_op, router_node,
                          CRM_SYSTEM_LRMD, CRM_SYSTEM_TENGINE, NULL);
 
     if (is_local) {
         /* shortcut local resource commands */
         ha_msg_input_t data = {
             .msg = cmd,
             .xml = rsc_op,
         };
 
         fsa_data_t msg = {
             .id = 0,
             .data = &data,
             .data_type = fsa_dt_ha_msg,
             .fsa_input = I_NULL,
             .fsa_cause = C_FSA_INTERNAL,
             .actions = A_LRM_INVOKE,
             .origin = __FUNCTION__,
         };
 
         do_lrm_invoke(A_LRM_INVOKE, C_FSA_INTERNAL, fsa_state, I_NULL, &msg);
 
     } else {
         rc = send_cluster_message(crm_get_peer(0, router_node), crm_msg_lrmd, cmd, TRUE);
     }
 
     free(counter);
     free_xml(cmd);
 
     action->executed = TRUE;
 
     if (rc == FALSE) {
         crm_err("Action %d failed: send", action->id);
         return FALSE;
 
     } else if (no_wait) {
         crm_info("Action %d confirmed - no wait", action->id);
         action->confirmed = TRUE; /* Just mark confirmed.
                                    * Don't bump the job count only to immediately decrement it
                                    */
         update_graph(transition_graph, action);
         trigger_graph();
 
     } else if (action->confirmed == TRUE) {
         crm_debug("Action %d: %s %s on %s (timeout %dms) was already confirmed.",
                   action->id, task, task_uuid, on_node, action->timeout);
     } else {
         if (action->timeout <= 0) {
             crm_err("Action %d: %s %s on %s had an invalid timeout (%dms).  Using %dms instead",
                     action->id, task, task_uuid, on_node, action->timeout, graph->network_delay);
             action->timeout = graph->network_delay;
         }
         te_update_job_count(action, 1);
         te_start_action_timer(graph, action);
     }
 
     return TRUE;
 }
 
 struct te_peer_s
 {
         char *name;
         int jobs;
         int migrate_jobs;
 };
 
 static void te_peer_free(gpointer p)
 {
     struct te_peer_s *peer = p;
 
     free(peer->name);
     free(peer);
 }
 
 void te_reset_job_counts(void)
 {
     GHashTableIter iter;
     struct te_peer_s *peer = NULL;
 
     if(te_targets == NULL) {
         te_targets = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, te_peer_free);
     }
 
     g_hash_table_iter_init(&iter, te_targets);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & peer)) {
         peer->jobs = 0;
         peer->migrate_jobs = 0;
     }
 }
 
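 /* Adjust the in-flight job count (and, for migrations, the migration job
  * count) recorded for the given node.
  */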
 static void
 te_update_job_count_on(const char *target, int offset, bool migrate)
 {
     struct te_peer_s *r = NULL;
 
     if(target == NULL || te_targets == NULL) {
         return;
     }
 
     r = g_hash_table_lookup(te_targets, target);
     if(r == NULL) {
         r = calloc(1, sizeof(struct te_peer_s));
         r->name = strdup(target);
         g_hash_table_insert(te_targets, r->name, r);
     }
 
     r->jobs += offset;
     if(migrate) {
         r->migrate_jobs += offset;
     }
     crm_trace("jobs[%s] = %d", target, r->jobs);
 }
 
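 /* Adjust the in-flight job counts affected by a resource action, counting
  * migrations against both the source and target nodes.
  */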
 static void
 te_update_job_count(crm_action_t * action, int offset)
 {
     const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
     const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
 
     if (action->type != action_type_rsc || target == NULL) {
         /* No limit on these */
         return;
     }
 
     /* If a router node is set, the action is being performed on a remote
      * node. For now, all actions occurring on a remote node are counted
      * against the job list of the cluster node hosting the connection
      * resource. */
     target = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE);
 
     if ((target == NULL) &&
         (safe_str_eq(task, CRMD_ACTION_MIGRATE) || safe_str_eq(task, CRMD_ACTION_MIGRATED))) {
 
         const char *t1 = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_SOURCE);
         const char *t2 = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_TARGET);
 
         te_update_job_count_on(t1, offset, TRUE);
         te_update_job_count_on(t2, offset, TRUE);
         return;
     } else if (target == NULL) {
         target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
     }
 
     te_update_job_count_on(target, offset, FALSE);
 }
 
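 /* Check whether the given node is still below its job limit (and, for
  * migrations, its migration limit) so another action may be initiated on it.
  */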
 static gboolean
 te_should_perform_action_on(crm_graph_t * graph, crm_action_t * action, const char *target)
 {
     int limit = 0;
     struct te_peer_s *r = NULL;
     const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
     const char *id = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY);
 
     if(target == NULL) {
         /* No limit on these */
         return TRUE;
 
     } else if(te_targets == NULL) {
         return FALSE;
     }
 
     r = g_hash_table_lookup(te_targets, target);
     limit = throttle_get_job_limit(target);
 
     if(r == NULL) {
         r = calloc(1, sizeof(struct te_peer_s));
         r->name = strdup(target);
         g_hash_table_insert(te_targets, r->name, r);
     }
 
     if(limit <= r->jobs) {
         crm_trace("Peer %s is over their job limit of %d (%d): deferring %s",
                   target, limit, r->jobs, id);
         return FALSE;
 
     } else if(graph->migration_limit > 0 && r->migrate_jobs >= graph->migration_limit) {
         if (safe_str_eq(task, CRMD_ACTION_MIGRATE) || safe_str_eq(task, CRMD_ACTION_MIGRATED)) {
             crm_trace("Peer %s is over their migration job limit of %d (%d): deferring %s",
                       target, graph->migration_limit, r->migrate_jobs, id);
             return FALSE;
         }
     }
 
     crm_trace("Peer %s has not hit their limit yet: current jobs = %d, limit = %d",
               target, r->jobs, limit);
 
     return TRUE;
 }
 
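 /* Check whether every node affected by an action can accept another job now. */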
 static gboolean
 te_should_perform_action(crm_graph_t * graph, crm_action_t * action)
 {
     const char *target = NULL;
     const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
 
     if (action->type != action_type_rsc) {
         /* No limit on these */
         return TRUE;
     }
 
     /* If a router node is set, the action is being performed on a remote
      * node. For now, all actions occurring on a remote node are counted
      * against the job list of the cluster node hosting the connection
      * resource. */
     target = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE);
 
     if ((target == NULL) &&
         (safe_str_eq(task, CRMD_ACTION_MIGRATE) || safe_str_eq(task, CRMD_ACTION_MIGRATED))) {
 
         target = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_SOURCE);
         if(te_should_perform_action_on(graph, action, target) == FALSE) {
             return FALSE;
         }
 
         target = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_TARGET);
 
     } else if (target == NULL) {
         target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
     }
 
     return te_should_perform_action_on(graph, action, target);
 }
 
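 /* Mark a graph action as confirmed, releasing the job slot it held. */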
 void
 te_action_confirmed(crm_action_t * action)
 {
     const char *target = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
 
     if (action->confirmed == FALSE && action->type == action_type_rsc && target != NULL) {
         te_update_job_count(action, -1);
     }
     action->confirmed = TRUE;
 }
 
 
 crm_graph_functions_t te_graph_fns = {
     te_pseudo_action,
     te_rsc_command,
     te_crm_command,
     te_fence_node,
     te_should_perform_action,
 };
 
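 /* Handle completion of a transition graph by triggering the appropriate
  * controller FSA input for the graph's completion action.
  */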
 void
 notify_crmd(crm_graph_t * graph)
 {
     const char *type = "unknown";
     enum crmd_fsa_input event = I_NULL;
 
     crm_debug("Processing transition completion in state %s", fsa_state2string(fsa_state));
 
     if (graph->complete == FALSE) {
         CRM_CHECK(graph->complete,);
         graph->complete = TRUE;
     }
 
     switch (graph->completion_action) {
         case tg_stop:
             type = "stop";
             if (fsa_state == S_TRANSITION_ENGINE) {
                 event = I_TE_SUCCESS;
             }
             break;
         case tg_done:
             type = "done";
             if (fsa_state == S_TRANSITION_ENGINE) {
                 event = I_TE_SUCCESS;
             }
             break;
 
         case tg_restart:
             type = "restart";
             if (fsa_state == S_TRANSITION_ENGINE) {
                 if (transition_timer->period_ms > 0) {
                     crm_timer_stop(transition_timer);
                     crm_timer_start(transition_timer);
                 } else {
                     event = I_PE_CALC;
                 }
 
             } else if (fsa_state == S_POLICY_ENGINE) {
                 register_fsa_action(A_PE_INVOKE);
             }
             break;
 
         case tg_shutdown:
             type = "shutdown";
             if (is_set(fsa_input_register, R_SHUTDOWN)) {
                 event = I_STOP;
 
             } else {
                 crm_err("We didn't ask to be shut down, yet the scheduler is telling us to");
                 event = I_TERMINATE;
             }
     }
 
     crm_debug("Transition %d status: %s - %s", graph->id, type, crm_str(graph->abort_reason));
 
     graph->abort_reason = NULL;
     graph->completion_action = tg_done;
     clear_bit(fsa_input_register, R_IN_TRANSITION);
 
     if (event != I_NULL) {
         register_fsa_input(C_FSA_INTERNAL, event, NULL);
 
     } else if (fsa_source) {
         mainloop_set_trigger(fsa_source);
     }
 }
diff --git a/daemons/fenced/cts-fence-helper.c b/daemons/fenced/cts-fence-helper.c
index 36b69155de..e36e36882e 100644
--- a/daemons/fenced/cts-fence-helper.c
+++ b/daemons/fenced/cts-fence-helper.c
@@ -1,672 +1,668 @@
 /* 
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  * 
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  * 
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  * 
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/time.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/common/mainloop.h>
 
 static GMainLoop *mainloop = NULL;
 static crm_trigger_t *trig = NULL;
 static int mainloop_iter = 0;
 static int callback_rc = 0;
 typedef void (*mainloop_test_iteration_cb) (int check_event);
 
 #define MAINLOOP_DEFAULT_TIMEOUT 2
 
 #define mainloop_test_done(pass) \
     if (pass) { \
         crm_info("SUCCESS - %s", __FUNCTION__); \
         mainloop_iter++;   \
         mainloop_set_trigger(trig);  \
     } else { \
         crm_err("FAILURE - %s async_callback %d", __FUNCTION__, callback_rc); \
         crm_exit(CRM_EX_ERROR); \
     } \
     callback_rc = 0; \
 
 
 /* *INDENT-OFF* */
 enum test_modes {
-    /* class dev test using a very specific environment */
-    test_standard = 0,
-    /* watch notifications only */
-    test_passive,
-    /* sanity test stonith client api using fence_dummy */
-    test_api_sanity,
-    /* sanity test mainloop code with async respones. */
-    test_api_mainloop,
+    test_standard = 0,  // test using a specific developer environment
+    test_passive,       // watch notifications only
+    test_api_sanity,    // sanity-test stonith client API using fence_dummy
+    test_api_mainloop,  // sanity-test mainloop code with async responses
 };
 
 static struct crm_option long_options[] = {
     {"verbose",     0, 0, 'V'},
     {"version",     0, 0, '$'},
     {"help",        0, 0, '?'},
     {"passive",     0, 0, 'p'},
     {"api_test",    0, 0, 't'},
     {"mainloop_api_test",    0, 0, 'm'},
 
     {0, 0, 0, 0}
 };
 /* *INDENT-ON* */
 
 static stonith_t *st = NULL;
 static struct pollfd pollfd;
 static int st_opts = st_opt_sync_call;
 static int expected_notifications = 0;
 static int verbose = 0;
 
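 /* Poll the fencer connection for up to the given number of milliseconds,
  * dispatching any notifications that arrive.
  */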
 static void
 dispatch_helper(int timeout)
 {
     int rc;
 
     crm_debug("Looking for notification");
     pollfd.events = POLLIN;
     while (true) {
         rc = poll(&pollfd, 1, timeout); /* wait up to the given timeout in ms (-1 means forever) */
         if (rc > 0) {
             if (!stonith_dispatch(st)) {
                 break;
             }
         } else {
             break;
         }
     }
 }
 
 static void
 st_callback(stonith_t * st, stonith_event_t * e)
 {
     if (st->state == stonith_disconnected) {
         crm_exit(CRM_EX_DISCONNECT);
     }
 
     crm_notice("Operation %s requested by %s %s for peer %s.  %s reported: %s (ref=%s)",
                e->operation, e->origin, e->result == pcmk_ok ? "completed" : "failed",
                e->target, e->executioner ? e->executioner : "<none>",
                pcmk_strerror(e->result), e->id);
 
     if (expected_notifications) {
         expected_notifications--;
     }
 }
 
 static void
 st_global_callback(stonith_t * stonith, stonith_callback_data_t * data)
 {
     crm_notice("Call id %d completed with rc %d", data->call_id, data->rc);
 }
 
 static void
 passive_test(void)
 {
     int rc = 0;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback);
     st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback);
     st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback",
                                 st_global_callback);
 
     dispatch_helper(600 * 1000);
 }
 
 #define single_test(cmd, str, num_notifications, expected_rc) \
 { \
     int rc = 0; \
     rc = cmd; \
     expected_notifications = 0;  \
     if (num_notifications) { \
         expected_notifications = num_notifications; \
         dispatch_helper(500);  \
     } \
     if (rc != expected_rc) { \
         crm_err("FAILURE - expected rc %d != %d(%s) for cmd - %s", expected_rc, rc, pcmk_strerror(rc), str); \
         crm_exit(CRM_EX_ERROR); \
     } else if (expected_notifications) { \
         crm_err("FAILURE - expected %d notifications, got only %d for cmd - %s", \
             num_notifications, num_notifications - expected_notifications, str); \
         crm_exit(CRM_EX_ERROR); \
     } else { \
         if (verbose) {                   \
             crm_info("SUCCESS - %s: %d", str, rc);    \
         } else {   \
             crm_debug("SUCCESS - %s: %d", str, rc);    \
         }                          \
     } \
 }\
 
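 /* Register a fence_dummy device configured to fail, and verify that fencing
  * requests against it report the expected errors.
  */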
 static void
 run_fence_failure_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
     params = stonith_key_value_add(params, "mode", "fail");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_dummy", params),
                 "Register device1 for failure test", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
                 "Fence failure results off", 1, -pcmk_err_generic);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "reboot", 3, 0),
                 "Fence failure results reboot", 1, -pcmk_err_generic);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
                 "Remove device1 for failure test", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 run_fence_failure_rollover_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
     params = stonith_key_value_add(params, "mode", "fail");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_dummy", params),
                 "Register device1 for rollover test", 1, 0);
     stonith_key_value_freeall(params, 1, 1);
     params = NULL;
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
     params = stonith_key_value_add(params, "mode", "pass");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id2", "stonith-ng", "fence_dummy", params),
                 "Register device2 for rollover test", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
                 "Fence rollover results off", 1, 0);
 
     /* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */
     single_test(st->cmds->fence(st, st_opts, "false_1_node2", "on", 3, 0),
                 "Fence rollover results on", 1, -ENODEV);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
                 "Remove device1 for rollover tests", 1, 0);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id2"),
                 "Remove device2 for rollover tests", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 run_standard_test(void)
 {
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2 false_1_node2=3,4");
     params = stonith_key_value_add(params, "mode", "pass");
     params = stonith_key_value_add(params, "mock_dynamic_hosts", "false_1_node1 false_1_node2");
 
     single_test(st->
                 cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_dummy", params),
                 "Register", 1, 0);
     stonith_key_value_freeall(params, 1, 1);
     params = NULL;
 
     single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), "list", 1, 0);
 
     single_test(st->cmds->monitor(st, st_opts, "test-id", 1), "Monitor", 1, 0);
 
     single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node2", 1),
                 "Status false_1_node2", 1, 0);
 
     single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node1", 1),
                 "Status false_1_node1", 1, 0);
 
     single_test(st->cmds->fence(st, st_opts, "unknown-host", "off", 1, 0),
                 "Fence unknown-host (expected failure)", 0, -ENODEV);
 
     single_test(st->cmds->fence(st, st_opts, "false_1_node1", "off", 1, 0),
                 "Fence false_1_node1", 1, 0);
 
     /* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */
     single_test(st->cmds->fence(st, st_opts, "false_1_node1", "on", 1, 0),
                 "Unfence false_1_node1", 1, -ENODEV);
 
     /* Confirm that an invalid level index is rejected */
     single_test(st->cmds->register_level(st, st_opts, "node1", 999, params),
                 "Attempt to register an invalid level index", 0, -EINVAL);
 
     single_test(st->cmds->remove_device(st, st_opts, "test-id"), "Remove test-id", 1, 0);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 sanity_tests(void)
 {
     int rc = 0;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback);
     st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback);
     st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback);
     st->cmds->register_callback(st, 0, 120, st_opt_timeout_updates, NULL, "st_global_callback",
                                 st_global_callback);
 
     crm_info("Starting API Sanity Tests");
     run_standard_test();
     run_fence_failure_test();
     run_fence_failure_rollover_test();
     crm_info("Sanity Tests Passed");
 }
 
 static void
 standard_dev_test(void)
 {
     int rc = 0;
     char *tmp = NULL;
     stonith_key_value_t *params = NULL;
 
     rc = st->cmds->connect(st, crm_system_name, &pollfd.fd);
     crm_debug("Connect: %d", rc);
 
     params = stonith_key_value_add(params, "pcmk_host_map", "some-host=pcmk-7 true_1_node1=3,4");
 
     rc = st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_xvm", params);
     crm_debug("Register: %d", rc);
 
     rc = st->cmds->list(st, st_opts, "test-id", &tmp, 10);
     crm_debug("List: %d output: %s", rc, tmp ? tmp : "<none>");
 
     rc = st->cmds->monitor(st, st_opts, "test-id", 10);
     crm_debug("Monitor: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node2", 10);
     crm_debug("Status false_1_node2: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60, 0);
     crm_debug("Fence unknown-host: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "off", 60, 0);
     crm_debug("Fence false_1_node1: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
     crm_debug("Unfence false_1_node1: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
     crm_debug("Status false_1_node1: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "some-host", "off", 10, 0);
     crm_debug("Fence alias: %d", rc);
 
     rc = st->cmds->status(st, st_opts, "test-id", "some-host", 10);
     crm_debug("Status alias: %d", rc);
 
     rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
     crm_debug("Unfence false_1_node1: %d", rc);
 
     rc = st->cmds->remove_device(st, st_opts, "test-id");
     crm_debug("Remove test-id: %d", rc);
 
     stonith_key_value_freeall(params, 1, 1);
 }
 
 static void
 iterate_mainloop_tests(gboolean event_ready);
 
 static void
 mainloop_callback(stonith_t * stonith, stonith_callback_data_t * data)
 {
     callback_rc = data->rc;
     iterate_mainloop_tests(TRUE);
 }
 
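 /* Register mainloop_callback() for the given fencer call ID, using the
  * default test timeout.
  */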
 static int
 register_callback_helper(int callid)
 {
     return st->cmds->register_callback(st,
                                        callid,
                                        MAINLOOP_DEFAULT_TIMEOUT,
                                        st_opt_timeout_updates, NULL, "callback", mainloop_callback);
 }
 
 static void
 test_async_fence_pass(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
         if (callback_rc != 0) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->fence(st, 0, "true_1_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
 #define CUSTOM_TIMEOUT_ADDITION 10
 static void
 test_async_fence_custom_timeout(int check_event)
 {
     int rc = 0;
     static time_t begin = 0;
 
     if (check_event) {
         uint32_t diff = (time(NULL) - begin);
 
         if (callback_rc != -ETIME) {
             mainloop_test_done(FALSE);
         } else if (diff < CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT) {
             crm_err
                 ("Custom timeout test failed, callback expiration should be updated to %d, actual timeout was %d",
                  CUSTOM_TIMEOUT_ADDITION + MAINLOOP_DEFAULT_TIMEOUT, diff);
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
     begin = time(NULL);
 
     rc = st->cmds->fence(st, 0, "custom_timeout_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
 static void
 test_async_fence_timeout(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
         if (callback_rc != -ENODEV) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->fence(st, 0, "false_1_node2", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
     if (rc < 0) {
         crm_err("fence failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
     register_callback_helper(rc);
     /* wait for event */
 }
 
 static void
 test_async_monitor(int check_event)
 {
     int rc = 0;
 
     if (check_event) {
         if (callback_rc) {
             mainloop_test_done(FALSE);
         } else {
             mainloop_test_done(TRUE);
         }
         return;
     }
 
     rc = st->cmds->monitor(st, 0, "false_1", MAINLOOP_DEFAULT_TIMEOUT);
     if (rc < 0) {
         crm_err("monitor failed with rc %d", rc);
         mainloop_test_done(FALSE);
     }
 
     register_callback_helper(rc);
     /* wait for event */
 }
 
 static void
 test_register_async_devices(int check_event)
 {
     char buf[16] = { 0, };
     stonith_key_value_t *params = NULL;
 
     params = stonith_key_value_add(params, "pcmk_host_map", "false_1_node1=1,2");
     params = stonith_key_value_add(params, "mode", "fail");
     st->cmds->register_device(st, st_opts, "false_1", "stonith-ng", "fence_dummy", params);
     stonith_key_value_freeall(params, 1, 1);
 
     params = NULL;
     params = stonith_key_value_add(params, "pcmk_host_map", "true_1_node1=1,2");
     params = stonith_key_value_add(params, "mode", "pass");
     st->cmds->register_device(st, st_opts, "true_1", "stonith-ng", "fence_dummy", params);
     stonith_key_value_freeall(params, 1, 1);
 
     params = NULL;
     params = stonith_key_value_add(params, "pcmk_host_map", "custom_timeout_node1=1,2");
     params = stonith_key_value_add(params, "mode", "fail");
     params = stonith_key_value_add(params, "delay", "1000");
     snprintf(buf, sizeof(buf) - 1, "%d", MAINLOOP_DEFAULT_TIMEOUT + CUSTOM_TIMEOUT_ADDITION);
     params = stonith_key_value_add(params, "pcmk_off_timeout", buf);
     st->cmds->register_device(st, st_opts, "false_custom_timeout", "stonith-ng", "fence_dummy",
                               params);
     stonith_key_value_freeall(params, 1, 1);
 
     mainloop_test_done(TRUE);
 }
 
 static void
 try_mainloop_connect(int check_event)
 {
     int tries = 10;
     int i = 0;
     int rc = 0;
 
     for (i = 0; i < tries; i++) {
         rc = st->cmds->connect(st, crm_system_name, NULL);
 
         if (!rc) {
             crm_info("stonith client connection established");
             mainloop_test_done(TRUE);
             return;
         } else {
             crm_info("stonith client connection failed");
         }
         sleep(1);
     }
 
     crm_err("API CONNECTION FAILURE");
     mainloop_test_done(FALSE);
 }
 
 static void
 iterate_mainloop_tests(gboolean event_ready)
 {
     static mainloop_test_iteration_cb callbacks[] = {
         try_mainloop_connect,
         test_register_async_devices,
         test_async_monitor,
         test_async_fence_pass,
         test_async_fence_timeout,
         test_async_fence_custom_timeout,
     };
 
     if (mainloop_iter == (sizeof(callbacks) / sizeof(mainloop_test_iteration_cb))) {
         /* all tests ran, everything passed */
         crm_info("ALL MAINLOOP TESTS PASSED!");
         crm_exit(CRM_EX_OK);
     }
 
     callbacks[mainloop_iter] (event_ready);
 }
 
 static gboolean
 trigger_iterate_mainloop_tests(gpointer user_data)
 {
     iterate_mainloop_tests(FALSE);
     return TRUE;
 }
 
 static void
 test_shutdown(int nsig)
 {
     int rc = 0;
 
     if (st) {
         rc = st->cmds->disconnect(st);
         crm_info("Disconnect: %d", rc);
 
         crm_debug("Destroy");
         stonith_api_delete(st);
     }
 
     if (rc) {
         crm_exit(CRM_EX_ERROR);
     }
 }
 
 static void
 mainloop_tests(void)
 {
     trig = mainloop_add_trigger(G_PRIORITY_HIGH, trigger_iterate_mainloop_tests, NULL);
     mainloop_set_trigger(trig);
     mainloop_add_signal(SIGTERM, test_shutdown);
 
     crm_info("Starting");
     mainloop = g_main_loop_new(NULL, FALSE);
     g_main_loop_run(mainloop);
 }
 
 int
 main(int argc, char **argv)
 {
     int argerr = 0;
     int flag;
     int option_index = 0;
 
     enum test_modes mode = test_standard;
 
     crm_set_options(NULL, "mode [options]", long_options,
                     "Test tool for the fencing (stonith) client API."
                     "\n\nRuns the passive, API sanity, mainloop, or standard developer test modes against the fencer.\n");
 
     while (1) {
         flag = crm_get_option(argc, argv, &option_index);
         if (flag == -1) {
             break;
         }
 
         switch (flag) {
             case 'V':
                 verbose = 1;
                 break;
             case '$':
             case '?':
                 crm_help(flag, CRM_EX_OK);
                 break;
             case 'p':
                 mode = test_passive;
                 break;
             case 't':
                 mode = test_api_sanity;
                 break;
             case 'm':
                 mode = test_api_mainloop;
                 break;
             default:
                 ++argerr;
                 break;
         }
     }
 
     crm_log_init(NULL, LOG_INFO, TRUE, (verbose? TRUE : FALSE), argc, argv,
                  FALSE);
 
     if (optind > argc) {
         ++argerr;
     }
 
     if (argerr) {
         crm_help('?', CRM_EX_USAGE);
     }
 
     crm_debug("Create");
     st = stonith_api_new();
 
     switch (mode) {
         case test_standard:
             standard_dev_test();
             break;
         case test_passive:
             passive_test();
             break;
         case test_api_sanity:
             sanity_tests();
             break;
         case test_api_mainloop:
             mainloop_tests();
             break;
     }
 
     test_shutdown(0);
     return CRM_EX_OK;
 }
diff --git a/daemons/schedulerd/sched_allocate.c b/daemons/schedulerd/sched_allocate.c
index 530b87909c..15106047d3 100644
--- a/daemons/schedulerd/sched_allocate.c
+++ b/daemons/schedulerd/sched_allocate.c
@@ -1,2535 +1,2535 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-schedulerd.h>
 #include <sched_allocate.h>
 #include <sched_utils.h>
 
 CRM_TRACE_INIT_DATA(pe_allocate);
 
 void set_alloc_actions(pe_working_set_t * data_set);
 extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
 extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
 static void apply_remote_node_ordering(pe_working_set_t *data_set);
 static enum remote_connection_state get_remote_node_state(pe_node_t *node);
 
 enum remote_connection_state {
     remote_state_unknown = 0,
     remote_state_alive = 1,
     remote_state_resting = 2,
     remote_state_failed = 3,
     remote_state_stopped = 4
 };
 
 static const char *
 state2text(enum remote_connection_state state)
 {
     switch (state) {
         case remote_state_unknown:
             return "unknown";
         case remote_state_alive:
             return "alive";
         case remote_state_resting:
             return "resting";
         case remote_state_failed:
             return "failed";
         case remote_state_stopped:
             return "stopped";
     }
 
     return "impossible";
 }
 
 resource_alloc_functions_t resource_class_alloc_functions[] = {
     {
      native_merge_weights,
      native_color,
      native_create_actions,
      native_create_probe,
      native_internal_constraints,
      native_rsc_colocation_lh,
      native_rsc_colocation_rh,
      native_rsc_location,
      native_action_flags,
      native_update_actions,
      native_expand,
      native_append_meta,
      },
     {
      group_merge_weights,
      group_color,
      group_create_actions,
      native_create_probe,
      group_internal_constraints,
      group_rsc_colocation_lh,
      group_rsc_colocation_rh,
      group_rsc_location,
      group_action_flags,
      group_update_actions,
      group_expand,
      group_append_meta,
      },
     {
      clone_merge_weights,
      clone_color,
      clone_create_actions,
      clone_create_probe,
      clone_internal_constraints,
      clone_rsc_colocation_lh,
      clone_rsc_colocation_rh,
      clone_rsc_location,
      clone_action_flags,
      container_update_actions,
      clone_expand,
      clone_append_meta,
      },
     {
      container_merge_weights,
      container_color,
      container_create_actions,
      container_create_probe,
      container_internal_constraints,
      container_rsc_colocation_lh,
      container_rsc_colocation_rh,
      container_rsc_location,
      container_action_flags,
      container_update_actions,
      container_expand,
      container_append_meta,
      }
 };
 
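 /* Set or clear the given flags on an action, returning whether any flag
  * actually changed.
  */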
 gboolean
 update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line)
 {
     static unsigned long calls = 0;
     gboolean changed = FALSE;
     gboolean clear = is_set(flags, pe_action_clear);
     enum pe_action_flags last = action->flags;
 
     if (clear) {
         action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags);
     } else {
         action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags);
     }
 
     if (last != action->flags) {
         calls++;
         changed = TRUE;
         /* Useful for tracking down _who_ changed a specific flag */
         /* CRM_ASSERT(calls != 534); */
         clear_bit(flags, pe_action_clear);
         crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
                   action->uuid, action->node ? action->node->details->uname : "[none]",
                   clear ? "un-" : "", flags, last, action->flags, calls, source);
     }
 
     return changed;
 }
 
 static gboolean
 check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry,
                      gboolean active_here, pe_working_set_t * data_set)
 {
     int attr_lpc = 0;
     gboolean force_restart = FALSE;
     gboolean delete_resource = FALSE;
     gboolean changed = FALSE;
 
     const char *value = NULL;
     const char *old_value = NULL;
 
     const char *attr_list[] = {
         XML_ATTR_TYPE,
         XML_AGENT_ATTR_CLASS,
         XML_AGENT_ATTR_PROVIDER
     };
 
     for (; attr_lpc < DIMOF(attr_list); attr_lpc++) {
         value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
         old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
         if (value == old_value  /* i.e. NULL */
             || crm_str_eq(value, old_value, TRUE)) {
             continue;
         }
 
         changed = TRUE;
         trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
         if (active_here) {
             force_restart = TRUE;
             crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
                        rsc->id, node->details->uname, attr_list[attr_lpc],
                        crm_str(old_value), crm_str(value));
         }
     }
     if (force_restart) {
         /* make sure the restart happens */
         stop_action(rsc, node, FALSE);
         set_bit(rsc->flags, pe_rsc_start_pending);
         delete_resource = TRUE;
 
     } else if (changed) {
         delete_resource = TRUE;
     }
     return delete_resource;
 }
 
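 /* Schedule cancellation of the recurring operation recorded in xml_op on the
  * node where it is active.
  */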
 static void
 CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node,
             const char *reason, pe_working_set_t * data_set)
 {
     guint interval_ms = 0;
     action_t *cancel = NULL;
 
     const char *task = NULL;
     const char *call_id = NULL;
     const char *interval_ms_s = NULL;
 
     CRM_CHECK(xml_op != NULL, return);
     CRM_CHECK(active_node != NULL, return);
 
     task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
 
     interval_ms = crm_parse_ms(interval_ms_s);
 
     crm_info("Action " CRM_OP_FMT " on %s will be stopped: %s",
              rsc->id, task, interval_ms,
              active_node->details->uname, (reason? reason : "unknown"));
 
     cancel = pe_cancel_op(rsc, task, interval_ms, active_node, data_set);
     add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
     custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
 }
 
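 /* Compare a recorded operation's parameters against the current resource
  * definition, scheduling a reload or restart if they have changed. Returns
  * TRUE if a change was detected.
  */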
 static gboolean
 check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op,
                         pe_working_set_t * data_set)
 {
     char *key = NULL;
     guint interval_ms = 0;
     const char *interval_ms_s = NULL;
     const op_digest_cache_t *digest_data = NULL;
     gboolean did_change = FALSE;
 
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *digest_secure = NULL;
 
     CRM_CHECK(active_node != NULL, return FALSE);
     if (safe_str_eq(task, RSC_STOP)) {
         return FALSE;
     }
 
     interval_ms_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
     interval_ms = crm_parse_ms(interval_ms_s);
 
     if (interval_ms > 0) {
         xmlNode *op_match = NULL;
 
         /* we need to reconstruct the key because of the way we used to construct resource IDs */
         key = generate_op_key(rsc->id, task, interval_ms);
 
         pe_rsc_trace(rsc, "Checking parameters for %s", key);
         op_match = find_rsc_op_entry(rsc, key);
 
         if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) {
             CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
             free(key);
             return TRUE;
 
         } else if (op_match == NULL) {
             pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
             free(key);
             return TRUE;
         }
         free(key);
         key = NULL;
     }
 
     crm_trace("Testing " CRM_OP_FMT " on %s",
               rsc->id, task, interval_ms, active_node->details->uname);
     if ((interval_ms == 0) && safe_str_eq(task, RSC_STATUS)) {
         /* Reload based on the start action not a probe */
         task = RSC_START;
 
     } else if ((interval_ms == 0) && safe_str_eq(task, RSC_MIGRATED)) {
         /* Reload based on the start action not a migrate */
         task = RSC_START;
     } else if ((interval_ms == 0) && safe_str_eq(task, RSC_PROMOTE)) {
         /* Reload based on the start action not a promote */
         task = RSC_START;
     }
 
     digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
 
     if(is_set(data_set->flags, pe_flag_sanitized)) {
         digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
     }
 
     if(digest_data->rc != RSC_DIGEST_MATCH
        && digest_secure
        && digest_data->digest_secure_calc
        && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
         if (is_set(data_set->flags, pe_flag_stdout)) {
             printf("Only 'private' parameters to " CRM_OP_FMT " on %s changed: %s\n",
                    rsc->id, task, interval_ms, active_node->details->uname,
                    crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         }
 
     } else if (digest_data->rc == RSC_DIGEST_RESTART) {
         /* Changes that force a restart */
         pe_action_t *required = NULL;
 
         did_change = TRUE;
         key = generate_op_key(rsc->id, task, interval_ms);
         crm_log_xml_info(digest_data->params_restart, "params:restart");
         required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
         pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL,
                                   "resource definition change", pe_action_optional, TRUE);
 
         trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
 
     } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
         /* Changes that can potentially be handled by a reload */
         const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
 
         did_change = TRUE;
         trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
         crm_log_xml_info(digest_data->params_all, "params:reload");
         key = generate_op_key(rsc->id, task, interval_ms);
 
         if (interval_ms > 0) {
             action_t *op = NULL;
 
 #if 0
             /* Always reload/restart the entire resource */
             ReloadRsc(rsc, active_node, data_set);
 #else
             /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
             op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
             set_bit(op->flags, pe_action_reschedule);
 #endif
 
         } else if (digest_restart) {
             pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
 
             /* Reload this resource */
             ReloadRsc(rsc, active_node, data_set);
             free(key);
 
         } else {
             pe_action_t *required = NULL;
             pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
 
             /* Re-send the start/demote/promote op
              * Recurring ops will be detected independently
              */
             required = custom_action(rsc, key, task, NULL, TRUE, TRUE, data_set);
             pe_action_set_flag_reason(__FUNCTION__, __LINE__, required, NULL,
                                       "resource definition change", pe_action_optional, TRUE);
         }
     }
 
     return did_change;
 }
 
 
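 /* Check a node's recorded operation history for one resource against the
  * current configuration, cancelling or rescheduling actions as needed.
  */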
 static void
 check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     int offset = -1;
     guint interval_ms = 0;
     int stop_index = 0;
     int start_index = 0;
 
     const char *task = NULL;
     const char *interval_ms_s = NULL;
 
     xmlNode *rsc_op = NULL;
     GListPtr op_list = NULL;
     GListPtr sorted_op_list = NULL;
     gboolean is_probe = FALSE;
     gboolean did_change = FALSE;
 
     CRM_CHECK(node != NULL, return);
 
     if (is_set(rsc->flags, pe_rsc_orphan)) {
         resource_t *parent = uber_parent(rsc);
         if(parent == NULL
            || pe_rsc_is_clone(parent) == FALSE
            || is_set(parent->flags, pe_rsc_unique)) {
             pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
             DeleteRsc(rsc, node, FALSE, data_set);
         } else {
             pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
         }
         return;
 
     } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
         if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
             DeleteRsc(rsc, node, FALSE, data_set);
         }
         pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
                      rsc->id, node->details->uname);
         return;
     }
 
     pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
 
     if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
         DeleteRsc(rsc, node, FALSE, data_set);
     }
 
     for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
         if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
             op_list = g_list_prepend(op_list, rsc_op);
         }
     }
 
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
 
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         offset++;
 
         if (start_index < stop_index) {
             /* stopped */
             continue;
         } else if (offset < start_index) {
             /* action occurred prior to a start */
             continue;
         }
 
         is_probe = FALSE;
         did_change = FALSE;
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
 
         interval_ms_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL_MS);
         interval_ms = crm_parse_ms(interval_ms_s);
 
         if ((interval_ms == 0) && safe_str_eq(task, RSC_STATUS)) {
             is_probe = TRUE;
         }
 
         if ((interval_ms > 0) &&
             (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
             CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
 
         } else if (is_probe || (interval_ms > 0)
                    || safe_str_eq(task, RSC_START)
                    || safe_str_eq(task, RSC_PROMOTE)
                    || safe_str_eq(task, RSC_MIGRATED)) {
             did_change = check_action_definition(rsc, node, rsc_op, data_set);
         }
 
         if (did_change && pe_get_failcount(node, rsc, NULL, pe_fc_effective,
                                            NULL, data_set)) {
 
             char *key = NULL;
             action_t *action_clear = NULL;
 
             key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
             action_clear =
                 custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set);
             set_bit(action_clear->flags, pe_action_runnable);
 
             crm_notice("Clearing failure of %s on %s "
                        "because action definition changed " CRM_XS " %s",
                        rsc->id, node->details->uname, action_clear->uuid);
         }
     }
 
     g_list_free(sorted_op_list);
 
 }
 
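+/* Build a list of resources matching the given id: search the entire
+ * working set when rsc is NULL, match clone names too when renamed_clones
+ * is set, and treat id as a substring when partial is set.
+ */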
 static GListPtr
 find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones,
               gboolean partial, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     gboolean match = FALSE;
 
     if (id == NULL) {
         return NULL;
 
     } else if (rsc == NULL && data_set) {
 
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
         }
 
         return result;
 
     } else if (rsc == NULL) {
         return NULL;
     }
 
     if (partial) {
         if (strstr(rsc->id, id)) {
             match = TRUE;
 
         } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
             match = TRUE;
         }
 
     } else {
         if (strcmp(rsc->id, id) == 0) {
             match = TRUE;
 
         } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
             match = TRUE;
         }
     }
 
     if (match) {
         result = g_list_prepend(result, rsc);
     }
 
     if (rsc->children) {
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
         }
     }
 
     return result;
 }
 
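+/* Walk the CIB status section and run check_actions_for() on every
+ * primitive resource with recorded operations, on each node that can run
+ * resources (or is in maintenance mode).
+ */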
 static void
 check_actions(pe_working_set_t * data_set)
 {
     const char *id = NULL;
     node_t *node = NULL;
     xmlNode *lrm_rscs = NULL;
     xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
 
     xmlNode *node_state = NULL;
 
     for (node_state = __xml_first_child(status); node_state != NULL;
          node_state = __xml_next_element(node_state)) {
         if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
             id = crm_element_value(node_state, XML_ATTR_ID);
             lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
             lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
 
             node = pe_find_node_id(data_set->nodes, id);
 
             if (node == NULL) {
                 continue;
 
             /* Still need to check actions for a maintenance node to cancel existing monitor operations */
             } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
                 crm_trace("Skipping param check for %s: can't run resources",
                           node->details->uname);
                 continue;
             }
 
             crm_trace("Processing node %s", node->details->uname);
             if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
                 xmlNode *rsc_entry = NULL;
 
                 for (rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL;
                      rsc_entry = __xml_next_element(rsc_entry)) {
                     if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
 
                         if (xml_has_children(rsc_entry)) {
                             GListPtr gIter = NULL;
                             GListPtr result = NULL;
                             const char *rsc_id = ID(rsc_entry);
 
                             CRM_CHECK(rsc_id != NULL, return);
 
                             result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
                             for (gIter = result; gIter != NULL; gIter = gIter->next) {
                                 resource_t *rsc = (resource_t *) gIter->data;
 
                                 if (rsc->variant != pe_native) {
                                     continue;
                                 }
                                 check_actions_for(rsc_entry, rsc, node, data_set);
                             }
                             g_list_free(result);
                         }
                     }
                 }
             }
         }
     }
 }
 
 static gboolean
 apply_placement_constraints(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying constraints...");
 
     for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
         rsc_to_node_t *cons = (rsc_to_node_t *) gIter->data;
 
         cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
     }
 
     return TRUE;
 
 }
 
 static gboolean
 failcount_clear_action_exists(node_t * node, resource_t * rsc)
 {
     gboolean rc = FALSE;
     char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
     GListPtr list = find_actions_exact(rsc->actions, key, node);
 
     if (list) {
         rc = TRUE;
     }
     g_list_free(list);
     free(key);
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Force resource away if failures hit migration threshold
  *
  * \param[in,out] rsc       Resource to check for failures
  * \param[in,out] node      Node to check for failures
  * \param[in,out] data_set  Cluster working set to update
  */
 static void
 check_migration_threshold(resource_t *rsc, node_t *node,
                           pe_working_set_t *data_set)
 {
     int fail_count, countdown;
     resource_t *failed;
 
     /* Migration threshold of 0 means never force away */
     if (rsc->migration_threshold == 0) {
         return;
     }
 
     // If we're ignoring failures, also ignore the migration threshold
     if (is_set(rsc->flags, pe_rsc_failure_ignored)) {
         return;
     }
 
     /* If there are no failures, there's no need to force away */
     fail_count = pe_get_failcount(node, rsc, NULL,
                                   pe_fc_effective|pe_fc_fillers, NULL,
                                   data_set);
     if (fail_count <= 0) {
         return;
     }
 
     /* How many more times recovery will be tried on this node */
     countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
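+    /* For example, with migration-threshold=3 and 2 effective failures,
+     * countdown is 1: one more recovery attempt is allowed on this node
+     * before the resource is forced away.
+     */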
 
     /* If failed resource has a parent, we'll force the parent away */
     failed = rsc;
     if (is_not_set(rsc->flags, pe_rsc_unique)) {
         failed = uber_parent(rsc);
     }
 
     if (countdown == 0) {
         resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
         crm_warn("Forcing %s away from %s after %d failures (max=%d)",
                  failed->id, node->details->uname, fail_count,
                  rsc->migration_threshold);
     } else {
         crm_info("%s can fail %d more times on %s before being forced off",
                  failed->id, countdown, node->details->uname);
     }
 }
 
 static void
 common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
 {
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child_rsc = (resource_t *) gIter->data;
 
             common_apply_stickiness(child_rsc, node, data_set);
         }
         return;
     }
 
     if (is_set(rsc->flags, pe_rsc_managed)
         && rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) {
         node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
         node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
 
         if (current == NULL) {
 
         } else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) {
             resource_t *sticky_rsc = rsc;
 
             resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
             pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
                          " (node=%s, weight=%d)", sticky_rsc->id,
                          node->details->uname, rsc->stickiness);
         } else {
             GHashTableIter iter;
             node_t *nIter = NULL;
 
             pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
                          " and node %s is not explicitly allowed", rsc->id, node->details->uname);
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
                 crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
             }
         }
     }
 
     /* Check the migration threshold only if a failcount clear action
      * has not already been placed for this resource on the node.
      * There is no sense in potentially forcing the resource from this
      * node if the failcount is being reset anyway. */
     if (failcount_clear_action_exists(node, rsc) == FALSE) {
         check_migration_threshold(rsc, node, data_set);
     }
 }
 
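+/* Point a resource (and, recursively, all of its children) at the
+ * allocation methods for its variant.
+ */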
 void
 complex_set_cmds(resource_t * rsc)
 {
     GListPtr gIter = rsc->children;
 
     rsc->cmds = &resource_class_alloc_functions[rsc->variant];
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         complex_set_cmds(child_rsc);
     }
 }
 
 void
 set_alloc_actions(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         complex_set_cmds(rsc);
     }
 }
 
 static void
 calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
 {
     const char *key = (const char *)gKey;
     const char *value = (const char *)gValue;
     int *system_health = (int *)user_data;
 
     if (!gKey || !gValue || !user_data) {
         return;
     }
 
     if (crm_starts_with(key, "#health")) {
         int score;
 
         /* Convert the value into an integer */
         score = char2score(value);
 
         /* Add it to the running total */
         *system_health = merge_weights(score, *system_health);
     }
 }
 
 static gboolean
 apply_system_health(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
     int base_health = 0;
 
     if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) {
         /* Prevent any accidental health -> score translation */
         node_score_red = 0;
         node_score_yellow = 0;
         node_score_green = 0;
         return TRUE;
 
     } else if (safe_str_eq(health_strategy, "migrate-on-red")) {
 
         /* Resources on nodes which have health values of red are
          * weighted away from that node.
          */
         node_score_red = -INFINITY;
         node_score_yellow = 0;
         node_score_green = 0;
 
     } else if (safe_str_eq(health_strategy, "only-green")) {
 
         /* Resources on nodes which have health values of red or yellow
          * are forced away from that node.
          */
         node_score_red = -INFINITY;
         node_score_yellow = -INFINITY;
         node_score_green = 0;
 
     } else if (safe_str_eq(health_strategy, "progressive")) {
-        /* Same as the above, but use the r/y/g scores provided by the user
-         * Defaults are provided by the pe_prefs table
-         * Also, custom health "base score" can be used
-         */
+        /* Same as the above, but use the red/yellow/green scores provided by
+         * the user (defaults come from the pe_prefs table). A custom health
+         * "base score" can also be used.
+         */
         base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");
 
     } else if (safe_str_eq(health_strategy, "custom")) {
 
-        /* Requires the admin to configure the rsc_location constaints for
+        /* Requires the admin to configure the rsc_location constraints for
          * processing the stored health scores
          */
         /* TODO: Check for the existence of appropriate node health constraints */
         return TRUE;
 
     } else {
         crm_err("Unknown node health strategy: %s", health_strategy);
         return FALSE;
     }
 
     crm_info("Applying automated node health strategy: %s", health_strategy);
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         int system_health = base_health;
         node_t *node = (node_t *) gIter->data;
 
         /* Search through the node hash table for system health entries. */
         g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
 
         crm_info(" Node %s has an combined system health of %d",
                  node->details->uname, system_health);
 
         /* If the health is non-zero, then create a new rsc2node so that the
          * weight will be added later on.
          */
         if (system_health != 0) {
 
             GListPtr gIter2 = data_set->resources;
 
             for (; gIter2 != NULL; gIter2 = gIter2->next) {
                 resource_t *rsc = (resource_t *) gIter2->data;
 
                 rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
             }
         }
     }
 
     return TRUE;
 }
 
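+/*
+ * Calculate cluster status if needed, set allocation methods, apply the
+ * node health strategy, and unpack constraints
+ */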
 gboolean
 stage0(pe_working_set_t * data_set)
 {
     xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
     if (data_set->input == NULL) {
         return FALSE;
     }
 
     if (is_set(data_set->flags, pe_flag_have_status) == FALSE) {
         crm_trace("Calculating status");
         cluster_status(data_set);
     }
 
     set_alloc_actions(data_set);
     apply_system_health(data_set);
     unpack_constraints(cib_constraints, data_set);
 
     return TRUE;
 }
 
 /*
  * Check nodes for resources started outside of the LRM
  */
 gboolean
 probe_resources(pe_working_set_t * data_set)
 {
     action_t *probe_node_complete = NULL;
 
     for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
         const char *probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
 
         if (node->details->online == FALSE) {
 
             if (is_baremetal_remote_node(node) && node->details->remote_rsc
                 && (get_remote_node_state(node) == remote_state_failed)) {
 
                 pe_fence_node(data_set, node, "the connection is unrecoverable");
             }
             continue;
 
         } else if (node->details->unclean) {
             continue;
 
         } else if (node->details->rsc_discovery_enabled == FALSE) {
             /* resource discovery is disabled for this node */
             continue;
         }
 
         if (probed != NULL && crm_is_true(probed) == FALSE) {
             action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
                                                CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
 
             add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
             continue;
         }
 
         for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
             resource_t *rsc = (resource_t *) gIter2->data;
 
             rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
         }
     }
     return TRUE;
 }
 
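+/* When a resource (or its top-level parent) uses exclusive discovery, ban
+ * it from any allowed node whose discovery mode for it is not exclusive.
+ */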
 static void
 rsc_discover_filter(resource_t *rsc, node_t *node)
 {
     GListPtr gIter = rsc->children;
     resource_t *top = uber_parent(rsc);
     node_t *match;
 
     if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
         rsc_discover_filter(child_rsc, node);
     }
 
     match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
     if (match && match->rsc_discover_mode != pe_discover_exclusive) {
         match->weight = -INFINITY;
     }
 }
 
 /*
  * Count how many valid nodes we have (so we know the maximum number of
  *  colors we can resolve).
  *
  * Apply node constraints (i.e. filter the "allowed_nodes" part of resources)
  */
 gboolean
 stage2(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying placement constraints");
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         if (node == NULL) {
             /* error */
 
         } else if (node->weight >= 0.0  /* global weight */
                    && node->details->online && node->details->type != node_ping) {
             data_set->max_valid_nodes++;
         }
     }
 
     apply_placement_constraints(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         GListPtr gIter2 = NULL;
         node_t *node = (node_t *) gIter->data;
 
         gIter2 = data_set->resources;
         for (; gIter2 != NULL; gIter2 = gIter2->next) {
             resource_t *rsc = (resource_t *) gIter2->data;
 
             common_apply_stickiness(rsc, node, data_set);
             rsc_discover_filter(rsc, node);
         }
     }
 
     return TRUE;
 }
 
 /*
  * Create internal resource constraints before allocation
  */
 gboolean
 stage3(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         rsc->cmds->internal_constraints(rsc, data_set);
     }
 
     return TRUE;
 }
 
 /*
  * Check for orphaned or redefined actions
  */
 gboolean
 stage4(pe_working_set_t * data_set)
 {
     check_actions(data_set);
     return TRUE;
 }
 
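+/* Comparator used when a non-default placement strategy is in effect:
+ * order resources by priority, then by their score on the node they are
+ * currently running on, then by their score on each node in the supplied
+ * list.
+ */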
 static gint
 sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
 {
     int rc = 0;
     int r1_weight = -INFINITY;
     int r2_weight = -INFINITY;
 
     const char *reason = "existence";
 
     const GListPtr nodes = (GListPtr) data;
     resource_t *resource1 = (resource_t *) convert_const_pointer(a);
     resource_t *resource2 = (resource_t *) convert_const_pointer(b);
 
     node_t *r1_node = NULL;
     node_t *r2_node = NULL;
     GListPtr gIter = NULL;
     GHashTable *r1_nodes = NULL;
     GHashTable *r2_nodes = NULL;
 
     if (a == NULL && b == NULL) {
         goto done;
     }
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     reason = "priority";
     r1_weight = resource1->priority;
     r2_weight = resource2->priority;
 
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
 
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     reason = "no node list";
     if (nodes == NULL) {
         goto done;
     }
 
     r1_nodes =
         rsc_merge_weights(resource1, resource1->id, NULL, NULL, 1,
                           pe_weights_forward | pe_weights_init);
     dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes);
     r2_nodes =
         rsc_merge_weights(resource2, resource2->id, NULL, NULL, 1,
                           pe_weights_forward | pe_weights_init);
     dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes);
 
     /* Current location score */
     reason = "current location";
     r1_weight = -INFINITY;
     r2_weight = -INFINITY;
 
     if (resource1->running_on) {
         r1_node = pe__current_node(resource1);
         r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
         if (r1_node != NULL) {
             r1_weight = r1_node->weight;
         }
     }
     if (resource2->running_on) {
         r2_node = pe__current_node(resource2);
         r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
         if (r2_node != NULL) {
             r2_weight = r2_node->weight;
         }
     }
 
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
 
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     reason = "score";
     for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         r1_node = NULL;
         r2_node = NULL;
 
         r1_weight = -INFINITY;
         if (r1_nodes) {
             r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
         }
         if (r1_node) {
             r1_weight = r1_node->weight;
         }
 
         r2_weight = -INFINITY;
         if (r2_nodes) {
             r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
         }
         if (r2_node) {
             r2_weight = r2_node->weight;
         }
 
         if (r1_weight > r2_weight) {
             rc = -1;
             goto done;
         }
 
         if (r1_weight < r2_weight) {
             rc = 1;
             goto done;
         }
     }
 
   done:
     crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
               resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
               rc < 0 ? '>' : rc > 0 ? '<' : '=',
               resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
 
     if (r1_nodes) {
         g_hash_table_destroy(r1_nodes);
     }
     if (r2_nodes) {
         g_hash_table_destroy(r2_nodes);
     }
 
     return rc;
 }
 
 static void
 allocate_resources(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         /* Force remote connection resources to be allocated first. This
-         * also forces any colocation dependencies to be allocated as well */
+         * also forces any colocation dependencies to be allocated. */
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             resource_t *rsc = (resource_t *) gIter->data;
             if (rsc->is_remote_node == FALSE) {
                 continue;
             }
             pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
             /* For remote node connection resources, always prefer the partial
              * migration target during resource allocation, if the rsc is in the
              * middle of a migration.
              */
             rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
         }
     }
 
     /* now do the rest of the resources */
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
         if (rsc->is_remote_node == TRUE) {
             continue;
         }
         pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
         rsc->cmds->allocate(rsc, NULL, data_set);
     }
 }
 
 /* We always use pe_order_preserve with these convenience functions to exempt
  * internally generated constraints from the prohibition of user constraints
  * involving remote connection resources.
  *
  * The start ordering additionally uses pe_order_runnable_left so that the
  * specified action is not runnable if the start is not runnable.
  */
 
 static inline void
 order_start_then_action(resource_t *lh_rsc, action_t *rh_action,
                         enum pe_ordering extra, pe_working_set_t *data_set)
 {
     if (lh_rsc && rh_action && data_set) {
         custom_action_order(lh_rsc, start_key(lh_rsc), NULL,
                             rh_action->rsc, NULL, rh_action,
                             pe_order_preserve | pe_order_runnable_left | extra,
                             data_set);
     }
 }
 
 static inline void
 order_action_then_stop(action_t *lh_action, resource_t *rh_rsc,
                        enum pe_ordering extra, pe_working_set_t *data_set)
 {
     if (lh_action && rh_rsc && data_set) {
         custom_action_order(lh_action->rsc, NULL, lh_action,
                             rh_rsc, stop_key(rh_rsc), NULL,
                             pe_order_preserve | extra, data_set);
     }
 }
 
 static void
 cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) {
         return;
     }
 
     /* Don't recurse into ->children, those are just unallocated clone instances */
     if(is_not_set(rsc->flags, pe_rsc_orphan)) {
         return;
     }
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         if (node->details->online
             && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                 data_set)) {
 
             char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
             action_t *clear_op = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT,
                                                node, FALSE, TRUE, data_set);
 
             add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 
             pe_rsc_info(rsc,
                         "Clearing failure of %s on %s because it is orphaned "
                         CRM_XS " %s",
                         rsc->id, node->details->uname, clear_op->uuid);
 
             /* We can't use order_action_then_stop() here because its
              * pe_order_preserve breaks things
              */
             custom_action_order(clear_op->rsc, NULL, clear_op,
                                 rsc, stop_key(rsc), NULL,
                                 pe_order_optional, data_set);
         }
     }
 }
 
 gboolean
 stage5(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (safe_str_neq(data_set->placement_strategy, "default")) {
         GListPtr nodes = g_list_copy(data_set->nodes);
 
         nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL);
 
         data_set->resources =
             g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
 
         g_list_free(nodes);
     }
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Original", node);
     }
 
     crm_trace("Allocating services");
     /* Take (next) highest resource, assign it and create its actions */
 
     allocate_resources(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Remaining", node);
     }
 
     if (is_set(data_set->flags, pe_flag_startup_probes)) {
         crm_trace("Calculating needed probes");
         /* This code probably needs optimization
          * ptest -x with 100 nodes, 100 clones and clone-max=100:
 
          With probes:
 
          ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
          ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
          ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
          ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
          ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
          ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
          ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
          ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
          36s
          ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
 
          Without probes:
 
          ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
          ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
          ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
          ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
         */
 
         probe_resources(data_set);
     }
 
     crm_trace("Handle orphans");
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
         cleanup_orphans(rsc, data_set);
     }
 
     crm_trace("Creating actions");
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         rsc->cmds->create_actions(rsc, data_set);
     }
 
     crm_trace("Creating done");
     return TRUE;
 }
 
 static gboolean
 is_managed(const resource_t * rsc)
 {
     GListPtr gIter = rsc->children;
 
     if (is_set(rsc->flags, pe_rsc_managed)) {
         return TRUE;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         if (is_managed(child_rsc)) {
             return TRUE;
         }
     }
 
     return FALSE;
 }
 
 static gboolean
 any_managed_resources(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         if (is_managed(rsc)) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Create pseudo-op for guest node fence, and order relative to it
  *
  * \param[in] node      Guest node to fence
  * \param[in] done      STONITH_DONE operation
  * \param[in] data_set  Working set of CIB state
  */
 static void
 fence_guest(pe_node_t *node, pe_action_t *done, pe_working_set_t *data_set)
 {
     resource_t *container = node->details->remote_rsc->container;
     pe_action_t *stop = NULL;
     pe_action_t *stonith_op = NULL;
 
     /* The fence action is just a label; we don't do anything differently for
-     * off vs. reboot. We specify it explicitly, rather than let it default to
-     * cluster's default action, because we are not _initiating_ fencing -- we
+     * off vs. reboot. We specify it explicitly, rather than rely on the
+     * cluster's default action, because we are not _initiating_ fencing -- we
      * are creating a pseudo-event to describe fencing that is already occurring
      * by other means (container recovery).
      */
     const char *fence_action = "off";
 
-    /* Check whether guest's container resource is has any explicit stop or
+    /* Check whether guest's container resource has any explicit stop or
      * start (the stop may be implied by fencing of the guest's host).
      */
     if (container) {
         stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);
 
         if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
             fence_action = "reboot";
         }
     }
 
     /* Create a fence pseudo-event, so we have an event to order actions
-     * against, and crmd can always detect it.
+     * against, and the controller can always detect it.
      */
     stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", data_set);
     update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
                         __FUNCTION__, __LINE__);
 
     /* We want to imply stops/demotes after the guest is stopped, not wait until
      * it is restarted, so we always order pseudo-fencing after stop, not start
      * (even though start might be closer to what is done for a real reboot).
      */
     if(stop && is_set(stop->flags, pe_action_pseudo)) {
         pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE, NULL, data_set);
         crm_info("Implying guest node %s is down (action %d) after %s fencing",
                  node->details->uname, stonith_op->id, stop->node->details->uname);
         order_actions(parent_stonith_op, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
 
     } else if (stop) {
         order_actions(stop, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
         crm_info("Implying guest node %s is down (action %d) "
                  "after container %s is stopped (action %d)",
                  node->details->uname, stonith_op->id,
                  container->id, stop->id);
     } else {
         crm_info("Implying guest node %s is down (action %d) ",
                  node->details->uname, stonith_op->id);
     }
 
     /* @TODO: Order pseudo-fence after any (optional) fence of guest's host */
 
     /* Order/imply other actions relative to pseudo-fence as with real fence */
     stonith_constraints(node, stonith_op, data_set);
     if(done) {
         order_actions(stonith_op, done, pe_order_implies_then);
     }
 }
 
 /*
  * Create dependencies for stonith and shutdown operations
  */
 gboolean
 stage6(pe_working_set_t * data_set)
 {
     action_t *dc_down = NULL;
     action_t *dc_fence = NULL;
     action_t *stonith_op = NULL;
     action_t *last_stonith = NULL;
     gboolean integrity_lost = FALSE;
     action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set);
     action_t *done = get_pseudo_op(STONITH_DONE, data_set);
     gboolean need_stonith = TRUE;
     GListPtr gIter;
     GListPtr stonith_ops = NULL;
 
-    /* Remote ordering constraints need to happen prior to calculate
-     * fencing because it is one more place we will mark the node as
-     * dirty.
+    /* Remote ordering constraints need to happen before we calculate
+     * fencing, because this is one more place where we will mark the
+     * node as dirty.
      *
      * A nice side-effect of doing it first is that we can remove a
-     * bunch of special logic from apply_*_ordering() because its
-     * already part of pe_fence_node()
+     * bunch of special logic from apply_*_ordering(), because it is
+     * already part of pe_fence_node().
      */
     crm_trace("Creating remote ordering constraints");
     apply_remote_node_ordering(data_set);
 
     crm_trace("Processing fencing and shutdown cases");
     if (any_managed_resources(data_set) == FALSE) {
         crm_notice("Delaying fencing operations until there are resources to manage");
         need_stonith = FALSE;
     }
 
     /* Check each node for stonith/shutdown */
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         /* Guest nodes are "fenced" by recovering their container resource,
          * so handle them separately.
          */
         if (is_container_remote_node(node)) {
             if (node->details->remote_requires_reset && need_stonith) {
                 fence_guest(node, done, data_set);
             }
             continue;
         }
 
         stonith_op = NULL;
 
         if (node->details->unclean
             && need_stonith && pe_can_fence(data_set, node)) {
 
             stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", data_set);
             pe_warn("Scheduling Node %s for STONITH", node->details->uname);
 
             stonith_constraints(node, stonith_op, data_set);
 
             if (node->details->is_dc) {
                 dc_down = stonith_op;
                 dc_fence = stonith_op;
 
             } else if (is_set(data_set->flags, pe_flag_concurrent_fencing) == FALSE) {
                 if (last_stonith) {
                     order_actions(last_stonith, stonith_op, pe_order_optional);
                 }
                 last_stonith = stonith_op;
 
             } else {
                 order_actions(stonith_op, done, pe_order_implies_then);
                 stonith_ops = g_list_append(stonith_ops, stonith_op);
             }
 
         } else if (node->details->online && node->details->shutdown &&
                 /* TODO define what a shutdown op means for a remote node.
                  * For now we do not send shutdown operations for remote nodes, but
                  * if we can come up with a good use for this in the future, we will. */
                     is_remote_node(node) == FALSE) {
 
             action_t *down_op = NULL;
 
             crm_notice("Scheduling Node %s for shutdown", node->details->uname);
 
             down_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname),
                                     CRM_OP_SHUTDOWN, node, FALSE, TRUE, data_set);
 
             shutdown_constraints(node, down_op, data_set);
             add_hash_param(down_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 
             if (node->details->is_dc) {
                 dc_down = down_op;
             }
         }
 
         if (node->details->unclean && stonith_op == NULL) {
             integrity_lost = TRUE;
             pe_warn("Node %s is unclean!", node->details->uname);
         }
     }
 
     if (integrity_lost) {
         if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
             pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
             pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
 
         } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) {
             crm_notice("Cannot fence unclean nodes until quorum is"
                        " attained (or no-quorum-policy is set to ignore)");
         }
     }
 
     if (dc_down != NULL) {
         GListPtr gIter = NULL;
 
         crm_trace("Ordering shutdowns before %s on %s (DC)",
                   dc_down->task, dc_down->node->details->uname);
 
         add_hash_param(dc_down->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 
         for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
             action_t *node_stop = (action_t *) gIter->data;
 
             if (safe_str_neq(CRM_OP_SHUTDOWN, node_stop->task)) {
                 continue;
             } else if (node_stop->node->details->is_dc) {
                 continue;
             }
 
             crm_debug("Ordering shutdown on %s before %s on %s",
                       node_stop->node->details->uname,
                       dc_down->task, dc_down->node->details->uname);
 
             order_actions(node_stop, dc_down, pe_order_optional);
         }
 
         if (last_stonith) {
             if (dc_down != last_stonith) {
                 order_actions(last_stonith, dc_down, pe_order_optional);
             }
 
         } else {
             GListPtr gIter2 = NULL;
 
             for (gIter2 = stonith_ops; gIter2 != NULL; gIter2 = gIter2->next) {
                 stonith_op = (action_t *) gIter2->data;
 
                 if (dc_down != stonith_op) {
                     order_actions(stonith_op, dc_down, pe_order_optional);
                 }
             }
         }
     }
 
 
     if (dc_fence) {
         order_actions(dc_down, done, pe_order_implies_then);
 
     } else if (last_stonith) {
         order_actions(last_stonith, done, pe_order_implies_then);
     }
 
     order_actions(done, all_stopped, pe_order_implies_then);
 
     g_list_free(stonith_ops);
     return TRUE;
 }
 
 /*
  * Determine the sets of independent actions and the correct order for the
  *  actions in each set.
  *
  * Mark dependencies of un-runnable actions un-runnable
  *
  */
 static GListPtr
 find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key)
 {
     GListPtr list = NULL;
 
     list = find_actions(actions, original_key, NULL);
     if (list == NULL) {
         /* we're potentially searching a child of the original resource */
         char *key = NULL;
         char *task = NULL;
         guint interval_ms = 0;
 
         if (parse_op_key(original_key, NULL, &task, &interval_ms)) {
             key = generate_op_key(rsc->id, task, interval_ms);
             list = find_actions(actions, key, NULL);
 
         } else {
             crm_err("search key: %s", original_key);
         }
 
         free(key);
         free(task);
     }
 
     return list;
 }
 
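+/* Apply an ordering constraint to its "then" side: find the right-hand
+ * action(s) and order each after the given left-hand action, or mark them
+ * unrunnable when there is no left-hand action and the ordering implies
+ * the "then" action.
+ */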
 static void
 rsc_order_then(action_t * lh_action, resource_t * rsc, order_constraint_t * order)
 {
     GListPtr gIter = NULL;
     GListPtr rh_actions = NULL;
     action_t *rh_action = NULL;
     enum pe_ordering type;
 
     CRM_CHECK(rsc != NULL, return);
     CRM_CHECK(order != NULL, return);
 
     type = order->type;
     rh_action = order->rh_action;
     crm_trace("Processing RH of ordering constraint %d", order->id);
 
     if (rh_action != NULL) {
         rh_actions = g_list_prepend(NULL, rh_action);
 
     } else if (rsc != NULL) {
         rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
     }
 
     if (rh_actions == NULL) {
         pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
                      " ignoring", rsc->id, order->rh_action_task);
         if (lh_action) {
             pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
         }
         return;
     }
 
     if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) {
         pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
                      order->rh_action_task);
         clear_bit(type, pe_order_implies_then);
     }
 
     gIter = rh_actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *rh_action_iter = (action_t *) gIter->data;
 
         if (lh_action) {
             order_actions(lh_action, rh_action_iter, type);
 
         } else if (type & pe_order_implies_then) {
             update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
             crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type);
         } else {
             crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type);
         }
     }
 
     g_list_free(rh_actions);
 }
 
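+/* Apply an ordering constraint from its "first" side: find the left-hand
+ * action(s), creating one if it does not exist but is still meaningful,
+ * then order each against the constraint's right-hand resource or action.
+ */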
 static void
 rsc_order_first(resource_t * lh_rsc, order_constraint_t * order, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     GListPtr lh_actions = NULL;
     action_t *lh_action = order->lh_action;
     resource_t *rh_rsc = order->rh_rsc;
 
     crm_trace("Processing LH of ordering constraint %d", order->id);
     CRM_ASSERT(lh_rsc != NULL);
 
     if (lh_action != NULL) {
         lh_actions = g_list_prepend(NULL, lh_action);
 
     } else {
         lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
     }
 
     if (lh_actions == NULL && lh_rsc != rh_rsc) {
         char *key = NULL;
         char *op_type = NULL;
         guint interval_ms = 0;
 
         parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
         key = generate_op_key(lh_rsc->id, op_type, interval_ms);
 
         if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) {
             free(key);
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
 
         } else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) {
             free(key);
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
 
         } else {
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
             lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
             lh_actions = g_list_prepend(NULL, lh_action);
         }
 
         free(op_type);
     }
 
     gIter = lh_actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *lh_action_iter = (action_t *) gIter->data;
 
         if (rh_rsc == NULL && order->rh_action) {
             rh_rsc = order->rh_action->rsc;
         }
         if (rh_rsc) {
             rsc_order_then(lh_action_iter, rh_rsc, order);
 
         } else if (order->rh_action) {
             order_actions(lh_action_iter, order->rh_action, order->type);
         }
     }
 
     g_list_free(lh_actions);
 }
 
 extern gboolean update_action(action_t * action);
 extern void update_colo_start_chain(action_t * action);
 
 static int
 is_recurring_action(action_t *action) 
 {
     const char *interval_ms_s = g_hash_table_lookup(action->meta,
                                                     XML_LRM_ATTR_INTERVAL_MS);
     guint interval_ms = crm_parse_ms(interval_ms_s);
 
     return (interval_ms > 0);
 }
 
 static void
 apply_container_ordering(action_t *action, pe_working_set_t *data_set)
 {
     /* VMs are also classified as containers for these purposes... in
      * that they both involve a 'thing' running on a real or remote
      * cluster node.
      *
      * This allows us to be smarter about the type and extent of
      * recovery actions required in various scenarios
      */
     resource_t *remote_rsc = NULL;
     resource_t *container = NULL;
     enum action_tasks task = text2task(action->task);
 
     CRM_ASSERT(action->rsc);
     CRM_ASSERT(action->node);
     CRM_ASSERT(is_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     container = remote_rsc->container;
     CRM_ASSERT(container);
 
     if(is_set(container->flags, pe_rsc_failed)) {
         pe_fence_node(data_set, action->node, "container failed");
     }
 
     crm_trace("Order %s action %s relative to %s%s for %s%s",
               action->task, action->uuid,
               is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id,
               is_set(container->flags, pe_rsc_failed)? "failed " : "",
               container->id);
 
     if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE)
         || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) {
         /* Migration ops map to "no_action", but we need to apply the same
          * ordering as for stop or demote (see get_router_node()).
          */
         task = stop_rsc;
     }
 
     switch (task) {
         case start_rsc:
         case action_promote:
             /* Force resource recovery if the container is recovered */
             order_start_then_action(container, action, pe_order_implies_then,
                                     data_set);
 
             /* Wait for the connection resource to be up too */
             order_start_then_action(remote_rsc, action, pe_order_none,
                                     data_set);
             break;
 
         case stop_rsc:
         case action_demote:
             if (is_set(container->flags, pe_rsc_failed)) {
                 /* When the container representing a guest node fails, any stop
                  * or demote actions for resources running on the guest node
                  * are implied by the container stopping. This is similar to
                  * how fencing operations work for cluster nodes and remote
                  * nodes.
                  */
             } else {
                 /* Ensure the operation happens before the connection is brought
                  * down.
                  *
                  * If we really wanted to, we could order these after the
                  * connection start, IFF the container's current role was
                  * stopped (otherwise we re-introduce an ordering loop when the
                  * connection is restarting).
                  */
                 order_action_then_stop(action, remote_rsc, pe_order_none,
                                        data_set);
             }
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 if(task != no_action) {
                     order_start_then_action(remote_rsc, action,
                                             pe_order_implies_then, data_set);
                 }
             } else {
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             }
             break;
     }
 }
 
 static enum remote_connection_state
 get_remote_node_state(pe_node_t *node) 
 {
     resource_t *remote_rsc = NULL;
     node_t *cluster_node = NULL;
 
     CRM_ASSERT(node);
 
     remote_rsc = node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     cluster_node = pe__current_node(remote_rsc);
 
     /* If the cluster node the remote connection resource resides on
      * is unclean or went offline, we can't process any operations
      * on that remote node until after it starts elsewhere.
      */
     if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) {
         /* The connection resource is not going to run anywhere */
 
         if (cluster_node && cluster_node->details->unclean) {
             /* The remote connection is failed because its resource is on a
              * failed node and can't be recovered elsewhere, so we must fence.
              */
             return remote_state_failed;
         }
 
         if (is_not_set(remote_rsc->flags, pe_rsc_failed)) {
             /* Connection resource is cleanly stopped */
             return remote_state_stopped;
         }
 
         /* Connection resource is failed */
 
         if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
             && remote_rsc->remote_reconnect_ms
             && node->details->remote_was_fenced) {
 
             /* We won't know whether the connection is recoverable until the
              * reconnect interval expires and we reattempt connection.
              */
             return remote_state_unknown;
         }
 
         /* The remote connection is in a failed state. If there are any
          * resources known to be active on it (stop) or in an unknown state
          * (probe), we must assume the worst and fence it.
          */
         return remote_state_failed;
 
     } else if (cluster_node == NULL) {
         /* Connection is recoverable but not currently running anywhere, see if we can recover it first */
         return remote_state_unknown;
 
     } else if(cluster_node->details->unclean == TRUE
               || cluster_node->details->online == FALSE) {
         /* Connection is running on a dead node, see if we can recover it first */
         return remote_state_resting;
 
     } else if (g_list_length(remote_rsc->running_on) > 1
                && remote_rsc->partial_migration_source
                && remote_rsc->partial_migration_target) {
         /* We're in the middle of migrating a connection resource,
          * wait until after the resource migrates before performing
          * any actions.
          */
         return remote_state_resting;
 
     }
     return remote_state_alive;
 }
 
 /*!
  * \internal
  * \brief Order actions on remote node relative to actions for the connection
  */
 static void
 apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
 {
     resource_t *remote_rsc = NULL;
     enum action_tasks task = text2task(action->task);
     enum remote_connection_state state = get_remote_node_state(action->node);
 
     enum pe_ordering order_opts = pe_order_none;
 
     if (action->rsc == NULL) {
         return;
     }
 
     CRM_ASSERT(action->node);
     CRM_ASSERT(is_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     crm_trace("Order %s action %s relative to %s%s (state: %s)",
               action->task, action->uuid,
               is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id, state2text(state));
 
     if (safe_str_eq(action->task, CRMD_ACTION_MIGRATE)
         || safe_str_eq(action->task, CRMD_ACTION_MIGRATED)) {
         /* Migration ops map to "no_action", but we need to apply the same
          * ordering as for stop or demote (see get_router_node()).
          */
         task = stop_rsc;
     }
 
     switch (task) {
         case start_rsc:
         case action_promote:
             order_opts = pe_order_none;
 
             if (state == remote_state_failed) {
                 /* Force recovery, by making this action required */
                 order_opts |= pe_order_implies_then;
             }
 
             /* Ensure connection is up before running this action */
             order_start_then_action(remote_rsc, action, order_opts, data_set);
             break;
 
         case stop_rsc:
             if(state == remote_state_alive) {
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);
 
             } else if(state == remote_state_failed) {
                 /* We would only be here if the resource is
                  * running on the remote node.  Since we have no
                  * way to stop it, it is necessary to fence the
                  * node.
                  */
                 pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable");
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);
 
             } else if(remote_rsc->next_role == RSC_ROLE_STOPPED) {
                 /* State must be remote_state_unknown or remote_state_stopped.
                  * Since the connection is not coming back up in this
                  * transition, stop this resource first.
                  */
                 order_action_then_stop(action, remote_rsc,
                                        pe_order_implies_first, data_set);
 
             } else {
                 /* The connection is going to be started somewhere else, so
                  * stop this resource after that completes.
                  */
                 order_start_then_action(remote_rsc, action, pe_order_none, data_set);
             }
             break;
 
         case action_demote:
             /* Only order this demote relative to the connection start if the
              * connection isn't being torn down. Otherwise, the demote would be
              * blocked because the connection start would not be allowed.
              */
             if(state == remote_state_resting || state == remote_state_unknown) {
                 order_start_then_action(remote_rsc, action, pe_order_none,
                                         data_set);
             } /* Otherwise we can rely on the stop ordering */
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 order_start_then_action(remote_rsc, action,
                                         pe_order_implies_then, data_set);
 
             } else {
                 node_t *cluster_node = pe__current_node(remote_rsc);
 
                 if(task == monitor_rsc && state == remote_state_failed) {
                     /* We would only be here if we do not know the
                      * state of the resource on the remote node.
                      * Since we have no way to find out, it is
                      * necessary to fence the node.
                      */
                     pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable");
                 }
 
                 if(cluster_node && state == remote_state_stopped) {
                     /* The connection is currently up, but is going
                      * down permanently.
                      *
                      * Make sure we check services are actually
                      * stopped _before_ we let the connection get
                      * closed
                      */
                     order_action_then_stop(action, remote_rsc,
                                            pe_order_runnable_left, data_set);
 
                 } else {
                     order_start_then_action(remote_rsc, action, pe_order_none,
                                             data_set);
                 }
             }
             break;
     }
 }
 
 static void
 apply_remote_node_ordering(pe_working_set_t *data_set)
 {
     if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) {
         return;
     }
 
     for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
         resource_t *remote = NULL;
 
         // We are only interested in resource actions
         if (action->rsc == NULL) {
             continue;
         }
 
         /* Special case: If we are clearing the failcount of an actual
          * remote connection resource, then make sure this happens before
          * any start of the resource in this transition.
          */
         if (action->rsc->is_remote_node &&
             safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
 
             custom_action_order(action->rsc,
                 NULL,
                 action,
                 action->rsc,
                 generate_op_key(action->rsc->id, RSC_START, 0),
                 NULL,
                 pe_order_optional,
                 data_set);
 
             continue;
         }
 
         // We are only interested in actions allocated to a node
         if (action->node == NULL) {
             continue;
         }
 
         if (is_remote_node(action->node) == FALSE) {
             continue;
         }
 
         /* We are only interested in real actions.
          *
          * @TODO This is probably wrong; pseudo-actions might be converted to
          * real actions and vice versa later in update_actions() at the end of
          * stage7().
          */
         if (is_set(action->flags, pe_action_pseudo)) {
             continue;
         }
 
         remote = action->node->details->remote_rsc;
         if (remote == NULL) {
             // Orphaned
             continue;
         }
 
         /* The action occurs across a remote connection, so create
          * ordering constraints that guarantee the action occurs while the node
          * is active (after start, before stop ... things like that).
          *
          * This is somewhat brittle in that we need to make sure the results of
          * this ordering are compatible with the result of get_router_node().
          * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
          * of this logic rather than in action2xml().
          */
         if (remote->container) {
             crm_trace("Container ordering for %s", action->uuid);
             apply_container_ordering(action, data_set);
 
         } else {
             crm_trace("Remote ordering for %s", action->uuid);
             apply_remote_ordering(action, data_set);
         }
     }
 }
 
 static void
 order_probes(pe_working_set_t * data_set) 
 {
 #if 0
     GListPtr gIter = NULL;
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         /* Given "A then B", we would prefer to wait for A to be
          * started before probing B.
          *
          * If A was a filesystem on which the binaries and data for B
          * lived, it would have been useful if the author of B's agent
          * could assume that A is running before B.monitor will be
          * called.
          *
          * However we can't _only_ probe once A is running, otherwise
          * we'd not detect the state of B if A could not be started
          * for some reason.
          *
          * In practice however, we cannot even do an opportunistic
          * version of this because B may be moving:
          *
          *   B.probe -> B.start
          *   B.probe -> B.stop
          *   B.stop -> B.start
          *   A.stop -> A.start
          *   A.start -> B.probe
          *
          * So far so good, but if we add the result of this code:
          *
          *   B.stop -> A.stop
          *
          * Then we get a loop:
          *
          *   B.probe -> B.stop -> A.stop -> A.start -> B.probe
          *
          * We could kill the 'B.probe -> B.stop' dependency, but that
          * could mean stopping B too soon, because B.start must wait
          * for the probes to complete.
          *
          * Another option is to allow it only if A is a non-unique
          * clone with clone-max == node-max (since we'll never be
          * moving it).  However, we could still be stopping one
          * instance at the same time as starting another.
          *
          * The complexity of checking for allowed conditions combined
          * with the ever narrowing usecase suggests that this code
          * should remain disabled until someone gets smarter.
          */
         action_t *start = NULL;
         GListPtr actions = NULL;
         GListPtr probes = NULL;
         char *key = NULL;
 
         key = start_key(rsc);
         actions = find_actions(rsc->actions, key, NULL);
         free(key);
 
         if (actions) {
             start = actions->data;
             g_list_free(actions);
         }
 
         if(start == NULL) {
             crm_err("No start action for %s", rsc->id);
             continue;
         }
 
         key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0);
         probes = find_actions(rsc->actions, key, NULL);
         free(key);
 
         for (actions = start->actions_before; actions != NULL; actions = actions->next) {
             action_wrapper_t *before = (action_wrapper_t *) actions->data;
 
             GListPtr pIter = NULL;
             action_t *first = before->action;
             resource_t *first_rsc = first->rsc;
 
             if(first->required_runnable_before) {
                 GListPtr clone_actions = NULL;
                 for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
                     before = (action_wrapper_t *) clone_actions->data;
 
                     crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
 
                     CRM_ASSERT(before->action->rsc);
                     first_rsc = before->action->rsc;
                     break;
                 }
 
             } else if(safe_str_neq(first->task, RSC_START)) {
                 crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
             }
 
             if(first_rsc == NULL) {
                 continue;
 
             } else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
                 crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
                 continue;
 
             } else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
                 crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
                 continue;
             }
 
             crm_err("Applying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
 
             for (pIter = probes; pIter != NULL; pIter = pIter->next) {
                 action_t *probe = (action_t *) pIter->data;
 
                 crm_err("Ordering %s before %s", first->uuid, probe->uuid);
                 order_actions(first, probe, pe_order_optional);
             }
         }
     }
 #endif
 }
 
 gboolean
 stage7(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying ordering constraints");
 
     /* Don't ask me why, but apparently ordering constraints need to be
      * processed in the order they were created... go figure.
      *
      * Also, g_list_append() has horrendous performance characteristics,
      * so we use g_list_prepend() when building the list and then reverse
      * it here.
      */
     data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
 
     for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
         order_constraint_t *order = (order_constraint_t *) gIter->data;
         resource_t *rsc = order->lh_rsc;
 
         crm_trace("Applying ordering constraint: %d", order->id);
 
         if (rsc != NULL) {
             crm_trace("rsc_action-to-*");
             rsc_order_first(rsc, order, data_set);
             continue;
         }
 
         rsc = order->rh_rsc;
         if (rsc != NULL) {
             crm_trace("action-to-rsc_action");
             rsc_order_then(order->lh_action, rsc, order);
 
         } else {
             crm_trace("action-to-action");
             order_actions(order->lh_action, order->rh_action, order->type);
         }
     }
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         update_colo_start_chain(action);
     }
 
     crm_trace("Ordering probes");
     order_probes(data_set);
 
     crm_trace("Updating %d actions", g_list_length(data_set->actions));
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         update_action(action);
     }
 
     LogNodeActions(data_set, FALSE);
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         LogActions(rsc, data_set, FALSE);
     }
     return TRUE;
 }
 
 int transition_id = -1;
 
 /*
  * Create a dependency graph to send to the transitioner (via the controller)
  */
 gboolean
 stage8(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     const char *value = NULL;
 
     transition_id++;
     crm_trace("Creating transition graph %d.", transition_id);
 
     data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
 
     value = pe_pref(data_set->config_hash, "cluster-delay");
     crm_xml_add(data_set->graph, "cluster-delay", value);
 
     value = pe_pref(data_set->config_hash, "stonith-timeout");
     crm_xml_add(data_set->graph, "stonith-timeout", value);
 
     crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
 
     if (is_set(data_set->flags, pe_flag_start_failure_fatal)) {
         crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
     } else {
         crm_xml_add(data_set->graph, "failed-start-offset", "1");
     }
 
     value = pe_pref(data_set->config_hash, "batch-limit");
     crm_xml_add(data_set->graph, "batch-limit", value);
 
     crm_xml_add_int(data_set->graph, "transition_id", transition_id);
 
     value = pe_pref(data_set->config_hash, "migration-limit");
     if (crm_int_helper(value, NULL) > 0) {
         crm_xml_add(data_set->graph, "migration-limit", value);
     }
 
 /* errors...
    slist_iter(action, action_t, action_list, lpc,
    if(action->optional == FALSE && action->runnable == FALSE) {
    print_action("Ignoring", action, TRUE);
    }
    );
 */
 
     gIter = data_set->resources;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
         rsc->cmds->expand(rsc, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "created resource-driven action list");
 
     /* pseudo action to distribute list of nodes with maintenance state update */
     add_maintenance_update(data_set);
 
     /* catch any non-resource specific actions */
     crm_trace("processing non-resource actions");
 
     gIter = data_set->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         if (action->rsc
             && action->node
             && action->node->details->shutdown
             && is_not_set(action->rsc->flags, pe_rsc_maintenance)
             && is_not_set(action->flags, pe_action_optional)
             && is_not_set(action->flags, pe_action_runnable)
             && crm_str_eq(action->task, RSC_STOP, TRUE)
             ) {
             /* Eventually we should just ignore the 'fence' case, but for
              * now it's the best way to detect (in CTS) when CIB resource
              * updates are being lost.
              */
             if (is_set(data_set->flags, pe_flag_have_quorum)
                 || data_set->no_quorum_policy == no_quorum_ignore) {
                 crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
                          action->node->details->unclean ? "fence" : "shut down",
                          action->node->details->uname, action->rsc->id,
                          is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked",
                          is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "",
                          action->uuid);
             }
         }
 
         graph_element_from_action(action, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "created generic action list");
     crm_trace("Created transition graph %d.", transition_id);
 
     return TRUE;
 }
 
 void
 LogNodeActions(pe_working_set_t * data_set, gboolean terminal)
 {
     GListPtr gIter = NULL;
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         char *node_name = NULL;
         char *task = NULL;
         action_t *action = (action_t *) gIter->data;
 
         if (action->rsc != NULL) {
             continue;
         } else if (is_set(action->flags, pe_action_optional)) {
             continue;
         }
 
         if (is_container_remote_node(action->node)) {
             node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname, action->node->details->remote_rsc->container->id);
         } else if(action->node) {
             node_name = crm_strdup_printf("%s", action->node->details->uname);
         }
 
 
         if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
             task = strdup("Shutdown");
         } else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
             const char *op = g_hash_table_lookup(action->meta, "stonith_action");
             task = crm_strdup_printf("Fence (%s)", op);
         }
 
         if(task == NULL) {
             /* Nothing to report */
         } else if(terminal && action->reason) {
             printf(" * %s %s '%s'\n", task, node_name, action->reason);
         } else if(terminal) {
             printf(" * %s %s\n", task, node_name);
         } else if(action->reason) {
             crm_notice(" * %s %s '%s'\n", task, node_name, action->reason);
         } else {
             crm_notice(" * %s %s\n", task, node_name);
         }
 
         free(node_name);
         free(task);
     }
 }
 
 void
 cleanup_alloc_calculations(pe_working_set_t * data_set)
 {
     if (data_set == NULL) {
         return;
     }
 
     crm_trace("deleting %d order cons: %p",
               g_list_length(data_set->ordering_constraints), data_set->ordering_constraints);
     pe_free_ordering(data_set->ordering_constraints);
     data_set->ordering_constraints = NULL;
 
     crm_trace("deleting %d node cons: %p",
               g_list_length(data_set->placement_constraints), data_set->placement_constraints);
     pe_free_rsc_to_node(data_set->placement_constraints);
     data_set->placement_constraints = NULL;
 
     crm_trace("deleting %d inter-resource cons: %p",
               g_list_length(data_set->colocation_constraints), data_set->colocation_constraints);
     g_list_free_full(data_set->colocation_constraints, free);
     data_set->colocation_constraints = NULL;
 
     crm_trace("deleting %d ticket deps: %p",
               g_list_length(data_set->ticket_constraints), data_set->ticket_constraints);
     g_list_free_full(data_set->ticket_constraints, free);
     data_set->ticket_constraints = NULL;
 
     cleanup_calculations(data_set);
 }
diff --git a/extra/resources/controld b/extra/resources/controld
index 7c44845bfc..97eb8ee32f 100755
--- a/extra/resources/controld
+++ b/extra/resources/controld
@@ -1,298 +1,298 @@
 #!/bin/sh
 #
 # OCF resource agent for managing the DLM controld process
 #
 # Copyright 2009-2018 Novell, Inc
 #                    All Rights Reserved.
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of version 2 of the GNU General Public License as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it would be useful, but
 # WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 #
 # Further, this software is distributed without any warranty that it is
 # free of the rightful claim of any third person regarding infringement
 # or the like.  Any license provided herein, whether implied or
 # otherwise, applies only to this software file.  Patent licenses, if
 # any, provided herein do not apply to combinations of this program with
 # other software, or any other product whatsoever.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write the Free Software Foundation,
 # Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 #
 
 #######################################################################
 # Initialization:
 
 : ${OCF_FUNCTIONS=${OCF_ROOT}/resource.d/heartbeat/.ocf-shellfuncs}
 . ${OCF_FUNCTIONS}
 : ${__OCF_ACTION=$1}
 
 #######################################################################
 
 if [ -e "$OCF_ROOT/resource.d/heartbeat/controld" ]; then
     ocf_log info "Using heartbeat controld agent"
     $OCF_ROOT/resource.d/heartbeat/controld $1
     exit $?
 fi
 
 meta_data() {
     cat <<END
 <?xml version="1.0"?>
 <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
 <resource-agent name="controld" version="1.1">
 <version>1.0</version>
 
 <longdesc lang="en">
 This Resource Agent can control the dlm_controld services needed by cluster-aware file systems.
 It assumes that dlm_controld is in your default PATH.
 In most cases, it should be run as an anonymous clone.
 </longdesc>
 <shortdesc lang="en">DLM Agent for cluster file systems</shortdesc>
 
 <parameters>
 
 <parameter name="args" unique="1">
 <longdesc lang="en">
 Any additional options to start the dlm_controld service with
 </longdesc>
 <shortdesc lang="en">DLM Options</shortdesc>
 <content type="string" default="-s 0" />
 </parameter>
 
 <parameter name="daemon" unique="1">
 <longdesc lang="en">
-The daemon to start - supports gfs_controld(.pcmk) and dlm_controld(.pcmk)
+The daemon to start - supports gfs_controld and dlm_controld
 </longdesc>
 <shortdesc lang="en">The daemon to start</shortdesc>
-<content type="string" default="dlm_controld.pcmk" />
+<content type="string" default="dlm_controld" />
 </parameter>
 
 <parameter name="allow_stonith_disabled">
 <longdesc lang="en">
 Allow DLM start-up even if STONITH/fencing is disabled in the cluster.
 
 Setting this option to true will cause cluster malfunction and hangs on
 fail-over for DLM clients that require fencing (such as GFS2, OCFS2, and
 cLVM2).
 
 This option is for advanced use only.
 </longdesc>
 <shortdesc lang="en">Allow start-up even without STONITH/fencing</shortdesc>
 <content type="string" default="false" />
 </parameter>
 
 </parameters>
 
 <actions>
 <action name="start"        timeout="90" />
 <action name="stop"         timeout="100" />
 <action name="monitor"      timeout="20" interval="10" depth="0" start-delay="0" />
 <action name="meta-data"    timeout="5" />
 <action name="validate-all"   timeout="30" />
 </actions>
 </resource-agent>
 END
 }
 
 #######################################################################
 
 CONFIGFS_DIR="/sys/kernel/config"
 DLM_CONFIGFS_DIR="${CONFIGFS_DIR}/dlm"
 DLM_SYSFS_DIR="/sys/kernel/dlm"
 
 controld_usage() {
     cat <<END
 usage: $0 {start|stop|monitor|validate-all|meta-data}
 
 Expects to have a fully populated OCF RA-compliant environment set.
 END
 }
 
 check_uncontrolled_locks()
 {
     CUL_TMP=$(ls $DLM_SYSFS_DIR 2>&1)
     if [ $? -eq 0 ]; then
         if [ -n "$CUL_TMP" ]; then
 
             ocf_log err "Uncontrolled lockspace exists, system must reboot. Executing suicide fencing"
             stonith_admin --reboot="$(crm_node -n)" --tag controld
 
             exit $OCF_ERR_GENERIC
         fi
     fi
 }
 
 controld_start() {
     controld_monitor; rc=$?
 
     case $rc in
       $OCF_SUCCESS)     return $OCF_SUCCESS;;
       $OCF_NOT_RUNNING) ;;
       *) return $OCF_ERR_GENERIC;;
     esac
 
     # Ensure configfs is mounted
     if [ ! -e "$CONFIGFS_DIR" ]; then
         modprobe configfs
         if [ ! -e "$CONFIGFS_DIR" ]; then
            ocf_log err "$CONFIGFS_DIR not available"
            return $OCF_ERR_INSTALLED
         fi
     fi
     mount -t configfs | grep " $CONFIGFS_DIR " >/dev/null 2>/dev/null
     if [ $? -ne 0 ]; then
        mount -t configfs none "$CONFIGFS_DIR"
     fi
 
     # Ensure DLM is available
     if [ ! -e "$DLM_CONFIGFS_DIR" ]; then
        modprobe dlm
        if [ ! -e "$DLM_CONFIGFS_DIR" ]; then
           ocf_log err "$DLM_CONFIGFS_DIR not available"
           return $OCF_ERR_INSTALLED
        fi
     fi
 
     if ! ocf_is_true "$OCF_RESKEY_allow_stonith_disabled" && \
         ! ocf_is_true "`crm_attribute --type=crm_config --name=stonith-enabled --query --quiet --default=true`"; then
         ocf_log err "The cluster property stonith-enabled may not be deactivated to use the DLM"
         return $OCF_ERR_CONFIGURED
     fi
 
     ${OCF_RESKEY_daemon} $OCF_RESKEY_args
 
     while true
     do
         sleep 1
 
         controld_monitor; rc=$?
         case $rc in
           $OCF_SUCCESS)
             CS_ADDR_LIST="$(cat "${DLM_CONFIGFS_DIR}"/cluster/comms/*/addr_list 2>/dev/null)"
             if [ $? -eq 0 ] && [ -n "$CS_ADDR_LIST" ]; then
                 return $OCF_SUCCESS
             fi
             ;;
           $OCF_NOT_RUNNING) 
             return $OCF_NOT_RUNNING
             ;;
           *) 
             return $OCF_ERR_GENERIC
             ;;
         esac
 
         ocf_log debug "Waiting for ${OCF_RESKEY_daemon} to be ready"
     done
 }
 
 controld_stop() {
     controld_monitor; rc=$?
 
     if [ $rc = $OCF_NOT_RUNNING ]; then
         return $OCF_SUCCESS
     fi
 
     killall -TERM ${OCF_RESKEY_daemon}; rc=$?
 
     if [ $rc != 0 ]; then
         return $OCF_ERR_GENERIC
     fi
 
     rc=$OCF_SUCCESS
     while [ $rc = $OCF_SUCCESS ]; do
         controld_monitor; rc=$?
         sleep 1
     done
 
     if [ $rc = $OCF_NOT_RUNNING ]; then
         rc=$OCF_SUCCESS
     fi
 
     return $rc
 }
 
 controld_monitor() {
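     # "killall -0" sends no signal; it only checks whether any
     # ${OCF_RESKEY_daemon} process exists (exit status 0 if one is found).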
     killall -0 ${OCF_RESKEY_daemon} >/dev/null 2>&1 ; CM_RC=$?
 
     case $CM_RC in
       0) smw=$(dlm_tool status -v | grep "stateful_merge_wait=" | cut -d= -f2)
          if [ -n "$smw" ] && [ $smw -eq 1 ]; then
              ocf_log err "DLM status is: stateful_merge_wait"
              CM_RC=$OCF_ERR_GENERIC
          elif [ -z "$smw" ] && dlm_tool ls | grep -q "wait fencing" && \
               ! stonith_admin -H '*' -V | grep -q "wishes to"; then
              ocf_log err "DLM status is: wait fencing"
              CM_RC=$OCF_ERR_GENERIC
          else
              CM_RC=$OCF_SUCCESS
          fi
          ;;
       1) CM_RC=$OCF_NOT_RUNNING;;
       *) CM_RC=$OCF_ERR_GENERIC;;
     esac
 
     # If the DLM is not running successfully but DLM lockspace bits are
     # left over, we must fence ourselves.
     if [ $CM_RC -ne $OCF_SUCCESS ]; then
         check_uncontrolled_locks
     fi
 
     return $CM_RC
 }
 
 controld_validate() {
     check_binary killall
     check_binary ${OCF_RESKEY_daemon}
 
     case ${OCF_RESKEY_CRM_meta_globally_unique} in
         yes|Yes|true|True|1) 
             ocf_log err "$OCF_RESOURCE_INSTANCE must be configured with the globally_unique=false meta attribute"
             exit $OCF_ERR_CONFIGURED
             ;;
     esac
 
     [ -d /var/run/cluster ] || mkdir /var/run/cluster
 
     return $OCF_SUCCESS
 }
 
 : ${OCF_RESKEY_sctp=false}
 : ${OCF_RESKEY_CRM_meta_globally_unique:="false"}
 
 case "$OCF_RESOURCE_INSTANCE" in
     *[gG][fF][sS]*) 
         : ${OCF_RESKEY_args=-g 0}
         : ${OCF_RESKEY_daemon=gfs_controld}
         ;;
     *[dD][lL][mM]*)
         : ${OCF_RESKEY_args=-s 0}
         : ${OCF_RESKEY_daemon=dlm_controld}
         ;;
     *)
         : ${OCF_RESKEY_args=-s 0}
         : ${OCF_RESKEY_daemon=dlm_controld}
 esac
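 # Illustrative example: a resource instance named "dlm-clone:0" matches
 # *[dD][lL][mM]* above, so OCF_RESKEY_daemon defaults to dlm_controld and
 # OCF_RESKEY_args to "-s 0" unless they were set explicitly.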
 
 case $__OCF_ACTION in
 meta-data)      meta_data
                 exit $OCF_SUCCESS
                 ;;
 start)          controld_validate; controld_start;;
 stop)           controld_stop;;
 monitor)        controld_validate; controld_monitor;;
 validate-all)   controld_validate;;
 usage|help)     controld_usage
                 exit $OCF_SUCCESS
                 ;;
 *)              controld_usage
                 exit $OCF_ERR_UNIMPLEMENTED
                 ;;
 esac
 rc=$?
 
 exit $rc
diff --git a/lib/cib/cib_file.c b/lib/cib/cib_file.c
index 2816e5b5cd..fce8d69c81 100644
--- a/lib/cib/cib_file.c
+++ b/lib/cib/cib_file.c
@@ -1,862 +1,851 @@
 /*
- * Copyright (c) 2004 International Business Machines
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ * Copyright 2004-2018 International Business Machines
  *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
+
 #include <crm_internal.h>
 #include <unistd.h>
 #include <limits.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdarg.h>
 #include <string.h>
 #include <pwd.h>
 
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <glib.h>
 
 #include <crm/crm.h>
 #include <crm/cib/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/xml.h>
 
 #define cib_flag_dirty 0x00001
 #define cib_flag_live  0x00002
 
 typedef struct cib_file_opaque_s {
     int flags;
     char *filename;
 
 } cib_file_opaque_t;
 
 int cib_file_perform_op(cib_t * cib, const char *op, const char *host, const char *section,
                         xmlNode * data, xmlNode ** output_data, int call_options);
 
 int cib_file_perform_op_delegate(cib_t * cib, const char *op, const char *host, const char *section,
                                  xmlNode * data, xmlNode ** output_data, int call_options,
                                  const char *user_name);
 
 int cib_file_signon(cib_t * cib, const char *name, enum cib_conn_type type);
 int cib_file_signoff(cib_t * cib);
 int cib_file_free(cib_t * cib);
 
 static int
 cib_file_inputfd(cib_t * cib)
 {
     return -EPROTONOSUPPORT;
 }
 
 static int
 cib_file_set_connection_dnotify(cib_t * cib, void (*dnotify) (gpointer user_data))
 {
     return -EPROTONOSUPPORT;
 }
 
 static int
 cib_file_register_notification(cib_t * cib, const char *callback, int enabled)
 {
     return -EPROTONOSUPPORT;
 }
 
 /*!
  * \internal
  * \brief Compare the calculated digest of an XML tree against a signature file
  *
  * \param[in] root Root of XML tree to compare
  * \param[in] sigfile Name of signature file containing digest to compare
  *
  * \return TRUE if digests match or signature file does not exist, else FALSE
  */
 static gboolean
 cib_file_verify_digest(xmlNode *root, const char *sigfile)
 {
     gboolean passed = FALSE;
     char *expected = crm_read_contents(sigfile);
 
     if (expected == NULL) {
         switch (errno) {
             case 0:
                 crm_err("On-disk digest at %s is empty", sigfile);
                 return FALSE;
             case ENOENT:
                 crm_warn("No on-disk digest present at %s", sigfile);
                 return TRUE;
             default:
                 crm_perror(LOG_ERR, "Could not read on-disk digest from %s", sigfile);
                 return FALSE;
         }
     }
     passed = crm_digest_verify(root, expected);
     free(expected);
     return passed;
 }
 
 /*!
  * \internal
  * \brief Read an XML tree from a file and verify its digest
  *
  * \param[in] filename Name of XML file to read
  * \param[in] sigfile Name of signature file containing digest to compare
  * \param[in] root If non-NULL, will be set to pointer to parsed XML tree
  *
  * \return 0 if file was successfully read, parsed and verified, otherwise:
  *         -errno on stat() failure,
  *         -pcmk_err_cib_corrupt if file size is 0 or XML is not parseable, or
  *         -pcmk_err_cib_modified if digests do not match
  * \note If root is non-NULL, it is the caller's responsibility to free *root on
  *       successful return.
  */
 int
 cib_file_read_and_verify(const char *filename, const char *sigfile, xmlNode **root)
 {
     int s_res;
     struct stat buf;
     char *local_sigfile = NULL;
     xmlNode *local_root = NULL;
 
     CRM_ASSERT(filename != NULL);
     if (root) {
         *root = NULL;
     }
 
     /* Verify that file exists and its size is nonzero */
     s_res = stat(filename, &buf);
     if (s_res < 0) {
         crm_perror(LOG_WARNING, "Could not verify cluster configuration file %s", filename);
         return -errno;
     } else if (buf.st_size == 0) {
         crm_warn("Cluster configuration file %s is corrupt (size is zero)", filename);
         return -pcmk_err_cib_corrupt;
     }
 
     /* Parse XML */
     local_root = filename2xml(filename);
     if (local_root == NULL) {
         crm_warn("Cluster configuration file %s is corrupt (unparseable as XML)", filename);
         return -pcmk_err_cib_corrupt;
     }
 
     /* If sigfile is not specified, use original file name plus .sig */
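     /* Illustrative example: "cib.xml" yields the default sigfile "cib.xml.sig" */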
     if (sigfile == NULL) {
         sigfile = local_sigfile = crm_concat(filename, "sig", '.');
     }
 
     /* Verify that digests match */
     if (cib_file_verify_digest(local_root, sigfile) == FALSE) {
         free(local_sigfile);
         free_xml(local_root);
         return -pcmk_err_cib_modified;
     }
 
     free(local_sigfile);
     if (root) {
         *root = local_root;
     } else {
         free_xml(local_root);
     }
     return pcmk_ok;
 }
 
 #define CIB_SERIES "cib"
 #define CIB_SERIES_MAX 100
 #define CIB_SERIES_BZIP FALSE /* Must be false because archived copies are
                                  created with hard links
                                */
 
 #define CIB_LIVE_NAME CIB_SERIES ".xml"
 
 /*!
  * \internal
  * \brief Check whether a file is the live CIB
  *
  * \param[in] filename Name of file to check
  *
  * \return TRUE if file exists and its real path is same as live CIB's
  */
 static gboolean
 cib_file_is_live(const char *filename)
 {
     gboolean same = FALSE;
 
     if (filename != NULL) {
         // Canonicalize file names for true comparison
         char *real_filename = crm_compat_realpath(filename);
 
         if (real_filename != NULL) {
             char *real_livename;
 
             real_livename = crm_compat_realpath(CRM_CONFIG_DIR "/" CIB_LIVE_NAME);
             if (real_livename && !strcmp(real_filename, real_livename)) {
                 same = TRUE;
             }
             free(real_livename);
             free(real_filename);
         }
     }
     return same;
 }
 
 /* cib_file_backup() and cib_file_write_with_digest() need to chown the
  * written files only in limited circumstances, so these variables allow
  * that to be indicated without affecting external callers
  */
 static uid_t cib_file_owner = 0;
 static uid_t cib_file_group = 0;
 static gboolean cib_do_chown = FALSE;
 
 /*!
  * \internal
  * \brief Back up a CIB
  *
  * \param[in] cib_dirname Directory containing CIB file and backups
  * \param[in] cib_filename Name (relative to cib_dirname) of CIB file to back up
  *
  * \return 0 on success, -1 on error
  */
 static int
 cib_file_backup(const char *cib_dirname, const char *cib_filename)
 {
     int rc = 0;
     char *cib_path = crm_concat(cib_dirname, cib_filename, '/');
     char *cib_digest = crm_concat(cib_path, "sig", '.');
 
     /* Figure out what backup file sequence number to use */
     int seq = get_last_sequence(cib_dirname, CIB_SERIES);
     char *backup_path = generate_series_filename(cib_dirname, CIB_SERIES, seq,
                                                  CIB_SERIES_BZIP);
     char *backup_digest = crm_concat(backup_path, "sig", '.');
 
     CRM_ASSERT((cib_path != NULL) && (cib_digest != NULL)
                && (backup_path != NULL) && (backup_digest != NULL));
 
     /* Remove the old backups if they exist */
     unlink(backup_path);
     unlink(backup_digest);
 
     /* Back up the CIB, by hard-linking it to the backup name */
     if ((link(cib_path, backup_path) < 0) && (errno != ENOENT)) {
         crm_perror(LOG_ERR, "Could not archive %s by linking to %s",
                    cib_path, backup_path);
         rc = -1;
 
     /* Back up the CIB signature similarly */
     } else if ((link(cib_digest, backup_digest) < 0) && (errno != ENOENT)) {
         crm_perror(LOG_ERR, "Could not archive %s by linking to %s",
                    cib_digest, backup_digest);
         rc = -1;
 
     /* Update the last counter and ensure everything is sync'd to media */
     } else {
         write_last_sequence(cib_dirname, CIB_SERIES, seq + 1, CIB_SERIES_MAX);
         if (cib_do_chown) {
             if ((chown(backup_path, cib_file_owner, cib_file_group) < 0)
                     && (errno != ENOENT)) {
                 crm_perror(LOG_ERR, "Could not set owner of %s", backup_path);
                 rc = -1;
             }
             if ((chown(backup_digest, cib_file_owner, cib_file_group) < 0)
                     && (errno != ENOENT)) {
                 crm_perror(LOG_ERR, "Could not set owner of %s", backup_digest);
                 rc = -1;
             }
             if (crm_chown_last_sequence(cib_dirname, CIB_SERIES, cib_file_owner,
                                         cib_file_group) < 0) {
                 crm_perror(LOG_ERR,
                            "Could not set owner of %s last sequence file",
                            cib_dirname);
                 rc = -1;
             }
         }
         crm_sync_directory(cib_dirname);
         crm_info("Archived previous version as %s", backup_path);
     }
 
     free(cib_path);
     free(cib_digest);
     free(backup_path);
     free(backup_digest);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Prepare CIB XML to be written to disk
  *
  * Set num_updates to 0, set cib-last-written to the current timestamp,
  * and strip out the status section.
  *
  * \param[in] root Root of CIB XML tree
  *
  * \return void
  */
 static void
 cib_file_prepare_xml(xmlNode *root)
 {
     xmlNode *cib_status_root = NULL;
 
     /* Always write out with num_updates=0 and current last-written timestamp */
     crm_xml_add(root, XML_ATTR_NUMUPDATES, "0");
     crm_xml_add_last_written(root);
 
     /* Delete status section before writing to file, because
      * we discard it on startup anyway, and users get confused by it */
     cib_status_root = find_xml_node(root, XML_CIB_TAG_STATUS, TRUE);
     CRM_LOG_ASSERT(cib_status_root != NULL);
     if (cib_status_root != NULL) {
         free_xml(cib_status_root);
     }
 }
 
 /*!
  * \internal
  * \brief Write CIB to disk, along with a signature file containing its digest
  *
  * \param[in] cib_root Root of XML tree to write
  * \param[in] cib_dirname Directory containing CIB and signature files
  * \param[in] cib_filename Name (relative to cib_dirname) of file to write
  *
  * \return pcmk_ok on success,
  *         pcmk_err_cib_modified if existing cib_filename doesn't match digest,
  *         pcmk_err_cib_backup if existing cib_filename couldn't be backed up,
  *         or pcmk_err_cib_save if new cib_filename couldn't be saved
  */
 int
 cib_file_write_with_digest(xmlNode *cib_root, const char *cib_dirname,
                            const char *cib_filename)
 {
     int exit_rc = pcmk_ok;
     int rc, fd;
     char *digest = NULL;
 
     /* Detect CIB version for diagnostic purposes */
     const char *epoch = crm_element_value(cib_root, XML_ATTR_GENERATION);
     const char *admin_epoch = crm_element_value(cib_root,
                                                 XML_ATTR_GENERATION_ADMIN);
 
     /* Determine full CIB and signature pathnames */
     char *cib_path = crm_concat(cib_dirname, cib_filename, '/');
     char *digest_path = crm_concat(cib_path, "sig", '.');
 
     /* Create temporary file name patterns for writing out CIB and signature */
     char *tmp_cib = crm_strdup_printf("%s/cib.XXXXXX", cib_dirname);
     char *tmp_digest = crm_strdup_printf("%s/cib.XXXXXX", cib_dirname);
 
     CRM_ASSERT((cib_path != NULL) && (digest_path != NULL)
                && (tmp_cib != NULL) && (tmp_digest != NULL));
 
     /* Ensure the admin didn't modify the existing CIB underneath us */
     crm_trace("Reading cluster configuration file %s", cib_path);
     rc = cib_file_read_and_verify(cib_path, NULL, NULL);
     if ((rc != pcmk_ok) && (rc != -ENOENT)) {
         crm_err("%s was manually modified while the cluster was active!",
                 cib_path);
         exit_rc = pcmk_err_cib_modified;
         goto cleanup;
     }
 
     /* Back up the existing CIB */
     if (cib_file_backup(cib_dirname, cib_filename) < 0) {
         exit_rc = pcmk_err_cib_backup;
         goto cleanup;
     }
 
     crm_debug("Writing CIB to disk");
     umask(S_IWGRP | S_IWOTH | S_IROTH);
     cib_file_prepare_xml(cib_root);
 
     /* Write the CIB to a temporary file, so we can deploy (near) atomically */
     fd = mkstemp(tmp_cib);
     if (fd < 0) {
         crm_perror(LOG_ERR, "Couldn't open temporary file %s for writing CIB",
                    tmp_cib);
         exit_rc = pcmk_err_cib_save;
         goto cleanup;
     }
 
     /* Protect the temporary file */
     if (fchmod(fd, S_IRUSR | S_IWUSR) < 0) {
         crm_perror(LOG_ERR, "Couldn't protect temporary file %s for writing CIB",
                    tmp_cib);
         exit_rc = pcmk_err_cib_save;
         goto cleanup;
     }
     if (cib_do_chown && (fchown(fd, cib_file_owner, cib_file_group) < 0)) {
         crm_perror(LOG_ERR, "Couldn't protect temporary file %s for writing CIB",
                    tmp_cib);
         exit_rc = pcmk_err_cib_save;
         goto cleanup;
     }
 
     /* Write out the CIB */
     if (write_xml_fd(cib_root, tmp_cib, fd, FALSE) <= 0) {
         crm_err("Changes couldn't be written to %s", tmp_cib);
         exit_rc = pcmk_err_cib_save;
         goto cleanup;
     }
 
     /* Calculate CIB digest */
     digest = calculate_on_disk_digest(cib_root);
     CRM_ASSERT(digest != NULL);
     crm_info("Wrote version %s.%s.0 of the CIB to disk (digest: %s)",
              (admin_epoch ? admin_epoch : "0"), (epoch ? epoch : "0"), digest);
 
     /* Write the CIB digest to a temporary file */
     fd = mkstemp(tmp_digest);
     if (fd < 0) {
         crm_perror(LOG_ERR, "Could not create temporary file for CIB digest");
         exit_rc = pcmk_err_cib_save;
         goto cleanup;
     }
     if (cib_do_chown && (fchown(fd, cib_file_owner, cib_file_group) < 0)) {
         crm_perror(LOG_ERR, "Couldn't protect temporary file %s for writing CIB",
                    tmp_cib);
         exit_rc = pcmk_err_cib_save;
         close(fd);
         goto cleanup;
     }
     if (crm_write_sync(fd, digest) < 0) {
         crm_perror(LOG_ERR, "Could not write digest to file %s", tmp_digest);
         exit_rc = pcmk_err_cib_save;
         close(fd);
         goto cleanup;
     }
     close(fd);
     crm_debug("Wrote digest %s to disk", digest);
 
     /* Verify that what we wrote is sane */
     crm_info("Reading cluster configuration file %s (digest: %s)",
              tmp_cib, tmp_digest);
     rc = cib_file_read_and_verify(tmp_cib, tmp_digest, NULL);
     CRM_ASSERT(rc == 0);
 
     /* Rename temporary files to live, and sync directory changes to media */
     crm_debug("Activating %s", tmp_cib);
     if (rename(tmp_cib, cib_path) < 0) {
         crm_perror(LOG_ERR, "Couldn't rename %s as %s", tmp_cib, cib_path);
         exit_rc = pcmk_err_cib_save;
     }
     if (rename(tmp_digest, digest_path) < 0) {
         crm_perror(LOG_ERR, "Couldn't rename %s as %s", tmp_digest,
                    digest_path);
         exit_rc = pcmk_err_cib_save;
     }
     crm_sync_directory(cib_dirname);
 
   cleanup:
     free(cib_path);
     free(digest_path);
     free(digest);
     free(tmp_digest);
     free(tmp_cib);
     return exit_rc;
 }
 
 cib_t *
 cib_file_new(const char *cib_location)
 {
     cib_file_opaque_t *private = NULL;
     cib_t *cib = cib_new_variant();
 
     private = calloc(1, sizeof(cib_file_opaque_t));
     CRM_ASSERT((cib != NULL) && (private != NULL));
 
     cib->variant = cib_file;
     cib->variant_opaque = private;
 
     if (cib_location == NULL) {
         cib_location = getenv("CIB_file");
     }
     private->flags = 0;
     if (cib_file_is_live(cib_location)) {
         set_bit(private->flags, cib_flag_live);
         crm_trace("File %s detected as live CIB", cib_location);
     }
     private->filename = strdup(cib_location);
 
     /* assign variant specific ops */
     cib->delegate_fn = cib_file_perform_op_delegate;
     cib->cmds->signon = cib_file_signon;
     cib->cmds->signoff = cib_file_signoff;
     cib->cmds->free = cib_file_free;
     cib->cmds->inputfd = cib_file_inputfd;
 
     cib->cmds->register_notification = cib_file_register_notification;
     cib->cmds->set_connection_dnotify = cib_file_set_connection_dnotify;
 
     return cib;
 }
 
 static xmlNode *in_mem_cib = NULL;
 
 /*!
  * \internal
  * \brief Read CIB from disk and validate it against XML schema
  *
  * \param[in] filename Name of file to read CIB from
  *
  * \return pcmk_ok on success,
  *         -ENXIO if file does not exist (or stat() otherwise fails), or
  *         -pcmk_err_schema_validation if XML doesn't parse or validate
  * \note If filename is the live CIB, this will *not* verify its digest,
  *       though that functionality would be trivial to add here.
- *       Also, this will *not* verify that the file is writeable,
+ *       Also, this will *not* verify that the file is writable,
  *       because some callers might not need to write.
  */
 static int
 load_file_cib(const char *filename)
 {
     struct stat buf;
     xmlNode *root = NULL;
 
     /* Ensure file is readable */
     if (stat(filename, &buf) < 0) {
         return -ENXIO;
     }
 
     /* Parse XML from file */
     root = filename2xml(filename);
     if (root == NULL) {
         return -pcmk_err_schema_validation;
     }
 
     /* Add a status section if not already present */
     if (find_xml_node(root, XML_CIB_TAG_STATUS, FALSE) == NULL) {
         create_xml_node(root, XML_CIB_TAG_STATUS);
     }
 
     /* Validate XML against its specified schema */
     if (validate_xml(root, NULL, TRUE) == FALSE) {
         const char *schema = crm_element_value(root, XML_ATTR_VALIDATION);
 
         crm_err("CIB does not validate against %s", schema);
         free_xml(root);
         return -pcmk_err_schema_validation;
     }
 
     /* Remember the parsed XML for later use */
     in_mem_cib = root;
     return pcmk_ok;
 }
 
 int
 cib_file_signon(cib_t * cib, const char *name, enum cib_conn_type type)
 {
     int rc = pcmk_ok;
     cib_file_opaque_t *private = cib->variant_opaque;
 
     if (private->filename == NULL) {
         rc = -EINVAL;
     } else {
         rc = load_file_cib(private->filename);
     }
 
     if (rc == pcmk_ok) {
         crm_debug("%s: Opened connection to local file '%s'", name, private->filename);
         cib->state = cib_connected_command;
         cib->type = cib_command;
 
     } else {
         fprintf(stderr, "%s: Connection to local file '%s' failed: %s\n",
                 name, private->filename, pcmk_strerror(rc));
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Write out the in-memory CIB to a live CIB file
  *
  * \param[in] path Full path to file to write
  *
  * \return 0 on success, -1 on failure
  */
 static int
 cib_file_write_live(char *path)
 {
     uid_t uid = geteuid();
     struct passwd *daemon_pwent;
     char *sep = strrchr(path, '/');
     const char *cib_dirname, *cib_filename;
     int rc = 0;
 
     /* Get the desired uid/gid */
     errno = 0;
     daemon_pwent = getpwnam(CRM_DAEMON_USER);
     if (daemon_pwent == NULL) {
         crm_perror(LOG_ERR, "Could not find %s user", CRM_DAEMON_USER);
         return -1;
     }
 
     /* If we're root, we can change the ownership;
      * if we're daemon, anything we create will be OK;
      * otherwise, block access so we don't create wrong owner
      */
     if ((uid != 0) && (uid != daemon_pwent->pw_uid)) {
         crm_perror(LOG_ERR, "Must be root or %s to modify live CIB",
                    CRM_DAEMON_USER);
         return 0;
     }
 
     /* fancy footwork to separate dirname from filename
      * (we know the canonical name maps to the live CIB,
      * but the given name might be relative, or symlinked)
      */
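     /* Illustrative example (hypothetical path): "/tmp/mycib.xml" gives
      * cib_dirname "/tmp" and cib_filename "mycib.xml"; a bare "mycib.xml"
      * gives cib_dirname "./".
      */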
     if (sep == NULL) { /* no directory component specified */
         cib_dirname = "./";
         cib_filename = path;
     } else if (sep == path) { /* given name is in / */
         cib_dirname = "/";
         cib_filename = path + 1;
     } else { /* typical case; split given name into parts */
         *sep = '\0';
         cib_dirname = path;
         cib_filename = sep + 1;
     }
 
     /* if we're root, we want to update the file ownership */
     if (uid == 0) {
         cib_file_owner = daemon_pwent->pw_uid;
         cib_file_group = daemon_pwent->pw_gid;
         cib_do_chown = TRUE;
     }
 
     /* write the file */
     if (cib_file_write_with_digest(in_mem_cib, cib_dirname,
                                    cib_filename) != pcmk_ok) {
         rc = -1;
     }
 
     /* turn off file ownership changes, for other callers */
     if (uid == 0) {
         cib_do_chown = FALSE;
     }
 
     /* undo fancy stuff */
     if ((sep != NULL) && (*sep == '\0')) {
         *sep = '/';
     }
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Sign-off method for CIB file variants
  *
  * This will write the file to disk if needed, and free the in-memory CIB. If
  * the file is the live CIB, it will compute and write a signature as well.
  *
  * \param[in] cib CIB object to sign off
  *
  * \return pcmk_ok on success, pcmk_err_generic on failure
  * \todo This method should refuse to write the live CIB if the CIB manager is
  *       running.
  */
 int
 cib_file_signoff(cib_t * cib)
 {
     int rc = pcmk_ok;
     cib_file_opaque_t *private = cib->variant_opaque;
 
     crm_debug("Disconnecting from the CIB manager");
     cib->state = cib_disconnected;
     cib->type = cib_no_connection;
 
     /* If the in-memory CIB has been changed, write it to disk */
     if (is_set(private->flags, cib_flag_dirty)) {
 
         /* If this is the live CIB, write it out with a digest */
         if (is_set(private->flags, cib_flag_live)) {
             if (cib_file_write_live(private->filename) < 0) {
                 rc = pcmk_err_generic;
             }
 
         /* Otherwise, it's a simple write */
         } else {
             gboolean do_bzip = crm_ends_with_ext(private->filename, ".bz2");
 
             if (write_xml_file(in_mem_cib, private->filename, do_bzip) <= 0) {
                 rc = pcmk_err_generic;
             }
         }
 
         if (rc == pcmk_ok) {
             crm_info("Wrote CIB to %s", private->filename);
             clear_bit(private->flags, cib_flag_dirty);
         } else {
             crm_err("Could not write CIB to %s", private->filename);
         }
     }
 
     /* Free the in-memory CIB */
     free_xml(in_mem_cib);
     in_mem_cib = NULL;
     return rc;
 }
 
 int
 cib_file_free(cib_t * cib)
 {
     int rc = pcmk_ok;
 
     if (cib->state != cib_disconnected) {
         rc = cib_file_signoff(cib);
     }
 
     if (rc == pcmk_ok) {
         cib_file_opaque_t *private = cib->variant_opaque;
 
         free(private->filename);
         free(cib->cmds);
         free(private);
         free(cib);
 
     } else {
         fprintf(stderr, "Couldn't sign off: %d\n", rc);
     }
 
     return rc;
 }
 
 struct cib_func_entry {
     const char *op;
     gboolean read_only;
     cib_op_t fn;
 };
 
 /* *INDENT-OFF* */
 static struct cib_func_entry cib_file_ops[] = {
     {CIB_OP_QUERY,      TRUE,  cib_process_query},
     {CIB_OP_MODIFY,     FALSE, cib_process_modify},
     {CIB_OP_APPLY_DIFF, FALSE, cib_process_diff},
     {CIB_OP_BUMP,       FALSE, cib_process_bump},
     {CIB_OP_REPLACE,    FALSE, cib_process_replace},
     {CIB_OP_CREATE,     FALSE, cib_process_create},
     {CIB_OP_DELETE,     FALSE, cib_process_delete},
     {CIB_OP_ERASE,      FALSE, cib_process_erase},
     {CIB_OP_UPGRADE,    FALSE, cib_process_upgrade},
 };
 /* *INDENT-ON* */
 
 int
 cib_file_perform_op(cib_t * cib, const char *op, const char *host, const char *section,
                     xmlNode * data, xmlNode ** output_data, int call_options)
 {
     return cib_file_perform_op_delegate(cib, op, host, section, data, output_data, call_options,
                                         NULL);
 }
 
 int
 cib_file_perform_op_delegate(cib_t * cib, const char *op, const char *host, const char *section,
                              xmlNode * data, xmlNode ** output_data, int call_options,
                              const char *user_name)
 {
     int rc = pcmk_ok;
     char *effective_user = NULL;
     gboolean query = FALSE;
     gboolean changed = FALSE;
     xmlNode *request = NULL;
     xmlNode *output = NULL;
     xmlNode *cib_diff = NULL;
     xmlNode *result_cib = NULL;
     cib_op_t *fn = NULL;
     int lpc = 0;
     static int max_msg_types = DIMOF(cib_file_ops);
     cib_file_opaque_t *private = cib->variant_opaque;
 
     crm_info("%s on %s", op, section);
     call_options |= (cib_no_mtime | cib_inhibit_bcast | cib_scope_local);
 
     if (cib->state == cib_disconnected) {
         return -ENOTCONN;
     }
 
     if (output_data != NULL) {
         *output_data = NULL;
     }
 
     if (op == NULL) {
         return -EINVAL;
     }
 
     for (lpc = 0; lpc < max_msg_types; lpc++) {
         if (safe_str_eq(op, cib_file_ops[lpc].op)) {
             fn = &(cib_file_ops[lpc].fn);
             query = cib_file_ops[lpc].read_only;
             break;
         }
     }
 
     if (fn == NULL) {
         return -EPROTONOSUPPORT;
     }
 
     cib->call_id++;
     request = cib_create_op(cib->call_id, "dummy-token", op, host, section, data, call_options, user_name);
 #if ENABLE_ACL
     if(user_name) {
         crm_xml_add(request, XML_ACL_TAG_USER, user_name);
     }
     crm_trace("Performing %s operation as %s", op, user_name);
 #endif
 
     /* Mirror the logic in cib_prepare_common() */
     if (section != NULL && data != NULL && crm_str_eq(crm_element_name(data), XML_TAG_CIB, TRUE)) {
         data = get_object_root(section, data);
     }
 
     rc = cib_perform_op(op, call_options, fn, query,
                         section, request, data, TRUE, &changed, in_mem_cib, &result_cib, &cib_diff,
                         &output);
 
     free_xml(request);
     if (rc == -pcmk_err_schema_validation) {
         validate_xml_verbose(result_cib);
     }
 
     if (rc != pcmk_ok) {
         free_xml(result_cib);
 
     } else if (query == FALSE) {
         xml_log_patchset(LOG_DEBUG, "cib:diff", cib_diff);
         free_xml(in_mem_cib);
         in_mem_cib = result_cib;
         set_bit(private->flags, cib_flag_dirty);
     }
 
     free_xml(cib_diff);
 
     if (cib->op_callback != NULL) {
         cib->op_callback(NULL, cib->call_id, rc, output);
     }
 
     if (output_data && output) {
         if(output == in_mem_cib) {
             *output_data = copy_xml(output);
         } else {
             *output_data = output;
         }
 
     } else if(output != in_mem_cib) {
         free_xml(output);
     }
 
     free(effective_user);
     return rc;
 }
diff --git a/lib/common/iso8601.c b/lib/common/iso8601.c
index 98c1435586..171cff03cd 100644
--- a/lib/common/iso8601.c
+++ b/lib/common/iso8601.c
@@ -1,1441 +1,1441 @@
 /*
  * Copyright 2005-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 /*
  * Primary reference:
  *	http://en.wikipedia.org/wiki/ISO_8601 (as at 2005-08-01)
  *
  * Secondary references:
  *	http://hydracen.com/dx/iso8601.htm
  *	http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
  *	http://www.personal.ecu.edu/mccartyr/isowdcal.html
  *	http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
  *
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <time.h>
 #include <ctype.h>
 #include <crm/common/iso8601.h>
 #include <crm/common/iso8601_internal.h>
 
 /*
  * Andrew's code was originally written for OSes whose "struct tm" contains:
  *	long tm_gmtoff;		:: Seconds east of UTC
  *	const char *tm_zone;	:: Timezone abbreviation
  * Some OSes lack these, instead having:
  *	time_t (or long) timezone;
 		:: "difference between UTC and local standard time"
  *	char *tzname[2] = { "...", "..." };
  * I (David Lee) confess to not understanding the details.  So my attempted
  * generalisations for where their use is necessary may be flawed.
  *
  * 1. Does "difference between ..." subtract the same or opposite way?
  * 2. Should it use "altzone" instead of "timezone"?
  * 3. Should it use tzname[0] or tzname[1]?  Interaction with timezone/altzone?
  */
 #if defined(HAVE_STRUCT_TM_TM_GMTOFF)
 #  define GMTOFF(tm) ((tm)->tm_gmtoff)
 #else
 /* Note: extern variable; macro argument not actually used.  */
 #  define GMTOFF(tm) (-timezone+daylight)
 #endif
 
 struct crm_time_s {
     int years;
     int months;                 /* Only for durations */
     int days;
     int seconds;
     int offset;                 /* Seconds */
     bool duration;
 };
 
 char *crm_time_as_string(crm_time_t * date_time, int flags);
 crm_time_t *parse_date(const char *date_str);
 
 gboolean check_for_ordinal(const char *str);
 
 static crm_time_t *
 crm_get_utc_time(crm_time_t * dt)
 {
     crm_time_t *utc = calloc(1, sizeof(crm_time_t));
 
     utc->years = dt->years;
     utc->days = dt->days;
     utc->seconds = dt->seconds;
     utc->offset = 0;
 
     if (dt->offset) {
         crm_time_add_seconds(utc, -dt->offset);
     } else {
         /* Durations (which are the only things that can include months) never have a timezone */
         utc->months = dt->months;
     }
 
     crm_time_log(LOG_TRACE, "utc-source", dt,
                  crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
     crm_time_log(LOG_TRACE, "utc-target", utc,
                  crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
     return utc;
 }
 
 crm_time_t *
 crm_time_new(const char *date_time)
 {
     time_t tm_now;
     crm_time_t *dt = NULL;
 
     tzset();
     if (date_time == NULL) {
         tm_now = time(NULL);
         dt = calloc(1, sizeof(crm_time_t));
         crm_time_set_timet(dt, &tm_now);
     } else {
         dt = parse_date(date_time);
     }
     return dt;
 }
 
 void
 crm_time_free(crm_time_t * dt)
 {
     if (dt == NULL) {
         return;
     }
     free(dt);
 }
 
 static int
 year_days(int year)
 {
     int d = 365;
 
     if (crm_time_leapyear(year)) {
         d++;
     }
     return d;
 }
 
 /* http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
  *
  * 5. Find the Jan1Weekday for Y (Monday=1, Sunday=7)
  *  YY = (Y-1) % 100
  *  C = (Y-1) - YY
  *  G = YY + YY/4
  *  Jan1Weekday = 1 + (((((C / 100) % 4) x 5) + G) % 7)
  */
 int
 crm_time_january1_weekday(int year)
 {
     int YY = (year - 1) % 100;
     int C = (year - 1) - YY;
     int G = YY + YY / 4;
     int jan1 = 1 + (((((C / 100) % 4) * 5) + G) % 7);
 
     crm_trace("YY=%d, C=%d, G=%d", YY, C, G);
     crm_trace("January 1 %.4d: %d", year, jan1);
     return jan1;
 }
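 
 /* Worked example of the algorithm above, for year = 2005:
  *   YY = 2004 % 100 = 4, C = 2004 - 4 = 2000, G = 4 + 4/4 = 5,
  *   jan1 = 1 + ((((2000/100) % 4) * 5 + 5) % 7) = 1 + (5 % 7) = 6
  * i.e. Saturday, which matches the calendar: 1 January 2005 fell on a Saturday.
  */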
 
 int
 crm_time_weeks_in_year(int year)
 {
     int weeks = 52;
     int jan1 = crm_time_january1_weekday(year);
 
     /* if jan1 == thursday */
     if (jan1 == 4) {
         weeks++;
     } else {
         jan1 = crm_time_january1_weekday(year + 1);
         /* if Dec 31 == Thursday, i.e. Jan 1 of next year is a Friday */
         if (jan1 == 5) {
             weeks++;
         }
 
     }
     return weeks;
 }
 
 int month_days[14] = { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 29 };
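 /* In month_days above, index 0 is unused and index 13 holds February's length
  * in a leap year, which is what crm_time_days_in_month() switches to below.
  */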
 
 int
 crm_time_days_in_month(int month, int year)
 {
     if (month == 2 && crm_time_leapyear(year)) {
         month = 13;
     }
     return month_days[month];
 }
 
 bool
 crm_time_leapyear(int year)
 {
     gboolean is_leap = FALSE;
 
     if (year % 4 == 0) {
         is_leap = TRUE;
     }
     if (year % 100 == 0 && year % 400 != 0) {
         is_leap = FALSE;
     }
     return is_leap;
 }
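 
 /* For example: 2016 is a leap year (divisible by 4), 1900 is not (a century
  * not divisible by 400), and 2000 is (a century divisible by 400).
  */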
 
 static uint32_t
 get_ordinal_days(uint32_t y, uint32_t m, uint32_t d)
 {
     int lpc;
 
     for (lpc = 1; lpc < m; lpc++) {
         d += crm_time_days_in_month(lpc, y);
     }
     return d;
 }
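 
 /* For example, get_ordinal_days(2004, 3, 1) = 31 + 29 + 1 = 61,
  * i.e. 1 March 2004 is the 61st day of that (leap) year.
  */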
 
 void
 crm_time_log_alias(int log_level, const char *file, const char *function, int line,
                    const char *prefix, crm_time_t * date_time, int flags)
 {
     char *date_s = crm_time_as_string(date_time, flags);
 
     if (log_level < LOG_CRIT) {
         printf("%s%s%s\n",
                prefix ? prefix : "", prefix ? ": " : "", date_s ? date_s : "__invalid_date__");
     } else {
         do_crm_log_alias(log_level, file, function, line, "%s%s%s",
                          prefix ? prefix : "", prefix ? ": " : "",
                          date_s ? date_s : "__invalid_date__");
     }
     free(date_s);
 }
 
 static int
 crm_time_get_sec(int sec, uint * h, uint * m, uint * s)
 {
     uint hours, minutes, seconds;
 
     if (sec < 0) {
         seconds = 0 - sec;
     } else {
         seconds = sec;
     }
 
     hours = seconds / (60 * 60);
     seconds -= 60 * 60 * hours;
 
     minutes = seconds / (60);
     seconds -= 60 * minutes;
 
     crm_trace("%d == %.2d:%.2d:%.2d", sec, hours, minutes, seconds);
 
     *h = hours;
     *m = minutes;
     *s = seconds;
 
     return TRUE;
 }
 
 int
 crm_time_get_timeofday(crm_time_t * dt, uint * h, uint * m, uint * s)
 {
     return crm_time_get_sec(dt->seconds, h, m, s);
 }
 
 int
 crm_time_get_timezone(crm_time_t * dt, uint * h, uint * m)
 {
     uint s;
 
     return crm_time_get_sec(dt->offset, h, m, &s);
 }
 
 long long
 crm_time_get_seconds(crm_time_t * dt)
 {
     int lpc;
     crm_time_t *utc = NULL;
     long long in_seconds = 0;
 
     utc = crm_get_utc_time(dt);
 
     for (lpc = 1; lpc < utc->years; lpc++) {
         int dmax = year_days(lpc);
 
         in_seconds += 60 * 60 * 24 * dmax;
     }
 
     /* utc->months is an offset that can only be set for a duration
      * By definition, its value varies depending on the date to
      * which it is applied.
      *
      * Force 30-day months so that something vaguely sane happens
      * for anyone that tries to use a month in this way
      */
     if (utc->months > 0) {
         in_seconds += 60 * 60 * 24 * 30 * utc->months;
     }
 
     if (utc->days > 0) {
         in_seconds += 60 * 60 * 24 * (utc->days - 1);
     }
     in_seconds += utc->seconds;
 
     crm_time_free(utc);
     return in_seconds;
 }
 
 #define EPOCH_SECONDS 62135596800ULL    /* Calculated using crm_time_get_seconds() */
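 /* Derivation: the 1969 years from 0001 through 1969 contain
  * 1969*365 + 477 leap days = 719162 days, and 719162 * 86400 = 62135596800.
  */
 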
 long long
 crm_time_get_seconds_since_epoch(crm_time_t * dt)
 {
     return crm_time_get_seconds(dt) - EPOCH_SECONDS;
 }
 
 int
 crm_time_get_gregorian(crm_time_t * dt, uint * y, uint * m, uint * d)
 {
     int months = 0;
     int days = dt->days;
 
     if(dt->years != 0) {
         for (months = 1; months <= 12 && days > 0; months++) {
             int mdays = crm_time_days_in_month(months, dt->years);
 
             if (mdays >= days) {
                 break;
             } else {
                 days -= mdays;
             }
         }
 
     } else if (dt->months) {
         /* This is a duration that includes months; don't convert the days field */
         months = dt->months;
 
     } else {
         /* This is a duration without months; still don't convert the days field */
     }
 
     *y = dt->years;
     *m = months;
     *d = days;
     crm_trace("%.4d-%.3d -> %.4d-%.2d-%.2d", dt->years, dt->days, dt->years, months, days);
     return TRUE;
 }
 
 int
 crm_time_get_ordinal(crm_time_t * dt, uint * y, uint * d)
 {
     *y = dt->years;
     *d = dt->days;
     return TRUE;
 }
 
 int
 crm_time_get_isoweek(crm_time_t * dt, uint * y, uint * w, uint * d)
 {
     /*
      * Monday 29 December 2008 is written "2009-W01-1"
      * Sunday 3 January 2010 is written "2009-W53-7"
      */
     int year_num = 0;
     int jan1 = crm_time_january1_weekday(dt->years);
     int h = -1;
 
     CRM_CHECK(dt->days > 0, return FALSE);
 
 /* 6. Find the Weekday for Y M D */
     h = dt->days + jan1 - 1;
     *d = 1 + ((h - 1) % 7);
 
 /* 7. Find if Y M D falls in YearNumber Y-1, WeekNumber 52 or 53 */
     if (dt->days <= (8 - jan1) && jan1 > 4) {
         crm_trace("year--, jan1=%d", jan1);
         year_num = dt->years - 1;
         *w = crm_time_weeks_in_year(year_num);
 
     } else {
         year_num = dt->years;
     }
 
 /* 8. Find if Y M D falls in YearNumber Y+1, WeekNumber 1 */
     if (year_num == dt->years) {
         int dmax = year_days(year_num);
         int correction = 4 - *d;
 
         if ((dmax - dt->days) < correction) {
             crm_trace("year++, jan1=%d, i=%d vs. %d", jan1, dmax - dt->days, correction);
             year_num = dt->years + 1;
             *w = 1;
         }
     }
 
 /* 9. Find if Y M D falls in YearNumber Y, WeekNumber 1 through 53 */
     if (year_num == dt->years) {
         int j = dt->days + (7 - *d) + (jan1 - 1);
 
         *w = j / 7;
         if (jan1 > 4) {
             *w -= 1;
         }
     }
 
     *y = year_num;
     crm_trace("Converted %.4d-%.3d to %.4d-W%.2d-%d", dt->years, dt->days, *y, *w, *d);
     return TRUE;
 }
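 
 /* Worked example: 2008-364 (Monday 29 December 2008, a leap year).
  * jan1(2008) = 2 (Tuesday), so h = 364 + 2 - 1 = 365 and d = 1 + 364 % 7 = 1.
  * Step 7 does not apply (364 > 8 - jan1); in step 8, dmax - days = 2, which is
  * less than the correction 4 - d = 3, so the date rolls into 2009-W01-1.
  */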
 
 #define DATE_MAX 128
 
 char *
 crm_time_as_string(crm_time_t * date_time, int flags)
 {
     char *date_s = NULL;
     char *time_s = NULL;
     char *offset_s = NULL;
     char *result_s = NULL;
     crm_time_t *dt = NULL;
     crm_time_t *utc = NULL;
 
     if (date_time == NULL) {
         return strdup("");
 
     } else if (date_time->offset && (flags & crm_time_log_with_timezone) == 0) {
         crm_trace("UTC conversion");
         utc = crm_get_utc_time(date_time);
         dt = utc;
     } else {
         dt = date_time;
     }
 
     CRM_CHECK(dt != NULL, return NULL);
     if (flags & crm_time_log_duration) {
         uint h = 0, m = 0, s = 0;
         int offset = 0;
 
         date_s = calloc(1, DATE_MAX);
         crm_time_get_sec(dt->seconds, &h, &m, &s);
 
         if (date_s == NULL) {
             goto done;
         }
 
         if(dt->years) {
             offset += snprintf(date_s+offset, DATE_MAX - offset, "%4d year%s ", dt->years, dt->years>1?"s":"");
         }
         if(dt->months) {
             offset += snprintf(date_s+offset, DATE_MAX - offset, "%2d month%s ", dt->months, dt->months>1?"s":"");
         }
         if(dt->days) {
             offset += snprintf(date_s+offset, DATE_MAX - offset, "%2d day%s ", dt->days, dt->days>1?"s":"");
         }
         if(dt->seconds) {
             offset += snprintf(date_s+offset, DATE_MAX - offset, "%d seconds ( ", dt->seconds);
             if(h) {
                 offset += snprintf(date_s+offset, DATE_MAX - offset, "%u hour%s ", h, h>1?"s":"");
             }
             if(m) {
                 offset += snprintf(date_s+offset, DATE_MAX - offset, "%u minute%s ", m, m>1?"s":"");
             }
             if(s) {
                 offset += snprintf(date_s+offset, DATE_MAX - offset, "%u second%s ", s, s>1?"s":"");
             }
             offset += snprintf(date_s+offset, DATE_MAX - offset, ")");
         }
         goto done;
     }
 
     if (flags & crm_time_log_date) {
         date_s = calloc(1, 34);
         if (date_s == NULL) {
             goto done;
 
         } else if (flags & crm_time_seconds) {
             long long s = crm_time_get_seconds(date_time);
 
             snprintf(date_s, 32, "%lld", s);
             goto done;
 
         } else if (flags & crm_time_epoch) {
             long long s = crm_time_get_seconds_since_epoch(date_time);
 
             snprintf(date_s, 32, "%lld", s);
             goto done;
 
         } else if (flags & crm_time_weeks) {
             /* YYYY-Www-D */
             uint y, w, d;
 
             if (crm_time_get_isoweek(dt, &y, &w, &d)) {
                 snprintf(date_s, 34, "%u-W%.2u-%u", y, w, d);
             }
 
         } else if (flags & crm_time_ordinal) {
             /* YYYY-DDD */
             uint y, d;
 
             if (crm_time_get_ordinal(dt, &y, &d)) {
                 snprintf(date_s, 22, "%u-%.3u", y, d);
             }
 
         } else {
             /* YYYY-MM-DD */
             uint y, m, d;
 
             if (crm_time_get_gregorian(dt, &y, &m, &d)) {
                 snprintf(date_s, 33, "%.4u-%.2u-%.2u", y, m, d);
             }
         }
     }
 
     if (flags & crm_time_log_timeofday) {
         uint h, m, s;
 
         time_s = calloc(1, 33);
         if (time_s == NULL) {
             goto cleanup;
         }
 
         if (crm_time_get_timeofday(dt, &h, &m, &s)) {
             snprintf(time_s, 33, "%.2u:%.2u:%.2u", h, m, s);
         }
 
         if (dt->offset != 0) {
             crm_time_get_sec(dt->offset, &h, &m, &s);
         }
 
         offset_s = calloc(1, 31);
         if ((flags & crm_time_log_with_timezone) == 0 || dt->offset == 0) {
             crm_trace("flags %6x %6x", flags, crm_time_log_with_timezone);
             snprintf(offset_s, 31, "Z");
 
         } else {
             snprintf(offset_s, 24, " %c%.2u:%.2u", dt->offset < 0 ? '-' : '+', h, m);
         }
     }
 
   done:
     result_s = calloc(1, 100);
 
     snprintf(result_s, 100, "%s%s%s%s",
              date_s ? date_s : "", (date_s != NULL && time_s != NULL) ? " " : "",
              time_s ? time_s : "", offset_s ? offset_s : "");
 
   cleanup:
     free(date_s);
     free(time_s);
     free(offset_s);
     crm_time_free(utc);
 
     return result_s;
 }
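 
 /* A minimal usage sketch (hypothetical values); the returned string is
  * malloc'd and must be freed by the caller:
  *
  *     crm_time_t *t = crm_time_new("2018-05-01 12:30:00Z");
  *     char *s = crm_time_as_string(t, crm_time_log_date | crm_time_log_timeofday);
  *     // s == "2018-05-01 12:30:00Z" (without crm_time_log_with_timezone,
  *     // the time is rendered in UTC with a trailing "Z")
  *     free(s);
  *     crm_time_free(t);
  */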
 
 static int
 crm_time_parse_sec(const char *time_str)
 {
     int rc;
     uint hour = 0;
     uint minute = 0;
     uint second = 0;
 
     rc = sscanf(time_str, "%d:%d:%d", &hour, &minute, &second);
     if (rc == 1) {
         rc = sscanf(time_str, "%2d%2d%2d", &hour, &minute, &second);
     }
 
     if (rc > 0 && rc < 4) {
         crm_trace("Got valid time: %.2d:%.2d:%.2d", hour, minute, second);
         if (hour >= 24) {
             crm_err("Invalid hour: %d", hour);
         } else if (minute >= 60) {
             crm_err("Invalid minute: %d", minute);
         } else if (second >= 60) {
             crm_err("Invalid second: %d", second);
         } else {
             second += (minute * 60);
             second += (hour * 60 * 60);
         }
     } else {
         crm_err("Bad time: %s (%d)", time_str, rc);
     }
     return second;
 }
 
 static int
 crm_time_parse_offset(const char *offset_str)
 {
     int offset = 0;
 
     tzset();
     if (offset_str == NULL) {
 #if defined(HAVE_STRUCT_TM_TM_GMTOFF)
         time_t now = time(NULL);
         struct tm *now_tm = localtime(&now);
 #endif
         int h_offset = GMTOFF(now_tm) / (3600);
         int m_offset = (GMTOFF(now_tm) - (3600 * h_offset)) / (60);
 
         if (h_offset < 0 && m_offset < 0) {
             m_offset = 0 - m_offset;
         }
         offset += (60 * 60 * h_offset);
         offset += (60 * m_offset);
 
     } else if (offset_str[0] == 'Z') {
 
     } else if (offset_str[0] == '+' || offset_str[0] == '-' || isdigit((int)offset_str[0])) {
         gboolean negate = FALSE;
 
         if (offset_str[0] == '-') {
             negate = TRUE;
             offset_str++;
         }
         offset = crm_time_parse_sec(offset_str);
         if (negate) {
             offset = 0 - offset;
         }
     }
     return offset;
 }
 
 static crm_time_t *
 crm_time_parse(const char *time_str, crm_time_t * a_time)
 {
     uint h, m, s;
     char *offset_s = NULL;
     crm_time_t *dt = a_time;
 
     tzset();
     if (a_time == NULL) {
         dt = calloc(1, sizeof(crm_time_t));
     }
 
     if (time_str) {
         dt->seconds = crm_time_parse_sec(time_str);
 
         offset_s = strstr(time_str, "Z");
         if (offset_s == NULL) {
             offset_s = strstr(time_str, " ");
         }
     }
 
     if (offset_s) {
         while (isspace(offset_s[0])) {
             offset_s++;
         }
     }
     dt->offset = crm_time_parse_offset(offset_s);
     crm_time_get_sec(dt->offset, &h, &m, &s);
     crm_trace("Got tz: %c%2.d:%.2d", dt->offset < 0 ? '-' : '+', h, m);
     return dt;
 }
 
 crm_time_t *
 parse_date(const char *date_str)
 {
     char *time_s;
     crm_time_t *dt = NULL;
 
     int year = 0;
     int month = 0;
     int week = 0;
     int day = 0;
     int rc = 0;
 
     CRM_CHECK(date_str != NULL, return NULL);
     CRM_CHECK(strlen(date_str) > 0, return NULL);
 
     if (date_str[0] == 'T' || date_str[2] == ':') {
         /* Just a time supplied - Infer current date */
         dt = crm_time_new(NULL);
         dt = crm_time_parse(date_str, dt);
         goto done;
 
     } else {
         dt = calloc(1, sizeof(crm_time_t));
     }
 
     if (safe_str_eq("epoch", date_str)) {
         dt->days = 1;
         dt->years = 1970;
         crm_time_log(LOG_TRACE, "Unpacked", dt, crm_time_log_date | crm_time_log_timeofday);
         return dt;
     }
 
     /* YYYY-MM-DD */
     rc = sscanf(date_str, "%d-%d-%d", &year, &month, &day);
     if (rc == 1) {
         /* YYYYMMDD */
         rc = sscanf(date_str, "%4d%2d%2d", &year, &month, &day);
     }
     if (rc == 3) {
         if (month > 12) {
             crm_err("Invalid month: %d", month);
         } else if (day > 31) {
             crm_err("Invalid day: %d", day);
         } else {
             dt->years = year;
             dt->days = get_ordinal_days(year, month, day);
             crm_trace("Got gergorian date: %.4d-%.3d", year, dt->days);
         }
         goto done;
     }
 
     /* YYYY-DDD */
     rc = sscanf(date_str, "%d-%d", &year, &day);
     if (rc == 2) {
         crm_trace("Got ordinal date");
         if (day > year_days(year)) {
             crm_err("Invalid day: %d (max=%d)", day, year_days(year));
         } else {
             dt->days = day;
             dt->years = year;
         }
         goto done;
     }
 
     /* YYYY-Www-D */
     rc = sscanf(date_str, "%d-W%d-%d", &year, &week, &day);
     if (rc == 3) {
         crm_trace("Got week date");
         if (week > crm_time_weeks_in_year(year)) {
             crm_err("Invalid week: %d (max=%d)", week, crm_time_weeks_in_year(year));
         } else if (day < 1 || day > 7) {
             crm_err("Invalid day: %d", day);
         } else {
             /*
              * http://en.wikipedia.org/wiki/ISO_week_date
              *
              * Monday 29 December 2008 is written "2009-W01-1"
              * Sunday 3 January 2010 is written "2009-W53-7"
              *
              * Saturday 27 September 2008 is written "2008-W37-6"
              *
              * http://en.wikipedia.org/wiki/ISO_week_date
              * If 1 January is on a Monday, Tuesday, Wednesday or Thursday, it is in week 01.
              * If 1 January is on a Friday, Saturday or Sunday, it is in week 52 or 53 of the previous year.
              */
             int jan1 = crm_time_january1_weekday(year);
 
             crm_trace("Jan 1 = %d", jan1);
 
             dt->years = year;
             crm_time_add_days(dt, (week - 1) * 7);
 
             if (jan1 <= 4) {
                 crm_time_add_days(dt, 1 - jan1);
             } else {
                 crm_time_add_days(dt, 8 - jan1);
             }
 
             crm_time_add_days(dt, day);
         }
         goto done;
     }
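 
     /* Worked example: "2009-W01-1". jan1(2009) = 4 (Thursday); the running
      * day count starts at 0, so crm_time_add_days() immediately borrows 2008's
      * 366 days, and after adding (1-1)*7 = 0, then 1 - jan1 = -3, then day = 1,
      * it ends at 2008-364, i.e. Monday 29 December 2008, as expected.
      */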
 
     crm_err("Couldn't parse %s", date_str);
   done:
 
     time_s = strstr(date_str, " ");
     if (time_s == NULL) {
         time_s = strstr(date_str, "T");
     }
 
     if (dt && time_s) {
         time_s++;
         crm_time_parse(time_s, dt);
     }
 
     crm_time_log(LOG_TRACE, "Unpacked", dt, crm_time_log_date | crm_time_log_timeofday);
 
     CRM_CHECK(crm_time_check(dt), return NULL);
 
     return dt;
 }
 
 static int
 parse_int(const char *str, int field_width, int upper_bound, int *result)
 {
     int lpc = 0;
     int offset = 0;
     double fraction_value = 0.0;    /* accumulated value of any fractional digits */
     double fraction_scale = 0.1;    /* weight of the next fractional digit */
     gboolean fraction = FALSE;
     gboolean negate = FALSE;
 
     CRM_CHECK(str != NULL, return FALSE);
     CRM_CHECK(result != NULL, return FALSE);
 
     *result = 0;
 
     if (*str == '\0') {
         return FALSE;
     }
 
     if (str[offset] == 'T') {
         offset++;
     }
 
     if (str[offset] == '.' || str[offset] == ',') {
         fraction = TRUE;
         field_width = -1;
         offset++;
     } else if (str[offset] == '-') {
         negate = TRUE;
         offset++;
     } else if (str[offset] == '+' || str[offset] == ':') {
         offset++;
     }
 
     for (; (fraction || lpc < field_width) && isdigit((int)str[offset]); lpc++) {
         if (fraction) {
             /* Accumulate fractional digits; note that "10 ^ lpc" would be XOR,
              * not exponentiation, so weight each digit by a decreasing power
              * of ten instead.
              */
             fraction_value += (str[offset] - '0') * fraction_scale;
             fraction_scale /= 10.0;
         } else {
             *result = (*result * 10) + (str[offset] - '0');
         }
         offset++;
     }
     if (fraction) {
         /* Scale the fraction into the caller's units (e.g. ".5" of 60 is 30) */
         *result = (int)(fraction_value * upper_bound);
 
     } else if (upper_bound > 0 && *result > upper_bound) {
         *result = upper_bound;
     }
     if (negate) {
         *result = 0 - *result;
     }
     if (lpc > 0) {
         crm_trace("Found int: %d.  Stopped at str[%d]='%c'", *result, lpc, str[lpc]);
         return offset;
     }
     return 0;
 }
 
 crm_time_t *
 crm_time_parse_duration(const char *period_s)
 {
     gboolean is_time = FALSE;
     crm_time_t *diff = NULL;
 
     CRM_CHECK(period_s != NULL, goto bail);
     CRM_CHECK(strlen(period_s) > 0, goto bail);
     CRM_CHECK(period_s[0] == 'P', goto bail);
     period_s++;
 
     diff = calloc(1, sizeof(crm_time_t));
 
     while (isspace((int)period_s[0]) == FALSE) {
         int an_int = 0, rc;
         char ch = 0;
 
         if (period_s[0] == 'T') {
             is_time = TRUE;
             period_s++;
         }
 
         rc = parse_int(period_s, 10, 0, &an_int);
         if (rc == 0) {
             break;
         }
         period_s += rc;
 
         ch = period_s[0];
         period_s++;
 
         crm_trace("Testing %c=%d, rc=%d", ch, an_int, rc);
 
         switch (ch) {
             case 0:
                 return diff;
                 break;
             case 'Y':
                 diff->years = an_int;
                 break;
             case 'M':
                 if (is_time) {
                     /* Minutes */
                     diff->seconds += an_int * 60;
                 } else {
                     diff->months = an_int;
                 }
                 break;
             case 'W':
                 diff->days += an_int * 7;
                 break;
             case 'D':
                 diff->days += an_int;
                 break;
             case 'H':
                 diff->seconds += an_int * 60 * 60;
                 break;
             case 'S':
                 diff->seconds += an_int;
                 break;
             default:
                 goto bail;
                 break;
         }
     }
     return diff;
 
   bail:
     free(diff);
     return NULL;
 }
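 
 /* For example, crm_time_parse_duration("P1Y2M3DT4H5M6S") yields a duration
  * with years = 1, months = 2, days = 3 and seconds = 4*3600 + 5*60 + 6 = 14706;
  * "M" counts as minutes only after the "T" time designator.
  */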
 
 crm_time_period_t *
 crm_time_parse_period(const char *period_str)
 {
     gboolean invalid = FALSE;
     const char *original = period_str;
     crm_time_period_t *period = NULL;
 
     CRM_CHECK(period_str != NULL, return NULL);
     CRM_CHECK(strlen(period_str) > 0, return NULL);
 
     tzset();
     period = calloc(1, sizeof(crm_time_period_t));
 
     if (period_str[0] == 'P') {
         period->diff = crm_time_parse_duration(period_str);
     } else {
         period->start = parse_date(period_str);
     }
 
     period_str = strstr(original, "/");
     if (period_str) {
         CRM_CHECK(period_str[0] == '/', invalid = TRUE;
                   goto bail);
         period_str++;
 
         if (period_str[0] == 'P') {
             period->diff = crm_time_parse_duration(period_str);
         } else {
             period->end = parse_date(period_str);
         }
 
     } else if (period->diff != NULL) {
         /* just a duration starting from now */
         period->start = crm_time_new(NULL);
 
     } else {
         invalid = TRUE;
         CRM_CHECK(period_str != NULL, goto bail);
     }
 
     /* sanity checks */
     if (period->start == NULL && period->end == NULL) {
         crm_err("Invalid time period: %s", original);
         invalid = TRUE;
 
     } else if (period->start == NULL && period->diff == NULL) {
         crm_err("Invalid time period: %s", original);
         invalid = TRUE;
 
     } else if (period->end == NULL && period->diff == NULL) {
         crm_err("Invalid time period: %s", original);
         invalid = TRUE;
     }
 
   bail:
     if (invalid) {
         free(period->start);
         free(period->end);
         free(period->diff);
         free(period);
         return NULL;
     }
 
     if (period->start == NULL) {
         period->start = crm_time_subtract(period->end, period->diff);
 
     } else if (period->end == NULL) {
         period->end = crm_time_add(period->start, period->diff);
     }
 
     crm_time_check(period->start);
     crm_time_check(period->end);
 
     return period;
 }
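 
 /* Accepted forms, for example:
  *     "2018-04-01 00:00:00Z/2018-05-01 00:00:00Z"   start/end
  *     "2018-04-01 00:00:00Z/P1M"                    start/duration
  *     "P1M/2018-05-01 00:00:00Z"                    duration/end
  *     "P10M"                                        duration starting now
  * Whichever of start/end is missing is derived with crm_time_add() or
  * crm_time_subtract().
  */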
 
 void
 crm_time_set(crm_time_t * target, crm_time_t * source)
 {
     crm_trace("target=%p, source=%p", target, source);
 
     CRM_CHECK(target != NULL && source != NULL, return);
 
     target->years = source->years;
     target->days = source->days;
     target->months = source->months;    /* Only for durations */
     target->seconds = source->seconds;
     target->offset = source->offset;
 
     crm_time_log(LOG_TRACE, "source", source,
                  crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
     crm_time_log(LOG_TRACE, "target", target,
                  crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
 }
 
 static void
 ha_set_tm_time(crm_time_t * target, struct tm *source)
 {
     int h_offset = 0;
     int m_offset = 0;
 
     /* Ensure target is fully initialized */
     target->years = 0;
     target->months = 0;
     target->days = 0;
     target->seconds = 0;
     target->offset = 0;
     target->duration = FALSE;
 
     if (source->tm_year > 0) {
         /* years since 1900 */
         target->years = 1900 + source->tm_year;
     }
 
     if (source->tm_yday >= 0) {
         /* days since January 1 [0-365] */
         target->days = 1 + source->tm_yday;
     }
 
     if (source->tm_hour >= 0) {
         target->seconds += 60 * 60 * source->tm_hour;
     }
     if (source->tm_min >= 0) {
         target->seconds += 60 * source->tm_min;
     }
     if (source->tm_sec >= 0) {
         target->seconds += source->tm_sec;
     }
 
     /* tm_gmtoff == offset from UTC in seconds */
     h_offset = GMTOFF(source) / (3600);
     m_offset = (GMTOFF(source) - (3600 * h_offset)) / (60);
     crm_trace("Offset (s): %ld, offset (hh:mm): %.2d:%.2d", GMTOFF(source), h_offset, m_offset);
 
     target->offset += 60 * 60 * h_offset;
     target->offset += 60 * m_offset;
 }
 
 void
 crm_time_set_timet(crm_time_t * target, time_t * source)
 {
     ha_set_tm_time(target, localtime(source));
 }
 
 crm_time_t *
 crm_time_add(crm_time_t * dt, crm_time_t * value)
 {
     crm_time_t *utc = NULL;
     crm_time_t *answer = NULL;
 
     CRM_CHECK(dt != NULL && value != NULL, return NULL);
 
     answer = calloc(1, sizeof(crm_time_t));
     crm_time_set(answer, dt);
 
     utc = crm_get_utc_time(value);
 
     answer->years += utc->years;
     crm_time_add_months(answer, utc->months);
     crm_time_add_days(answer, utc->days);
     crm_time_add_seconds(answer, utc->seconds);
 
     crm_time_free(utc);
     return answer;
 }
 
 crm_time_t *
 crm_time_calculate_duration(crm_time_t * dt, crm_time_t * value)
 {
     crm_time_t *utc = NULL;
     crm_time_t *answer = NULL;
 
     CRM_CHECK(dt != NULL && value != NULL, return NULL);
 
     utc = crm_get_utc_time(value);
     answer = crm_get_utc_time(dt);
     answer->duration = TRUE;
 
     answer->years -= utc->years;
     if(utc->months != 0) {
         crm_time_add_months(answer, -utc->months);
     }
     crm_time_add_days(answer, -utc->days);
     crm_time_add_seconds(answer, -utc->seconds);
 
     crm_time_free(utc);
     return answer;
 }
 
 crm_time_t *
 crm_time_subtract(crm_time_t * dt, crm_time_t * value)
 {
     crm_time_t *utc = NULL;
     crm_time_t *answer = NULL;
 
     CRM_CHECK(dt != NULL && value != NULL, return NULL);
 
     answer = calloc(1, sizeof(crm_time_t));
     crm_time_set(answer, dt);
     utc = crm_get_utc_time(value);
 
     answer->years -= utc->years;
     if(utc->months != 0) {
         crm_time_add_months(answer, -utc->months);
     }
     crm_time_add_days(answer, -utc->days);
     crm_time_add_seconds(answer, -utc->seconds);
 
     return answer;
 }
 
 bool
 crm_time_check(crm_time_t * dt)
 {
     int ydays = 0;
 
     CRM_CHECK(dt != NULL, return FALSE);
 
     ydays = year_days(dt->years);
     crm_trace("max ydays: %d", ydays);
 
     CRM_CHECK(dt->days > 0, return FALSE);
     CRM_CHECK(dt->days <= ydays, return FALSE);
 
     CRM_CHECK(dt->seconds >= 0, return FALSE);
     CRM_CHECK(dt->seconds < 24 * 60 * 60, return FALSE);
 
     return TRUE;
 }
 
 #define do_cmp_field(l, r, field)					\
     if(rc == 0) {                                                       \
 		if(l->field > r->field) {				\
 			crm_trace("%s: %d > %d",			\
 				    #field, l->field, r->field);	\
 			rc = 1;                                         \
 		} else if(l->field < r->field) {			\
 			crm_trace("%s: %d < %d",			\
 				    #field, l->field, r->field);	\
 			rc = -1;					\
 		}							\
     }
 
 int
 crm_time_compare(crm_time_t * a, crm_time_t * b)
 {
     int rc = 0;
     crm_time_t *t1 = NULL;
     crm_time_t *t2 = NULL;
 
     if (a == NULL && b == NULL) {
         return 0;
     } else if (a == NULL) {
         return -1;
     } else if (b == NULL) {
         return 1;
     }
 
     t1 = crm_get_utc_time(a);
     t2 = crm_get_utc_time(b);
 
     do_cmp_field(t1, t2, years);
     do_cmp_field(t1, t2, days);
     do_cmp_field(t1, t2, seconds);
 
     crm_time_free(t1);
     crm_time_free(t2);
     return rc;
 }
 
 void
 crm_time_add_seconds(crm_time_t * a_time, int extra)
 {
     int days = 0;
     int seconds = 24 * 60 * 60;
 
     crm_trace("Adding %d seconds to %d (max=%d)", extra, a_time->seconds, seconds);
 
     a_time->seconds += extra;
     while (a_time->seconds >= seconds) {
         a_time->seconds -= seconds;
         days++;
     }
 
     while (a_time->seconds < 0) {
         a_time->seconds += seconds;
         days--;
     }
     crm_time_add_days(a_time, days);
 }
 
 void
 crm_time_add_days(crm_time_t * a_time, int extra)
 {
     int lower_bound = 1;
     int ydays = crm_time_leapyear(a_time->years) ? 366 : 365;
 
     crm_trace("Adding %d days to %.4d-%.3d", extra, a_time->years, a_time->days);
 
     a_time->days += extra;
     while (a_time->days > ydays) {
         a_time->years++;
         a_time->days -= ydays;
         ydays = crm_time_leapyear(a_time->years) ? 366 : 365;
     }
 
     if(a_time->duration) {
         lower_bound = 0;
     }
 
     while (a_time->days < lower_bound) {
         a_time->years--;
         a_time->days += crm_time_leapyear(a_time->years) ? 366 : 365;
     }
 }
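 
 /* For example, adding 40 days to 2016-360 (25 December 2016) overflows the
  * 366 days of the leap year, giving 2017-034 (3 February 2017).
  */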
 
 void
 crm_time_add_months(crm_time_t * a_time, int extra)
 {
     int lpc;
     uint32_t y, m, d, dmax;
 
     crm_time_get_gregorian(a_time, &y, &m, &d);
     crm_trace("Adding %d months to %.4d-%.2d-%.2d", extra, y, m, d);
 
     if (extra > 0) {
         for (lpc = extra; lpc > 0; lpc--) {
             m++;
             if (m == 13) {
                 m = 1;
                 y++;
             }
         }
     } else {
         for (lpc = -extra; lpc > 0; lpc--) {
             m--;
             if (m == 0) {
                 m = 12;
                 y--;
             }
         }
     }
 
     dmax = crm_time_days_in_month(m, y);
     if (dmax < d) {
         /* Preserve day-of-month unless the month doesn't have enough days */
         d = dmax;
     }
 
     crm_trace("Calculated %.4d-%.2d-%.2d", y, m, d);
 
     a_time->years = y;
     a_time->days = get_ordinal_days(y, m, d);
 
     crm_time_get_gregorian(a_time, &y, &m, &d);
     crm_trace("Got %.4d-%.2d-%.2d", y, m, d);
 }
 
 void
 crm_time_add_minutes(crm_time_t * a_time, int extra)
 {
     crm_time_add_seconds(a_time, extra * 60);
 }
 
 void
 crm_time_add_hours(crm_time_t * a_time, int extra)
 {
     crm_time_add_seconds(a_time, extra * 60 * 60);
 }
 
 void
 crm_time_add_weeks(crm_time_t * a_time, int extra)
 {
     crm_time_add_days(a_time, extra * 7);
 }
 
 void
 crm_time_add_years(crm_time_t * a_time, int extra)
 {
     a_time->years += extra;
 }
 
 static void
 ha_get_tm_time( struct tm *target, crm_time_t *source)
 {
     *target = (struct tm) {
         .tm_year = source->years - 1900,
         .tm_mday = source->days,
         .tm_sec = source->seconds % 60,
         .tm_min = ( source->seconds / 60 ) % 60,
         .tm_hour = source->seconds / 60 / 60,
         .tm_isdst = -1, /* don't adjust */
 
 #if defined(HAVE_STRUCT_TM_TM_GMTOFF)
         .tm_gmtoff = source->offset
 #endif
     };
     mktime(target);
 }
 
 crm_time_hr_t *
 crm_time_hr_convert(crm_time_hr_t *target, crm_time_t *dt)
 {
     crm_time_hr_t *hr_dt = NULL;
 
     if (dt) {
         hr_dt = target?target:calloc(1, sizeof(crm_time_hr_t));
         if (hr_dt) {
             *hr_dt = (crm_time_hr_t) {
                 .years = dt->years,
                 .months = dt->months,
                 .days = dt->days,
                 .seconds = dt->seconds,
                 .offset = dt->offset,
                 .duration = dt->duration
             };
         }
     }
 
     return hr_dt;
 }
 
 void
 crm_time_set_hr_dt(crm_time_t *target, crm_time_hr_t *hr_dt)
 {
     CRM_ASSERT((hr_dt) && (target));
     *target = (crm_time_t) {
         .years = hr_dt->years,
         .months = hr_dt->months,
         .days = hr_dt->days,
         .seconds = hr_dt->seconds,
         .offset = hr_dt->offset,
         .duration = hr_dt->duration
     };
 }
 
 crm_time_hr_t *
 crm_time_timeval_hr_convert(crm_time_hr_t *target, struct timeval *tv)
 {
     crm_time_t dt;
     crm_time_hr_t *ret;
 
     crm_time_set_timet(&dt, &tv->tv_sec);
     ret = crm_time_hr_convert(target, &dt);
     if (ret) {
         ret->useconds = tv->tv_usec;
     }
     return ret;
 }
 
 crm_time_hr_t *
 crm_time_hr_new(const char *date_time)
 {
     crm_time_hr_t *hr_dt = NULL;
     struct timeval tv_now;
 
     if (!date_time) {
         if (gettimeofday(&tv_now, NULL) == 0) {
             hr_dt = crm_time_timeval_hr_convert(NULL, &tv_now);
         }
     } else {
         crm_time_t *dt;
 
         dt = parse_date(date_time);
         hr_dt = crm_time_hr_convert(NULL, dt);
         crm_time_free(dt);
     }
     return hr_dt;
 }
 
 void
 crm_time_hr_free(crm_time_hr_t * hr_dt)
 {
     free(hr_dt);
 }
 
 char *
 crm_time_format_hr(const char *format, crm_time_hr_t * hr_dt)
 {
     const char *mark_s;
     int max = 128, scanned_pos = 0, printed_pos = 0, fmt_pos = 0,
-        date_len = 0, nano_digits = 0, fmt_len;
+        date_len = 0, nano_digits = 0;
     char nano_s[10], date_s[max+1], nanofmt_s[5] = "%", *tmp_fmt_s;
     struct tm tm;
     crm_time_t dt;
 
     if (!format) {
         return NULL;
     }
     crm_time_set_hr_dt(&dt, hr_dt);
     ha_get_tm_time(&tm, &dt);
     sprintf(nano_s, "%06d000", hr_dt->useconds);
 
     while ((format[scanned_pos]) != '\0') {
-        fmt_len = 0;
         mark_s = strchr(&format[scanned_pos], '%');
         if (mark_s) {
+            int fmt_len = 1;
+
             fmt_pos = mark_s - format;
-            fmt_len = 1;
             while ((format[fmt_pos+fmt_len] != '\0') &&
                 (format[fmt_pos+fmt_len] >= '0') &&
                 (format[fmt_pos+fmt_len] <= '9')) {
                 fmt_len++;
             }
             scanned_pos = fmt_pos + fmt_len + 1;
             if (format[fmt_pos+fmt_len] == 'N') {
                 nano_digits = atoi(&format[fmt_pos+1]);
                 nano_digits = (nano_digits > 6)?6:nano_digits;
                 nano_digits = (nano_digits < 0)?0:nano_digits;
                 sprintf(&nanofmt_s[1], ".%ds", nano_digits);
             } else {
                 if (format[scanned_pos] != '\0') {
                     continue;
                 }
                 fmt_pos = scanned_pos; /* print till end */
             }
         } else {
             scanned_pos = strlen(format);
             fmt_pos = scanned_pos; /* print till end */
         }
         tmp_fmt_s = strndup(&format[printed_pos], fmt_pos - printed_pos);
 #ifdef GCC_FORMAT_NONLITERAL_CHECKING_ENABLED
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
 #endif
         date_len += strftime(&date_s[date_len], max-date_len, tmp_fmt_s, &tm);
 #ifdef GCC_FORMAT_NONLITERAL_CHECKING_ENABLED
 #pragma GCC diagnostic pop
 #endif
         printed_pos = scanned_pos;
         free(tmp_fmt_s);
         if (nano_digits) {
 #ifdef GCC_FORMAT_NONLITERAL_CHECKING_ENABLED
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
 #endif
             date_len += snprintf(&date_s[date_len], max-date_len,
                                  nanofmt_s, nano_s);
 #ifdef GCC_FORMAT_NONLITERAL_CHECKING_ENABLED
 #pragma GCC diagnostic pop
 #endif
             nano_digits = 0;
         }
     }
 
     return (date_len == 0)?NULL:strdup(date_s);
 }
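 
 /* A minimal usage sketch (hypothetical values): with hr_dt->useconds = 123456,
  * crm_time_format_hr("%H:%M:%S.%3N", hr_dt) expands "%3N" to the first three
  * digits of the fractional seconds (e.g. "12:30:00.123"); plain strftime()
  * specifiers are passed through unchanged.
  */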
diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
index af7b4328ac..3e81be4c90 100644
--- a/lib/fencing/st_client.c
+++ b/lib/fencing/st_client.c
@@ -1,2466 +1,2466 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
 #include <ctype.h>
 
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 
 #include <glib.h>
 
 #include <crm/crm.h>
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <crm/common/mainloop.h>
 
 #if SUPPORT_CIBSECRETS
 #  include <crm/common/cib_secrets.h>
 #endif
 
 CRM_TRACE_INIT_DATA(stonith);
 
 struct stonith_action_s {
     /*! user defined data */
     char *agent;
     char *action;
     char *victim;
     char *args;
     int timeout;
     int async;
     void *userdata;
     void (*done_cb) (GPid pid, gint status, const char *output, gpointer user_data);
 
     /*! internal async track data */
     int fd_stdout;
     int fd_stderr;
     int last_timeout_signo;
 
     /*! internal timing information */
     time_t initial_start_time;
     int tries;
     int remaining_timeout;
     guint timer_sigterm;
     guint timer_sigkill;
     int max_retries;
 
     /* device output data */
     GPid pid;
     int rc;
     char *output;
     char *error;
 };
 
 typedef struct stonith_private_s {
     char *token;
     crm_ipc_t *ipc;
     mainloop_io_t *source;
     GHashTable *stonith_op_callback_table;
     GList *notify_list;
 
     void (*op_callback) (stonith_t * st, stonith_callback_data_t * data);
 
 } stonith_private_t;
 
 typedef struct stonith_notify_client_s {
     const char *event;
     const char *obj_id;         /* implement one day */
     const char *obj_type;       /* implement one day */
     void (*notify) (stonith_t * st, stonith_event_t * e);
 
 } stonith_notify_client_t;
 
 typedef struct stonith_callback_client_s {
     void (*callback) (stonith_t * st, stonith_callback_data_t * data);
     const char *id;
     void *user_data;
     gboolean only_success;
     gboolean allow_timeout_updates;
     struct timer_rec_s *timer;
 
 } stonith_callback_client_t;
 
 struct notify_blob_s {
     stonith_t *stonith;
     xmlNode *xml;
 };
 
 struct timer_rec_s {
     int call_id;
     int timeout;
     guint ref;
     stonith_t *stonith;
 };
 
 typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *,
                              xmlNode *, xmlNode *, xmlNode **, xmlNode **);
 
 bool stonith_dispatch(stonith_t * st);
 xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
                            int call_options);
 int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data,
                          xmlNode ** output_data, int call_options, int timeout);
 
 static void stonith_connection_destroy(gpointer user_data);
 static void stonith_send_notification(gpointer data, gpointer user_data);
 static int internal_stonith_action_execute(stonith_action_t * action);
 static void log_action(stonith_action_t *action, pid_t pid);
 
 /*!
  * \brief Get agent namespace by name
  *
  * \param[in] namespace_s  Name of namespace as string
  *
  * \return Namespace as enum value
  */
 enum stonith_namespace
 stonith_text2namespace(const char *namespace_s)
 {
     if ((namespace_s == NULL) || !strcmp(namespace_s, "any")) {
         return st_namespace_any;
 
     } else if (!strcmp(namespace_s, "redhat")
                || !strcmp(namespace_s, "stonith-ng")) {
         return st_namespace_rhcs;
 
     } else if (!strcmp(namespace_s, "internal")) {
         return st_namespace_internal;
 
     } else if (!strcmp(namespace_s, "heartbeat")) {
         return st_namespace_lha;
     }
     return st_namespace_invalid;
 }
 
 /*!
  * \brief Get agent namespace name
  *
  * \param[in] namespace  Namespace as enum value
  *
  * \return Namespace name as string
  */
 const char *
 stonith_namespace2text(enum stonith_namespace namespace)
 {
     switch (namespace) {
         case st_namespace_any:      return "any";
         case st_namespace_rhcs:     return "stonith-ng";
         case st_namespace_internal: return "internal";
         case st_namespace_lha:      return "heartbeat";
         default:                    break;
     }
     return "unsupported";
 }
 
 /*!
  * \brief Determine namespace of a fence agent
  *
  * \param[in] agent        Fence agent type
  * \param[in] namespace_s  Name of agent namespace as string, if known
  *
  * \return Namespace of specified agent, as enum value
  */
 enum stonith_namespace
 stonith_get_namespace(const char *agent, const char *namespace_s)
 {
     if (safe_str_eq(namespace_s, "internal")) {
         return st_namespace_internal;
     }
 
     if (stonith__agent_is_rhcs(agent)) {
         return st_namespace_rhcs;
     }
 
 #if HAVE_STONITH_STONITH_H
     if (stonith__agent_is_lha(agent)) {
         return st_namespace_lha;
     }
 #endif
 
     crm_err("Unknown fence agent: %s", agent);
     return st_namespace_invalid;
 }
 
 static void
 log_action(stonith_action_t *action, pid_t pid)
 {
     if (action->output) {
         /* Logging the whole string confuses syslog when the string is xml */
         char *prefix = crm_strdup_printf("%s[%d] stdout:", action->agent, pid);
 
         crm_log_output(LOG_TRACE, prefix, action->output);
         free(prefix);
     }
 
     if (action->error) {
         /* Logging the whole string confuses syslog when the string is xml */
         char *prefix = crm_strdup_printf("%s[%d] stderr:", action->agent, pid);
 
         crm_log_output(LOG_WARNING, prefix, action->error);
         free(prefix);
     }
 }
 
 static void
 stonith_connection_destroy(gpointer user_data)
 {
     stonith_t *stonith = user_data;
     stonith_private_t *native = NULL;
     struct notify_blob_s blob;
 
     crm_trace("Sending destroyed notification");
     blob.stonith = stonith;
     blob.xml = create_xml_node(NULL, "notify");
 
     native = stonith->st_private;
     native->ipc = NULL;
     native->source = NULL;
 
     stonith->state = stonith_disconnected;
     crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT);
 
     g_list_foreach(native->notify_list, stonith_send_notification, &blob);
     free_xml(blob.xml);
 }
 
 xmlNode *
 create_device_registration_xml(const char *id, enum stonith_namespace namespace,
                                const char *agent, stonith_key_value_t *params,
                                const char *rsc_provides)
 {
     xmlNode *data = create_xml_node(NULL, F_STONITH_DEVICE);
     xmlNode *args = create_xml_node(data, XML_TAG_ATTRS);
 
 #if HAVE_STONITH_STONITH_H
     if (namespace == st_namespace_any) {
         namespace = stonith_get_namespace(agent, NULL);
     }
     if (namespace == st_namespace_lha) {
         hash2field((gpointer) "plugin", (gpointer) agent, args);
         agent = "fence_legacy";
     }
 #endif
 
     crm_xml_add(data, XML_ATTR_ID, id);
     crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
     crm_xml_add(data, "agent", agent);
     if ((namespace != st_namespace_any) && (namespace != st_namespace_invalid)) {
         crm_xml_add(data, "namespace", stonith_namespace2text(namespace));
     }
     if (rsc_provides) {
         crm_xml_add(data, "rsc_provides", rsc_provides);
     }
 
     for (; params; params = params->next) {
         hash2field((gpointer) params->key, (gpointer) params->value, args);
     }
 
     return data;
 }
 
 static int
 stonith_api_register_device(stonith_t * st, int call_options,
                             const char *id, const char *namespace, const char *agent,
                             stonith_key_value_t * params)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_device_registration_xml(id, stonith_text2namespace(namespace),
                                           agent, params, NULL);
 
     rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_remove_device(stonith_t * st, int call_options, const char *name)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_xml_node(NULL, F_STONITH_DEVICE);
     crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
     crm_xml_add(data, XML_ATTR_ID, name);
     rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_remove_level_full(stonith_t *st, int options,
                               const char *node, const char *pattern,
                               const char *attr, const char *value, int level)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     CRM_CHECK(node || pattern || (attr && value), return -EINVAL);
 
     data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
     crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
 
     if (node) {
         crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
 
     } else if (pattern) {
         crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern);
 
     } else {
         crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr);
         crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value);
     }
 
     crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
     rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_remove_level(stonith_t * st, int options, const char *node, int level)
 {
     return stonith_api_remove_level_full(st, options, node,
                                          NULL, NULL, NULL, level);
 }
 
 /*!
  * \internal
  * \brief Create XML for fence topology level registration request
  *
  * \param[in] node        If not NULL, target level by this node name
  * \param[in] pattern     If not NULL, target by node name using this regex
  * \param[in] attr        If not NULL, target by this node attribute
  * \param[in] value       If not NULL, target by this node attribute value
  * \param[in] level       Index number of level to register
  * \param[in] device_list List of devices in level
  *
  * \return Newly allocated XML tree on success, NULL otherwise
  *
  * \note The caller should set only one of node, pattern or attr/value.
  */
 xmlNode *
 create_level_registration_xml(const char *node, const char *pattern,
                               const char *attr, const char *value,
                               int level, stonith_key_value_t *device_list)
 {
     int len = 0;
     char *list = NULL;
     xmlNode *data;
 
     CRM_CHECK(node || pattern || (attr && value), return NULL);
 
     data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
     CRM_CHECK(data, return NULL);
 
     crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
     crm_xml_add_int(data, XML_ATTR_ID, level);
     crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
 
     if (node) {
         crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
 
     } else if (pattern) {
         crm_xml_add(data, XML_ATTR_STONITH_TARGET_PATTERN, pattern);
 
     } else {
         crm_xml_add(data, XML_ATTR_STONITH_TARGET_ATTRIBUTE, attr);
         crm_xml_add(data, XML_ATTR_STONITH_TARGET_VALUE, value);
     }
 
     for (; device_list; device_list = device_list->next) {
 
         int adding = strlen(device_list->value);
         if(list) {
             adding++;                                      /* +1 space */
         }
 
         crm_trace("Adding %s (%dc) at offset %d", device_list->value, adding, len);
         list = realloc_safe(list, len + adding + 1);       /* +1 EOS */
         if (list == NULL) {
             crm_perror(LOG_CRIT, "Could not create device list");
             free_xml(data);
             return NULL;
         }
         sprintf(list + len, "%s%s", len?",":"", device_list->value);
         len += adding;
     }
 
     crm_xml_add(data, XML_ATTR_STONITH_DEVICES, list);
 
     free(list);
     return data;
 }
 
 static int
 stonith_api_register_level_full(stonith_t * st, int options, const char *node,
                                 const char *pattern,
                                 const char *attr, const char *value,
                                 int level, stonith_key_value_t *device_list)
 {
     int rc = 0;
     xmlNode *data = create_level_registration_xml(node, pattern, attr, value,
                                                   level, device_list);
     CRM_CHECK(data != NULL, return -EINVAL);
 
     rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_register_level(stonith_t * st, int options, const char *node, int level,
                            stonith_key_value_t * device_list)
 {
     return stonith_api_register_level_full(st, options, node, NULL, NULL, NULL,
                                            level, device_list);
 }
 
 static void
 append_arg(const char *key, const char *value, char **args)
 {
     int len = 3;                /* =, \n, \0 */
     int last = 0;
 
     CRM_CHECK(key != NULL, return);
     CRM_CHECK(value != NULL, return);
 
     if (strstr(key, "pcmk_")) {
         return;
     } else if (strstr(key, CRM_META)) {
         return;
     } else if (safe_str_eq(key, "crm_feature_set")) {
         return;
     }
 
     len += strlen(key);
     len += strlen(value);
     if (*args != NULL) {
         last = strlen(*args);
     }
 
     *args = realloc_safe(*args, last + len);
     crm_trace("Appending: %s=%s", key, value);
     sprintf((*args) + last, "%s=%s\n", key, value);
 }
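 
 /* For example, append_arg("port", "node1", &args) grows *args by the single
  * line "port=node1\n"; keys containing "pcmk_" or CRM_META, and the key
  * "crm_feature_set", are skipped entirely.
  */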
 
 static void
 append_config_arg(gpointer key, gpointer value, gpointer user_data)
 {
     /* The fencer will filter action out when it registers the device,
      * but ignore it here just in case any other library callers
      * fail to do so.
      */
     if (safe_str_neq(key, STONITH_ATTR_ACTION_OP)) {
         append_arg(key, value, user_data);
         return;
     }
 }
 
 static char *
 make_args(const char *agent, const char *action, const char *victim, uint32_t victim_nodeid, GHashTable * device_args,
           GHashTable * port_map)
 {
     char buffer[512];
     char *arg_list = NULL;
     const char *value = NULL;
 
     CRM_CHECK(action != NULL, return NULL);
 
     snprintf(buffer, sizeof(buffer), "pcmk_%s_action", action);
     if (device_args) {
         value = g_hash_table_lookup(device_args, buffer);
     }
     if (value) {
         crm_info("Substituting action '%s' for requested operation '%s'", value, action);
         action = value;
     }
 
     append_arg(STONITH_ATTR_ACTION_OP, action, &arg_list);
     if (victim && device_args) {
         const char *alias = victim;
         const char *param = g_hash_table_lookup(device_args, STONITH_ATTR_HOSTARG);
 
         if (port_map && g_hash_table_lookup(port_map, victim)) {
             alias = g_hash_table_lookup(port_map, victim);
         }
 
-        /* Always supply the node's name too:
-         *    https://fedorahosted.org/cluster/wiki/FenceAgentAPI
+        /* Always supply the node's name, too:
+         * https://github.com/ClusterLabs/fence-agents/blob/master/doc/FenceAgentAPI.md
          */
         append_arg("nodename", victim, &arg_list);
         if (victim_nodeid) {
             char nodeid_str[33] = { 0, };
             if (snprintf(nodeid_str, 33, "%u", (unsigned int)victim_nodeid)) {
                 crm_info("For stonith action (%s) for victim %s, adding nodeid (%s) to parameters",
                          action, victim, nodeid_str);
                 append_arg("nodeid", nodeid_str, &arg_list);
             }
         }
 
         /* Check if we need to supply the victim in any other form */
         if(safe_str_eq(agent, "fence_legacy")) {
             value = agent;
 
         } else if (param == NULL) {
             param = "port";
             value = g_hash_table_lookup(device_args, param);
 
         } else if (safe_str_eq(param, "none")) {
             value = param;      /* Nothing more to do */
 
         } else {
             value = g_hash_table_lookup(device_args, param);
         }
 
         /* Don't overwrite explicitly set values for $param */
         if (value == NULL || safe_str_eq(value, "dynamic")) {
             crm_debug("Performing %s action for node '%s' as '%s=%s'", action, victim, param,
                       alias);
             append_arg(param, alias, &arg_list);
         }
     }
 
     if (device_args) {
         g_hash_table_foreach(device_args, append_config_arg, &arg_list);
     }
 
     return arg_list;
 }
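 
 /* A rough sketch of the resulting agent input for a reboot of "node1"
  * (assuming no pcmk_reboot_action override, no port map entry, and a device
  * configured with port=node1; the exact keys depend on the definitions of
  * STONITH_ATTR_ACTION_OP and STONITH_ATTR_HOSTARG):
  *
  *     action=reboot
  *     nodename=node1
  *     port=node1
  *     ...one key=value line per remaining device parameter...
  */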
 
 static gboolean
 st_child_term(gpointer data)
 {
     int rc = 0;
     stonith_action_t *track = data;
 
     crm_info("Child %d timed out, sending SIGTERM", track->pid);
     track->timer_sigterm = 0;
     track->last_timeout_signo = SIGTERM;
     rc = kill(-track->pid, SIGTERM);
     if (rc < 0) {
         crm_perror(LOG_ERR, "Couldn't send SIGTERM to %d", track->pid);
     }
     return FALSE;
 }
 
 static gboolean
 st_child_kill(gpointer data)
 {
     int rc = 0;
     stonith_action_t *track = data;
 
     crm_info("Child %d timed out, sending SIGKILL", track->pid);
     track->timer_sigkill = 0;
     track->last_timeout_signo = SIGKILL;
     rc = kill(-track->pid, SIGKILL);
     if (rc < 0) {
         crm_perror(LOG_ERR, "Couldn't send SIGKILL to %d", track->pid);
     }
     return FALSE;
 }
 
 static void
 stonith_action_clear_tracking_data(stonith_action_t * action)
 {
     if (action->timer_sigterm > 0) {
         g_source_remove(action->timer_sigterm);
         action->timer_sigterm = 0;
     }
     if (action->timer_sigkill > 0) {
         g_source_remove(action->timer_sigkill);
         action->timer_sigkill = 0;
     }
     if (action->fd_stdout) {
         close(action->fd_stdout);
         action->fd_stdout = 0;
     }
     if (action->fd_stderr) {
         close(action->fd_stderr);
         action->fd_stderr = 0;
     }
     free(action->output);
     action->output = NULL;
     free(action->error);
     action->error = NULL;
     action->rc = 0;
     action->pid = 0;
     action->last_timeout_signo = 0;
 }
 
 /*!
  * \internal
  * \brief Free all memory used by a stonith action
  *
  * \param[in,out] action  Action to free
  */
 void
 stonith__destroy_action(stonith_action_t *action)
 {
     if (action) {
         stonith_action_clear_tracking_data(action);
         free(action->agent);
         free(action->args);
         free(action->action);
         free(action->victim);
         free(action);
     }
 }
 
 /*!
  * \internal
  * \brief Get the result of an executed stonith action
  *
  * \param[in,out] action        Executed action
  * \param[out]    rc            Where to store result code (or NULL)
  * \param[out]    output        Where to store standard output (or NULL)
  * \param[out]    error_output  Where to store standard error output (or NULL)
  *
  * \note If output or error_output is not NULL, the caller is responsible for
  *       freeing the memory.
  */
 void
 stonith__action_result(stonith_action_t *action, int *rc, char **output,
                        char **error_output)
 {
     if (rc) {
         *rc = pcmk_ok;
     }
     if (output) {
         *output = NULL;
     }
     if (error_output) {
         *error_output = NULL;
     }
     if (action != NULL) {
         if (rc) {
             *rc = action->rc;
         }
         if (output && action->output) {
             *output = action->output;
             action->output = NULL; // hand off memory management to caller
         }
         if (error_output && action->error) {
             *error_output = action->error;
             action->error = NULL; // hand off memory management to caller
         }
     }
 }
 
 #define FAILURE_MAX_RETRIES 2
 stonith_action_t *
 stonith_action_create(const char *agent,
                       const char *_action,
                       const char *victim,
                       uint32_t victim_nodeid,
                       int timeout, GHashTable * device_args, GHashTable * port_map)
 {
     stonith_action_t *action;
 
     action = calloc(1, sizeof(stonith_action_t));
     action->args = make_args(agent, _action, victim, victim_nodeid, device_args, port_map);
     crm_debug("Preparing '%s' action for %s using agent %s",
               _action, (victim? victim : "no target"), agent);
     action->agent = strdup(agent);
     action->action = strdup(_action);
     if (victim) {
         action->victim = strdup(victim);
     }
     action->timeout = action->remaining_timeout = timeout;
     action->max_retries = FAILURE_MAX_RETRIES;
 
     if (device_args) {
         char buffer[512];
         const char *value = NULL;
 
         snprintf(buffer, sizeof(buffer), "pcmk_%s_retries", _action);
         value = g_hash_table_lookup(device_args, buffer);
 
         if (value) {
             action->max_retries = atoi(value);
         }
     }
 
     return action;
 }
 
 #define READ_MAX 500
 static char *
 read_output(int fd)
 {
     char buffer[READ_MAX];
     char *output = NULL;
     int len = 0;
     int more = 0;
 
     if (!fd) {
         return NULL;
     }
 
     do {
         errno = 0;
         memset(&buffer, 0, READ_MAX);
         more = read(fd, buffer, READ_MAX - 1);
 
         if (more > 0) {
             buffer[more] = 0; /* Make sure it's nul-terminated for logging
                               * 'more' is always less than our buffer size
                               */
             output = realloc_safe(output, len + more + 1);
             snprintf(output + len, more + 1, "%s", buffer);
             len += more;
         }
 
     } while (more == (READ_MAX - 1) || (more < 0 && errno == EINTR));
 
     return output;
 }
 
 static gboolean
 update_remaining_timeout(stonith_action_t * action)
 {
     int diff = time(NULL) - action->initial_start_time;
 
     if (action->tries >= action->max_retries) {
         crm_info("Attempted to execute agent %s (%s) the maximum number of times (%d) allowed",
                  action->agent, action->action, action->max_retries);
         action->remaining_timeout = 0;
     } else if ((action->rc != -ETIME) && diff < (action->timeout * 0.7)) {
         /* only set remaining timeout period if there is 30%
          * or greater of the original timeout period left */
         action->remaining_timeout = action->timeout - diff;
     } else {
         action->remaining_timeout = 0;
     }
     return action->remaining_timeout ? TRUE : FALSE;
 }
 
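 /*!
  * \internal
  * \brief Handle completion of a fence agent child process (mainloop callback)
  *
  * \param[in] p         Child process object
  * \param[in] pid       Process ID of child
  * \param[in] core      Whether child dumped core (unused here)
  * \param[in] signo     Signal that terminated the child, if any
  * \param[in] exitcode  Exit status of child
  */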
 static void
 stonith_action_async_done(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)
 {
     stonith_action_t *action = mainloop_child_userdata(p);
 
     if (action->timer_sigterm > 0) {
         g_source_remove(action->timer_sigterm);
         action->timer_sigterm = 0;
     }
     if (action->timer_sigkill > 0) {
         g_source_remove(action->timer_sigkill);
         action->timer_sigkill = 0;
     }
 
     action->output = read_output(action->fd_stdout);
     action->error = read_output(action->fd_stderr);
 
     if (action->last_timeout_signo) {
         action->rc = -ETIME;
         crm_notice("Child process %d performing action '%s' timed out with signal %d",
                    pid, action->action, action->last_timeout_signo);
 
     } else if (signo) {
         action->rc = -ECONNABORTED;
         crm_notice("Child process %d performing action '%s' terminated with signal %d",
                    pid, action->action, signo);
 
     } else {
         crm_debug("Child process %d performing action '%s' exited with rc %d",
                   pid, action->action, exitcode);
         if (exitcode > 0) {
             /* Try to provide a useful error code based on the fence agent's
              * error output.
              */
             if (action->error == NULL) {
                 exitcode = -ENODATA;
 
             } else if (strstr(action->error, "imed out")) {
                 /* Some agents have their own internal timeouts;
                  * matching "imed out" catches both "Timed out" and "timed out"
                  */
                 exitcode = -ETIMEDOUT;
 
             } else if (strstr(action->error, "Unrecognised action")) {
                 exitcode = -EOPNOTSUPP;
 
             } else {
                 exitcode = -pcmk_err_generic;
             }
         }
         action->rc = exitcode;
     }
 
     log_action(action, pid);
 
     if (action->rc != pcmk_ok && update_remaining_timeout(action)) {
         int rc = internal_stonith_action_execute(action);
         if (rc == pcmk_ok) {
             return;
         }
     }
 
     if (action->done_cb) {
         action->done_cb(pid, action->rc, action->output, action->userdata);
     }
 
     stonith__destroy_action(action);
 }
 
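 /*!
  * \internal
  * \brief Fork and execute a fence agent, synchronously or asynchronously
  *
  * \param[in,out] action  Action to execute (action->async selects the mode)
  *
  * \return pcmk_ok if the agent was launched (async) or exited normally (sync),
  *         negative error code otherwise
  */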
 static int
 internal_stonith_action_execute(stonith_action_t * action)
 {
     int pid, status = 0, len, rc = -EPROTO;
     int ret;
     int total = 0;
     int p_read_fd, p_write_fd;  /* parent read/write file descriptors */
     int c_read_fd, c_write_fd;  /* child read/write file descriptors */
     int c_stderr_fd, p_stderr_fd; /* parent/child side file descriptors for stderr */
     int fd1[2];
     int fd2[2];
     int fd3[2];
     int is_retry = 0;
 
     /* clear any previous tracking data */
     stonith_action_clear_tracking_data(action);
 
     if (!action->tries) {
         action->initial_start_time = time(NULL);
     }
     action->tries++;
 
     if (action->tries > 1) {
         crm_info("Attempt %d to execute %s (%s); remaining timeout is %d",
                  action->tries, action->agent, action->action, action->remaining_timeout);
         is_retry = 1;
     }
 
     c_read_fd = c_write_fd = p_read_fd = p_write_fd = c_stderr_fd = p_stderr_fd = -1;
 
     if (action->args == NULL || action->agent == NULL)
         goto fail;
     len = strlen(action->args);
 
     if (pipe(fd1))
         goto fail;
     p_read_fd = fd1[0];
     c_write_fd = fd1[1];
 
     if (pipe(fd2))
         goto fail;
     c_read_fd = fd2[0];
     p_write_fd = fd2[1];
 
     if (pipe(fd3))
         goto fail;
     p_stderr_fd = fd3[0];
     c_stderr_fd = fd3[1];
 
     crm_debug("forking");
     pid = fork();
     if (pid < 0) {
         rc = -ECHILD;
         goto fail;
     }
 
     if (!pid) {
         /* child */
         setpgid(0, 0);
 
         close(1);
         /* coverity[leaked_handle] False positive */
         if (dup(c_write_fd) < 0)
             goto fail;
         close(2);
         /* coverity[leaked_handle] False positive */
         if (dup(c_stderr_fd) < 0)
             goto fail;
         close(0);
         /* coverity[leaked_handle] False positive */
         if (dup(c_read_fd) < 0)
             goto fail;
 
         /* keep c_stderr_fd open so parent can report all errors. */
         /* keep c_write_fd open so hostlist can be sent to parent. */
         close(c_read_fd);
         close(p_read_fd);
         close(p_write_fd);
         close(p_stderr_fd);
 
         /* keep retries from executing out of control */
         if (is_retry) {
             sleep(1);
         }
         execlp(action->agent, action->agent, NULL);
         exit(CRM_EX_ERROR);
     }
 
     /* parent */
     action->pid = pid;
     ret = crm_set_nonblocking(p_read_fd);
     if (ret < 0) {
         crm_notice("Could not set output of %s to be non-blocking: %s "
                    CRM_XS " rc=%d",
                    action->agent, pcmk_strerror(ret), ret);
     }
     ret = crm_set_nonblocking(p_stderr_fd);
     if (ret < 0) {
         crm_notice("Could not set error output of %s to be non-blocking: %s "
                    CRM_XS " rc=%d",
                    action->agent, pcmk_strerror(ret), ret);
     }
 
     errno = 0;
     do {
         crm_debug("sending args");
         ret = write(p_write_fd, action->args + total, len - total);
         if (ret > 0) {
             total += ret;
         }
 
     } while (errno == EINTR && total < len);
 
     if (total != len) {
         crm_perror(LOG_ERR, "Sent %d not %d bytes", total, len);
         if (ret >= 0) {
             rc = -ECOMM;
         }
         goto fail;
     }
 
     close(p_write_fd); p_write_fd = -1;
 
     /* async */
     if (action->async) {
         action->fd_stdout = p_read_fd;
         action->fd_stderr = p_stderr_fd;
         mainloop_child_add(pid, 0 /* Move the timeout here? */,
                            action->action, action, stonith_action_async_done);
         crm_trace("Op: %s on %s, pid: %d, timeout: %ds", action->action, action->agent, pid,
                   action->remaining_timeout);
         action->last_timeout_signo = 0;
         if (action->remaining_timeout) {
             action->timer_sigterm =
                 g_timeout_add(1000 * action->remaining_timeout, st_child_term, action);
             action->timer_sigkill =
                 g_timeout_add(1000 * (action->remaining_timeout + 5), st_child_kill, action);
         } else {
             crm_err("No timeout set for stonith operation %s with device %s",
                     action->action, action->agent);
         }
 
         close(c_write_fd);
         close(c_read_fd);
         close(c_stderr_fd);
         return 0;
 
     } else {
         /* sync */
         int timeout = action->remaining_timeout + 1;
         pid_t p = 0;
 
         while (action->remaining_timeout < 0 || timeout > 0) {
             p = waitpid(pid, &status, WNOHANG);
             if (p > 0) {
                 break;
             }
             sleep(1);
             timeout--;
         }
 
         if (timeout == 0) {
             int killrc = kill(-pid, SIGKILL);
 
             if (killrc && errno != ESRCH) {
                 crm_err("kill(%d, KILL) failed: %s (%d)", pid, pcmk_strerror(errno), errno);
             }
             /*
              * From sigprocmask(2):
              * It is not possible to block SIGKILL or SIGSTOP.  Attempts to do so are silently ignored.
              *
              * This makes it safe to skip WNOHANG here
              */
             p = waitpid(pid, &status, 0);
         }
 
         if (p <= 0) {
             crm_perror(LOG_ERR, "waitpid(%d)", pid);
 
         } else if (p != pid) {
             crm_err("Waited for %d, got %d", pid, p);
         }
 
         action->output = read_output(p_read_fd);
         action->error = read_output(p_stderr_fd);
 
         action->rc = -ECONNABORTED;
 
         log_action(action, pid);
 
         rc = action->rc;
         if (timeout == 0) {
             action->rc = -ETIME;
         } else if (WIFEXITED(status)) {
             crm_debug("result = %d", WEXITSTATUS(status));
             action->rc = -WEXITSTATUS(status);
             rc = 0;
 
         } else if (WIFSIGNALED(status)) {
             crm_err("call %s for %s exited due to signal %d", action->action, action->agent,
                     WTERMSIG(status));
 
         } else {
             crm_err("call %s for %s returned unexpected status %#x",
                     action->action, action->agent, status);
         }
     }
 
   fail:
 
     if (p_read_fd >= 0) {
         close(p_read_fd);
     }
     if (p_write_fd >= 0) {
         close(p_write_fd);
     }
     if (p_stderr_fd >= 0) {
         close(p_stderr_fd);
     }
 
     if (c_read_fd >= 0) {
         close(c_read_fd);
     }
     if (c_write_fd >= 0) {
         close(c_write_fd);
     }
     if (c_stderr_fd >= 0) {
         close(c_stderr_fd);
     }
 
     return rc;
 }
 
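 /*!
  * \brief Begin executing a fencing action asynchronously
  *
  * \param[in,out] action    Action to execute
  * \param[in]     userdata  Data to pass to \p done callback
  * \param[in]     done      Function to call when the action completes
  *
  * \return Process ID of fence agent on success, -1 if \p action is NULL,
  *         or negative error code if the agent could not be executed
  */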
 GPid
 stonith_action_execute_async(stonith_action_t * action,
                              void *userdata,
                              void (*done) (GPid pid, int rc, const char *output,
                                            gpointer user_data))
 {
     int rc = 0;
 
     if (!action) {
         return -1;
     }
 
     action->userdata = userdata;
     action->done_cb = done;
     action->async = 1;
 
     rc = internal_stonith_action_execute(action);
 
     return rc < 0 ? rc : action->pid;
 }
 
 /*!
  * \internal
  * \brief Execute a stonith action
  *
  * \param[in,out] action  Action to execute
  *
  * \return pcmk_ok on success, -errno otherwise
  */
 int
 stonith__execute(stonith_action_t *action)
 {
     int rc = pcmk_ok;
 
     CRM_CHECK(action != NULL, return -EINVAL);
 
     // Keep trying until success, max retries, or timeout
     do {
         rc = internal_stonith_action_execute(action);
     } while ((rc != pcmk_ok) && update_remaining_timeout(action));
 
     return rc;
 }
 
 static int
 stonith_api_device_list(stonith_t * stonith, int call_options, const char *namespace,
                         stonith_key_value_t ** devices, int timeout)
 {
     int count = 0;
     enum stonith_namespace ns = stonith_text2namespace(namespace);
 
     if (devices == NULL) {
         crm_err("Parameter error: stonith_api_device_list");
         return -EFAULT;
     }
 
 #if HAVE_STONITH_STONITH_H
     // Include Linux-HA agents if requested
     if ((ns == st_namespace_any) || (ns == st_namespace_lha)) {
         count += stonith__list_lha_agents(devices);
     }
 #endif
 
     // Include Red Hat agents if requested
     if ((ns == st_namespace_any) || (ns == st_namespace_rhcs)) {
         count += stonith__list_rhcs_agents(devices);
     }
 
     return count;
 }
 
 static int
 stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *agent,
                             const char *namespace, char **output, int timeout)
 {
     /* By executing meta-data directly, we can get it from stonith_admin when
      * the cluster is not running, which is important for higher-level tools.
      */
 
     enum stonith_namespace ns = stonith_get_namespace(agent, namespace);
 
     crm_trace("Looking up metadata for %s agent %s",
               stonith_namespace2text(ns), agent);
 
     switch (ns) {
         case st_namespace_rhcs:
             return stonith__rhcs_metadata(agent, timeout, output);
 
 #if HAVE_STONITH_STONITH_H
         case st_namespace_lha:
             return stonith__lha_metadata(agent, timeout, output);
 #endif
 
         default:
             errno = EINVAL;
             crm_perror(LOG_ERR,
                        "Agent %s not found or does not support meta-data",
                        agent);
             break;
     }
     return -EINVAL;
 }
 
 static int
 stonith_api_query(stonith_t * stonith, int call_options, const char *target,
                   stonith_key_value_t ** devices, int timeout)
 {
     int rc = 0, lpc = 0, max = 0;
 
     xmlNode *data = NULL;
     xmlNode *output = NULL;
     xmlXPathObjectPtr xpathObj = NULL;
 
     CRM_CHECK(devices != NULL, return -EINVAL);
 
     data = create_xml_node(NULL, F_STONITH_DEVICE);
     crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
     crm_xml_add(data, F_STONITH_TARGET, target);
     crm_xml_add(data, F_STONITH_ACTION, "off");
     rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout);
 
     if (rc < 0) {
         return rc;
     }
 
     xpathObj = xpath_search(output, "//@agent");
     if (xpathObj) {
         max = numXpathResults(xpathObj);
 
         for (lpc = 0; lpc < max; lpc++) {
             xmlNode *match = getXpathResult(xpathObj, lpc);
 
             CRM_LOG_ASSERT(match != NULL);
             if(match != NULL) {
                 xmlChar *match_path = xmlGetNodePath(match);
 
                 crm_info("%s[%d] = %s", "//@agent", lpc, match_path);
                 free(match_path);
                 *devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID));
             }
         }
 
         freeXpathObject(xpathObj);
     }
 
     free_xml(output);
     free_xml(data);
     return max;
 }
 
 static int
 stonith_api_call(stonith_t * stonith,
                  int call_options,
                  const char *id,
                  const char *action, const char *victim, int timeout, xmlNode ** output)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_xml_node(NULL, F_STONITH_DEVICE);
     crm_xml_add(data, F_STONITH_ORIGIN, __FUNCTION__);
     crm_xml_add(data, F_STONITH_DEVICE, id);
     crm_xml_add(data, F_STONITH_ACTION, action);
     crm_xml_add(data, F_STONITH_TARGET, victim);
 
     rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info,
                  int timeout)
 {
     int rc;
     xmlNode *output = NULL;
 
     rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output);
 
     if (output && list_info) {
         const char *list_str;
 
         list_str = crm_element_value(output, "st_output");
 
         if (list_str) {
             *list_info = strdup(list_str);
         }
     }
 
     if (output) {
         free_xml(output);
     }
 
     return rc;
 }
 
 static int
 stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout)
 {
     return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL);
 }
 
 static int
 stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port,
                    int timeout)
 {
     return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL);
 }
 
 static int
 stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action,
                   int timeout, int tolerance)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_xml_node(NULL, __FUNCTION__);
     crm_xml_add(data, F_STONITH_TARGET, node);
     crm_xml_add(data, F_STONITH_ACTION, action);
     crm_xml_add_int(data, F_STONITH_TIMEOUT, timeout);
     crm_xml_add_int(data, F_STONITH_TOLERANCE, tolerance);
 
     rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_confirm(stonith_t * stonith, int call_options, const char *target)
 {
     return stonith_api_fence(stonith, call_options | st_opt_manual_ack, target, "off", 0, 0);
 }
 
 static int
 stonith_api_history(stonith_t * stonith, int call_options, const char *node,
                     stonith_history_t ** history, int timeout)
 {
     int rc = 0;
     xmlNode *data = NULL;
     xmlNode *output = NULL;
     stonith_history_t *last = NULL;
 
     *history = NULL;
 
     if (node) {
         data = create_xml_node(NULL, __FUNCTION__);
         crm_xml_add(data, F_STONITH_TARGET, node);
     }
 
     rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output,
                               call_options | st_opt_sync_call, timeout);
     free_xml(data);
 
     if (rc == 0) {
         xmlNode *op = NULL;
         xmlNode *reply = get_xpath_object("//" F_STONITH_HISTORY_LIST, output, LOG_ERR);
 
         for (op = __xml_first_child(reply); op != NULL; op = __xml_next(op)) {
             stonith_history_t *kvp;
             int completed;
 
             kvp = calloc(1, sizeof(stonith_history_t));
             kvp->target = crm_element_value_copy(op, F_STONITH_TARGET);
             kvp->action = crm_element_value_copy(op, F_STONITH_ACTION);
             kvp->origin = crm_element_value_copy(op, F_STONITH_ORIGIN);
             kvp->delegate = crm_element_value_copy(op, F_STONITH_DELEGATE);
             kvp->client = crm_element_value_copy(op, F_STONITH_CLIENTNAME);
             crm_element_value_int(op, F_STONITH_DATE, &completed);
             kvp->completed = (time_t) completed;
             crm_element_value_int(op, F_STONITH_STATE, &kvp->state);
 
             if (last) {
                 last->next = kvp;
             } else {
                 *history = kvp;
             }
             last = kvp;
         }
     }
     return rc;
 }
 
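 /*!
  * \brief Free a fencing history list
  *
  * \param[in] history  List returned by the history() API method (may be NULL)
  */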
 void stonith_history_free(stonith_history_t *history)
 {
     stonith_history_t *hp, *hp_old;
 
     for (hp = history; hp; hp_old = hp, hp = hp->next, free(hp_old)) {
         free(hp->target);
         free(hp->action);
         free(hp->origin);
         free(hp->delegate);
         free(hp->client);
     }
 }
 
 /*!
  * \brief Deprecated (use stonith_get_namespace() instead)
  */
 const char *
 get_stonith_provider(const char *agent, const char *provider)
 {
     return stonith_namespace2text(stonith_get_namespace(agent, provider));
 }
 
 static gint
 stonithlib_GCompareFunc(gconstpointer a, gconstpointer b)
 {
     int rc = 0;
     const stonith_notify_client_t *a_client = a;
     const stonith_notify_client_t *b_client = b;
 
     CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0);
     rc = strcmp(a_client->event, b_client->event);
     if (rc == 0) {
         if (a_client->notify == NULL || b_client->notify == NULL) {
             return 0;
 
         } else if (a_client->notify == b_client->notify) {
             return 0;
 
         } else if (((long)a_client->notify) < ((long)b_client->notify)) {
             crm_err("callbacks for %s are not equal: %p vs. %p",
                     a_client->event, a_client->notify, b_client->notify);
             return -1;
         }
         crm_err("callbacks for %s are not equal: %p vs. %p",
                 a_client->event, a_client->notify, b_client->notify);
         return 1;
     }
     return rc;
 }
 
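 /*!
  * \brief Create XML for a fencer request
  *
  * \param[in] call_id       Call ID to add to request
  * \param[in] token         Registration token obtained when connecting
  * \param[in] op            Operation to request
  * \param[in] data          Request data to wrap (may be NULL)
  * \param[in] call_options  Group of enum stonith_call_options flags
  *
  * \return Newly created XML request (or NULL on error)
  */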
 xmlNode *
 stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options)
 {
     xmlNode *op_msg = create_xml_node(NULL, "stonith_command");
 
     CRM_CHECK(op_msg != NULL, return NULL);
     CRM_CHECK(token != NULL, return NULL);
 
     crm_xml_add(op_msg, F_XML_TAGNAME, "stonith_command");
 
     crm_xml_add(op_msg, F_TYPE, T_STONITH_NG);
     crm_xml_add(op_msg, F_STONITH_CALLBACK_TOKEN, token);
     crm_xml_add(op_msg, F_STONITH_OPERATION, op);
     crm_xml_add_int(op_msg, F_STONITH_CALLID, call_id);
     crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options);
     crm_xml_add_int(op_msg, F_STONITH_CALLOPTS, call_options);
 
     if (data != NULL) {
         add_message_xml(op_msg, F_STONITH_CALLDATA, data);
     }
 
     return op_msg;
 }
 
 static void
 stonith_destroy_op_callback(gpointer data)
 {
     stonith_callback_client_t *blob = data;
 
     if (blob->timer && blob->timer->ref > 0) {
         g_source_remove(blob->timer->ref);
     }
     free(blob->timer);
     free(blob);
 }
 
 static int
 stonith_api_signoff(stonith_t * stonith)
 {
     stonith_private_t *native = stonith->st_private;
 
     crm_debug("Disconnecting from the fencer");
 
     if (native->source != NULL) {
         /* Attached to mainloop */
         mainloop_del_ipc_client(native->source);
         native->source = NULL;
         native->ipc = NULL;
 
     } else if (native->ipc) {
         /* Not attached to mainloop */
         crm_ipc_t *ipc = native->ipc;
 
         native->ipc = NULL;
         crm_ipc_close(ipc);
         crm_ipc_destroy(ipc);
     }
 
     free(native->token); native->token = NULL;
     stonith->state = stonith_disconnected;
     return pcmk_ok;
 }
 
 static int
 stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks)
 {
     stonith_private_t *private = stonith->st_private;
 
     if (all_callbacks) {
         private->op_callback = NULL;
         g_hash_table_destroy(private->stonith_op_callback_table);
         private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                                                    NULL,
                                                                    stonith_destroy_op_callback);
 
     } else if (call_id == 0) {
         private->op_callback = NULL;
 
     } else {
         g_hash_table_remove(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
     }
     return pcmk_ok;
 }
 
 static void
 invoke_callback(stonith_t * st, int call_id, int rc, void *userdata,
                 void (*callback) (stonith_t * st, stonith_callback_data_t * data))
 {
     stonith_callback_data_t data = { 0, };
 
     data.call_id = call_id;
     data.rc = rc;
     data.userdata = userdata;
 
     callback(st, &data);
 }
 
 static void
 stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc)
 {
     stonith_private_t *private = NULL;
     stonith_callback_client_t *blob = NULL;
     stonith_callback_client_t local_blob;
 
     CRM_CHECK(stonith != NULL, return);
     CRM_CHECK(stonith->st_private != NULL, return);
 
     private = stonith->st_private;
 
     local_blob.id = NULL;
     local_blob.callback = NULL;
     local_blob.user_data = NULL;
     local_blob.only_success = FALSE;
 
     if (msg != NULL) {
         crm_element_value_int(msg, F_STONITH_RC, &rc);
         crm_element_value_int(msg, F_STONITH_CALLID, &call_id);
     }
 
     CRM_CHECK(call_id > 0, crm_log_xml_err(msg, "Bad result"));
 
     blob = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
 
     if (blob != NULL) {
         local_blob = *blob;
         blob = NULL;
 
         stonith_api_del_callback(stonith, call_id, FALSE);
 
     } else {
         crm_trace("No callback found for call %d", call_id);
         local_blob.callback = NULL;
     }
 
     if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) {
         crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id);
         invoke_callback(stonith, call_id, rc, local_blob.user_data, local_blob.callback);
 
     } else if (private->op_callback == NULL && rc != pcmk_ok) {
         crm_warn("Fencing command failed: %s", pcmk_strerror(rc));
         crm_log_xml_debug(msg, "Failed fence update");
     }
 
     if (private->op_callback != NULL) {
         crm_trace("Invoking global callback for call %d", call_id);
         invoke_callback(stonith, call_id, rc, NULL, private->op_callback);
     }
     crm_trace("OP callback activated.");
 }
 
 static gboolean
 stonith_async_timeout_handler(gpointer data)
 {
     struct timer_rec_s *timer = data;
 
     crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout);
     stonith_perform_callback(timer->stonith, NULL, timer->call_id, -ETIME);
 
     /* Always return TRUE, never remove the handler;
      * that is done in stonith_api_del_callback()
      */
     return TRUE;
 }
 
 static void
 set_callback_timeout(stonith_callback_client_t * callback, stonith_t * stonith, int call_id,
                      int timeout)
 {
     struct timer_rec_s *async_timer = callback->timer;
 
     if (timeout <= 0) {
         return;
     }
 
     if (!async_timer) {
         async_timer = calloc(1, sizeof(struct timer_rec_s));
         callback->timer = async_timer;
     }
 
     async_timer->stonith = stonith;
     async_timer->call_id = call_id;
     /* Allow a fair bit of grace so the server has a chance to tell us about
      * a timeout itself; this timer is only a fallback
      */
     async_timer->timeout = (timeout + 60) * 1000;
     if (async_timer->ref) {
         g_source_remove(async_timer->ref);
     }
     async_timer->ref =
         g_timeout_add(async_timer->timeout, stonith_async_timeout_handler, async_timer);
 }
 
 static void
 update_callback_timeout(int call_id, int timeout, stonith_t * st)
 {
     stonith_callback_client_t *callback = NULL;
     stonith_private_t *private = st->st_private;
 
     callback = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
     if (!callback || !callback->allow_timeout_updates) {
         return;
     }
 
     set_callback_timeout(callback, st, call_id, timeout);
 }
 
 static int
 stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata)
 {
     const char *type = NULL;
     struct notify_blob_s blob;
 
     stonith_t *st = userdata;
     stonith_private_t *private = NULL;
 
     CRM_ASSERT(st != NULL);
     private = st->st_private;
 
     blob.stonith = st;
     blob.xml = string2xml(buffer);
     if (blob.xml == NULL) {
         crm_warn("Received malformed message from fencer: %s", buffer);
         return 0;
     }
 
     /* do callbacks */
     type = crm_element_value(blob.xml, F_TYPE);
     crm_trace("Activating %s callbacks...", type);
 
     if (safe_str_eq(type, T_STONITH_NG)) {
         stonith_perform_callback(st, blob.xml, 0, 0);
 
     } else if (safe_str_eq(type, T_STONITH_NOTIFY)) {
         g_list_foreach(private->notify_list, stonith_send_notification, &blob);
     } else if (safe_str_eq(type, T_STONITH_TIMEOUT_VALUE)) {
         int call_id = 0;
         int timeout = 0;
 
         crm_element_value_int(blob.xml, F_STONITH_TIMEOUT, &timeout);
         crm_element_value_int(blob.xml, F_STONITH_CALLID, &call_id);
 
         update_callback_timeout(call_id, timeout, st);
     } else {
         crm_err("Unknown message type: %s", type);
         crm_log_xml_warn(blob.xml, "BadReply");
     }
 
     free_xml(blob.xml);
     return 1;
 }
 
 static int
 stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd)
 {
     int rc = pcmk_ok;
     stonith_private_t *native = stonith->st_private;
 
     static struct ipc_client_callbacks st_callbacks = {
         .dispatch = stonith_dispatch_internal,
         .destroy = stonith_connection_destroy
     };
 
     crm_trace("Connecting command channel");
 
     stonith->state = stonith_connected_command;
     if (stonith_fd) {
         /* No mainloop */
         native->ipc = crm_ipc_new("stonith-ng", 0);
 
         if (native->ipc && crm_ipc_connect(native->ipc)) {
             *stonith_fd = crm_ipc_get_fd(native->ipc);
         } else if (native->ipc) {
             crm_perror(LOG_ERR, "Connection to fencer failed");
             rc = -ENOTCONN;
         }
 
     } else {
         /* With mainloop */
         native->source =
             mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks);
         native->ipc = mainloop_get_ipc_client(native->source);
     }
 
     if (native->ipc == NULL) {
         crm_debug("Could not connect to the Stonith API");
         rc = -ENOTCONN;
     }
 
     if (rc == pcmk_ok) {
         xmlNode *reply = NULL;
         xmlNode *hello = create_xml_node(NULL, "stonith_command");
 
         crm_xml_add(hello, F_TYPE, T_STONITH_NG);
         crm_xml_add(hello, F_STONITH_OPERATION, CRM_OP_REGISTER);
         crm_xml_add(hello, F_STONITH_CLIENTNAME, name);
         rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply);
 
         if (rc < 0) {
             crm_perror(LOG_DEBUG, "Couldn't complete registration with the fencing API: %d", rc);
             rc = -ECOMM;
 
         } else if (reply == NULL) {
             crm_err("Did not receive registration reply");
             rc = -EPROTO;
 
         } else {
             const char *msg_type = crm_element_value(reply, F_STONITH_OPERATION);
             const char *tmp_ticket = crm_element_value(reply, F_STONITH_CLIENTID);
 
             if (safe_str_neq(msg_type, CRM_OP_REGISTER)) {
                 crm_err("Invalid registration message: %s", msg_type);
                 crm_log_xml_err(reply, "Bad reply");
                 rc = -EPROTO;
 
             } else if (tmp_ticket == NULL) {
                 crm_err("No registration token provided");
                 crm_log_xml_err(reply, "Bad reply");
                 rc = -EPROTO;
 
             } else {
                 crm_trace("Obtained registration token: %s", tmp_ticket);
                 native->token = strdup(tmp_ticket);
                 rc = pcmk_ok;
             }
         }
 
         free_xml(reply);
         free_xml(hello);
     }
 
     if (rc == pcmk_ok) {
 #if HAVE_MSGFROMIPC_TIMEOUT
         stonith->call_timeout = MAX_IPC_DELAY;
 #endif
         crm_debug("Connection to fencer successful");
         return pcmk_ok;
     }
 
     crm_debug("Connection to fencer failed: %s", pcmk_strerror(rc));
     stonith->cmds->disconnect(stonith);
     return rc;
 }
 
 static int
 stonith_set_notification(stonith_t * stonith, const char *callback, int enabled)
 {
     int rc = pcmk_ok;
     xmlNode *notify_msg = create_xml_node(NULL, __FUNCTION__);
     stonith_private_t *native = stonith->st_private;
 
     if (stonith->state != stonith_disconnected) {
 
         crm_xml_add(notify_msg, F_STONITH_OPERATION, T_STONITH_NOTIFY);
         if (enabled) {
             crm_xml_add(notify_msg, F_STONITH_NOTIFY_ACTIVATE, callback);
         } else {
             crm_xml_add(notify_msg, F_STONITH_NOTIFY_DEACTIVATE, callback);
         }
 
         rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL);
         if (rc < 0) {
             crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc);
             rc = -ECOMM;
         } else {
             rc = pcmk_ok;
         }
     }
 
     free_xml(notify_msg);
     return rc;
 }
 
 static int
 stonith_api_add_notification(stonith_t * stonith, const char *event,
                              void (*callback) (stonith_t * stonith, stonith_event_t * e))
 {
     GList *list_item = NULL;
     stonith_notify_client_t *new_client = NULL;
     stonith_private_t *private = NULL;
 
     private = stonith->st_private;
     crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list));
 
     new_client = calloc(1, sizeof(stonith_notify_client_t));
     new_client->event = event;
     new_client->notify = callback;
 
     list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc);
 
     if (list_item != NULL) {
         crm_warn("Callback already present");
         free(new_client);
         return -ENOTUNIQ;
 
     } else {
         private->notify_list = g_list_append(private->notify_list, new_client);
 
         stonith_set_notification(stonith, event, 1);
 
         crm_trace("Callback added (%d)", g_list_length(private->notify_list));
     }
     return pcmk_ok;
 }
 
 static int
 stonith_api_del_notification(stonith_t * stonith, const char *event)
 {
     GList *list_item = NULL;
     stonith_notify_client_t *new_client = NULL;
     stonith_private_t *private = NULL;
 
     crm_debug("Removing callback for %s events", event);
 
     private = stonith->st_private;
     new_client = calloc(1, sizeof(stonith_notify_client_t));
     new_client->event = event;
     new_client->notify = NULL;
 
     list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc);
 
     stonith_set_notification(stonith, event, 0);
 
     if (list_item != NULL) {
         stonith_notify_client_t *list_client = list_item->data;
 
         private->notify_list = g_list_remove(private->notify_list, list_client);
         free(list_client);
 
         crm_trace("Removed callback");
 
     } else {
         crm_trace("Callback not present");
     }
     free(new_client);
     return pcmk_ok;
 }
 
 static int
 stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, int options,
                          void *user_data, const char *callback_name,
                          void (*callback) (stonith_t * st, stonith_callback_data_t * data))
 {
     stonith_callback_client_t *blob = NULL;
     stonith_private_t *private = NULL;
 
     CRM_CHECK(stonith != NULL, return -EINVAL);
     CRM_CHECK(stonith->st_private != NULL, return -EINVAL);
     private = stonith->st_private;
 
     if (call_id == 0) {
         private->op_callback = callback;
 
     } else if (call_id < 0) {
         if (!(options & st_opt_report_only_success)) {
             crm_trace("Call failed, calling %s: %s", callback_name, pcmk_strerror(call_id));
             invoke_callback(stonith, call_id, call_id, user_data, callback);
         } else {
             crm_warn("Fencer call failed: %s", pcmk_strerror(call_id));
         }
         return FALSE;
     }
 
     blob = calloc(1, sizeof(stonith_callback_client_t));
     blob->id = callback_name;
     blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE;
     blob->user_data = user_data;
     blob->callback = callback;
     blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? TRUE : FALSE;
 
     if (timeout > 0) {
         set_callback_timeout(blob, stonith, call_id, timeout);
     }
 
     g_hash_table_insert(private->stonith_op_callback_table, GINT_TO_POINTER(call_id), blob);
     crm_trace("Added callback to %s for call %d", callback_name, call_id);
 
     return TRUE;
 }
 
 static void
 stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data)
 {
     int call = GPOINTER_TO_INT(key);
     stonith_callback_client_t *blob = value;
 
     crm_debug("Call %d (%s): pending", call, crm_str(blob->id));
 }
 
 void
 stonith_dump_pending_callbacks(stonith_t * stonith)
 {
     stonith_private_t *private = stonith->st_private;
 
     if (private->stonith_op_callback_table == NULL) {
         return;
     }
     g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL);
 }
 
 /*
  <notify t="st_notify" subt="st_device_register" st_op="st_device_register" st_rc="0" >
    <st_calldata >
      <stonith_command t="stonith-ng" st_async_id="088fb640-431a-48b9-b2fc-c4ff78d0a2d9" st_op="st_device_register" st_callid="2" st_callopt="4096" st_timeout="0" st_clientid="088fb640-431a-48b9-b2fc-c4ff78d0a2d9" st_clientname="cts-fence-helper" >
        <st_calldata >
          <st_device_id id="test-id" origin="create_device_registration_xml" agent="fence_virsh" namespace="stonith-ng" >
            <attributes ipaddr="localhost" pcmk-portmal="some-host=pcmk-1 pcmk-3=3,4" login="root" identity_file="/root/.ssh/id_dsa" />
          </st_device_id>
        </st_calldata>
      </stonith_command>
    </st_calldata>
  </notify>
 
  <notify t="st_notify" subt="st_notify_fence" st_op="st_notify_fence" st_rc="0" >
    <st_calldata >
      <st_notify_fence st_rc="0" st_target="some-host" st_op="st_fence" st_delegate="test-id" st_origin="61dd7759-e229-4be7-b1f8-ef49dd14d9f0" />
    </st_calldata>
  </notify>
 */
 static stonith_event_t *
 xml_to_event(xmlNode * msg)
 {
     stonith_event_t *event = calloc(1, sizeof(stonith_event_t));
     const char *ntype = crm_element_value(msg, F_SUBTYPE);
     char *data_addr = crm_strdup_printf("//%s", ntype);
     xmlNode *data = get_xpath_object(data_addr, msg, LOG_DEBUG);
 
     crm_log_xml_trace(msg, "stonith_notify");
 
     crm_element_value_int(msg, F_STONITH_RC, &(event->result));
 
     if (safe_str_eq(ntype, T_STONITH_NOTIFY_FENCE)) {
         event->operation = crm_element_value_copy(msg, F_STONITH_OPERATION);
 
         if (data) {
             event->origin = crm_element_value_copy(data, F_STONITH_ORIGIN);
             event->action = crm_element_value_copy(data, F_STONITH_ACTION);
             event->target = crm_element_value_copy(data, F_STONITH_TARGET);
             event->executioner = crm_element_value_copy(data, F_STONITH_DELEGATE);
             event->id = crm_element_value_copy(data, F_STONITH_REMOTE_OP_ID);
             event->client_origin = crm_element_value_copy(data, F_STONITH_CLIENTNAME);
             event->device = crm_element_value_copy(data, F_STONITH_DEVICE);
 
         } else {
             crm_err("No data for %s event", ntype);
             crm_log_xml_notice(msg, "BadEvent");
         }
     }
 
     free(data_addr);
     return event;
 }
 
 static void
 event_free(stonith_event_t * event)
 {
     free(event->id);
     free(event->type);
     free(event->message);
     free(event->operation);
     free(event->origin);
     free(event->action);
     free(event->target);
     free(event->executioner);
     free(event->device);
     free(event->client_origin);
     free(event);
 }
 
 static void
 stonith_send_notification(gpointer data, gpointer user_data)
 {
     struct notify_blob_s *blob = user_data;
     stonith_notify_client_t *entry = data;
     stonith_event_t *st_event = NULL;
     const char *event = NULL;
 
     if (blob->xml == NULL) {
         crm_warn("Skipping callback - NULL message");
         return;
     }
 
     event = crm_element_value(blob->xml, F_SUBTYPE);
 
     if (entry == NULL) {
         crm_warn("Skipping callback - NULL callback client");
         return;
 
     } else if (entry->notify == NULL) {
         crm_warn("Skipping callback - NULL callback");
         return;
 
     } else if (safe_str_neq(entry->event, event)) {
         crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event);
         return;
     }
 
     st_event = xml_to_event(blob->xml);
 
     crm_trace("Invoking callback for %p/%s event...", entry, event);
     entry->notify(blob->stonith, st_event);
     crm_trace("Callback invoked...");
 
     event_free(st_event);
 }
 
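 /*!
  * \brief Send a request to the fencer
  *
  * \param[in]  stonith       Fencer connection
  * \param[in]  op            Operation to request
  * \param[in]  data          Request data (may be NULL)
  * \param[out] output_data   Where to store synchronous reply (may be NULL)
  * \param[in]  call_options  Group of enum stonith_call_options flags
  * \param[in]  timeout       Timeout for request (in seconds)
  *
  * \return Call ID for asynchronous requests, otherwise the result code from
  *         the reply (pcmk_ok on success) or a negative error code
  */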
 int
 stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data,
                      int call_options, int timeout)
 {
     int rc = 0;
     int reply_id = -1;
     enum crm_ipc_flags ipc_flags = crm_ipc_flags_none;
 
     xmlNode *op_msg = NULL;
     xmlNode *op_reply = NULL;
 
     stonith_private_t *native = stonith->st_private;
 
     if (stonith->state == stonith_disconnected) {
         return -ENOTCONN;
     }
 
     if (output_data != NULL) {
         *output_data = NULL;
     }
 
     if (op == NULL) {
         crm_err("No operation specified");
         return -EINVAL;
     }
 
     if (call_options & st_opt_sync_call) {
         ipc_flags |= crm_ipc_client_response;
     }
 
     stonith->call_id++;
     /* prevent call_id from being negative (or zero) and conflicting
      * with the stonith_errors enum
      */
     if (stonith->call_id < 1) {
         stonith->call_id = 1;
     }
 
     CRM_CHECK(native->token != NULL, ;);
     op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options);
     if (op_msg == NULL) {
         return -EINVAL;
     }
 
     crm_xml_add_int(op_msg, F_STONITH_TIMEOUT, timeout);
     crm_trace("Sending %s message to fencer with timeout %ds", op, timeout);
 
     rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, 1000 * (timeout + 60), &op_reply);
     free_xml(op_msg);
 
     if (rc < 0) {
         crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc);
         rc = -ECOMM;
         goto done;
     }
 
     crm_log_xml_trace(op_reply, "Reply");
 
     if (!(call_options & st_opt_sync_call)) {
         crm_trace("Async call %d, returning", stonith->call_id);
         CRM_CHECK(stonith->call_id != 0, return -EPROTO);
         free_xml(op_reply);
 
         return stonith->call_id;
     }
 
     rc = pcmk_ok;
     crm_element_value_int(op_reply, F_STONITH_CALLID, &reply_id);
 
     if (reply_id == stonith->call_id) {
         crm_trace("Synchronous reply %d received", reply_id);
 
         if (crm_element_value_int(op_reply, F_STONITH_RC, &rc) != 0) {
             rc = -ENOMSG;
         }
 
         if ((call_options & st_opt_discard_reply) || output_data == NULL) {
             crm_trace("Discarding reply");
 
         } else {
             *output_data = op_reply;
             op_reply = NULL;    /* Prevent subsequent free */
         }
 
     } else if (reply_id <= 0) {
         crm_err("Received bad reply: No id set");
         crm_log_xml_err(op_reply, "Bad reply");
         free_xml(op_reply);
         rc = -ENOMSG;
 
     } else {
         crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id);
         crm_log_xml_err(op_reply, "Old reply");
         free_xml(op_reply);
         rc = -ENOMSG;
     }
 
   done:
     if (crm_ipc_connected(native->ipc) == FALSE) {
         crm_err("Fencer disconnected");
         stonith->state = stonith_disconnected;
     }
 
     free_xml(op_reply);
     return rc;
 }
 
 /* Not used with mainloop */
 bool
 stonith_dispatch(stonith_t * st)
 {
     gboolean stay_connected = TRUE;
     stonith_private_t *private = NULL;
 
     CRM_ASSERT(st != NULL);
     private = st->st_private;
 
     while (crm_ipc_ready(private->ipc)) {
 
         if (crm_ipc_read(private->ipc) > 0) {
             const char *msg = crm_ipc_buffer(private->ipc);
 
             stonith_dispatch_internal(msg, strlen(msg), st);
         }
 
         if (crm_ipc_connected(private->ipc) == FALSE) {
             crm_err("Connection closed");
             stay_connected = FALSE;
         }
     }
 
     return stay_connected;
 }
 
 static int
 stonith_api_free(stonith_t * stonith)
 {
     int rc = pcmk_ok;
 
     crm_trace("Destroying %p", stonith);
 
     if (stonith->state != stonith_disconnected) {
         crm_trace("Disconnecting %p first", stonith);
         rc = stonith->cmds->disconnect(stonith);
     }
 
     if (stonith->state == stonith_disconnected) {
         stonith_private_t *private = stonith->st_private;
 
         crm_trace("Removing %d callbacks", g_hash_table_size(private->stonith_op_callback_table));
         g_hash_table_destroy(private->stonith_op_callback_table);
 
         crm_trace("Destroying %d notification clients", g_list_length(private->notify_list));
         g_list_free_full(private->notify_list, free);
 
         free(stonith->st_private);
         free(stonith->cmds);
         free(stonith);
 
     } else {
         crm_err("Not freeing active connection: %s (%d)", pcmk_strerror(rc), rc);
     }
 
     return rc;
 }
 
 void
 stonith_api_delete(stonith_t * stonith)
 {
     crm_trace("Destroying %p", stonith);
     if(stonith) {
         stonith->cmds->free(stonith);
     }
 }
 
 static int
 stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id,
                      const char *namespace_s, const char *agent,
                      stonith_key_value_t *params, int timeout, char **output,
                      char **error_output)
 {
     /* Validation should be done directly via the agent, so we can get it from
      * stonith_admin when the cluster is not running, which is important for
      * higher-level tools.
      */
 
     int rc = pcmk_ok;
 
     /* Use a dummy node name in case the agent requires a target. We assume the
      * actual target doesn't matter for validation purposes (if in practice,
      * that is incorrect, we will need to allow the caller to pass the target).
      */
     const char *target = "node1";
 
     GHashTable *params_table = crm_str_table_new();
 
     // Convert parameter list to a hash table
     for (; params; params = params->next) {
 
         // Strip out Pacemaker-implemented parameters
         if (!crm_starts_with(params->key, "pcmk_")
                 && strcmp(params->key, "provides")
                 && strcmp(params->key, "stonith-timeout")) {
             g_hash_table_insert(params_table, strdup(params->key),
                                 strdup(params->value));
         }
     }
 
 #if SUPPORT_CIBSECRETS
     rc = replace_secret_params(rsc_id, params_table);
     if (rc < 0) {
         crm_warn("Could not replace secret parameters for validation of %s: %s",
                  agent, pcmk_strerror(rc));
     }
 #endif
 
     if (output) {
         *output = NULL;
     }
     if (error_output) {
         *error_output = NULL;
     }
 
     switch (stonith_get_namespace(agent, namespace_s)) {
         case st_namespace_rhcs:
             rc = stonith__rhcs_validate(st, call_options, target, agent,
                                         params_table, timeout, output,
                                         error_output);
             break;
 
 #if HAVE_STONITH_STONITH_H
         case st_namespace_lha:
             rc = stonith__lha_validate(st, call_options, target, agent,
                                        params_table, timeout, output,
                                        error_output);
             break;
 #endif
 
         default:
             rc = -EINVAL;
             errno = EINVAL;
             crm_perror(LOG_ERR,
                        "Agent %s not found or does not support validation",
                        agent);
             break;
     }
     g_hash_table_destroy(params_table);
     return rc;
 }
 
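 /*!
  * \brief Create a new fencer API connection object
  *
  * \return Newly allocated stonith_t object with all methods initialized
  *
  * \note The caller is responsible for freeing the result with
  *       stonith_api_delete().
  */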
 stonith_t *
 stonith_api_new(void)
 {
     stonith_t *new_stonith = NULL;
     stonith_private_t *private = NULL;
 
     new_stonith = calloc(1, sizeof(stonith_t));
     private = calloc(1, sizeof(stonith_private_t));
     new_stonith->st_private = private;
 
     private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                                                NULL, stonith_destroy_op_callback);
     private->notify_list = NULL;
 
     new_stonith->call_id = 1;
     new_stonith->state = stonith_disconnected;
 
     new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t));
 
 /* *INDENT-OFF* */
     new_stonith->cmds->free       = stonith_api_free;
     new_stonith->cmds->connect    = stonith_api_signon;
     new_stonith->cmds->disconnect = stonith_api_signoff;
 
     new_stonith->cmds->list       = stonith_api_list;
     new_stonith->cmds->monitor    = stonith_api_monitor;
     new_stonith->cmds->status     = stonith_api_status;
     new_stonith->cmds->fence      = stonith_api_fence;
     new_stonith->cmds->confirm    = stonith_api_confirm;
     new_stonith->cmds->history    = stonith_api_history;
 
     new_stonith->cmds->list_agents  = stonith_api_device_list;
     new_stonith->cmds->metadata     = stonith_api_device_metadata;
 
     new_stonith->cmds->query           = stonith_api_query;
     new_stonith->cmds->remove_device   = stonith_api_remove_device;
     new_stonith->cmds->register_device = stonith_api_register_device;
 
     new_stonith->cmds->remove_level          = stonith_api_remove_level;
     new_stonith->cmds->remove_level_full     = stonith_api_remove_level_full;
     new_stonith->cmds->register_level        = stonith_api_register_level;
     new_stonith->cmds->register_level_full   = stonith_api_register_level_full;
 
     new_stonith->cmds->remove_callback       = stonith_api_del_callback;
     new_stonith->cmds->register_callback     = stonith_api_add_callback;
     new_stonith->cmds->remove_notification   = stonith_api_del_notification;
     new_stonith->cmds->register_notification = stonith_api_add_notification;
 
     new_stonith->cmds->validate              = stonith_api_validate;
 /* *INDENT-ON* */
 
     return new_stonith;
 }
 
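 /*!
  * \brief Append a key/value pair to a list
  *
  * \param[in,out] head   List to append to (NULL to start a new list)
  * \param[in]     key    Key to add (copied; may be NULL)
  * \param[in]     value  Value to add (copied; may be NULL)
  *
  * \return Head of (possibly new) list
  */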
 stonith_key_value_t *
 stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value)
 {
     stonith_key_value_t *p, *end;
 
     p = calloc(1, sizeof(stonith_key_value_t));
     if (key) {
         p->key = strdup(key);
     }
     if (value) {
         p->value = strdup(value);
     }
 
     end = head;
     while (end && end->next) {
         end = end->next;
     }
 
     if (end) {
         end->next = p;
     } else {
         head = p;
     }
 
     return head;
 }
 
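 /*!
  * \brief Free all entries in a key/value list
  *
  * \param[in] head    List to free
  * \param[in] keys    Whether to free each entry's key
  * \param[in] values  Whether to free each entry's value
  */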
 void
 stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values)
 {
     stonith_key_value_t *p;
 
     while (head) {
         p = head->next;
         if (keys) {
             free(head->key);
         }
         if (values) {
             free(head->value);
         }
         free(head);
         head = p;
     }
 }
 
 #define api_log_open() openlog("stonith-api", LOG_CONS | LOG_NDELAY | LOG_PID, LOG_DAEMON)
 #define api_log(level, fmt, args...) syslog(level, "%s: "fmt, __FUNCTION__, args)
 
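 /*!
  * \brief Fence a node over a temporary fencer connection
  *
  * \param[in] nodeid   Node ID of target (used only if uname is NULL)
  * \param[in] uname    Node name of target
  * \param[in] timeout  Timeout for fencing operation (in seconds)
  * \param[in] off      If true, power the target off rather than reboot it
  *
  * \return pcmk_ok on success, negative error code otherwise
  */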
 int
 stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off)
 {
     char *name = NULL;
     const char *action = "reboot";
 
     int rc = -EPROTO;
     stonith_t *st = NULL;
     enum stonith_call_options opts = st_opt_sync_call | st_opt_allow_suicide;
 
     api_log_open();
     st = stonith_api_new();
     if (st) {
         rc = st->cmds->connect(st, "stonith-api", NULL);
         if(rc != pcmk_ok) {
             api_log(LOG_ERR, "Connection failed, could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc);
         }
     }
 
     if (uname != NULL) {
         name = strdup(uname);
 
     } else if (nodeid > 0) {
         opts |= st_opt_cs_nodeid;
         name = crm_itoa(nodeid);
     }
 
     if (off) {
         action = "off";
     }
 
     if (rc == pcmk_ok) {
         rc = st->cmds->fence(st, opts, name, action, timeout, 0);
         if(rc != pcmk_ok) {
             api_log(LOG_ERR, "Could not kick (%s) node %u/%s : %s (%d)", action, nodeid, uname, pcmk_strerror(rc), rc);
         } else {
             api_log(LOG_NOTICE, "Node %u/%s kicked: %s ", nodeid, uname, action);
         }
     }
 
     if (st) {
         st->cmds->disconnect(st);
         stonith_api_delete(st);
     }
 
     free(name);
     return rc;
 }
 
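 /*!
  * \brief Check when a node was last fenced
  *
  * \param[in] nodeid       Node ID of target (used only if uname is NULL)
  * \param[in] uname        Node name of target
  * \param[in] in_progress  If true, return the current time if fencing of the
  *                         target is still in progress, instead of the last
  *                         completed fencing time
  *
  * \return Time of last completed fencing of target, or 0 if none is known
  */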
 time_t
 stonith_api_time(uint32_t nodeid, const char *uname, bool in_progress)
 {
     int rc = 0;
     char *name = NULL;
 
     time_t when = 0;
     stonith_t *st = NULL;
     stonith_history_t *history = NULL, *hp = NULL;
     enum stonith_call_options opts = st_opt_sync_call;
 
     st = stonith_api_new();
     if (st) {
         rc = st->cmds->connect(st, "stonith-api", NULL);
         if(rc != pcmk_ok) {
             api_log(LOG_NOTICE, "Connection failed: %s (%d)", pcmk_strerror(rc), rc);
         }
     }
 
     if (uname != NULL) {
         name = strdup(uname);
 
     } else if (nodeid > 0) {
         opts |= st_opt_cs_nodeid;
         name = crm_itoa(nodeid);
     }
 
     if (st && rc == pcmk_ok) {
         int entries = 0;
         int progress = 0;
         int completed = 0;
 
         rc = st->cmds->history(st, opts, name, &history, 120);
 
         for (hp = history; hp; hp = hp->next) {
             entries++;
             if (in_progress) {
                 progress++;
                 if (hp->state != st_done && hp->state != st_failed) {
                     when = time(NULL);
                 }
 
             } else if (hp->state == st_done) {
                 completed++;
                 if (hp->completed > when) {
                     when = hp->completed;
                 }
             }
         }
 
         stonith_history_free(history);
 
         if(rc == pcmk_ok) {
             api_log(LOG_INFO, "Found %d entries for %u/%s: %d in progress, %d completed", entries, nodeid, uname, progress, completed);
         } else {
             api_log(LOG_ERR, "Could not retrieve fence history for %u/%s: %s (%d)", nodeid, uname, pcmk_strerror(rc), rc);
         }
     }
 
     if (st) {
         st->cmds->disconnect(st);
         stonith_api_delete(st);
     }
 
     if(when) {
         api_log(LOG_INFO, "Node %u/%s last kicked at: %ld", nodeid, uname, (long int)when);
     }
     free(name);
     return when;
 }
diff --git a/lib/pengine/container.c b/lib/pengine/container.c
index d629654058..4578af399f 100644
--- a/lib/pengine/container.c
+++ b/lib/pengine/container.c
@@ -1,1464 +1,1463 @@
 /*
  * Copyright 2004-2018 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <unpack.h>
 #include <crm/msg_xml.h>
 
 #define VARIANT_CONTAINER 1
 #include "./variant.h"
 
 void tuple_free(container_grouping_t *tuple);
 
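 /*!
  * \internal
  * \brief Get the next IPv4 address in sequence
  *
  * \param[in] last_ip  Most recently assigned address
  *
  * \return Newly allocated string with the next address, or NULL if the
  *         address cannot be parsed or incremented
  */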
 static char *
 next_ip(const char *last_ip)
 {
     unsigned int oct1 = 0;
     unsigned int oct2 = 0;
     unsigned int oct3 = 0;
     unsigned int oct4 = 0;
     int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
 
     if (rc != 4) {
         /* @TODO check for IPv6 */
         return NULL;
 
     } else if (oct3 > 253) {
         return NULL;
 
     } else if (oct4 > 253) {
         ++oct3;
         oct4 = 1;
 
     } else {
         ++oct4;
     }
 
     return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
 }
 
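 /*!
  * \internal
  * \brief Assign an IP address to a container grouping and format the
  *        corresponding container engine option
  *
  * \param[in,out] data    Container variant data
  * \param[in,out] tuple   Container grouping to assign an address to
  * \param[out]    buffer  Where to write the option string
  * \param[in]     max     Size of \p buffer
  *
  * \return Number of characters written to \p buffer (0 if none needed)
  */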
 static int
 allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max) 
 {
     if(data->ip_range_start == NULL) {
         return 0;
 
     } else if(data->ip_last) {
         tuple->ipaddr = next_ip(data->ip_last);
 
     } else {
         tuple->ipaddr = strdup(data->ip_range_start);
     }
 
     data->ip_last = tuple->ipaddr;
 #if 0
     return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d",
                     data->prefix, tuple->offset, tuple->ipaddr,
                     data->prefix, tuple->offset, data->prefix, tuple->offset);
 #else
     if (data->type == PE_CONTAINER_TYPE_DOCKER) {
         if (data->add_host == FALSE) {
             return 0;
         }
         return snprintf(buffer, max, " --add-host=%s-%d:%s",
                         data->prefix, tuple->offset, tuple->ipaddr);
     } else if (data->type == PE_CONTAINER_TYPE_RKT) {
         return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
                         tuple->ipaddr, data->prefix, tuple->offset);
     } else {
         return 0;
     }
 #endif
 }
 
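 /*!
  * \internal
  * \brief Create XML for an implicitly created OCF resource
  *
  * \param[in] name      Resource ID
  * \param[in] provider  OCF provider
  * \param[in] kind      OCF resource agent
  *
  * \return Newly created XML
  */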
 static xmlNode *
 create_resource(const char *name, const char *provider, const char *kind) 
 {
     xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     crm_xml_add(rsc, XML_ATTR_ID, name);
     crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
     crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
     crm_xml_add(rsc, XML_ATTR_TYPE, kind);
 
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Check whether cluster can manage resource inside container
  *
  * \param[in] data  Container variant data
  *
  * \return TRUE if networking configuration is acceptable, FALSE otherwise
  *
  * \note The resource is manageable if an IP range or control port has been
  *       specified. If a control port is used without an IP range, replicas per
  *       host must be 1.
  */
 static bool
 valid_network(container_variant_data_t *data)
 {
     if(data->ip_range_start) {
         return TRUE;
     }
     if(data->control_port) {
         if(data->replicas_per_host > 1) {
             pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
             data->replicas_per_host = 1;
             /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
         }
         return TRUE;
     }
     return FALSE;
 }
 
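 /*!
  * \internal
  * \brief Create an implicit IPaddr2 resource for a container grouping
  *
  * \param[in,out] parent    Parent resource to add the new resource to
  * \param[in,out] data      Container variant data
  * \param[in,out] tuple     Container grouping needing the IP
  * \param[in,out] data_set  Cluster working set
  *
  * \return TRUE on success (including when no IP range is configured),
  *         FALSE otherwise
  */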
 static bool
 create_ip_resource(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set) 
 {
     if(data->ip_range_start) {
         char *id = NULL;
         xmlNode *xml_ip = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr);
         crm_xml_sanitize_id(id);
         xml_ip = create_resource(id, "heartbeat", "IPaddr2");
         free(id);
 
         xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "ip", tuple->ipaddr);
         if(data->host_network) {
             crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
         }
 
         if(data->host_netmask) {
             crm_create_nvpair_xml(xml_obj, NULL,
                                   "cidr_netmask", data->host_netmask);
 
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
         }
 
         xml_obj = create_xml_node(xml_ip, "operations");
         crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         crm_log_xml_trace(xml_ip, "Container-ip");
         if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == FALSE) {
             return FALSE;
         }
 
         parent->children = g_list_append(parent->children, tuple->ip);
     }
     return TRUE;
 }
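 /* Sketch of what create_ip_resource() adds per replica (values assumed):
  * an ocf:heartbeat:IPaddr2 primitive named "<prefix>-ip-<addr>" with
  * instance attributes ip=<addr>, optional nic=<host-interface>,
  * cidr_netmask=<host-netmask, defaulting to 32>, and a 60s monitor op.
  */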
 
 static bool
 create_docker_resource(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set) 
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_docker = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset);
         crm_xml_sanitize_id(id);
         xml_docker = create_resource(id, "heartbeat", "docker");
         free(id);
 
         xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
         offset += snprintf(buffer+offset, max-offset, " --restart=no");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this helps applications that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                                data->prefix, tuple->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " -e PCMK_stderr=1");
 
         if(data->docker_network) {
 //        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
             offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             container_mount_t *mount = pIter->data;
 
             if(mount->flags) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, tuple->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
             }
             if(mount->options) {
                 offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
             }
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             container_port_t *port = pIter->data;
 
             if(tuple->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                                    tuple->ipaddr, port->source, port->target);
             } else if(safe_str_neq(data->docker_network, "host")) {
                 // No need to do port mapping if net=host
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
             }
         }
 
         if(data->docker_run_options) {
             offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
         }
 
         if(data->docker_host_options) {
             offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if(tuple->child) {
             if(data->docker_run_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->docker_run_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive, we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
          * Arguably better to control this API access with ACLs like
          * "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd",
          *                           "/usr/libexec/pacemaker/pacemaker-execd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if(data->docker_run_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->docker_run_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
 
         xml_obj = create_xml_node(xml_docker, "operations");
         crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
         crm_log_xml_trace(xml_docker, "Container-docker");
         if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, tuple->docker);
         return TRUE;
 }
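 /* Illustrative run_opts assembled by create_docker_resource() for one
  * replica (hypothetical names; DEFAULT_REMOTE_PORT is normally 3121):
  *   " --restart=no -h httpd-bundle-0 -e PCMK_stderr=1 --net=br0
  *     -e PCMK_remote_port=3121 -v /srv/www:/var/www/html
  *     -p 192.168.122.131:80:80"
  * followed by any user-supplied "options" and the generated host options.
  */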
 
 static bool
 create_rkt_resource(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_docker = NULL;
         xmlNode *xml_obj = NULL;
 
         int volid = 0;
 
         id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset);
         crm_xml_sanitize_id(id);
         xml_docker = create_resource(id, "heartbeat", "rkt");
         free(id);
 
         xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but this helps applications that need their
          * hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                                data->prefix, tuple->offset);
         }
 
         offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_stderr=1");
 
         if(data->docker_network) {
 //        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
             offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             container_mount_t *mount = pIter->data;
 
             if(mount->flags) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, tuple->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
             }
             volid++;
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             container_port_t *port = pIter->data;
 
             if(tuple->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s",
                                    port->target, tuple->ipaddr, port->source);
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
             }
         }
 
         if(data->docker_run_options) {
             offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
         }
 
         if(data->docker_host_options) {
             offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if(tuple->child) {
             if(data->docker_run_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->docker_run_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       SBIN_DIR "/pacemaker-remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive, we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
          * Arguably better to control this API access with ACLs like
          * "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd",
          *                           "/usr/libexec/pacemaker/pacemaker-execd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if(data->docker_run_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       data->docker_run_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
 
         xml_obj = create_xml_node(xml_docker, "operations");
         crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         crm_log_xml_trace(xml_docker, "Container-rkt");
         if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, tuple->docker);
         return TRUE;
 }
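 /* create_rkt_resource() mirrors the Docker variant using rkt syntax, e.g.
  * (hypothetical values):
  *   " --hostname=httpd-bundle-0 --environment=PCMK_stderr=1 --net=br0
  *     --environment=PCMK_remote_port=3121
  *     --volume vol0,kind=host,source=/srv/www
  *     --mount volume=vol0,target=/var/www/html
  *     --port=80:192.168.122.131:80"
  * Note that --port takes target:ip:source here, whereas the Docker -p
  * option above takes ip:source:target.
  */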
 
 /*!
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
  * \param[in,out] rsc    Resource to modify
  * \param[in]     uname  Name of node to ban
  */
 static void
 disallow_node(resource_t *rsc, const char *uname)
 {
     gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
 
     if (match) {
         ((pe_node_t *) match)->weight = -INFINITY;
         ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
     }
     if (rsc->children) {
         GListPtr child;
 
         for (child = rsc->children; child != NULL; child = child->next) {
             disallow_node((resource_t *) (child->data), uname);
         }
     }
 }
 
 static bool
 create_remote_resource(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set) 
 {
     if (tuple->child && valid_network(data)) {
         GHashTableIter gIter;
         GListPtr rsc_iter = NULL;
         node_t *node = NULL;
         xmlNode *xml_remote = NULL;
         char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset);
         char *port_s = NULL;
         const char *uname = NULL;
         const char *connect_name = NULL;
 
         if (remote_id_conflict(id, data_set)) {
             free(id);
             // The biggest hammer we have
             id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset);
             CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
         }
 
         /* REMOTE_CONTAINER_HACK: When the connection does not have its own
          * IP, we use the magic string "#uname" as the server name, which
          * lets us support nested remotes (i.e. a bundle running on a remote
          * node).
          */
         connect_name = (tuple->ipaddr? tuple->ipaddr : "#uname");
 
         if (data->control_port == NULL) {
             port_s = crm_itoa(DEFAULT_REMOTE_PORT);
         }
 
         /* This sets tuple->docker as tuple->remote's container, which is
          * similar to what happens with guest nodes. This is how the PE knows
          * that the bundle node is fenced by recovering docker, and that
          * remote should be ordered relative to docker.
          */
         xml_remote = pe_create_remote_xml(NULL, id, tuple->docker->id,
                                           NULL, NULL, NULL,
                                           connect_name, (data->control_port?
                                           data->control_port : port_s));
         free(port_s);
 
         /* Abandon our created ID, and pull the copy from the XML, because we
          * need something that will get freed during data set cleanup to use as
          * the node ID and uname.
          */
         free(id);
         id = NULL;
         uname = ID(xml_remote);
 
         /* Ensure a node has been created for the guest (it may have already
          * been, if it has a permanent node attribute), and ensure its weight is
          * -INFINITY so no other resources can run on it.
          */
         node = pe_find_node(data_set->nodes, uname);
         if (node == NULL) {
             node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                   data_set);
         } else {
             node->weight = -INFINITY;
         }
         node->rsc_discover_mode = pe_discover_never;
 
         /* unpack_remote_nodes() ensures that each remote node and guest node
          * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
          * Unfortunately, a bundle has to be mostly unpacked before it's obvious
          * what nodes will be needed, so we do it just above.
          *
          * Worse, that means that the node may have been utilized while
          * unpacking other resources, without our weight correction. The most
          * likely place for this to happen is when common_unpack() calls
          * resource_location() to set a default score in symmetric clusters.
          * This adds a node *copy* to each resource's allowed nodes, and these
          * copies will have the wrong weight.
          *
          * As a hacky workaround, fix those copies here.
          *
          * @TODO Possible alternative: ensure bundles are unpacked before other
          * resources, so the weight is correct before any copies are made.
          */
         for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
             disallow_node((resource_t *) (rsc_iter->data), uname);
         }
 
         tuple->node = node_copy(node);
         tuple->node->weight = 500;
         tuple->node->rsc_discover_mode = pe_discover_exclusive;
 
         /* Ensure the node shows up as allowed and with the correct discovery set */
         if (tuple->child->allowed_nodes != NULL) {
             g_hash_table_destroy(tuple->child->allowed_nodes);
         }
         tuple->child->allowed_nodes = g_hash_table_new_full(crm_str_hash,
                                                             g_str_equal, NULL,
                                                             free);
         g_hash_table_insert(tuple->child->allowed_nodes, (gpointer) tuple->node->details->id, node_copy(tuple->node));
 
         {
             node_t *copy = node_copy(tuple->node);
             copy->weight = -INFINITY;
             g_hash_table_insert(tuple->child->parent->allowed_nodes, (gpointer) tuple->node->details->id, copy);
         }
         crm_log_xml_trace(xml_remote, "Container-remote");
         if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) {
             return FALSE;
         }
 
         g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
             if(is_remote_node(node)) {
                 /* Remote resources can only run on 'normal' cluster nodes */
                 node->weight = -INFINITY;
             }
         }
 
         tuple->node->details->remote_rsc = tuple->remote;
         tuple->remote->container = tuple->docker; // Ensures is_container_remote_node() functions correctly immediately
 
         /* A bundle's #kind is closer to "container" (guest node) than the
          * "remote" set by pe_create_node().
          */
         g_hash_table_insert(tuple->node->details->attrs,
                             strdup(CRM_ATTR_KIND), strdup("container"));
 
         /* One effect of this is that setup_container() will add
          * tuple->remote to tuple->docker's fillers, which will make
          * rsc_contains_remote_node() true for tuple->docker.
          *
          * tuple->child does NOT get added to tuple->docker's fillers.
          * The only noticeable effect if it did would be for its fail count to
          * be taken into account when checking tuple->docker's migration
          * threshold.
          */
         parent->children = g_list_append(parent->children, tuple->remote);
     }
     return TRUE;
 }
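 /* Net effect of create_remote_resource() (hypothetical names): replica 0 of
  * bundle "httpd-bundle" gains a Pacemaker Remote connection resource
  * "httpd-bundle-0" whose server is the replica IP (or the "#uname"
  * placeholder), whose port is control-port or DEFAULT_REMOTE_PORT, and
  * whose container is the replica's docker/rkt resource, plus a
  * guest-node-like pe_node_t entry reserved for that replica's resources.
  */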
 
 static bool
 create_container(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set)
 {
 
     if (data->type == PE_CONTAINER_TYPE_DOCKER &&
           create_docker_resource(parent, data, tuple, data_set) == FALSE) {
         return FALSE;
     }
     if (data->type == PE_CONTAINER_TYPE_RKT &&
           create_rkt_resource(parent, data, tuple, data_set) == FALSE) {
         return FALSE;
     }
 
     if(create_ip_resource(parent, data, tuple, data_set) == FALSE) {
         return FALSE;
     }
     if(create_remote_resource(parent, data, tuple, data_set) == FALSE) {
         return FALSE;
     }
     if(tuple->child && tuple->ipaddr) {
         add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr);
     }
 
     if(tuple->remote) {
         /*
          * Allow the remote connection resource to be allocated to a
          * different node than the one on which the docker container
          * is active.
          *
          * This makes it possible to have Pacemaker Remote nodes running
          * containers with pacemaker-remoted inside in order to start
          * services inside those containers.
          */
         set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes);
     }
 
     return TRUE;
 }
 
 static void
 mount_add(container_variant_data_t *container_data, const char *source,
           const char *target, const char *options, int flags)
 {
     container_mount_t *mount = calloc(1, sizeof(container_mount_t));
 
     mount->source = strdup(source);
     mount->target = strdup(target);
     if (options) {
         mount->options = strdup(options);
     }
     mount->flags = flags;
     container_data->mounts = g_list_append(container_data->mounts, mount);
 }
 
 static void mount_free(container_mount_t *mount)
 {
     free(mount->source);
     free(mount->target);
     free(mount->options);
     free(mount);
 }
 
 static void port_free(container_port_t *port)
 {
     free(port->source);
     free(port->target);
     free(port);
 }
 
 static container_grouping_t *
 tuple_for_remote(resource_t *remote) 
 {
     resource_t *top = remote;
     container_variant_data_t *container_data = NULL;
 
     if (top == NULL) {
         return NULL;
     }
 
     while (top->parent != NULL) {
         top = top->parent;
     }
 
     get_container_variant_data(container_data, top);
     for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
         if(tuple->remote == remote) {
             return tuple;
         }
     }
     CRM_LOG_ASSERT(FALSE);
     return NULL;
 }
 
 bool
 container_fix_remote_addr(resource_t *rsc) 
 {
     const char *name;
     const char *value;
     const char *attr_list[] = {
         XML_ATTR_TYPE,
         XML_AGENT_ATTR_CLASS,
         XML_AGENT_ATTR_PROVIDER
     };
     const char *value_list[] = {
         "remote",
         PCMK_RESOURCE_CLASS_OCF,
         "pacemaker"
     };
 
     if(rsc == NULL) {
         return FALSE;
     }
 
     name = "addr";
     value = g_hash_table_lookup(rsc->parameters, name);
     if (safe_str_eq(value, "#uname") == FALSE) {
         return FALSE;
     }
 
     for (int lpc = 0; lpc < DIMOF(attr_list); lpc++) {
-        name = attr_list[lpc];
         value = crm_element_value(rsc->xml, attr_list[lpc]);
         if (safe_str_eq(value, value_list[lpc]) == FALSE) {
             return FALSE;
         }
     }
     return TRUE;
 }
 
 const char *
 container_fix_remote_addr_in(resource_t *rsc, xmlNode *xml, const char *field) 
 {
     // REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with Pacemaker Remote inside
 
     pe_node_t *node = NULL;
     container_grouping_t *tuple = NULL;
 
     if(container_fix_remote_addr(rsc) == FALSE) {
         return NULL;
     }
 
     tuple = tuple_for_remote(rsc);
     if(tuple == NULL) {
         return NULL;
     }
 
     node = tuple->docker->allocated_to;
     if (node == NULL) {
         /* If it won't be running anywhere after the
          * transition, go with where it's running now.
          */
         node = pe__current_node(tuple->docker);
     }
 
     if(node == NULL) {
         crm_trace("Cannot fix address for %s", tuple->remote->id);
         return NULL;
     }
 
     crm_trace("Fixing addr for %s on %s", rsc->id, node->details->uname);
     if(xml != NULL && field != NULL) {
         crm_xml_add(xml, field, node->details->uname);
     }
 
     return node->details->uname;
 }
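 
 /* container_unpack() below parses bundle XML shaped roughly like this
  * (hypothetical values; attribute names are the ones handled below, and the
  * <port-mapping>/<storage-mapping> element names follow the bundle schema):
  *   <bundle id="httpd-bundle">
  *     <docker image="pcmk:httpd" replicas="3"/>
  *     <network ip-range-start="192.168.122.131" control-port="3121">
  *       <port-mapping id="http" port="80" internal-port="80"/>
  *     </network>
  *     <storage>
  *       <storage-mapping id="www" source-dir="/srv/www"
  *                        target-dir="/var/www/html" options="rw"/>
  *     </storage>
  *     <primitive class="ocf" provider="heartbeat" type="apache" ... />
  *   </bundle>
  */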
 
 gboolean
 container_unpack(resource_t * rsc, pe_working_set_t * data_set)
 {
     const char *value = NULL;
     xmlNode *xml_obj = NULL;
     xmlNode *xml_resource = NULL;
     container_variant_data_t *container_data = NULL;
 
     CRM_ASSERT(rsc != NULL);
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     container_data = calloc(1, sizeof(container_variant_data_t));
     rsc->variant_opaque = container_data;
     container_data->prefix = strdup(rsc->id);
 
     xml_obj = first_named_child(rsc->xml, "docker");
     if (xml_obj != NULL) {
         container_data->type = PE_CONTAINER_TYPE_DOCKER;
     } else {
         xml_obj = first_named_child(rsc->xml, "rkt");
         if (xml_obj != NULL) {
             container_data->type = PE_CONTAINER_TYPE_RKT;
         } else {
             return FALSE;
         }
     }
 
     value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
     if (value == NULL) {
         // @COMPAT deprecated since 2.0.0
         value = crm_element_value(xml_obj, "masters");
     }
     container_data->promoted_max = crm_parse_int(value, "0");
     if (container_data->promoted_max < 0) {
         pe_err("%s for %s must be nonnegative integer, using 0",
                XML_RSC_ATTR_PROMOTED_MAX, rsc->id);
         container_data->promoted_max = 0;
     }
 
     value = crm_element_value(xml_obj, "replicas");
     if ((value == NULL) && container_data->promoted_max) {
         container_data->replicas = container_data->promoted_max;
     } else {
         container_data->replicas = crm_parse_int(value, "1");
     }
     if (container_data->replicas < 1) {
         pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
         container_data->replicas = 1;
     }
 
     /*
      * Communication between containers on the same host via the
      * floating IPs only works if docker is started with:
      *   --userland-proxy=false --ip-masq=false
      */
     value = crm_element_value(xml_obj, "replicas-per-host");
     container_data->replicas_per_host = crm_parse_int(value, "1");
     if (container_data->replicas_per_host < 1) {
         pe_err("'replicas-per-host' for %s must be positive integer, using 1",
                rsc->id);
         container_data->replicas_per_host = 1;
     }
     if (container_data->replicas_per_host == 1) {
         clear_bit(rsc->flags, pe_rsc_unique);
     }
 
     container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command");
     container_data->docker_run_options = crm_element_value_copy(xml_obj, "options");
     container_data->image = crm_element_value_copy(xml_obj, "image");
     container_data->docker_network = crm_element_value_copy(xml_obj, "network");
 
     xml_obj = first_named_child(rsc->xml, "network");
     if(xml_obj) {
 
         container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
         container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
         container_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
         container_data->control_port = crm_element_value_copy(xml_obj, "control-port");
         value = crm_element_value(xml_obj, "add-host");
         if (check_boolean(value) == FALSE) {
             container_data->add_host = TRUE;
         } else {
             crm_str_to_boolean(value, &container_data->add_host);
         }
 
         for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
              xml_child = __xml_next_element(xml_child)) {
 
             container_port_t *port = calloc(1, sizeof(container_port_t));
             port->source = crm_element_value_copy(xml_child, "port");
 
             if(port->source == NULL) {
                 port->source = crm_element_value_copy(xml_child, "range");
             } else {
                 port->target = crm_element_value_copy(xml_child, "internal-port");
             }
 
             if(port->source != NULL && strlen(port->source) > 0) {
                 if(port->target == NULL) {
                     port->target = strdup(port->source);
                 }
                 container_data->ports = g_list_append(container_data->ports, port);
 
             } else {
                 pe_err("Invalid port directive %s", ID(xml_child));
                 port_free(port);
             }
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "storage");
     for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
          xml_child = __xml_next_element(xml_child)) {
 
         const char *source = crm_element_value(xml_child, "source-dir");
         const char *target = crm_element_value(xml_child, "target-dir");
         const char *options = crm_element_value(xml_child, "options");
         int flags = 0;
 
         if (source == NULL) {
             source = crm_element_value(xml_child, "source-dir-root");
             flags = 1;
         }
 
         if (source && target) {
             mount_add(container_data, source, target, options, flags);
         } else {
             pe_err("Invalid mount directive %s", ID(xml_child));
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "primitive");
     if (xml_obj && valid_network(container_data)) {
         char *value = NULL;
         xmlNode *xml_set = NULL;
 
         xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
 
         /* @COMPAT We no longer use the <master> tag, but we need to keep it as
          * part of the resource name, so that bundles don't restart in a rolling
          * upgrade. (It also avoids needing to change regression tests.)
          */
         crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix,
                       (container_data->promoted_max? "master"
                       : (const char *)xml_resource->name));
 
         xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
         crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name);
 
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
 
         value = crm_itoa(container_data->replicas);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_MAX, value);
         free(value);
 
         value = crm_itoa(container_data->replicas_per_host);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_NODEMAX, value);
         free(value);
 
         crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                 (container_data->replicas_per_host > 1)?
                 XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
 
         if (container_data->promoted_max) {
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
 
             value = crm_itoa(container_data->promoted_max);
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTED_MAX, value);
             free(value);
         }
 
         //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix);
         add_node_copy(xml_resource, xml_obj);
 
     } else if(xml_obj) {
         pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
                rsc->id, ID(xml_obj));
         return FALSE;
     }
 
     if(xml_resource) {
         int lpc = 0;
         GListPtr childIter = NULL;
         resource_t *new_rsc = NULL;
         container_port_t *port = NULL;
 
         int offset = 0, max = 1024;
         char *buffer = NULL;
 
         if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
             pe_err("Failed unpacking resource %s", ID(rsc->xml));
             if (new_rsc != NULL && new_rsc->fns != NULL) {
                 new_rsc->fns->free(new_rsc);
             }
             return FALSE;
         }
 
         container_data->child = new_rsc;
 
         /* Currently, we always map the default authentication key location
          * into the same location inside the container.
          *
          * Ideally, we would respect the host's PCMK_authkey_location, but:
          * - it may be different on different nodes;
          * - the actual connection will do extra checking to make sure the key
          *   file exists and is readable, which we can't do here on the DC
          * - tools such as crm_resource and crm_simulate may not have the same
          *   environment variables as the cluster, causing operation digests to
          *   differ
          *
          * Always using the default location inside the container is fine,
          * because we control the pacemaker_remote environment, and it avoids
          * having to pass another environment variable to the container.
          *
          * @TODO A better solution may be to have only pacemaker_remote use the
          * environment variable, and have the cluster nodes use a new
          * cluster option for key location. This would introduce the limitation
          * of the location being the same on all cluster nodes, but that's
          * reasonable.
          */
         mount_add(container_data, DEFAULT_REMOTE_KEY_LOCATION,
                   DEFAULT_REMOTE_KEY_LOCATION, NULL, 0);
 
         mount_add(container_data, CRM_BUNDLE_DIR, "/var/log", NULL, 1);
 
         port = calloc(1, sizeof(container_port_t));
         if(container_data->control_port) {
             port->source = strdup(container_data->control_port);
         } else {
             /* If we wanted to respect PCMK_remote_port, we could use
              * crm_default_remote_port() here and elsewhere in this file instead
              * of DEFAULT_REMOTE_PORT.
              *
              * However, it gains nothing, since we control both the container
              * environment and the connection resource parameters, and the user
              * can use a different port if desired by setting control-port.
              */
             port->source = crm_itoa(DEFAULT_REMOTE_PORT);
         }
         port->target = strdup(port->source);
         container_data->ports = g_list_append(container_data->ports, port);
 
         buffer = calloc(1, max+1);
         for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) {
             container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
             tuple->child = childIter->data;
             tuple->child->exclusive_discover = TRUE;
             tuple->offset = lpc++;
 
             // Ensure the child's notify gets set based on the underlying primitive's value
             if(is_set(tuple->child->flags, pe_rsc_notify)) {
                 set_bit(container_data->child->flags, pe_rsc_notify);
             }
 
             offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
             container_data->tuples = g_list_append(container_data->tuples, tuple);
             container_data->attribute_target = g_hash_table_lookup(tuple->child->meta, XML_RSC_ATTR_TARGET);
         }
         container_data->docker_host_options = buffer;
         if(container_data->attribute_target) {
             g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
             g_hash_table_replace(container_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
         }
 
     } else {
         // Just a naked container, no pacemaker-remote
         int offset = 0, max = 1024;
         char *buffer = calloc(1, max+1);
 
         for(int lpc = 0; lpc < container_data->replicas; lpc++) {
             container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
             tuple->offset = lpc;
             offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
             container_data->tuples = g_list_append(container_data->tuples, tuple);
         }
 
         container_data->docker_host_options = buffer;
     }
 
     for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
         if (create_container(rsc, container_data, tuple, data_set) == FALSE) {
             pe_err("Failed unpacking resource %s", rsc->id);
             rsc->fns->free(rsc);
             return FALSE;
         }
     }
 
     if(container_data->child) {
         rsc->children = g_list_append(rsc->children, container_data->child);
     }
     return TRUE;
 }
 
 static int
 tuple_rsc_active(resource_t *rsc, gboolean all)
 {
     if (rsc) {
         gboolean child_active = rsc->fns->active(rsc, all);
 
         if (child_active && !all) {
             return TRUE;
         } else if (!child_active && all) {
             return FALSE;
         }
     }
     return -1;
 }
 
 gboolean
 container_active(resource_t * rsc, gboolean all)
 {
     container_variant_data_t *container_data = NULL;
     GListPtr iter = NULL;
 
     get_container_variant_data(container_data, rsc);
     for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
         container_grouping_t *tuple = (container_grouping_t *)(iter->data);
         int rsc_active;
 
         rsc_active = tuple_rsc_active(tuple->ip, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = tuple_rsc_active(tuple->child, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = tuple_rsc_active(tuple->docker, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = tuple_rsc_active(tuple->remote, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
     }
 
     /* If "all" is TRUE, we've already checked that no resources were inactive,
      * so return TRUE; if "all" is FALSE, we didn't find any active resources,
      * so return FALSE.
      */
     return all;
 }
 
 /*!
  * \internal
  * \brief Find the container child corresponding to a given node
  *
  * \param[in] bundle  Top-level bundle resource
  * \param[in] node    Node to search for
  *
  * \return Container child if found, NULL otherwise
  */
 resource_t *
 find_container_child(const resource_t *bundle, const node_t *node)
 {
     container_variant_data_t *container_data = NULL;
     CRM_ASSERT(bundle && node);
 
     get_container_variant_data(container_data, bundle);
     for (GListPtr gIter = container_data->tuples; gIter != NULL;
          gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
 
         CRM_ASSERT(tuple && tuple->node);
         if (tuple->node->details == node->details) {
             return tuple->child;
         }
     }
     return NULL;
 }
 
 static void
 print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
                   void *print_data)
 {
     if (rsc != NULL) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         rsc->fns->print(rsc, pre_text, options, print_data);
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
 }
 
 static const char*
 container_type_as_string(enum container_type t)
 {
     if (t == PE_CONTAINER_TYPE_DOCKER) {
         return PE_CONTAINER_TYPE_DOCKER_S;
     } else if (t == PE_CONTAINER_TYPE_RKT) {
         return PE_CONTAINER_TYPE_RKT_S;
     } else {
         return PE_CONTAINER_TYPE_UNKNOWN_S;
     }
 }
 
 static void
 container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     container_variant_data_t *container_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (pre_text == NULL) {
         pre_text = "";
     }
     child_text = crm_concat(pre_text, "       ", ' ');
 
     get_container_variant_data(container_data, rsc);
 
     status_print("%s<bundle ", pre_text);
     status_print("id=\"%s\" ", rsc->id);
 
     // Always lowercase the container technology type for use as XML value
     status_print("type=\"");
     for (const char *c = container_type_as_string(container_data->type);
          *c; ++c) {
         status_print("%c", tolower(*c));
     }
     status_print("\" ");
 
     status_print("image=\"%s\" ", container_data->image);
     status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
     status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
     status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
     status_print(">\n");
 
     for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
 
         CRM_ASSERT(tuple);
         status_print("%s    <replica id=\"%d\">\n", pre_text, tuple->offset);
         print_rsc_in_list(tuple->ip, child_text, options, print_data);
         print_rsc_in_list(tuple->child, child_text, options, print_data);
         print_rsc_in_list(tuple->docker, child_text, options, print_data);
         print_rsc_in_list(tuple->remote, child_text, options, print_data);
         status_print("%s    </replica>\n", pre_text);
     }
     status_print("%s</bundle>\n", pre_text);
     free(child_text);
 }
 
 static void
 tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data)
 {
     node_t *node = NULL;
     resource_t *rsc = tuple->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = tuple->docker;
     }
 
     if(tuple->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker));
     }
     if(tuple->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr);
     }
 
     node = pe__current_node(tuple->docker);
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
 void
 container_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     container_variant_data_t *container_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (options & pe_print_xml) {
         container_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_container_variant_data(container_data, rsc);
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     status_print("%s%s container%s: %s [%s]%s%s\n",
                  pre_text, container_type_as_string(container_data->type),
                  container_data->replicas>1?" set":"", rsc->id, container_data->image,
                  is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
     if (options & pe_print_html) {
         status_print("<br />\n<ul>\n");
     }
 
 
     for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
 
         CRM_ASSERT(tuple);
         if (options & pe_print_html) {
             status_print("<li>");
         }
 
         if (is_set(options, pe_print_implicit)) {
             child_text = crm_strdup_printf("     %s", pre_text);
             if(g_list_length(container_data->tuples) > 1) {
                 status_print("  %sReplica[%d]\n", pre_text, tuple->offset);
             }
             if (options & pe_print_html) {
                 status_print("<br />\n<ul>\n");
             }
             print_rsc_in_list(tuple->ip, child_text, options, print_data);
             print_rsc_in_list(tuple->docker, child_text, options, print_data);
             print_rsc_in_list(tuple->remote, child_text, options, print_data);
             print_rsc_in_list(tuple->child, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</ul>\n");
             }
         } else {
             child_text = crm_strdup_printf("%s  ", pre_text);
             tuple_print(tuple, child_text, options, print_data);
         }
         free(child_text);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 }
 
 void
 tuple_free(container_grouping_t *tuple) 
 {
     if(tuple == NULL) {
         return;
     }
 
     if(tuple->node) {
         free(tuple->node);
         tuple->node = NULL;
     }
 
     if(tuple->ip) {
         free_xml(tuple->ip->xml);
         tuple->ip->xml = NULL;
         tuple->ip->fns->free(tuple->ip);
         tuple->ip = NULL;
     }
     if(tuple->docker) {
         free_xml(tuple->docker->xml);
         tuple->docker->xml = NULL;
         tuple->docker->fns->free(tuple->docker);
         tuple->docker = NULL;
     }
     if(tuple->remote) {
         free_xml(tuple->remote->xml);
         tuple->remote->xml = NULL;
         tuple->remote->fns->free(tuple->remote);
         tuple->remote = NULL;
     }
     free(tuple->ipaddr);
     free(tuple);
 }
 
 void
 container_free(resource_t * rsc)
 {
     container_variant_data_t *container_data = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     get_container_variant_data(container_data, rsc);
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     free(container_data->prefix);
     free(container_data->image);
     free(container_data->control_port);
     free(container_data->host_network);
     free(container_data->host_netmask);
     free(container_data->ip_range_start);
     free(container_data->docker_network);
     free(container_data->docker_run_options);
     free(container_data->docker_run_command);
     free(container_data->docker_host_options);
 
     g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free);
     g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free);
     g_list_free_full(container_data->ports, (GDestroyNotify)port_free);
     g_list_free(rsc->children);
 
     if(container_data->child) {
         free_xml(container_data->child->xml);
         container_data->child->xml = NULL;
         container_data->child->fns->free(container_data->child);
     }
     common_free(rsc);
 }
 
 enum rsc_role_e
 container_resource_state(const resource_t * rsc, gboolean current)
 {
     enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
     return container_role;
 }
 
 /*!
  * \brief Get the number of configured replicas in a bundle
  *
  * \param[in] rsc  Bundle resource
  *
  * \return Number of configured replicas, or 0 on error
  */
 int
 pe_bundle_replicas(const resource_t *rsc)
 {
     if ((rsc == NULL) || (rsc->variant != pe_container)) {
         return 0;
     } else {
         container_variant_data_t *container_data = NULL;
 
         get_container_variant_data(container_data, rsc);
         return container_data->replicas;
     }
 }
diff --git a/pacemaker.spec.in b/pacemaker.spec.in
index e7e3f6b100..92b5fa4530 100644
--- a/pacemaker.spec.in
+++ b/pacemaker.spec.in
@@ -1,784 +1,790 @@
 # Globals and defines to control package behavior (configure these as desired)
 
 ## User and group to use for nonprivileged services
 %global uname hacluster
 %global gname haclient
 
 ## Where to install Pacemaker documentation
 %global pcmk_docdir %{_docdir}/%{name}
 
 ## GitHub entity that distributes source (for ease of using a fork)
 %global github_owner ClusterLabs
 
 ## Upstream pacemaker version, and its package version (specversion
 ## can be incremented to build packages reliably considered "newer"
 ## than previously built packages with the same pcmkversion)
 %global pcmkversion 2.0.0
 %global specversion 1
 
 ## Upstream commit (or git tag, such as "Pacemaker-" plus the
 ## {pcmkversion} macro for an official release) to use for this package
 %global commit HEAD
 ## Since git v2.11, the extent of abbreviation is autoscaled by default
 ## (it used to be a constant of 7), so we need to convey it for non-tags, too.
 %global commit_abbrev 7
 
 ## Python major version to use (2, 3, or 0 for auto-detect)
 %global python_major 0
 
 
 # Define globals for convenient use later
 
 ## Workaround to use parentheses in other globals
 %global lparen (
 %global rparen )
 
 ## Short version of git commit
 %define shortcommit %(c=%{commit}; case ${c} in
                       Pacemaker-*%{rparen} echo ${c:10};;
                       *%{rparen} echo ${c:0:%{commit_abbrev}};; esac)
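 ## (e.g. commit "Pacemaker-2.0.0" yields shortcommit "2.0.0", while a plain
 ##  commit hash yields its first %{commit_abbrev} characters)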
 
 ## Whether this is a tagged release
 %define tag_release %([ %{commit} != Pacemaker-%{shortcommit} ]; echo $?)
 
 ## Whether this is a release candidate (in case of a tagged release)
 %define pre_release %([ "%{tag_release}" -eq 0 ] || {
                       case "%{shortcommit}" in *-rc[[:digit:]]*%{rparen} false;;
                       esac; }; echo $?)
 
 ## Heuristic used to infer bleeding-edge deployments that are
 ## less likely to have working versions of the documentation tools
 %define bleeding %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?)
 
 ## Whether this platform defaults to using systemd as an init system
 ## (needs to be evaluated prior to BuildRequires being enumerated and
 ## installed as it's intended to conditionally select some of these, and
 ##  for that there are only a few indicators with varying reliability:
 ## - presence of systemd-defined macros (when building in a full-fledged
 ##   environment, which is not the case with ordinary mock-based builds)
 ## - systemd-aware rpm as manifested with the presence of particular
 ##   macro (rpm itself will trivially always be present when building)
 ## - existence of /usr/lib/os-release file, which is something heavily
 ##   propagated by systemd project
 ## - when not good enough, there's always a possibility to check
 ##   particular distro-specific macros (incl. version comparison)
 %define systemd_native (%{?_unitdir:1}%{!?_unitdir:0}%{nil \
   } || %{?__transaction_systemd_inhibit:1}%{!?__transaction_systemd_inhibit:0}%{nil \
   } || %(test -f /usr/lib/os-release; test $? -ne 0; echo $?))
 
 %if 0%{?fedora} > 20 || 0%{?rhel} > 7
 ## Base GnuTLS cipher priorities (presumably only the initial, required keyword)
 ## overridable with "rpmbuild --define 'pcmk_gnutls_priorities PRIORITY-SPEC'"
 %define gnutls_priorities %{?pcmk_gnutls_priorities}%{!?pcmk_gnutls_priorities:@SYSTEM}
 %endif
 
 # Python-related definitions
 
 ## Use Python 3 on certain platforms if major version not specified
 %if %{?python_major} == 0
 %if 0%{?fedora} > 26 || 0%{?rhel} > 7
 %global python_major 3
 %endif
 %endif
 
 ## Turn off auto-compilation of Python files outside site-packages directory,
 ## so that the -libs-devel package is multilib-compliant (no *.py[co] files)
 %global __os_install_post %(echo '%{__os_install_post}' | {
                             sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g'; })
 
 ## Values that differ by Python major version
 %if 0%{?python_major} > 2
 %if 0%{?rhel} > 7
 %global python_path %{__python3}
 %else
 %global python_path /usr/bin/python%{?python3_pkgversion}%{!?python3_pkgversion:3}
 %endif
 %global python_pkg python3
 %global python_min 3.2
 %define py_site %{?python3_sitelib}%{!?python3_sitelib:%(
   python3 -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)}
 %else
 %if 0%{?python_major} > 1
 %global python_path /usr/bin/python%{?python2_pkgversion}%{!?python2_pkgversion:2}
 %global python_pkg python2
 %global python_min 2.7
 %define py_site %{?python2_sitelib}%{!?python2_sitelib:%(
   python2 -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)}
 %else
 %global python_min 2.7
 %global python_pkg python
 %define py_site %{?python_sitelib}%{!?python_sitelib:%(
   python -c 'from distutils.sysconfig import get_python_lib as gpl; print(gpl(1))' 2>/dev/null)}
 %endif
 %endif
 
 
 # Definitions for backward compatibility with older RPM versions
 
 ## Ensure the license macro behaves consistently (older RPM will otherwise
 ## overwrite it once it encounters "License:"). Courtesy Jason Tibbitts:
 ## https://pkgs.fedoraproject.org/cgit/rpms/epel-rpm-macros.git/tree/macros.zzz-epel?h=el6&id=e1adcb77
 %if !%{defined _licensedir}
 %define description %{lua:
     rpm.define("license %doc")
     print("%description")
 }
 %endif
 
 
 # Define conditionals so that "rpmbuild --with <feature>" and
 # "rpmbuild --without <feature>" can enable and disable specific features
 
 ## Add option to enable support for stonith/external fencing agents
 %bcond_with stonithd
 
 ## Add option to create binaries suitable for use with profiling tools
 %bcond_with profiling
 
 ## Add option to create binaries with coverage analysis
 %bcond_with coverage
 
 ## Add option to skip generating documentation
 ## (the build tools aren't available everywhere)
 %bcond_without doc
 
 ## Add option to prefix package version with "0."
 ## (so later "official" packages will be considered updates)
 %bcond_with pre_release
 
 ## Add option to ship Upstart job files
 %bcond_with upstart_job
 
 ## Add option to turn off hardening of libraries and daemon executables
 %bcond_without hardening
 
 ## Add option to disable links for legacy daemon names
 %bcond_without legacy_links
 
 
 # Keep sane profiling data if requested
 %if %{with profiling}
 
 ## Disable -debuginfo package and stripping binaries/libraries
 %define debug_package %{nil}
 
 %endif
 
 
 # Define the release version
 # (for tagged releases, ignore the externally enforced pre-release flag,
 # since only -rc tags, captured by the second condition, imply a pre-release)
 %if (!%{tag_release} && %{with pre_release}) || 0%{pre_release}
 %if 0%{pre_release}
 %define pcmk_release 0.%{specversion}.%(s=%{shortcommit}; echo ${s: -3})
 %else
 %define pcmk_release 0.%{specversion}.%{shortcommit}.git
 %endif
 %else
 %if 0%{tag_release}
 %define pcmk_release %{specversion}
 %else
 %define pcmk_release %{specversion}.%{shortcommit}.git
 %endif
 %endif
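 # (e.g. a tagged 2.0.0 release gives Release "1%{?dist}", an untagged build
 #  gives "1.<shortcommit>.git%{?dist}", and pre-releases gain a "0." prefix)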
 
 Name:          pacemaker
 Summary:       Scalable High-Availability cluster resource manager
 Version:       %{pcmkversion}
 Release:       %{pcmk_release}%{?dist}
 %if %{defined _unitdir}
 License:       GPLv2+ and LGPLv2+
 %else
 # initscript is Revised BSD
 License:       GPLv2+ and LGPLv2+ and BSD
 %endif
 Url:           http://www.clusterlabs.org
 Group:         System Environment/Daemons
 
 # Hint: use "spectool -s 0 pacemaker.spec" (rpmdevtools) to check the final URL:
 # https://github.com/ClusterLabs/pacemaker/archive/e91769e5a39f5cb2f7b097d3c612368f0530535e/pacemaker-e91769e.tar.gz
 Source0:       https://github.com/%{github_owner}/%{name}/archive/%{commit}/%{name}-%{shortcommit}.tar.gz
 Requires:      resource-agents
 Requires:      %{name}-libs = %{version}-%{release}
 Requires:      %{name}-cluster-libs = %{version}-%{release}
 Requires:      %{name}-cli = %{version}-%{release}
 %if !%{defined _unitdir}
 Requires:      procps-ng
 Requires:      psmisc
 %endif
 %{?systemd_requires}
 
 # Pacemaker requires a minimum Python functionality
 Requires:      %{python_pkg} >= %{python_min}
 BuildRequires: %{python_pkg}-devel >= %{python_min}
 
 # Pacemaker requires a minimum libqb functionality
 Requires:      libqb >= 0.13.0
 BuildRequires: libqb-devel >= 0.13.0
 
 # Basics required for the build (even if usually satisfied through other BRs)
 BuildRequires: coreutils findutils grep sed
 
 # Required for core functionality
 BuildRequires: automake autoconf gcc libtool pkgconfig libtool-ltdl-devel
 BuildRequires: pkgconfig(glib-2.0) >= 2.16
 BuildRequires: libxml2-devel libxslt-devel libuuid-devel
 BuildRequires: bzip2-devel
 
 # Enables optional functionality
 BuildRequires: ncurses-devel docbook-style-xsl
 BuildRequires: help2man gnutls-devel pam-devel pkgconfig(dbus-1)
 
 %if %{systemd_native}
 BuildRequires: pkgconfig(systemd)
 %endif
 
 Requires:      corosync >= 2.0.0
 BuildRequires: corosynclib-devel >= 2.0.0
 
 %if %{with stonithd}
 BuildRequires: cluster-glue-libs-devel
 %endif
 
 ## (note: avoiding these has no effect when building through a non-customized mock)
 %if !%{bleeding}
 %if %{with doc}
 BuildRequires: inkscape asciidoc publican
 %endif
 %endif
 
 %description
 Pacemaker is an advanced, scalable High-Availability cluster resource
 manager.
 
 It supports clusters of more than 16 nodes with significant capabilities
 for managing resources and dependencies.
 
 It will run scripts at initialization, when machines go up or down,
 and when related resources fail, and it can be configured to periodically
 check resource health.
 
 Available rpmbuild rebuild options:
   --with(out) : coverage doc stonithd hardening pre_release profiling
                 upstart_job legacy_links
 
 %package cli
 License:       GPLv2+ and LGPLv2+
 Summary:       Command line tools for controlling Pacemaker clusters
 Group:         System Environment/Daemons
 Requires:      %{name}-libs = %{version}-%{release}
 Requires:      perl-TimeDate
 Requires:      procps-ng
 Requires:      psmisc
 Requires(post): coreutils
 
 %description cli
 Pacemaker is an advanced, scalable High-Availability cluster resource
 manager.
 
 The %{name}-cli package contains command line tools that can be used
 to query and control the cluster from machines that may, or may not,
 be part of the cluster.
 
 %package libs
 License:       GPLv2+ and LGPLv2+
 Summary:       Core Pacemaker libraries
 Group:         System Environment/Daemons
 Requires(pre): shadow-utils
 
 %description libs
 Pacemaker is an advanced, scalable High-Availability cluster resource
 manager.
 
 The %{name}-libs package contains shared libraries needed for cluster
 nodes and those just running the CLI tools.
 
 %package cluster-libs
 License:       GPLv2+ and LGPLv2+
 Summary:       Cluster Libraries used by Pacemaker
 Group:         System Environment/Daemons
 Requires:      %{name}-libs = %{version}-%{release}
 
 %description cluster-libs
 Pacemaker is an advanced, scalable High-Availability cluster resource
 manager.
 
 The %{name}-cluster-libs package contains cluster-aware shared
 libraries needed for nodes that will form part of the cluster.
 
 %package remote
 %if %{defined _unitdir}
 License:       GPLv2+ and LGPLv2+
 %else
 # initscript is Revised BSD
 License:       GPLv2+ and LGPLv2+ and BSD
 %endif
 Summary:       Pacemaker remote daemon for non-cluster nodes
 Group:         System Environment/Daemons
 Requires:      %{name}-libs = %{version}-%{release}
 Requires:      %{name}-cli = %{version}-%{release}
 Requires:      resource-agents
 %if !%{defined _unitdir}
 Requires:      procps-ng
 %endif
 # -remote can be fully independent of systemd
 %{?systemd_ordering}%{!?systemd_ordering:%{?systemd_requires}}
 
 %description remote
 Pacemaker is an advanced, scalable High-Availability cluster resource
 manager.
 
 The %{name}-remote package contains the Pacemaker Remote daemon,
 which is capable of extending Pacemaker functionality to remote
 nodes not running the full corosync/cluster stack.
 
 %package libs-devel
 License:       GPLv2+ and LGPLv2+
 Summary:       Pacemaker development package
 Group:         Development/Libraries
 Requires:      %{name}-libs%{?_isa} = %{version}-%{release}
 Requires:      %{name}-cluster-libs%{?_isa} = %{version}-%{release}
 Requires:      libuuid-devel%{?_isa} libtool-ltdl-devel%{?_isa}
 Requires:      libxml2-devel%{?_isa} libxslt-devel%{?_isa}
 Requires:      bzip2-devel%{?_isa} glib2-devel%{?_isa}
 Requires:      libqb-devel%{?_isa}
 Requires:      corosynclib-devel%{?_isa} >= 2.0.0
 
 %description libs-devel
 Pacemaker is an advanced, scalable High-Availability cluster resource
 manager.
 
 The %{name}-libs-devel package contains headers and shared libraries
 for developing tools for Pacemaker.
 
 %package       cts
 License:       GPLv2+ and LGPLv2+
 Summary:       Test framework for cluster-related technologies like Pacemaker
 Group:         System Environment/Daemons
 Requires:      %{python_pkg} >= %{python_min}
 Requires:      %{name}-libs = %{version}-%{release}
 Requires:      procps-ng
 Requires:      psmisc
 BuildArch:     noarch
 
 # systemd python bindings are a separate package in some distros
 %if %{defined systemd_requires}
 
 %if 0%{?fedora} > 22 || 0%{?rhel} > 7
 Requires:      %{python_pkg}-systemd
 %else
 %if 0%{?fedora} > 20 || 0%{?rhel} > 6
 Requires:      systemd-python
 %endif
 %endif
 
 %endif
 
 %description   cts
 Test framework for cluster-related technologies like Pacemaker
 
 %package       doc
 License:       CC-BY-SA-4.0
 Summary:       Documentation for Pacemaker
 Group:         Documentation
 BuildArch:     noarch
 
 %description   doc
 Documentation for Pacemaker.
 
 Pacemaker is an advanced, scalable High-Availability cluster resource
 manager.
 
 %prep
 %setup -q -n %{name}-%{commit}
 
 %build
 
 # Early versions of autotools (e.g. RHEL <= 5) do not support --docdir
 export docdir=%{pcmk_docdir}
 
 export systemdunitdir=%{?_unitdir}%{!?_unitdir:no}
 
 %if %{with hardening}
 # Prefer distro-provided hardening flags if they are defined through the
 # _hardening_{c,ld}flags macros; otherwise the configure script will use
 # its own defaults. If such hardening is completely undesired, run rpmbuild
 # with "--without hardening" (or "--define '_without_hardening 1'")
 export CFLAGS_HARDENED_EXE="%{?_hardening_cflags}"
 export CFLAGS_HARDENED_LIB="%{?_hardening_cflags}"
 export LDFLAGS_HARDENED_EXE="%{?_hardening_ldflags}"
 export LDFLAGS_HARDENED_LIB="%{?_hardening_ldflags}"
 %endif
 
 ./autogen.sh
 
 %{configure}                                                                    \
         %{?python_path:        PYTHON=%{python_path}}                           \
         %{!?with_hardening:    --disable-hardening}                             \
         %{!?with_legacy_links: --disable-legacy-links}                          \
         %{?with_profiling:     --with-profiling}                                \
         %{?with_coverage:      --with-coverage}                                 \
         %{!?with_doc:          --with-brand=}                                   \
         %{?gnutls_priorities:  --with-gnutls-priorities="%{gnutls_priorities}"} \
         --with-initdir=%{_initrddir}                                            \
         --localstatedir=%{_var}                                                 \
         --with-version=%{version}-%{release}
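 # For illustration: with the bconds defined earlier, passing "--without doc"
 # leaves with_doc undefined, so the "!?with_doc" conditional above expands to
 # "--with-brand=" and no documentation brand is configured; when doc is
 # enabled the conditional expands to nothing and configure's default is used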
 
 %if 0%{?suse_version} >= 1200
 # Fedora handles rpath removal automagically
 sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|g' libtool
 sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|g' libtool
 %endif
 
 make %{_smp_mflags} V=1 all
 
 %check
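 # The .CHECKED marker records the real test result, because the pipeline's
 # exit status below is that of sed and would otherwise mask a failure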
 { cts/cts-scheduler --run one-or-more-unrunnable-instances \
   && cts/cts-cli \
   && touch .CHECKED
 } 2>&1 | sed 's/[fF]ail/faiil/g'  # prevent false positives in rpmlint
 [ -f .CHECKED ] && rm -f -- .CHECKED
 exit $?  # TODO: remove when rpm<4.14 compatibility is irrelevant
 
 %install
 make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} V=1 install
 
 mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig
 install -m 644 daemons/pacemakerd/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker
 install -m 644 tools/crm_mon.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/crm_mon
 
 %if %{with upstart_job}
 mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/init
 install -m 644 pacemakerd/pacemaker.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.conf
 install -m 644 pacemakerd/pacemaker.combined.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/pacemaker.combined.conf
 install -m 644 tools/crm_mon.upstart ${RPM_BUILD_ROOT}%{_sysconfdir}/init/crm_mon.conf
 %endif
 
 %if %{defined _unitdir}
 mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name}
 %endif
 
 # Don't package static libs
 find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f
 find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f
 
+# For now, don't package the servicelog-related binaries, which are built only
+# on ppc64le when certain dependencies are installed. If they get more exercise
+# from advanced users, we can reconsider.
+rm -f %{buildroot}/%{_sbindir}/notifyServicelogEvent
+rm -f %{buildroot}/%{_sbindir}/ipmiservicelogd
+
 # Don't ship init scripts for systemd based platforms
 %if %{defined _unitdir}
 rm -f %{buildroot}/%{_initrddir}/pacemaker
 rm -f %{buildroot}/%{_initrddir}/pacemaker_remote
 %endif
 
 %if %{with coverage}
 GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov
 mkdir -p $GCOV_BASE
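 # preserve the .gcno coverage graph files emitted at compile time so that
 # gcov can later be run against the packaged tree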
 find . -name '*.gcno' -type f | while read F ; do
         D=`dirname $F`
         mkdir -p ${GCOV_BASE}/$D
         cp $F ${GCOV_BASE}/$D
 done
 %endif
 
 %post
 %if %{defined _unitdir}
 %systemd_post pacemaker.service
 %else
 /sbin/chkconfig --add pacemaker || :
 %endif
 
 %preun
 %if %{defined _unitdir}
 %systemd_preun pacemaker.service
 %else
 /sbin/service pacemaker stop >/dev/null 2>&1 || :
 if [ "$1" -eq 0 ]; then
     # Package removal, not upgrade
     /sbin/chkconfig --del pacemaker || :
 fi
 %endif
 
 %postun
 %if %{defined _unitdir}
 %systemd_postun_with_restart pacemaker.service
 %endif
 
 %pre remote
 %if %{defined _unitdir}
 # Stop the service before anything is touched, and remember to restart
 # it as one of the last actions (compared to using systemd_postun_with_restart,
 # this avoids suicide when sbd is in use)
 systemctl --quiet is-active pacemaker_remote
 if [ $? -eq 0 ] ; then
     mkdir -p %{_localstatedir}/lib/rpm-state/%{name}
     touch %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
     systemctl stop pacemaker_remote >/dev/null 2>&1
 else
     rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
 fi
 %endif
 
 %post remote
 %if %{defined _unitdir}
 %systemd_post pacemaker_remote.service
 %else
 /sbin/chkconfig --add pacemaker_remote || :
 %endif
 
 %preun remote
 %if %{defined _unitdir}
 %systemd_preun pacemaker_remote.service
 %else
 /sbin/service pacemaker_remote stop >/dev/null 2>&1 || :
 if [ "$1" -eq 0 ]; then
     # Package removal, not upgrade
     /sbin/chkconfig --del pacemaker_remote || :
 fi
 %endif
 
 %postun remote
 %if %{defined _unitdir}
 # This next line is a no-op, because we stopped the service earlier, but
 # we leave it here because it allows us to revert to the standard behavior
 # in the future if desired
 %systemd_postun_with_restart pacemaker_remote.service
 # Explicitly take care of removing the flag-file(s) upon final removal
 if [ "$1" -eq 0 ] ; then
     rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
 fi
 %endif
 
 %posttrans remote
 %if %{defined _unitdir}
 if [ -e %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote ] ; then
     systemctl start pacemaker_remote >/dev/null 2>&1
     rm -f %{_localstatedir}/lib/rpm-state/%{name}/restart_pacemaker_remote
 fi
 %endif
 
 %post cli
 %if %{defined _unitdir}
 %systemd_post crm_mon.service
 %endif
 if [ "$1" -eq 2 ]; then
     # Package upgrade, not initial install:
     # Move any pre-2.0 logs to new location to ensure they get rotated
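     # (GNU mv: -f forces, -b backs up an existing destination, and -S sets
     #  the backup suffix to .rpmsave; the second mv is a plain fallback)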
     { mv -fbS.rpmsave %{_var}/log/pacemaker.log* %{_var}/log/pacemaker \
       || mv -f %{_var}/log/pacemaker.log* %{_var}/log/pacemaker
     } >/dev/null 2>/dev/null || :
 fi
 
 %preun cli
 %if %{defined _unitdir}
 %systemd_preun crm_mon.service
 %endif
 
 %postun cli
 %if %{defined _unitdir}
 %systemd_postun_with_restart crm_mon.service
 %endif
 
 %pre libs
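 # Create the cluster group and user (with fixed GID/UID 189) only if they do
 # not already exist, and never fail the installation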
 getent group %{gname} >/dev/null || groupadd -r %{gname} -g 189
 getent passwd %{uname} >/dev/null || useradd -r -g %{gname} -u 189 -s /sbin/nologin -c "cluster user" %{uname}
 exit 0
 
 %if %{defined ldconfig_scriptlets}
 %ldconfig_scriptlets libs
 %ldconfig_scriptlets cluster-libs
 %else
 %post libs -p /sbin/ldconfig
 %postun libs -p /sbin/ldconfig
 
 %post cluster-libs -p /sbin/ldconfig
 %postun cluster-libs -p /sbin/ldconfig
 %endif
 
 %files
 ###########################################################
 %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
 %{_sbindir}/pacemakerd
 
 %if %{defined _unitdir}
 %{_unitdir}/pacemaker.service
 %else
 %{_initrddir}/pacemaker
 %endif
 
 %exclude %{_libexecdir}/pacemaker/cts-log-watcher
 %exclude %{_libexecdir}/pacemaker/cts-support
 %exclude %{_sbindir}/pacemaker-remoted
 %if %{with legacy_links}
 %exclude %{_sbindir}/pacemaker_remoted
 %endif
 %{_libexecdir}/pacemaker/*
 
 %{_sbindir}/crm_attribute
 %{_sbindir}/crm_master
 %{_sbindir}/fence_legacy
 %{_sbindir}/stonith_admin
 
 %doc %{_mandir}/man7/pacemaker-controld.*
 %doc %{_mandir}/man7/pacemaker-schedulerd.*
 %doc %{_mandir}/man7/pacemaker-fenced.*
 %doc %{_mandir}/man7/ocf_pacemaker_controld.*
 %doc %{_mandir}/man7/ocf_pacemaker_o2cb.*
 %doc %{_mandir}/man7/ocf_pacemaker_remote.*
 %doc %{_mandir}/man8/crm_attribute.*
 %doc %{_mandir}/man8/crm_master.*
 %doc %{_mandir}/man8/fence_legacy.*
 %doc %{_mandir}/man8/pacemakerd.*
 %doc %{_mandir}/man8/stonith_admin.*
 
 %doc %{_datadir}/pacemaker/alerts
 
 %license licenses/GPLv2
 %doc COPYING
 %doc ChangeLog
 
 %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cib
 %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/pengine
 /usr/lib/ocf/resource.d/pacemaker/controld
 /usr/lib/ocf/resource.d/pacemaker/o2cb
 /usr/lib/ocf/resource.d/pacemaker/remote
 
 %if %{with upstart_job}
 %config(noreplace) %{_sysconfdir}/init/pacemaker.conf
 %config(noreplace) %{_sysconfdir}/init/pacemaker.combined.conf
 %endif
 
 %files cli
 %dir %attr (750, root, %{gname}) %{_sysconfdir}/pacemaker
 %config(noreplace) %{_sysconfdir}/logrotate.d/pacemaker
 %config(noreplace) %{_sysconfdir}/sysconfig/crm_mon
 
 %if %{defined _unitdir}
 %{_unitdir}/crm_mon.service
 %endif
 
 %if %{with upstart_job}
 %config(noreplace) %{_sysconfdir}/init/crm_mon.conf
 %endif
 
 %{_sbindir}/attrd_updater
 %{_sbindir}/cibadmin
 %{_sbindir}/crm_diff
 %{_sbindir}/crm_error
 %{_sbindir}/crm_failcount
 %{_sbindir}/crm_mon
 %{_sbindir}/crm_node
 %{_sbindir}/crm_resource
 %{_sbindir}/crm_standby
 %{_sbindir}/crm_verify
 %{_sbindir}/crmadmin
 %{_sbindir}/iso8601
 %{_sbindir}/crm_shadow
 %{_sbindir}/crm_simulate
 %{_sbindir}/crm_report
 %{_sbindir}/crm_ticket
 %exclude %{_datadir}/pacemaker/alerts
 %exclude %{_datadir}/pacemaker/tests
 %{_datadir}/pacemaker
 %{_datadir}/snmp/mibs/PCMK-MIB.txt
 
 %exclude /usr/lib/ocf/resource.d/pacemaker/controld
 %exclude /usr/lib/ocf/resource.d/pacemaker/o2cb
 %exclude /usr/lib/ocf/resource.d/pacemaker/remote
 
 %dir /usr/lib/ocf
 %dir /usr/lib/ocf/resource.d
 /usr/lib/ocf/resource.d/pacemaker
 
 %doc %{_mandir}/man7/*
 %exclude %{_mandir}/man7/pacemaker-controld.*
 %exclude %{_mandir}/man7/pacemaker-schedulerd.*
 %exclude %{_mandir}/man7/pacemaker-fenced.*
 %exclude %{_mandir}/man7/ocf_pacemaker_controld.*
 %exclude %{_mandir}/man7/ocf_pacemaker_o2cb.*
 %exclude %{_mandir}/man7/ocf_pacemaker_remote.*
 %doc %{_mandir}/man8/*
 %exclude %{_mandir}/man8/crm_attribute.*
 %exclude %{_mandir}/man8/crm_master.*
 %exclude %{_mandir}/man8/fence_legacy.*
 %exclude %{_mandir}/man8/pacemakerd.*
 %exclude %{_mandir}/man8/pacemaker-remoted.*
 %exclude %{_mandir}/man8/stonith_admin.*
 
 %license licenses/GPLv2
 %doc COPYING
 %doc ChangeLog
 
 %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker
 %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/blackbox
 %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pacemaker/cores
 %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker
 %dir %attr (770, %{uname}, %{gname}) %{_var}/log/pacemaker/bundles
 
 %files libs
 %{_libdir}/libcib.so.*
 %{_libdir}/liblrmd.so.*
 %{_libdir}/libcrmservice.so.*
 %{_libdir}/libcrmcommon.so.*
 %{_libdir}/libpe_status.so.*
 %{_libdir}/libpe_rules.so.*
 %{_libdir}/libpengine.so.*
 %{_libdir}/libstonithd.so.*
 %{_libdir}/libtransitioner.so.*
 %license licenses/LGPLv2.1
 %doc COPYING
 %doc ChangeLog
 
 %files cluster-libs
 %{_libdir}/libcrmcluster.so.*
 %license licenses/LGPLv2.1
 %doc COPYING
 %doc ChangeLog
 
 %files remote
 %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker
 %if %{defined _unitdir}
 # state directory is shared between the subpackages;
 # let rpm take care of removing it once it is no longer
 # referenced and is empty
 %ghost %dir %{_localstatedir}/lib/rpm-state/%{name}
 %{_unitdir}/pacemaker_remote.service
 %else
 %{_initrddir}/pacemaker_remote
 %endif
 
 %{_sbindir}/pacemaker-remoted
 %if %{with legacy_links}
 %{_sbindir}/pacemaker_remoted
 %endif
 %{_mandir}/man8/pacemaker-remoted.*
 %license licenses/GPLv2
 %doc COPYING
 %doc ChangeLog
 
 %files doc
 %doc %{pcmk_docdir}
 %license licenses/CC-BY-SA-4.0
 
 %files cts
 %{py_site}/cts
 %{_datadir}/pacemaker/tests
 
 %{_libexecdir}/pacemaker/cts-log-watcher
 %{_libexecdir}/pacemaker/cts-support
 
 %license licenses/GPLv2
 %doc COPYING
 %doc ChangeLog
 
 %files libs-devel
 %{_includedir}/pacemaker
 %{_libdir}/*.so
 %if %{with coverage}
 %{_var}/lib/pacemaker/gcov
 %endif
 %{_libdir}/pkgconfig/*.pc
 %license licenses/LGPLv2.1
 %doc COPYING
 %doc ChangeLog
 
 %changelog