diff --git a/Makefile.am b/Makefile.am
index 93d6a81f26..7414aaa7e4 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,51 +1,51 @@
#
# Pacemaker code
#
# Copyright (C) 2004 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
EXTRA_DIST = autogen.sh ConfigureMe README.in libltdl.tar
MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure DRF/config-h.in \
DRF/stamp-h.in libtool.m4 ltdl.m4 libltdl.tar
-SUBDIRS = debian $(LIBLTDL_DIR) replace include lib pengine cib crmd fencing tools shell xml cts extra doc
+SUBDIRS = $(LIBLTDL_DIR) replace include lib pengine cib crmd fencing tools shell xml cts extra doc
doc_DATA = AUTHORS COPYING COPYING.LIB
AUTOMAKE_OPTIONS = foreign
##ACLOCAL = aclocal -I $(auxdir)
install-exec-local:
$(INSTALL) -d $(DESTDIR)/$(LCRSODIR)
$(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CONFIG_DIR)
$(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_STATE_DIR)
-chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_CONFIG_DIR)
-chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_STATE_DIR)
if BUILD_AIS_SUPPORT
rm -f $(DESTDIR)$(LCRSODIR)/pacemaker.lcrso $(DESTDIR)$(LCRSODIR)/service_crm.so
cp $(DESTDIR)$(libdir)/service_crm.so $(DESTDIR)$(LCRSODIR)/pacemaker.lcrso
endif
# Use chown because the user/group may not exist
dist-clean-local:
rm -f autoconf automake autoheader $(TARFILE)
maintainer-clean-local:
rm -f libltdl.tar
.PHONY: rpm pkg handy handy-copy
diff --git a/configure.ac b/configure.ac
index 336e6ae59d..dc067d98b8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,1482 +1,1479 @@
dnl
dnl autoconf for Pacemaker
dnl
dnl License: GNU General Public License (GPL)
dnl ===============================================
dnl Bootstrap
dnl ===============================================
AC_PREREQ(2.53)
dnl Suggested structure:
dnl information on the package
dnl checks for programs
dnl checks for libraries
dnl checks for header files
dnl checks for types
dnl checks for structures
dnl checks for compiler characteristics
dnl checks for library functions
dnl checks for system services
AC_INIT(pacemaker, 1.1.0, pacemaker@oss.clusterlabs.org)
CRM_DTD_VERSION="1.0"
PKG_FEATURES=""
HB_PKG=heartbeat
AC_CONFIG_AUX_DIR(.)
AC_CANONICAL_HOST
dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below)
dnl
dnl Internal header: include/config.h
dnl - Contains ALL defines
dnl - include/config.h.in is generated automatically by autoheader
dnl - NOT to be included in any header files except lha_internal.h
dnl (which is also not to be included in any other header files)
dnl
dnl External header: include/crm_config.h
dnl - Contains a subset of defines checked here
dnl - Manually edit include/crm_config.h.in to have configure include
dnl new defines
dnl - Should not include HAVE_* defines
dnl - Safe to include anywhere
AM_CONFIG_HEADER(include/config.h include/crm_config.h)
ALL_LINGUAS="en fr"
AC_ARG_WITH(version,
[ --with-version=version Override package version (if you're a packager needing to pretend) ],
[ PACKAGE_VERSION="$withval" ])
AC_ARG_WITH(pkg-name,
[ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ],
[ PACKAGE_NAME="$withval" ])
AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION)
AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version)
dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from
dnl normal compilation. When a failure occurs, it will then display the full
dnl command line
dnl Wrap in m4_ifdef to avoid breaking on older platforms
m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES])
CC_IN_CONFIGURE=yes
export CC_IN_CONFIGURE
LDD=ldd
dnl ========================================================================
dnl Compiler characteristics
dnl ========================================================================
AC_PROG_CC dnl Can force other with environment variable "CC".
AM_PROG_CC_C_O
AC_PROG_CC_STDC
AC_LIBTOOL_DLOPEN dnl Enable dlopen support...
AC_LIBLTDL_CONVENIENCE dnl make libltdl a convenience lib
AC_PROG_LIBTOOL
AC_C_STRINGIZE
AC_TYPE_SIZE_T
AC_CHECK_SIZEOF(char)
AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(long long)
AC_STRUCT_TIMEZONE
dnl ===============================================
dnl Helpers
dnl ===============================================
cc_supports_flag() {
local CFLAGS="$@"
AC_MSG_CHECKING(whether $CC supports "$@")
AC_COMPILE_IFELSE([int main(){return 0;}] ,[RC=0; AC_MSG_RESULT(yes)],[RC=1; AC_MSG_RESULT(no)])
return $RC
}
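dnl Usage sketch: the helper's exit status drives plain shell conditionals
dnl later in this script, e.g. if cc_supports_flag -Werror; then ... fi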
extract_header_define() {
AC_MSG_CHECKING(for $2 in $1)
Cfile=/tmp/extract_define.$2.${$}
printf "#include <stdio.h>\n" > ${Cfile}.c
printf "#include <%s>\n" $1 >> ${Cfile}.c
printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c
$CC $CFLAGS ${Cfile}.c -o ${Cfile}
value=`${Cfile}`
AC_MSG_RESULT($value)
printf $value
rm -f ${Cfile}.c ${Cfile}
}
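dnl Usage sketch: the compiled test program prints the define's value on
dnl stdout, so callers capture it with backticks, e.g.
dnl CRM_DAEMON_USER=`extract_header_define $GLUE_HEADER HA_CCMUSER`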
dnl ===============================================
dnl Configure Options
dnl ===============================================
dnl Some systems, like Solaris, require a custom package name
AC_ARG_WITH(pkgname,
[ --with-pkgname=name name for pkg (typically for Solaris) ],
[ PKGNAME="$withval" ],
[ PKGNAME="LXHAhb" ],
)
AC_SUBST(PKGNAME)
AC_ARG_ENABLE([ansi],
[ --enable-ansi force GCC to compile to the ANSI/ISO standard for older compilers.
[default=yes]])
AC_ARG_ENABLE([fatal-warnings],
[ --enable-fatal-warnings very pedantic and fatal warnings for gcc
[default=yes]])
AC_ARG_ENABLE([pretty],
[ --enable-pretty
Pretty-print compiler output unless there is an error
[default=no]])
AC_ARG_ENABLE([quiet],
[ --enable-quiet
Suppress make output unless there is an error
[default=no]])
AC_ARG_ENABLE([thread-safe],
[ --enable-thread-safe Enable some client libraries to be thread safe.
[default=no]])
AC_ARG_ENABLE([bundled-ltdl],
[ --enable-bundled-ltdl Configure, build and install the standalone ltdl library bundled with ${PACKAGE} [default=no]])
LTDL_LIBS=""
AC_ARG_WITH(ais,
[ --with-ais
Support the OpenAIS messaging and membership layer ],
[ SUPPORT_AIS=$withval ],
[ SUPPORT_AIS=try ],
)
AC_ARG_WITH(heartbeat,
[ --with-heartbeat
Support the Heartbeat messaging and membership layer ],
[ SUPPORT_HEARTBEAT=$withval ],
[ SUPPORT_HEARTBEAT=try ],
)
AC_ARG_WITH(snmp,
[ --with-snmp
Support the SNMP protocol ],
[ SUPPORT_SNMP=$withval ],
[ SUPPORT_SNMP=try ],
)
AC_ARG_WITH(esmtp,
[ --with-esmtp
Support sending mail notifications with the esmtp library ],
[ SUPPORT_ESMTP=$withval ],
[ SUPPORT_ESMTP=try ],
)
AISPREFIX=""
AC_ARG_WITH(ais-prefix,
[ --with-ais-prefix=DIR Prefix used when OpenAIS was installed [$prefix]],
[ AISPREFIX=$withval ],
[ AISPREFIX=$prefix ])
LCRSODIR=""
AC_ARG_WITH(lcrso-dir,
[ --with-lcrso-dir=DIR OpenAIS lcrso files. ],
[ LCRSODIR="$withval" ])
INITDIR=""
AC_ARG_WITH(initdir,
[ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]],
[ INITDIR="$withval" ])
dnl ===============================================
dnl General Processing
dnl ===============================================
AC_SUBST(HB_PKG)
INIT_EXT=""
echo Our Host OS: $host_os/$host
AC_MSG_NOTICE(Sanitizing prefix: ${prefix})
case $prefix in
NONE) prefix=/usr;;
esac
AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix})
case $exec_prefix in
dnl For consistency with Heartbeat, map NONE->$prefix
NONE) exec_prefix=$prefix;;
prefix) exec_prefix=$prefix;;
esac
AC_MSG_NOTICE(Sanitizing ais_prefix: ${AISPREFIX})
case $AISPREFIX in
dnl For consistency with Heartbeat, map NONE->$prefix
NONE) AISPREFIX=$prefix;;
prefix) AISPREFIX=$prefix;;
esac
AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR})
case $INITDIR in
prefix) INITDIR=$prefix;;
"")
AC_MSG_CHECKING(which init (rc) directory to use)
for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \
/usr/local/etc/rc.d /etc/rc.d
do
if
test -d $initdir
then
INITDIR=$initdir
break
fi
done
AC_MSG_RESULT($INITDIR);;
esac
AC_SUBST(INITDIR)
AC_MSG_NOTICE(Sanitizing libdir: ${libdir})
case $libdir in
dnl For consistency with Heartbeat, map NONE->$prefix
*prefix*|NONE)
AC_MSG_CHECKING(which lib directory to use)
for aDir in lib64 lib
do
trydir="${exec_prefix}/${aDir}"
if
test -d ${trydir}
then
libdir=${trydir}
break
fi
done
AC_MSG_RESULT($libdir);
;;
esac
dnl Expand autoconf variables so that we don't end up with '${prefix}'
dnl in #defines and python scripts
dnl NOTE: Autoconf deliberately leaves them unexpanded to allow
dnl make exec_prefix=/foo install
dnl No longer being able to do this seems like no great loss to me...
eval prefix="`eval echo ${prefix}`"
eval exec_prefix="`eval echo ${exec_prefix}`"
eval bindir="`eval echo ${bindir}`"
eval sbindir="`eval echo ${sbindir}`"
eval libexecdir="`eval echo ${libexecdir}`"
eval datadir="`eval echo ${datadir}`"
eval sysconfdir="`eval echo ${sysconfdir}`"
eval sharedstatedir="`eval echo ${sharedstatedir}`"
eval localstatedir="`eval echo ${localstatedir}`"
eval libdir="`eval echo ${libdir}`"
eval includedir="`eval echo ${includedir}`"
eval oldincludedir="`eval echo ${oldincludedir}`"
eval infodir="`eval echo ${infodir}`"
eval mandir="`eval echo ${mandir}`"
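dnl e.g. with prefix=/usr, a default datadir of '${prefix}/share' collapses
dnl to '/usr/share' after the double eval above (illustrative values)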
dnl Home-grown variables
eval INITDIR="${INITDIR}"
eval docdir="`eval echo ${docdir}`"
if test x"${docdir}" = x""; then
docdir=${datadir}/doc/${PACKAGE}-${VERSION}
#docdir=${datadir}/doc/packages/${PACKAGE}
fi
AC_SUBST(docdir)
for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
sharedstatedir localstatedir libdir includedir oldincludedir infodir \
mandir INITDIR docdir
do
dirname=`eval echo '${'${j}'}'`
if
test ! -d "$dirname"
then
AC_MSG_WARN([$j directory ($dirname) does not exist!])
fi
done
dnl This OS-based decision-making is poor autotools practice;
dnl feature-based mechanisms are strongly preferred.
dnl
dnl So keep this section to a bare minimum; regard as a "necessary evil".
case "$host_os" in
*bsd*) LIBS="-L/usr/local/lib"
CPPFLAGS="$CPPFLAGS -I/usr/local/include"
INIT_EXT=".sh"
;;
*solaris*)
;;
*linux*)
AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform)
CFLAGS="$CFLAGS -I${prefix}/include"
;;
darwin*)
AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform)
LIBS="$LIBS -L${prefix}/lib"
CFLAGS="$CFLAGS -I${prefix}/include"
;;
esac
dnl Eventually remove this
CFLAGS="$CFLAGS -I${prefix}/include/heartbeat"
AC_SUBST(INIT_EXT)
AC_DEFINE_UNQUOTED(HA_LOG_FACILITY, LOG_DAEMON, Default logging facility)
AC_MSG_NOTICE(Host CPU: $host_cpu)
case "$host_cpu" in
ppc64|powerpc64)
case $CFLAGS in
*powerpc64*) ;;
*) if test "$GCC" = yes; then
CFLAGS="$CFLAGS -m64"
fi ;;
esac
esac
AC_MSG_CHECKING(which format is needed to print uint64_t)
case "$host_cpu" in
s390x)U64T="%lu";;
*64*) U64T="%lu";;
*) U64T="%llu";;
esac
AC_MSG_RESULT($U64T)
AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t)
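dnl Illustration (hypothetical call): U64T is a complete printf conversion,
dnl so C code can splice it into a format string, e.g.
dnl printf("seq: "U64T"\n", seq);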
AC_CHECK_HEADERS(hb_config.h)
AC_CHECK_HEADERS(glue_config.h)
GLUE_HEADER=none
if test "$ac_cv_header_glue_config_h" = "yes"; then
GLUE_HEADER=glue_config.h
elif test "$ac_cv_header_hb_config_h" = "yes"; then
GLUE_HEADER=hb_config.h
else
AC_MSG_FAILURE(Core development headers were not found)
fi
dnl Variables needed for substitution
CRM_DTD_DIRECTORY="${datadir}/pacemaker"
AC_DEFINE_UNQUOTED(CRM_DTD_DIRECTORY,"$CRM_DTD_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
AC_SUBST(CRM_DTD_DIRECTORY)
AC_DEFINE_UNQUOTED(CRM_DTD_VERSION,"$CRM_DTD_VERSION", Current version of the Pacemaker Relax-NG Schema)
AC_SUBST(CRM_DTD_VERSION)
CRM_DAEMON_USER=`extract_header_define $GLUE_HEADER HA_CCMUSER`
AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_USER)
CRM_DAEMON_GROUP=`extract_header_define $GLUE_HEADER HA_APIGROUP`
AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
AC_SUBST(CRM_DAEMON_GROUP)
CRM_STATE_DIR=${localstatedir}/run/crm
AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets)
AC_SUBST(CRM_STATE_DIR)
PE_STATE_DIR="${localstatedir}/lib/pengine"
AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs)
AC_SUBST(PE_STATE_DIR)
dnl Eventually move out of the heartbeat dir tree and create compatibility code
CRM_CONFIG_DIR="${localstatedir}/lib/heartbeat/crm"
AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep CIB configuration files)
AC_SUBST(CRM_CONFIG_DIR)
dnl Eventually move out of the heartbeat dir tree and create symlinks when needed
CRM_DAEMON_DIR=`extract_header_define $GLUE_HEADER HA_LIBHBDIR`
AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
AC_SUBST(CRM_DAEMON_DIR)
dnl Needed so that the AIS plugin can clear out the directory as Heartbeat does
HA_STATE_DIR=`extract_header_define $GLUE_HEADER HA_VARRUNDIR`
AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where Heartbeat keeps state files and sockets)
AC_SUBST(HA_STATE_DIR)
dnl Needed for the location of hostcache in CTS.py
HA_VARLIBHBDIR=`extract_header_define $GLUE_HEADER HA_VARLIBHBDIR`
AC_SUBST(HA_VARLIBHBDIR)
AC_DEFINE_UNQUOTED(UUID_FILE,"$localstatedir/lib/heartbeat/hb_uuid", Location of Heartbeat's UUID file)
OCF_ROOT_DIR=`extract_header_define $GLUE_HEADER OCF_ROOT_DIR`
if test "X$OCF_ROOT_DIR" = X; then
AC_MSG_ERROR(Could not locate OCF directory)
fi
AC_SUBST(OCF_ROOT_DIR)
OCF_RA_DIR=`extract_header_define $GLUE_HEADER OCF_RA_DIR`
AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs)
AC_SUBST(OCF_RA_DIR)
dnl Extract this value from glue_config.h once we no longer support anything else
STONITH_PLUGIN_DIR="$libdir/stonith/plugins/stonith/"
AC_DEFINE_UNQUOTED(STONITH_PLUGIN_DIR,"$STONITH_PLUGIN_DIR", Location for Stonith plugins)
AC_SUBST(STONITH_PLUGIN_DIR)
RH_STONITH_DIR="$sbindir"
AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents)
RH_STONITH_PREFIX="fence_"
AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents)
AC_PATH_PROGS(HG, hg false)
AC_MSG_CHECKING(build version)
BUILD_VERSION=unknown
if test -f $srcdir/.hg_archival.txt; then
BUILD_VERSION=`cat $srcdir/.hg_archival.txt | awk '/node:/ { print $2 }'`
elif test -x $HG -a -d .hg; then
BUILD_VERSION=`$HG id -itb`
if test $? != 0; then
BUILD_VERSION=unknown
fi
fi
AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
AC_MSG_RESULT($BUILD_VERSION)
AC_SUBST(BUILD_VERSION)
dnl ===============================================
dnl Program Paths
dnl ===============================================
PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
export PATH
dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL
dnl was NOT being expanded all the time thus causing things to fail.
AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13)
AM_PATH_PYTHON
AC_CHECK_PROGS(MAKE, gmake make)
AC_PATH_PROGS(HTML2TXT, lynx w3m)
AC_PATH_PROGS(HELP2MAN, help2man)
AC_PATH_PROGS(POD2MAN, pod2man, pod2man)
AC_PATH_PROGS(ASCIIDOC, asciidoc)
AC_PATH_PROGS(PUBLICAN, publican)
AC_PATH_PROGS(FOP, fop)
AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh)
AC_PATH_PROGS(SCP, scp, /usr/bin/scp)
AC_PATH_PROGS(HG, hg, /bin/false)
AC_PATH_PROGS(TAR, tar)
AC_PATH_PROGS(MD5, md5)
AC_PATH_PROGS(TEST, test)
AC_PATH_PROGS(PKGCONFIG, pkg-config)
AC_PATH_PROGS(XML2CONFIG, xml2-config)
AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
if test x"${LIBTOOL}" = x""; then
AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE})
fi
if test x"${MAKE}" = x""; then
AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE})
fi
AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"")
if test x"${HELP2MAN}" != x""; then
PKG_FEATURES="$PKG_FEATURES manpages"
fi
AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"")
if test x"${ASCIIDOC}" != x""; then
PKG_FEATURES="$PKG_FEATURES asciidoc"
fi
AM_CONDITIONAL(BUILD_DOCBOOK, test x"${PUBLICAN}" != x"")
if test x"${PUBLICAN}" != x""; then
PKG_FEATURES="$PKG_FEATURES publican"
fi
dnl ===============================================
dnl Libraries
dnl ===============================================
AC_CHECK_LIB(socket, socket) dnl -lsocket
AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc...
AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux)
AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64)
AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available )
AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available)
AC_CHECK_LIB(uuid, uuid_parse) dnl e2fsprogs
AC_CHECK_LIB(uuid, uuid_create) dnl ossp
if test x"${PKGCONFIG}" = x""; then
AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE})
fi
dnl
dnl On many systems libcrypto is needed when linking against libsnmp.
dnl Check to see if it exists, and if so use it.
dnl
AC_CHECK_LIB(crypto, CRYPTO_free, CRYPTOLIB="-lcrypto",)
AC_SUBST(CRYPTOLIB)
if test "x${enable_thread_safe}" = "xyes"; then
GPKGNAME="gthread-2.0"
else
GPKGNAME="glib-2.0"
fi
if
$PKGCONFIG --exists $GPKGNAME
then
GLIBCONFIG="$PKGCONFIG $GPKGNAME"
else
set -x
echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH
$PKGCONFIG --exists $GPKGNAME; echo $?
$PKGCONFIG --cflags $GPKGNAME; echo $?
$PKGCONFIG $GPKGNAME; echo $?
set +x
AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE})
fi
AC_MSG_RESULT(using $GLIBCONFIG)
#
# Where is dlopen?
#
if test "$ac_cv_lib_c_dlopen" = yes; then
LIBADD_DL=""
elif test "$ac_cv_lib_dl_dlopen" = yes; then
LIBADD_DL=-ldl
else
LIBADD_DL=${lt_cv_dlopen_libs}
fi
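dnl A Makefile.am can then link dlopen()-using binaries against
dnl $(LIBADD_DL); a sketch, as the actual consumers are outside this diff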
dnl
dnl Check for location of gettext
dnl
dnl On at least Solaris 2.x, where it is in libc, specifying -lintl causes
dnl grief. Ensure minimal result, not the sum of all possibilities.
dnl And do libc first.
dnl Known examples:
dnl c: Linux, Solaris 2.6+
dnl intl: BSD, AIX
AC_CHECK_LIB(c, gettext)
if test x$ac_cv_lib_c_gettext != xyes; then
AC_CHECK_LIB(intl, gettext)
fi
if test x$ac_cv_lib_c_gettext != xyes -a x$ac_cv_lib_intl_gettext != xyes; then
AC_MSG_ERROR(You need gettext installed in order to build ${PACKAGE})
fi
if test "X$GLIBCONFIG" != X; then
AC_MSG_CHECKING(for special glib includes: )
GLIBHEAD=`$GLIBCONFIG --cflags`
AC_MSG_RESULT($GLIBHEAD)
CPPFLAGS="$CPPFLAGS $GLIBHEAD"
AC_MSG_CHECKING(for glib library flags)
GLIBLIB=`$GLIBCONFIG --libs`
AC_MSG_RESULT($GLIBLIB)
LIBS="$LIBS $GLIBLIB"
fi
dnl ========================================================================
dnl Headers
dnl ========================================================================
AC_HEADER_STDC
AC_CHECK_HEADERS(arpa/inet.h)
AC_CHECK_HEADERS(asm/types.h)
AC_CHECK_HEADERS(assert.h)
AC_CHECK_HEADERS(auth-client.h)
AC_CHECK_HEADERS(ctype.h)
AC_CHECK_HEADERS(dirent.h)
AC_CHECK_HEADERS(errno.h)
AC_CHECK_HEADERS(fcntl.h)
AC_CHECK_HEADERS(getopt.h)
AC_CHECK_HEADERS(glib.h)
AC_CHECK_HEADERS(grp.h)
AC_CHECK_HEADERS(limits.h)
AC_CHECK_HEADERS(linux/errqueue.h)
AC_CHECK_HEADERS(malloc.h)
AC_CHECK_HEADERS(netdb.h)
AC_CHECK_HEADERS(netinet/in.h)
AC_CHECK_HEADERS(netinet/ip.h)
AC_CHECK_HEADERS(pam/pam_appl.h)
AC_CHECK_HEADERS(pthread.h)
AC_CHECK_HEADERS(pwd.h)
AC_CHECK_HEADERS(security/pam_appl.h)
AC_CHECK_HEADERS(sgtty.h)
AC_CHECK_HEADERS(signal.h)
AC_CHECK_HEADERS(stdarg.h)
AC_CHECK_HEADERS(stddef.h)
AC_CHECK_HEADERS(stdio.h)
AC_CHECK_HEADERS(stdlib.h)
AC_CHECK_HEADERS(string.h)
AC_CHECK_HEADERS(strings.h)
AC_CHECK_HEADERS(sys/dir.h)
AC_CHECK_HEADERS(sys/ioctl.h)
AC_CHECK_HEADERS(sys/param.h)
AC_CHECK_HEADERS(sys/poll.h)
AC_CHECK_HEADERS(sys/resource.h)
AC_CHECK_HEADERS(sys/select.h)
AC_CHECK_HEADERS(sys/socket.h)
AC_CHECK_HEADERS(sys/sockio.h)
AC_CHECK_HEADERS(sys/stat.h)
AC_CHECK_HEADERS(sys/time.h)
AC_CHECK_HEADERS(sys/timeb.h)
AC_CHECK_HEADERS(sys/types.h)
AC_CHECK_HEADERS(sys/uio.h)
AC_CHECK_HEADERS(sys/un.h)
AC_CHECK_HEADERS(sys/utsname.h)
AC_CHECK_HEADERS(sys/wait.h)
AC_CHECK_HEADERS(time.h)
AC_CHECK_HEADERS(unistd.h)
AC_CHECK_HEADERS(winsock.h)
dnl These headers need prerequisites before the tests will pass
dnl AC_CHECK_HEADERS(net/if.h)
dnl AC_CHECK_HEADERS(netinet/icmp6.h)
dnl AC_CHECK_HEADERS(netinet/ip6.h)
dnl AC_CHECK_HEADERS(netinet/ip_icmp.h)
AC_MSG_CHECKING(for special libxml2 includes)
if test "x$XML2CONFIG" = "x"; then
AC_MSG_ERROR(libxml2 config not found)
else
XML2HEAD="`$XML2CONFIG --cflags`"
AC_MSG_RESULT($XML2HEAD)
AC_CHECK_LIB(xml2, xmlReadMemory)
AC_CHECK_LIB(xslt, xsltApplyStylesheet)
fi
CPPFLAGS="$CPPFLAGS $XML2HEAD"
AC_CHECK_HEADERS(libxml/xpath.h)
AC_CHECK_HEADERS(libxslt/xslt.h)
if test "$ac_cv_header_libxml_xpath_h" != "yes"; then
AC_MSG_ERROR(The libxml development headers were not found)
fi
if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then
AC_MSG_ERROR(The libxslt development headers were not found)
fi
dnl ========================================================================
dnl Structures
dnl ========================================================================
AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include <time.h>]])
dnl ========================================================================
dnl Functions
dnl ========================================================================
AC_CHECK_FUNCS(g_log_set_default_handler)
AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function]))
dnl ========================================================================
dnl ltdl
dnl ========================================================================
AC_CHECK_LIB(ltdl, lt_dlopen, [LTDL_foo=1])
if test "x${enable_bundled_ltdl}" = "xyes"; then
if test $ac_cv_lib_ltdl_lt_dlopen = yes; then
AC_MSG_NOTICE([Disabling usage of installed ltdl])
fi
ac_cv_lib_ltdl_lt_dlopen=no
fi
LIBLTDL_DIR=""
if test $ac_cv_lib_ltdl_lt_dlopen != yes ; then
AC_MSG_NOTICE([Installing local ltdl])
LIBLTDL_DIR=libltdl
( cd $srcdir ; $TAR -xvf libltdl.tar )
if test "$?" -ne 0; then
AC_MSG_ERROR([$TAR of libltdl.tar in $srcdir failed])
fi
AC_CONFIG_SUBDIRS(libltdl)
else
LIBS="$LIBS -lltdl"
AC_MSG_NOTICE([Using installed ltdl])
INCLTDL=""
LIBLTDL=""
fi
AC_SUBST(INCLTDL)
AC_SUBST(LIBLTDL)
AC_SUBST(LIBLTDL_DIR)
dnl ========================================================================
dnl bzip2
dnl ========================================================================
AC_CHECK_HEADERS(bzlib.h)
AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress)
if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then
AC_MSG_ERROR(BZ2 libraries not found)
fi
if test x$ac_cv_header_bzlib_h != xyes; then
AC_MSG_ERROR(BZ2 Development headers not found)
fi
dnl ========================================================================
dnl ncurses
dnl ========================================================================
dnl
dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses".
dnl Many non-Linux deliver "curses"; sites may add "ncurses".
dnl
dnl However, the source-code recommendation for both is to #include "curses.h"
dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h").
dnl
dnl ncurses takes precedence.
dnl
AC_CHECK_HEADERS(curses.h)
AC_CHECK_HEADERS(curses/curses.h)
AC_CHECK_HEADERS(ncurses.h)
AC_CHECK_HEADERS(ncurses/ncurses.h)
dnl Although n-library is preferred, only look for it if the n-header was found.
CURSESLIBS=''
if test "$ac_cv_header_ncurses_h" = "yes"; then
AC_CHECK_LIB(ncurses, printw,
[CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]
)
fi
if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then
AC_CHECK_LIB(ncurses, printw,
[CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)]
)
fi
dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]
)
fi
dnl Only look for non-n-library if there was no n-library.
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then
AC_CHECK_LIB(curses, printw,
[CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)]
)
fi
if test "x$CURSESLIBS" != "x"; then
PKG_FEATURES="$PKG_FEATURES ncurses"
fi
dnl Check for printw() prototype compatibility
if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then
AC_MSG_CHECKING(whether printw() requires argument of "const char *")
ac_save_LIBS=$LIBS
LIBS="$CURSESLIBS $LIBS"
ac_save_CFLAGS=$CFLAGS
CFLAGS="-Wcast-qual -Werror"
AC_LINK_IFELSE(
[AC_LANG_PROGRAM(
[
#if defined(HAVE_CURSES_H)
# include <curses.h>
#elif defined(HAVE_NCURSES_H)
# include <ncurses.h>
#endif
],
[printw((const char *)"Test");]
)],
[ac_cv_compatible_printw=yes],
[ac_cv_compatible_printw=no]
)
LIBS=$ac_save_LIBS
CFLAGS=$ac_save_CFLAGS
AC_MSG_RESULT([$ac_cv_compatible_printw])
if test "$ac_cv_compatible_printw" = no; then
AC_MSG_WARN([The printw() function of your ncurses or curses library is old, so we will disable use of the library. If you want to use it anyway, please update to a newer version; ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.])
AC_MSG_NOTICE([Disabling curses])
AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?])
fi
fi
AC_SUBST(CURSESLIBS)
dnl ========================================================================
dnl Cluster infrastructure - Heartbeat
dnl ========================================================================
dnl On Debian, AC_CHECK_LIB fails if a library has any unresolved symbols
dnl So check for all the dependencies (so they're added to LIBS) before checking for -lplumb
AC_CHECK_LIB(pils, PILLoadPlugin)
AC_CHECK_LIB(plumb, G_main_add_IPC_Channel)
if test x"$ac_cv_lib_plumb_G_main_add_IPC_Channel" != x"yes"; then
AC_MSG_FAILURE(Core Heartbeat utility libraries not found: $ac_cv_lib_plumb_G_main_add_IPC_Channel)
fi
dnl Compatibility checks
AC_CHECK_FUNCS(msgfromIPC_timeout)
AC_CHECK_MEMBERS([struct lrm_ops.fail_rsc],,,[[#include <lrm/lrm_api.h>]])
dnl ========================================================================
dnl Cluster stack - Heartbeat
dnl ========================================================================
case $SUPPORT_HEARTBEAT in
1|yes|true)
AC_CHECK_LIB(hbclient, ll_cluster_new,
[SUPPORT_HEARTBEAT=1], [AC_MSG_FAILURE(Unable to support Heartbeat: client libraries not found)]);;
try)
AC_CHECK_LIB(hbclient, ll_cluster_new,
[SUPPORT_HEARTBEAT=1], [SUPPORT_HEARTBEAT=0]);;
*) SUPPORT_HEARTBEAT=0;;
esac
AM_CONDITIONAL(BUILD_HEARTBEAT_SUPPORT, test $SUPPORT_HEARTBEAT = 1)
AC_DEFINE_UNQUOTED(SUPPORT_HEARTBEAT, $SUPPORT_HEARTBEAT, Support the Heartbeat messaging and membership layer)
dnl ========================================================================
dnl Cluster stack - OpenAIS
dnl ========================================================================
AISLIB=""
dnl Normalize the values
case $SUPPORT_AIS in
1|yes|true) missingisfatal=1;;
try) missingisfatal=0;;
*) SUPPORT_AIS=no;;
esac
AC_MSG_CHECKING(for native AIS)
AISMSGLIB=""
AIS_VERSION="none"
COROSYNC_PKG="$PKGCONFIG libcoroipcc"
if test $SUPPORT_AIS = no; then
AC_MSG_RESULT(no... not requested.)
else
AC_MSG_RESULT($SUPPORT_AIS, with '$AISPREFIX')
AC_CHECK_HEADERS(openais/saAis.h)
AC_CHECK_HEADERS(corosync/coroipcc.h)
$COROSYNC_PKG --exists
if test $? = 0; then
AIS_VERSION="corosync"
elif test "$ac_cv_header_openais_saAis_h" = "yes"; then
AIS_VERSION="whitetank"
else
aisreason="Whitetank headers not found"
fi
fi
if test $AIS_VERSION != "none"; then
AC_MSG_CHECKING(for OpenAIS branch)
AC_MSG_RESULT($AIS_VERSION)
fi
if test $AIS_VERSION = "corosync"; then
if test "$ac_cv_header_corosync_coroipcc_h" != "yes"; then
AIS_VERSION="none"
aisreason="Corosync headers not found"
fi
saveLIBS="$LIBS"
LIBS="$LIBS `$COROSYNC_PKG --libs-only-L`"
AC_CHECK_LIB(coroipcc, coroipcc_msg_send_reply_receive, [])
LIBS="$saveLIBS"
if test $ac_cv_lib_coroipcc_coroipcc_msg_send_reply_receive != yes; then
AC_MSG_RESULT(Cannot locate AIS messaging library)
aisreason="requred Corosync libraries not found"
AIS_VERSION="none"
fi
fi
dnl continue?
if test $AIS_VERSION = "whitetank"; then
dnl Find it in lib, lib64, or wherever it wants to live...
AC_MSG_CHECKING(location of OpenAIS libraries)
dnl CoroSync location
alib=`ls ${AISPREFIX}/*/libcpg.so | head -n 1`
if test -z "$alib"; then
dnl Whitetank location
alib=`ls ${AISPREFIX}/*/*/libcpg.so | head -n 1`
fi
AISLIB=`dirname $alib`
AC_MSG_RESULT($AISLIB)
if test "x$AISLIB" = "x"; then
AC_MSG_WARN(Use --with-ais-prefix to specify the prefix OpenAIS was installed with)
aisreason="library directory not found"
AIS_VERSION="none"
elif test ! -d "$AISLIB"; then
AC_MSG_WARN(Use --with-ais-prefix to specify the prefix OpenAIS was installed with)
aisreason="specified library directory does not exist"
AIS_VERSION="none"
fi
fi
dnl continue?
if test $AIS_VERSION = "whitetank"; then
AC_MSG_CHECKING(location of OpenAIS plugins)
if test -z "$LCRSODIR"; then
LCRSODIR="$libexecdir/lcrso"
alib=`ls ${AISPREFIX}/*/lcrso/objdb.lcrso | head -n 1`
LCRSODIR=`dirname $alib`
fi
AC_MSG_RESULT($LCRSODIR)
if test "x$LCRSODIR" = "x"; then
AC_MSG_RESULT(Invalid. Please specify the correct location with --with-lcrso-dir)
aisreason="plugin directory not found"
AIS_VERSION="none"
elif test ! -d "$LCRSODIR"; then
AC_MSG_RESULT(Invalid. Please specify the correct location with --with-lcrso-dir)
aisreason="specified plugin directory does not exist"
AIS_VERSION="none"
fi
fi
dnl continue?
if test $AIS_VERSION = "whitetank"; then
dnl Don't add the messaging library to LIBS since most daemons don't need/use it
saveLIBS="$LIBS"
LIBS="$LIBS -L${AISLIB} -R${AISLIB}"
AC_CHECK_LIB(SaMsg, saSendReceiveReply, [])
AC_CHECK_LIB(SaMsg, openais_msg_send_reply_receive, [])
if test $ac_cv_lib_SaMsg_openais_msg_send_reply_receive = yes; then
: OpenAIS
elif test $ac_cv_lib_SaMsg_saSendReceiveReply = yes; then
: OpenAIS
AC_DEFINE_UNQUOTED(TRADITIONAL_AIS_IPC, 1, "Use the 'old' AIS IPC interface")
else
AC_MSG_RESULT(Cannot locate AIS messaging library)
aisreason="requred libraries not found"
AIS_VERSION="none"
fi
LIBS="$saveLIBS"
fi
SUPPORT_AIS=1
case $AIS_VERSION in
corosync)
AC_DEFINE_UNQUOTED(AIS_COROSYNC, 1, "AIS target is the corosync series")
LCRSODIR=`$PKGCONFIG corosync --variable=lcrsodir`
CFLAGS="$CFLAGS `$COROSYNC_PKG --cflags`"
AISMSGLIB=`$COROSYNC_PKG --libs`
;;
whitetank)
AC_DEFINE_UNQUOTED(AIS_WHITETANK, 1, "AIS target is the whitetank series")
CFLAGS="$CFLAGS -I$AISPREFIX/include/openais"
AISMSGLIB="-L${AISLIB} -R${AISLIB} -lSaMsg"
;;
none)
SUPPORT_AIS=0
if test "x$aisreason" != x; then
if test $missingisfatal = 0; then
AC_MSG_WARN(Unable to support OpenAIS: $aisreason)
else
AC_MSG_FAILURE(Unable to support OpenAIS: $aisreason)
fi
fi
;;
*) AC_MSG_FAILURE(Unknown OpenAIS branch: $AIS_VERSION);;
esac
AC_DEFINE_UNQUOTED(SUPPORT_AIS, $SUPPORT_AIS, Support the OpenAIS messaging and membership layer)
AM_CONDITIONAL(BUILD_AIS_SUPPORT, test $SUPPORT_AIS = 1)
dnl
dnl Cluster stack - Sanity
dnl
STACKS=""
CLUSTERLIBS=""
if test $SUPPORT_HEARTBEAT = 1; then
STACKS="$STACKS heartbeat"
CLUSTERLIBS="$CLUSTERLIBS -lhbclient -lccmclient"
fi
if test $SUPPORT_AIS = 1; then
STACKS="$STACKS $AIS_VERSION"
CLUSTERLIBS="$CLUSTERLIBS ${AISMSGLIB}"
else
AISPREFIX=""
LCRSODIR="$libdir"
fi
PKG_FEATURES="$PKG_FEATURES$STACKS"
AC_MSG_CHECKING(for supported stacks)
if test x"$STACKS" = x; then
AC_MSG_FAILURE(You must choose at least one cluster stack to support)
fi
AC_MSG_RESULT($STACKS)
AC_SUBST(CLUSTERLIBS)
AC_SUBST(LCRSODIR)
dnl ========================================================================
dnl SNMP
dnl ========================================================================
case $SUPPORT_SNMP in
1|yes|true) missingisfatal=1;;
try) missingisfatal=0;;
*) SUPPORT_SNMP=no;;
esac
SNMPLIB=""
AC_MSG_CHECKING(for snmp support)
if test $SUPPORT_SNMP = no; then
AC_MSG_RESULT(no... not requested.)
SUPPORT_SNMP=0
else
SNMPCONFIG=""
AC_MSG_RESULT($SUPPORT_SNMP)
AC_CHECK_HEADERS(net-snmp/net-snmp-config.h)
if test "x${ac_cv_header_net_snmp_net_snmp_config_h}" != "xyes"; then
SUPPORT_SNMP="no"
fi
if test $SUPPORT_SNMP != no; then
AC_PATH_PROGS(SNMPCONFIG, net-snmp-config)
if test "X${SNMPCONFIG}" = "X"; then
AC_MSG_RESULT(You need the net-snmp development package to continue.)
SUPPORT_SNMP=no
fi
fi
if test $SUPPORT_SNMP != no; then
AC_MSG_CHECKING(for special snmp libraries)
SNMPLIBS=`$SNMPCONFIG --agent-libs`
AC_MSG_RESULT($SNMPLIBS)
fi
if test $SUPPORT_SNMP != no; then
savedLibs=$LIBS
LIBS="$LIBS $SNMPLIBS"
AC_CHECK_FUNCS(netsnmp_transport_open_client)
if test $ac_cv_func_netsnmp_transport_open_client != yes; then
SUPPORT_SNMP=no
fi
LIBS=$savedLibs
fi
if test $SUPPORT_SNMP = no; then
SUPPORT_SNMP=0
if test $missingisfatal = 0; then
AC_MSG_WARN(Unable to support SNMP)
else
AC_MSG_FAILURE(Unable to support SNMP)
fi
else
SUPPORT_SNMP=1
fi
fi
if test $SUPPORT_SNMP = 1; then
PKG_FEATURES="$PKG_FEATURES snmp"
fi
AC_SUBST(SNMPLIBS)
AM_CONDITIONAL(ENABLE_SNMP, test "$SUPPORT_SNMP" = "1")
AC_DEFINE_UNQUOTED(ENABLE_SNMP, $SUPPORT_SNMP, Build in support for sending SNMP traps)
dnl ========================================================================
dnl ESMTP
dnl ========================================================================
case $SUPPORT_ESMTP in
1|yes|true) missingisfatal=1;;
try) missingisfatal=0;;
*) SUPPORT_ESMTP=no;;
esac
ESMTPLIB=""
AC_MSG_CHECKING(for esmtp support)
if test $SUPPORT_ESMTP = no; then
AC_MSG_RESULT(no... not requested.)
SUPPORT_ESMTP=0
else
ESMTPCONFIG=""
AC_MSG_RESULT($SUPPORT_ESMTP)
AC_CHECK_HEADERS(libesmtp.h)
if test "x${ac_cv_header_libesmtp_h}" != "xyes"; then
SUPPORT_ESMTP="no"
fi
if test $SUPPORT_ESMTP != no; then
AC_PATH_PROGS(ESMTPCONFIG, libesmtp-config)
if test "X${ESMTPCONFIG}" = "X"; then
AC_MSG_RESULT(You need the libesmtp development package to continue.)
SUPPORT_ESMTP=no
fi
fi
if test $SUPPORT_ESMTP != no; then
AC_MSG_CHECKING(for special esmtp libraries)
ESMTPLIBS=`$ESMTPCONFIG --libs | tr '\n' ' '`
AC_MSG_RESULT($ESMTPLIBS)
fi
if test $SUPPORT_ESMTP = no; then
SUPPORT_ESMTP=0
if test $missingisfatal = 0; then
AC_MSG_WARN(Unable to support ESMTP)
else
AC_MSG_FAILURE(Unable to support ESMTP)
fi
else
SUPPORT_ESMTP=1
fi
fi
if test $SUPPORT_ESMTP = 1; then
PKG_FEATURES="$PKG_FEATURES libesmtp"
fi
AC_SUBST(ESMTPLIBS)
AM_CONDITIONAL(ENABLE_ESMTP, test "$SUPPORT_ESMTP" = "1")
AC_DEFINE_UNQUOTED(ENABLE_ESMTP, $SUPPORT_ESMTP, Build in support for sending mail notifications with ESMTP)
dnl ========================================================================
dnl GnuTLS
dnl ========================================================================
AC_CHECK_HEADERS(gnutls/gnutls.h)
AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h)
dnl GNUTLS library: Attempt to determine by 'libgnutls-config' program.
dnl If no 'libgnutls-config', try traditional autoconf means.
AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config)
if test -n "$LIBGNUTLS_CONFIG"; then
AC_MSG_CHECKING(for gnutls header flags)
GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`";
AC_MSG_RESULT($GNUTLSHEAD)
AC_MSG_CHECKING(for gnutls library flags)
GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`";
AC_MSG_RESULT($GNUTLSLIBS)
else
AC_CHECK_LIB(gnutls, gnutls_init)
fi
AC_SUBST(GNUTLSHEAD)
AC_SUBST(GNUTLSLIBS)
dnl ========================================================================
dnl System Health
dnl ========================================================================
dnl Check if servicelog development package is installed
SERVICELOG=servicelog-1
SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG packages)
if
$PKGCONFIG --exists $SERVICELOG
then
SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")
dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages)
if
$PKGCONFIG --exists $OPENIPMI $SERVICELOG
then
OPENIPMI_SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_OPENIPMI_SERICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")
dnl ========================================================================
dnl checks for library functions to replace them
dnl
dnl NoSuchFunctionName:
dnl is a dummy function which no system supplies. It is here to make
dnl the system compile semi-correctly on OpenBSD which doesn't know
dnl how to create an empty archive
dnl
dnl scandir: Only on BSD.
dnl System-V systems may have it, but hidden and/or deprecated.
dnl A replacement function is supplied for it.
dnl
dnl setenv: is some bsdish function that should also be avoided (use
dnl putenv instead)
dnl On the other hand, putenv doesn't provide the right API for the
dnl code and has memory leaks designed in (sigh...). Fortunately,
dnl a replacement function is supplied for it.
dnl
dnl strerror: returns a string that corresponds to an errno.
dnl A replacement function is supplied for it.
dnl
dnl unsetenv: is some bsdish function that should also be avoided.
dnl A replacement function is supplied for it.
dnl
dnl strnlen: is a gnu function similar to strlen, but safer.
dnl We wrote a tolerably-fast replacement function for it.
dnl
dnl strndup: is a gnu function similar to strdup, but safer.
dnl We wrote a tolerably-fast replacement function for it.
dnl
dnl daemon: is a GNU function. The daemon() function is for programs wishing to
dnl detach themselves from the controlling terminal and run in the
dnl background as a system daemon.
dnl A replacement function is supplied for it.
AC_REPLACE_FUNCS(alphasort inet_pton NoSuchFunctionName scandir setenv strerror unsetenv strnlen strndup daemon strlcpy strlcat)
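dnl Standard AC_REPLACE_FUNCS behaviour: each function above that configure
dnl cannot find gets its <name>.o added to LIBOBJS, so the fallback in the
dnl replace/ subdirectory (see SUBDIRS in Makefile.am) is compiled in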
dnl ========================================================================
dnl Compiler flags
dnl ========================================================================
dnl Make sure that CFLAGS is not exported. If the user did
dnl not have CFLAGS in their environment then this should have
dnl no effect. However if CFLAGS was exported from the user's
dnl environment, then the new CFLAGS will also be exported
dnl to sub processes.
CC_ERRORS=""
CC_EXTRAS=""
if export | fgrep " CFLAGS=" > /dev/null; then
export -n CFLAGS || true # We don't want to bomb out if this fails
fi
if test "$GCC" != yes; then
CFLAGS="$CFLAGS -g"
enable_fatal_warnings=no
else
CFLAGS="$CFLAGS -ggdb3 -O0"
# We had to eliminate -Wnested-externs because of libtool changes
EXTRA_FLAGS="-fgnu89-inline
-fstack-protector-all
-Wall
-Waggregate-return
-Wbad-function-cast
-Wcast-qual
-Wcast-align
-Wdeclaration-after-statement
-Wendif-labels
-Wfloat-equal
-Wformat=2
-Wformat-security
-Wformat-nonliteral
-Winline
-Wmissing-prototypes
-Wmissing-declarations
-Wnested-externs
-Wno-long-long
-Wno-strict-aliasing
-Wpointer-arith
-Wstrict-prototypes
-Wunsigned-char
-Wwrite-strings"
# Additional warnings it might be nice to enable one day
# -Wshadow
# -Wunreachable-code
for j in $EXTRA_FLAGS
do
if
cc_supports_flag $j
then
CC_EXTRAS="$CC_EXTRAS $j"
fi
done
dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x
GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'`
AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4)
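dnl e.g. a last line of "gcc version 4.4.3 (SUSE Linux)" from gcc -v yields
dnl "4.4.3" from awk and "4" after sed (illustrative version string)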
dnl System specific options
case "$host_os" in
*linux*|*bsd*)
if test "${enable_fatal_warnings}" = "unknown"; then
enable_fatal_warnings=yes
fi
;;
esac
if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then
enable_fatal_warnings=yes
else
enable_fatal_warnings=no
fi
if test "x${enable_ansi}" != xno && cc_supports_flag -std=iso9899:199409 ; then
AC_MSG_NOTICE(Enabling ANSI Compatibility)
CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY"
fi
AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS})
fi
CFLAGS="$CFLAGS $CC_EXTRAS"
NON_FATAL_CFLAGS="$CFLAGS"
AC_SUBST(NON_FATAL_CFLAGS)
dnl
dnl We reset CFLAGS to include our warnings *after* all function
dnl checking goes on, so that our warning flags don't keep the
dnl AC_*FUNCS() calls above from working. In particular, -Werror will
dnl *always* cause us troubles if we set it before here.
dnl
dnl
if test "x${enable_fatal_warnings}" = xyes ; then
AC_MSG_NOTICE(Enabling Fatal Warnings)
CFLAGS="$CFLAGS -Werror"
fi
AC_SUBST(CFLAGS)
dnl This is useful for use in Makefiles that need to remove one specific flag
CFLAGS_COPY="$CFLAGS"
AC_SUBST(CFLAGS_COPY)
AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries
AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff...
AC_SUBST(LOCALE)
dnl Options for cleaning up the compiler output
QUIET_LIBTOOL_OPTS=""
QUIET_MAKE_OPTS=""
if test "x${enable_quiet}" = "xyes"; then
QUIET_LIBTOOL_OPTS="--quiet"
QUIET_MAKE_OPTS="--quiet"
fi
AC_MSG_RESULT(Suppress make details: ${enable_quiet})
dnl Put the above variables to use
LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)"
MAKE="${MAKE} \$(QUIET_MAKE_OPTS)"
AC_SUBST(CC)
AC_SUBST(MAKE)
AC_SUBST(LIBTOOL)
AC_SUBST(QUIET_MAKE_OPTS)
AC_SUBST(QUIET_LIBTOOL_OPTS)
dnl The Makefiles and shell scripts we output
AC_CONFIG_FILES(Makefile \
cts/Makefile \
cts/CTSvars.py \
cts/LSBDummy \
cib/Makefile \
crmd/Makefile \
pengine/Makefile \
pengine/regression.core.sh \
-debian/Makefile \
doc/Makefile \
doc/cibadmin.8 \
doc/crm_resource.8 \
include/Makefile \
include/crm/Makefile \
include/crm/common/Makefile \
include/crm/pengine/Makefile \
replace/Makefile \
lib/Makefile \
lib/ais/Makefile \
lib/common/Makefile \
lib/cib/Makefile \
lib/pengine/Makefile \
lib/transition/Makefile \
lib/fencing/Makefile \
lib/plugins/Makefile \
lib/plugins/lrm/Makefile \
fencing/Makefile \
extra/Makefile \
extra/resources/Makefile \
tools/Makefile \
- tools/haresources2cib.py \
tools/hb2openais.sh \
tools/crm_primitive.py \
shell/Makefile \
- shell/setup.py \
- shell/templates/Makefile \
- shell/regression/Makefile \
- shell/regression/regression.sh \
- shell/regression/lrmregtest-lsb \
- shell/regression/testcases/Makefile \
- shell/modules/Makefile \
- shell/modules/ui.py \
- shell/modules/ra.py \
- shell/modules/vars.py \
- shell/modules/help.py \
+ shell/templates/Makefile \
+ shell/regression/Makefile \
+ shell/regression/regression.sh \
+ shell/regression/lrmregtest-lsb \
+ shell/regression/testcases/Makefile \
+ shell/modules/Makefile \
+ shell/modules/ui.py \
+ shell/modules/ra.py \
+ shell/modules/vars.py \
+ shell/modules/help.py \
xml/Makefile \
xml/pacemaker.rng \
xml/resources.rng \
xml/constraints.rng \
xml/rule.rng \
xml/nvset.rng \
)
dnl Now process the entire list of files added by previous
dnl calls to AC_CONFIG_FILES()
AC_OUTPUT()
dnl *****************
dnl Configure summary
dnl *****************
AC_MSG_RESULT([])
AC_MSG_RESULT([$PACKAGE configuration:])
AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)])
AC_MSG_RESULT([ Features =${PKG_FEATURES}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ Prefix = ${prefix}])
AC_MSG_RESULT([ Executables = ${sbindir}])
AC_MSG_RESULT([ Man pages = ${mandir}])
AC_MSG_RESULT([ Libraries = ${libdir}])
AC_MSG_RESULT([ Header files = ${includedir}])
AC_MSG_RESULT([ Arch-independent files = ${datadir}])
AC_MSG_RESULT([ State information = ${localstatedir}])
AC_MSG_RESULT([ System configuration = ${sysconfdir}])
AC_MSG_RESULT([ AIS Plugins = ${LCRSODIR}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ Use system LTDL = ${ac_cv_lib_ltdl_lt_dlopen}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}])
AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}])
AC_MSG_RESULT([])
AC_MSG_RESULT([ CFLAGS = ${CFLAGS}])
AC_MSG_RESULT([ Libraries = ${LIBS}])
AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}])
diff --git a/cts/CIB.py b/cts/CIB.py
index a3c5de4283..5d5e121ac2 100644
--- a/cts/CIB.py
+++ b/cts/CIB.py
@@ -1,560 +1,560 @@
'''CTS: Cluster Testing System: CIB generator
'''
__copyright__='''
Author: Andrew Beekhof <abeekhof@suse.de>
Copyright (C) 2008 Andrew Beekhof
'''
from UserDict import UserDict
import sys, time, types, syslog, os, struct, string, signal, traceback, warnings
-from CTSvars import *
-from CTS import ClusterManager
+from cts.CTSvars import *
+from cts.CTS import ClusterManager
class CibBase:
cts_cib = None
cib_tmpfile = None
version = "unknown"
feature_set = "unknown"
target = None
def __init__(self, CM, tmpfile=None):
self.CM = CM
#self.target = self.CM.Env["nodes"][0]
if not tmpfile:
warnings.filterwarnings("ignore")
self.cib_tmpfile=os.tmpnam()
warnings.resetwarnings()
else:
self.cib_tmpfile = tmpfile
def version(self):
return self.version
def NextIP(self):
fields = string.split(self.CM.Env["IPBase"], '.')
fields[3] = str(int(fields[3])+1)
ip = string.join(fields, '.')
self.CM.Env["IPBase"] = ip
return ip
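# Illustration: NextIP() bumps the last octet of Env["IPBase"], so with a
# base of 127.0.0.10 successive calls yield 127.0.0.11, 127.0.0.12, ...
# (example addresses; the real base comes from the CTS environment)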
class CIB06(CibBase):
version = "transitional-0.6"
coloc_template = """<rsc_colocation id="%s" from="%s" to="%s" to_role="%s" score="%s"/>"""
cib_template ='''
<cib admin_epoch="1" epoch="0" num_updates="0" remote_access_port="9898">
<configuration>
<crm_config> %s
</crm_config>
<nodes/>
<resources> %s
</resources>
<constraints> %s
</constraints>
</configuration>
<status/>
</cib> '''
cib_option_template = '''
<cluster_property_set id="cib-bootstrap-options"><attributes>
<nvpair id="cib-bootstrap-1" name="start-failure-is-fatal" value="false"/>
<nvpair id="cib-bootstrap-2" name="stonith-enabled" value="%d"/>
<nvpair id="cib-bootstrap-3" name="pe-input-series-max" value="30000"/>
<nvpair id="cib-bootstrap-4" name="shutdown-escalation" value="5min"/>
<nvpair id="cib-bootstrap-5" name="startup-fencing" value="false"/>
<nvpair id="cib-bootstrap-6" name="batch-limit" value="10"/>
<nvpair id="cib-bootstrap-7" name="no-quorum-policy" value="%s"/>
</attributes></cluster_property_set>'''
lsb_resource = '''
<primitive id="lsb_dummy" class="lsb" type="''' +CTSvars.CTS_home+ '''/LSBDummy">
<operations>
<op id="ocf_lsb_monitor" name="monitor" interval="5s"/>
</operations>
</primitive> '''
clustermon_location_constraint = '''
<rsc_location id="run_cluster_mon" rsc="cluster_mon">
<rule id="cant_run_cluster_mon" score="-INFINITY" boolean_op="and">
<expression id="mon_expr" attribute="#is_dc" operation="eq" value="false"/>
</rule>
</rsc_location> '''
resource_group_template = '''<group id="group-1">%s %s %s</group>'''
per_node_constraint_template = '''
<rsc_location id="preferred-%s" rsc="%s" node="%s" score="100"/>'''
pingd_constraint_template = '''
<rsc_location id="%s-is-connected" rsc="%s">
<rule id="%s-connected-rule" role="%s" score="-INFINITY">
<expression id="%s-connected-expr" attribute="connected" operation="lt" value="%d"/>
</rule>
</rsc_location>'''
dummy_resource_template = '''
<primitive id="%s" class="ocf" type="Dummy" provider="heartbeat">
<operations>
<op id="mon-%s" name="monitor" interval="P10S"/>
</operations>
<instance_attributes id="%s-attrs"><attributes>
<nvpair id="migrate-%s" name="allow_migrate" value="1"/>
</attributes></instance_attributes>
</primitive> '''
clustermon_resource_template = '''
<primitive id="cluster_mon" class="ocf" type="ClusterMon" provider="heartbeat">
<operations>
<op id="cluster_mon-1" name="monitor" interval="5s" prereq="nothing"/>
<op id="cluster_mon-2" name="start" prereq="nothing"/>
</operations>
<instance_attributes id="cluster_mon-attrs">
<attributes>
<nvpair id="cluster_mon-1" name="htmlfile" value="/suse/abeekhof/Export/cluster.html"/>
<nvpair id="cluster_mon-2" name="update" value="10"/>
<nvpair id="cluster_mon-3" name="extra_options" value="-n -r"/>
<nvpair id="cluster_mon-4" name="user" value="abeekhof"/>
</attributes>
</instance_attributes>
</primitive> '''
master_slave_resource = '''
<master_slave id="master-1">
<instance_attributes id="master_rsc">
<attributes>
<nvpair id="clone_max_1" name="clone_max" value="%d"/>
<nvpair id="clone_node_max_2" name="clone_node_max" value="%d"/>
<nvpair id="master_max_3" name="master_max" value="%d"/>
<nvpair id="master_node_max_4" name="master_node_max" value="%d"/>
</attributes>
</instance_attributes>
<primitive id="ocf_msdummy" class="ocf" type="Stateful" provider="heartbeat">
<operations>
<op id="ocf_msdummy_monitor" name="monitor" interval="15s"/>
<op id="ocf_msdummy_monitor_master" name="monitor" interval="16s" role="Master"/>
</operations>
</primitive>
</master_slave>'''
pingd_resource_template = """
<clone id="Connectivity">
<meta_attributes id="pingd-opts">
<attributes>
<nvpair id="pingd-opt-1" name="globally_unique" value="false"/>
</attributes>
</meta_attributes>
<primitive id="pingd" class="ocf" provider="pacemaker" type="pingd">
<operations>
<op id="pingd-op-1" name="monitor" interval="120s"/>
</operations>
<instance_attributes id="pingd-attrs">
<attributes>
<nvpair id="pingd-attr-1" name="host_list" value="%s"/>
<nvpair id="pingd-attr-2" name="name" value="connected"/>
</attributes>
</instance_attributes>
</primitive>
</clone>"""
stonith_resource_template = """
<clone id="DoFencing">
<meta_attributes id="fencing">
<attributes>
<nvpair id="DoFencing-attr-1" name="resource_failure_stickiness" value="-1"/>
<nvpair id="DoFencing-attr-2" name="globally_unique" value="false"/>
</attributes>
</meta_attributes>
<primitive id="child_DoFencing" class="stonith" type="%s">
<operations>
<op id="DoFencing-op-1" name="monitor" interval="120s" prereq="nothing" timeout="300s"/>
<op id="DoFencing-op-2" name="start" prereq="nothing" timeout="180s"/>
<op id="DoFencing-op-3" name="stop" timeout="180s"/>
</operations>
<instance_attributes id="fencing-child">
<attributes>
<nvpair id="child_DoFencing-1" name="%s" value="%s"/>
<nvpair id="child_DoFencing-2" name="livedangerously" value="yes"/>
</attributes>
</instance_attributes>
</primitive>
</clone>"""
bsc_template = '''
<cluster_property_set id="bsc-options">
<attributes>
<nvpair id="bsc-options-ident-string" name="ident-string" value="Linux-HA TEST configuration file - REMOVEME!!"/>
</attributes>
</cluster_property_set>'''
def NewIP(self, name=None):
template = '''
<primitive id="%s" class="ocf" type="IPaddr" provider="heartbeat">
<operations>
<op id="mon-%s" name="monitor" interval="5s"/>
</operations>
<instance_attributes id="attrs-%s"><attributes>
<nvpair id="netmask-%s" name="cidr_netmask" value="32"/>
<nvpair id="ip-%s" name="ip" value="%s"/>
</attributes></instance_attributes>
</primitive> '''
ip = self.NextIP()
if not name:
name = "r"+ip
return template % (name, name, name, name, name, ip)
def NewHBIP(self, name=None):
template = '''
<primitive id="%s" class="heartbeat" type="IPaddr">
<operations>
<op id="mon-%s" name="monitor" interval="5s"/>
</operations>
<instance_attributes id="attrs-%s"><attributes>
<nvpair id="ip-%s" name="1" value="%s/32"/>
</attributes></instance_attributes>
</primitive> '''
ip = self.NextIP()
if not name:
name = "r"+ip
return template % (name, name, name, name, ip)
def NewDummy(self, name):
return self.dummy_resource_template % (name, name, name, name)
def install(self, target):
self.CM.rsh("localhost", "echo \'" + self.contents(target) + "\' > " + self.cib_tmpfile)
rc = self.CM.rsh.cp(self.cib_tmpfile, "root@%s:%s/cib.xml" % (target, CTSvars.CRM_CONFIG_DIR))
if rc != 0:
raise ValueError("Cannot copy %s to %s (%d)"%(self.cib_tmpfile, target, rc))
self.CM.rsh(target, "chown "+CTSvars.CRM_DAEMON_USER+" "+CTSvars.CRM_CONFIG_DIR+"/cib.xml")
self.CM.rsh("localhost", "rm -f "+self.cib_tmpfile)
def contents(self, target=None):
# fencing resource
if self.cts_cib:
return self.cts_cib
nodelist = ""
num_nodes = 0
for node in self.CM.Env["nodes"]:
nodelist += node + " "
num_nodes = num_nodes + 1
no_quorum = "stop"
if num_nodes < 3:
no_quorum = "ignore"
self.CM.debug("Cluster only has %d nodes, ignoring quorum" % num_nodes)
#make up crm config
cib_options = self.cib_option_template % (self.CM.Env["DoFencing"], no_quorum)
#create resources and their constraints
resources = ""
constraints = ""
if self.CM.Env["DoBSC"] == 1:
cib_options = cib_options + self.bsc_template
if self.CM.Env["CIBResource"] != 1:
# generate cib
self.cts_cib = self.cib_template % (cib_options, resources, constraints)
return self.cts_cib
if self.CM.cluster_monitor == 1:
resources += self.clustermon_resource_template
constraints += self.clustermon_location_constraint
ip1_rsc = self.NewIP()
ip2_rsc = self.NewHBIP()
ip3_rsc = self.NewIP()
resources += self.resource_group_template % (ip1_rsc, ip2_rsc, ip3_rsc)
# lsb resource
resources += self.lsb_resource
# Migrator
resources += self.NewDummy("migrator")
constraints += self.coloc_template % ("group-with-master", "group-1", "master-1", "Master", "INFINITY")
constraints += self.coloc_template % ("lsb-with-group", "lsb_dummy", "group-1", "Started", "INFINITY")
# per node resource
for node in self.CM.Env["nodes"]:
per_node_resources = self.NewIP("rsc_"+node)
per_node_constraint = self.per_node_constraint_template % (node, "rsc_"+node, node)
resources += per_node_resources
constraints += per_node_constraint
# Ping the test master
resources += self.pingd_resource_template % os.uname()[1]
# Require connectivity to run
constraints += self.pingd_constraint_template % ("master-1", "master-1", "m", "Started", "m", 1)
if self.CM.Env["DoFencing"]:
p_name = None
p_value = None
entries = string.split(self.CM.Env["stonith-params"], ',')
for entry in entries:
(p_name, p_value) = string.split(entry, '=')
if p_name == "hostlist" and p_value == "all":
p_value = string.join(self.CM.Env["nodes"], " ")
stonith_resource = self.stonith_resource_template % (self.CM.Env["stonith-type"], p_name, p_value)
resources += stonith_resource
#master slave resource
resources += self.master_slave_resource % (num_nodes, 1, 1, 1)
# generate cib
self.cts_cib = self.cib_template % (cib_options, resources, constraints)
return self.cts_cib
class CIB10(CibBase):
feature_set = "3.0"
version = "pacemaker-1.0"
cib_template = '''
<cib crm_feature_set='%s' admin_epoch='1' epoch='0' num_updates='0' validate-with='%s' %s>
<configuration>
<crm_config/>
<nodes/>
<resources/>
<constraints/>
</configuration>
<status/>
</cib>'''
def _create(self, command):
- fixed = "CIB_file="+self.cib_tmpfile+" crm configure " + command
+ fixed = "HOME=/root CIB_file="+self.cib_tmpfile+" crm --force configure " + command
rc = self.CM.rsh(self.target, fixed)
if rc != 0:
self.CM.log("Configure call failed: "+fixed)
sys.exit(1)
def _show(self, command=""):
output = ""
- (rc, result) = self.CM.rsh(self.target, "CIB_file="+self.cib_tmpfile+" crm configure show "+command, None, )
+ (rc, result) = self.CM.rsh(self.target, "HOME=/root CIB_file="+self.cib_tmpfile+" crm configure show "+command, None, )
for line in result:
output += line
self.CM.debug("Generated Config: "+line)
return output
def NewIP(self, name=None, standard="ocf:heartbeat"):
ip = self.NextIP()
if not name:
name = "r"+ip
if not standard:
standard = ""
else:
standard += ":"
self._create('''primitive %s %sIPaddr params ip=%s cidr_netmask=32 op monitor interval=5s'''
% (name, standard, ip))
return name
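# For illustration, NewIP("rsc_node1") would run roughly (a sketch assembled
# from the template above; the address comes from NextIP):
# HOME=/root CIB_file=<tmpfile> crm --force configure \
#   primitive rsc_node1 ocf:heartbeat:IPaddr params ip=<next-ip> \
#   cidr_netmask=32 op monitor interval=5s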
def install(self, target):
old = self.cib_tmpfile
# Force a rebuild
self.cts_cib = None
self.cib_tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml"
self.contents(target)
self.CM.rsh(self.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.cib_tmpfile)
self.cib_tmpfile = old
def contents(self, target=None):
# fencing resource
if self.cts_cib:
return self.cts_cib
if not target:
self.target = self.CM.Env["nodes"][0]
else:
self.target = target
cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''')
self.CM.rsh(self.target, '''echo "%s" > %s''' % (cib_base, self.cib_tmpfile))
#self.CM.rsh.cp(self.cib_tmpfile, "root@%s:%s" % (self.target, self.cib_tmpfile))
nodelist = ""
self.num_nodes = 0
for node in self.CM.Env["nodes"]:
nodelist += node + " "
self.num_nodes = self.num_nodes + 1
no_quorum = "stop"
if self.num_nodes < 3:
no_quorum = "ignore"
self.CM.debug("Cluster only has %d nodes, ignoring quorum" % self.num_nodes)
# The shell no longer functions when the lrmd isn't running, how wonderful
# Start one here and let the cluster clean it up when the full stack starts
# Just hope target has the same location for lrmd
self.CM.rsh(self.target, CTSvars.CRM_DAEMON_DIR+"/lrmd", blocking=0)
# Tell the shell to mind its own business, we know what we're doing
self.CM.rsh(self.target, "crm options check-mode relaxed")
self.CM.rsh(self.target, "crm options skill-level export")
# Now stop the shell from rejecting every update because we've not defined stonith resources yet
self._create('''property stonith-enabled=false''')
self._create('''property start-failure-is-fatal=false pe-input-series-max=5000''')
self._create('''property shutdown-escalation=5min startup-fencing=false batch-limit=10''')
self._create('''property no-quorum-policy=%s expected-quorum-votes=%d''' % (no_quorum, self.num_nodes))
if self.CM.Env["DoBSC"] == 1:
self._create('''property ident-string="Linux-HA TEST configuration file - REMOVEME!!"''')
# Add resources?
if self.CM.Env["CIBResource"] == 1:
self.add_resources()
# Fencing resource
if self.CM.Env["DoFencing"]:
params = None
entries = string.split(self.CM.Env["stonith-params"], ',')
for entry in entries:
(name, value) = string.split(entry, '=')
if name == "hostlist" and value == "all":
value = string.join(self.CM.Env["nodes"], " ")
if params:
params = ("""%s '%s="%s"' """ % (params, name, value))
else:
params = ("""'%s="%s"' """ % (name, value))
if params:
params = "params %s" % params
else:
params = ""
self._create('''primitive FencingChild stonith::%s %s op monitor interval=120s timeout=300 op start interval=0 timeout=180s op stop interval=0 timeout=180s''' % (self.CM.Env["stonith-type"], params))
# Set a threshold for unreliable stonith devices such as the vmware one
self._create('''clone Fencing FencingChild meta globally-unique=false migration-threshold=5''')
if self.CM.cluster_monitor == 1:
self._create('''primitive cluster_mon ocf:pacemaker:ClusterMon params update=10 extra_options="-r -n" user=abeekhof htmlfile=/suse/abeekhof/Export/cluster.html op start interval=0 requires=nothing op monitor interval=5s requires=nothing''')
self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''')
self._create('''property stonith-enabled=%s''' % (self.CM.Env["DoFencing"]))
# generate cib
self.cts_cib = self._show("xml")
if self.cib_tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml":
self.CM.rsh(self.target, "rm -f "+self.cib_tmpfile)
return self.cts_cib
def add_resources(self):
# Group Resource
r1 = self.NewIP()
ip = self.NextIP()
r2 = "r"+ip
self._create('''primitive %s heartbeat::IPaddr params 1=%s/32 op monitor interval=5s''' % (r2, ip))
r3 = self.NewIP()
self._create('''group group-1 %s %s %s''' % (r1, r2, r3))
# Per-node resources
for node in self.CM.Env["nodes"]:
r = self.NewIP("rsc_"+node)
self._create('''location prefer-%s %s rule 100: \#uname eq %s''' % (node, r, node))
# LSB resource
self._create('''primitive lsb-dummy lsb::''' +CTSvars.CTS_home+ '''/LSBDummy op monitor interval=5s''')
self._create('''colocation lsb-with-group INFINITY: lsb-dummy group-1''')
self._create('''order lsb-after-group mandatory: group-1 lsb-dummy symmetrical=true''')
# Migrator
self._create('''primitive migrator ocf:pacemaker:Dummy meta allow-migrate=1 op monitor interval=P10S''')
# Ping the test master
self._create('''primitive ping-1 ocf:pacemaker:pingd params host_list=%s name=connected op monitor interval=120s''' % os.uname()[1])
self._create('''clone Connectivity ping-1 meta globally-unique=false''')
#master slave resource
self._create('''primitive stateful-1 ocf:pacemaker:Stateful op monitor interval=15s op monitor interval=16s role=Master''')
self._create('''ms master-1 stateful-1 meta clone-max=%d clone-node-max=%d master-max=%d master-node-max=%d'''
% (self.num_nodes, 1, 1, 1))
# Require connectivity to run the master
self._create('''location %s-is-connected %s rule -INFINITY: connected lt %d''' % ("m1", "master-1", 1))
# Group with the master
self._create('''colocation group-with-master INFINITY: group-1 master-1:Master''')
self._create('''order group-after-master mandatory: master-1:promote group-1:start symmetrical=true''')
class HASI(CIB10):
def add_resources(self):
# DLM resource
self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''')
self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''')
# O2CB resource
self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''')
self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''')
self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''')
self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''')
class ConfigFactory:
def __init__(self, CM):
self.CM = CM
self.register("pacemaker06", CIB06, CM)
self.register("pacemaker10", CIB10, CM)
self.register("hae", HASI, CM)
def register(self, methodName, constructor, *args, **kargs):
"""register a constructor"""
_args = [constructor]
_args.extend(args)
setattr(self, methodName, apply(ConfigFactoryItem,_args, kargs))
def unregister(self, methodName):
"""unregister a constructor"""
delattr(self, methodName)
def createConfig(self, name="pacemaker-1.0"):
if name == "pacemaker-0.6":
name = "pacemaker06";
elif name == "pacemaker-1.0":
name = "pacemaker10";
elif name == "hasi":
name = "hae";
if hasattr(self, name):
return getattr(self, name)()
else:
self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name)
return self.pacemaker10()
class ConfigFactoryItem:
def __init__(self, function, *args, **kargs):
assert callable(function), "function should be a callable obj"
self._function = function
self._args = args
self._kargs = kargs
def __call__(self, *args, **kargs):
"""call function"""
_args = list(self._args)
_args.extend(args)
_kargs = self._kargs.copy()
_kargs.update(kargs)
return apply(self._function,_args,_kargs)
#CibFactory = ConfigFactory()
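# Illustrative sketch, not part of the patch: ConfigFactoryItem is a small
# partial-application helper -- register() freezes a constructor plus any
# leading arguments, and calling the stored attribute later finishes the
# construction. The same pattern in miniature (Greeter is hypothetical):
#
#   class Greeter:
#       def __init__(self, greeting, name):
#           self.text = "%s, %s" % (greeting, name)
#
#   item = ConfigFactoryItem(Greeter, "Hello")  # freeze constructor + arg
#   g = item("world")                           # -> Greeter("Hello", "world")
#   assert g.text == "Hello, world"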
diff --git a/cts/CM_ais.py b/cts/CM_ais.py
index 8601ae7bd6..1e87606652 100644
--- a/cts/CM_ais.py
+++ b/cts/CM_ais.py
@@ -1,289 +1,286 @@
'''CTS: Cluster Testing System: AIS dependent modules...
'''
__copyright__='''
Copyright (C) 2007 Andrew Beekhof <andrew@suse.de>
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-import os,sys,CTS,CTSaudits,CTStests, warnings
-from CTSvars import *
-from CTS import *
-from CM_lha import crm_lha
-from CTSaudits import ClusterAudit
-from CTStests import *
-from CIB import *
+import os, sys, warnings
+from cts.CTSvars import *
+from cts.CM_lha import crm_lha
+from cts.CTS import Process
#######################################################################
#
# LinuxHA v2 dependent modules
#
#######################################################################
class crm_ais(crm_lha):
'''
The crm version 3 cluster manager class.
It implements the things we need to talk to and manipulate
crm clusters running on top of openais
'''
def __init__(self, Environment, randseed=None):
crm_lha.__init__(self, Environment, randseed=randseed)
self.update({
"Name" : "crm-ais",
"UUIDQueryCmd" : "crmadmin -N",
"EpocheCmd" : "crm_node -e",
"QuorumCmd" : "crm_node -q",
"ParitionCmd" : "crm_node -p",
"Pat:They_stopped" : "%s crmd:.*Node %s: .* state=lost .new",
"Pat:ChildExit" : "Child process .* exited",
# Bad news Regexes. Should never occur.
"BadRegexes" : (
r"ERROR:",
r"CRIT:",
r"Shutting down\.",
r"Forcing shutdown\.",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r", exiting\.",
r"WARN.*Ignoring HA message.*vote.*not in our membership list",
r"pengine.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r"No need to invoke the TE",
r":global_timer_callback",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
r"Child process .* terminated with signal 11",
r"Executing .* fencing operation",
),
})
def errorstoignore(self):
# At some point implement a more elegant solution that
# also produces a report at the end
'''Return the list of errors which are known to be noisy and should be ignored'''
if 1:
return [
"crm_mon:",
"crmadmin:",
"async_notify: strange, client not found",
"ERROR: Message hist queue is filling up"
]
return []
def NodeUUID(self, node):
return node
def ais_components(self):
self.complist = []
self.common_ignore = [
"Pending action:",
"ERROR: crm_log_message_adv:",
"ERROR: MSG: No message to dump",
"pending LRM operations at shutdown",
"Lost connection to the CIB service",
"Connection to the CIB terminated...",
"Sending message to CIB service FAILED",
"apply_xml_diff: Diff application failed!",
"crmd: .*Action A_RECOVER .* not supported",
"pingd: .*ERROR: send_update: Could not send update",
"send_ipc_message: IPC Channel to .* is not connected",
"unconfirmed_actions: Waiting on .* unconfirmed actions",
"cib_native_msgready: Message pending on command channel",
"crmd:.*do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd",
"verify_stopped: Resource .* was active at shutdown. You may ignore this error if it is unmanaged.",
"ERROR: attrd_connection_destroy: Lost connection to attrd",
"nfo: te_fence_node: Executing .* fencing operation",
]
self.complist.append(Process(self, "cib", pats = [
"State transition S_IDLE",
"Respawning .* crmd",
"Respawning .* attrd",
"Lost connection to the CIB service",
"Connection to the CIB terminated...",
"Child process crmd exited .* rc=2",
"Child process attrd exited .* rc=1",
"crmd: .*Input I_TERMINATE from do_recover",
"crmd: .*I_ERROR.*crmd_cib_connection_destroy",
"crmd:.*do_exit: Could not recover from internal error",
], badnews_ignore = self.common_ignore))
self.complist.append(Process(self, "lrmd", pats = [
"State transition S_IDLE",
"LRM Connection failed",
"Respawning .* crmd",
"crmd: .*I_ERROR.*lrm_connection_destroy",
"Child process crmd exited .* rc=2",
"crmd: .*Input I_TERMINATE from do_recover",
"crmd:.*do_exit: Could not recover from internal error",
], badnews_ignore = self.common_ignore))
self.complist.append(Process(self, "crmd", pats = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# "Executing .* fencing operation",
# Only if the node wasn't the DC: "State transition S_IDLE",
"State transition .* -> S_IDLE",
], badnews_ignore = self.common_ignore))
self.complist.append(Process(self, "attrd", pats = [
"crmd: .*ERROR: attrd_connection_destroy: Lost connection to attrd"
], badnews_ignore = self.common_ignore))
self.complist.append(Process(self, "pengine", dc_pats = [
"State transition S_IDLE",
"Respawning .* crmd",
"Child process crmd exited .* rc=2",
"crmd: .*pe_connection_destroy: Connection to the Policy Engine failed",
"crmd: .*I_ERROR.*save_cib_contents",
"crmd: .*Input I_TERMINATE from do_recover",
"crmd:.*do_exit: Could not recover from internal error",
], badnews_ignore = self.common_ignore))
if self.Env["DoFencing"] == 1 :
stonith_ignore = [
"update_failcount: Updating failcount for child_DoFencing",
"ERROR: te_connect_stonith: Sign-in failed: triggered a retry",
]
stonith_ignore.extend(self.common_ignore)
self.complist.append(Process(self, "stonith-ng", process="stonithd", pats = [
"CRIT: stonith_dispatch: Lost connection to the STONITH service",
"tengine_stonith_connection_destroy: Fencing daemon connection failed",
"Attempting connection to fencing daemon",
"te_connect_stonith: Connected",
], badnews_ignore = stonith_ignore))
return self.complist
class crm_whitetank(crm_ais):
'''
The crm version 3 cluster manager class.
It implements the things we need to talk to and manipulate
crm clusters running on top of openais
'''
def __init__(self, Environment, randseed=None):
crm_ais.__init__(self, Environment, randseed=randseed)
self.update({
"Name" : "crm-whitetank",
"StartCmd" : CTSvars.INITDIR+"/openais start",
"StopCmd" : CTSvars.INITDIR+"/openais stop",
"Pat:We_stopped" : "%s.*openais.*pcmk_shutdown: Shutdown complete",
"Pat:They_stopped" : "%s crmd:.*Node %s: .* state=lost .new",
"Pat:They_dead" : "openais:.*Node %s is now: lost",
"Pat:ChildKilled" : "%s openais.*Child process %s terminated with signal 9",
"Pat:ChildRespawn" : "%s openais.*Respawning failed child process: %s",
"Pat:ChildExit" : "Child process .* exited",
})
def Components(self):
self.ais_components()
aisexec_ignore = [
"ERROR: ais_dispatch: Receiving message .* failed",
"crmd: .*I_ERROR.*crmd_cib_connection_destroy",
"cib: .*ERROR: cib_ais_destroy: AIS connection terminated",
#"crmd: .*ERROR: crm_ais_destroy: AIS connection terminated",
"crmd:.*do_exit: Could not recover from internal error",
"crmd: .*I_TERMINATE.*do_recover",
"attrd: .*CRIT: attrd_ais_destroy: Lost connection to OpenAIS service!",
"stonithd: .*ERROR: AIS connection terminated",
]
aisexec_ignore.extend(self.common_ignore)
self.complist.append(Process(self, "aisexec", pats = [
"ERROR: ais_dispatch: AIS connection failed",
"crmd: .*ERROR: do_exit: Could not recover from internal error",
"pengine: .*Scheduling Node .* for STONITH",
"stonithd: .*requests a STONITH operation RESET on node",
"stonithd: .*Succeeded to STONITH the node",
], badnews_ignore = aisexec_ignore))
class crm_flatiron(crm_ais):
'''
The crm version 3 cluster manager class.
It implements the things we need to talk to and manipulate
crm clusters running on top of openais
'''
def __init__(self, Environment, randseed=None):
crm_ais.__init__(self, Environment, randseed=randseed)
self.update({
"Name" : "crm-flatiron",
"StartCmd" : CTSvars.INITDIR+"/corosync start",
"StopCmd" : CTSvars.INITDIR+"/corosync stop",
# The next pattern is too early
# "Pat:We_stopped" : "%s.*Service engine unloaded: Pacemaker Cluster Manager",
# The next pattern would be preferred, but it doesn't always come out
# "Pat:We_stopped" : "%s.*Corosync Cluster Engine exiting with status",
"Pat:We_stopped" : "%s.*Service engine unloaded: corosync cluster quorum service",
"Pat:They_stopped" : "%s crmd:.*Node %s: .* state=lost .new",
"Pat:They_dead" : "corosync:.*Node %s is now: lost",
"Pat:ChildKilled" : "%s corosync.*Child process %s terminated with signal 9",
"Pat:ChildRespawn" : "%s corosync.*Respawning failed child process: %s",
"Pat:ChildExit" : "Child process .* exited",
})
def Components(self):
self.ais_components()
corosync_ignore = [
"ERROR: ais_dispatch: Receiving message .* failed",
"crmd: .*I_ERROR.*crmd_cib_connection_destroy",
"cib: .*ERROR: cib_ais_destroy: AIS connection terminated",
#"crmd: .*ERROR: crm_ais_destroy: AIS connection terminated",
"crmd:.*do_exit: Could not recover from internal error",
"crmd: .*I_TERMINATE.*do_recover",
"attrd: .*CRIT: attrd_ais_destroy: Lost connection to Corosync service!",
"stonithd: .*ERROR: AIS connection terminated",
]
corosync_ignore.extend(self.common_ignore)
# self.complist.append(Process(self, "corosync", pats = [
# "ERROR: ais_dispatch: AIS connection failed",
# "crmd: .*ERROR: do_exit: Could not recover from internal error",
# "pengine: .*Scheduling Node .* for STONITH",
# "stonithd: .*requests a STONITH operation RESET on node",
# "stonithd: .*Succeeded to STONITH the node",
# ], badnews_ignore = corosync_ignore))
return self.complist
diff --git a/cts/CM_lha.py b/cts/CM_lha.py
index 6664c097d1..1b2375ec7c 100755
--- a/cts/CM_lha.py
+++ b/cts/CM_lha.py
@@ -1,607 +1,608 @@
'''CTS: Cluster Testing System: LinuxHA v2 dependent modules...
'''
__copyright__='''
Author: Huang Zhen <zhenhltc@cn.ibm.com>
Copyright (C) 2004 International Business Machines
Additional Audits, Revised Start action, Default Configuration:
Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-import os,sys,CTS,CTSaudits,CTStests, warnings
-from CTSvars import *
-from CTS import *
-from CTSaudits import ClusterAudit
-from CTStests import *
-from CIB import *
+import os, sys, warnings
+from cts import CTS
+from cts.CTSvars import *
+from cts.CTS import *
+from cts.CIB import *
+from cts.CTStests import AuditResource
+
try:
from xml.dom.minidom import *
except ImportError:
sys.__stdout__.write("Python module xml.dom.minidom not found\n")
sys.__stdout__.write("Please install python-xml or similar before continuing\n")
sys.__stdout__.flush()
sys.exit(1)
#######################################################################
#
# LinuxHA v2 dependent modules
#
#######################################################################
class crm_lha(ClusterManager):
'''
The linux-ha version 2 cluster manager class.
It implements the things we need to talk to and manipulate
linux-ha version 2 clusters
'''
def __init__(self, Environment, randseed=None):
ClusterManager.__init__(self, Environment, randseed=randseed)
#HeartbeatCM.__init__(self, Environment, randseed=randseed)
self.fastfail = 0
self.clear_cache = 0
self.cib_installed = 0
self.config = None
self.cluster_monitor = 0
self.use_short_names = 1
self.update({
"Name" : "crm-lha",
"DeadTime" : 300,
"StartTime" : 300, # Max time to start up
"StableTime" : 30,
"StartCmd" : CTSvars.INITDIR+"/heartbeat start > /dev/null 2>&1",
"StopCmd" : CTSvars.INITDIR+"/heartbeat stop > /dev/null 2>&1",
"ElectionCmd" : "crmadmin -E %s",
"StatusCmd" : "crmadmin -t 60000 -S %s 2>/dev/null",
"EpocheCmd" : "crm_node -H -e",
"QuorumCmd" : "crm_node -H -q",
"ParitionCmd" : "crm_node -H -p",
"CibQuery" : "cibadmin -Ql",
"ExecuteRscOp" : "lrmadmin -n %s -E %s %s 0 %d EVERYTIME 2>&1",
"CIBfile" : "%s:"+CTSvars.CRM_CONFIG_DIR+"/cib.xml",
"TmpDir" : "/tmp",
"BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
"FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
# tc qdisc add dev lo root handle 1: cbq avpkt 1000 bandwidth 1000mbit
# tc class add dev lo parent 1: classid 1:1 cbq rate "$RATE"kbps allot 17000 prio 5 bounded isolated
# tc filter add dev lo parent 1: protocol ip prio 16 u32 match ip dst 127.0.0.1 match ip sport $PORT 0xFFFF flowid 1:1
# tc qdisc add dev lo parent 1: netem delay "$LATENCY"msec "$(($LATENCY/4))"msec 10% 2> /dev/null > /dev/null
"ReduceCommCmd" : "",
"RestoreCommCmd" : "tc qdisc del dev lo root",
"LogFileName" : Environment["LogFileName"],
"UUIDQueryCmd" : "crmadmin -N",
"StandbyCmd" : "crm_standby -U %s -v %s 2>/dev/null",
"StandbyQueryCmd" : "crm_standby -GQ -U %s 2>/dev/null",
# Patterns to look for in the log files for various occasions...
"Pat:DC_IDLE" : "crmd.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
"Pat:Local_started" : "%s crmd:.*The local CRM is operational",
"Pat:Slave_started" : "%s crmd:.*State transition.*-> S_NOT_DC",
"Pat:Master_started" : "%s crmd:.* State transition.*-> S_IDLE",
"Pat:We_stopped" : "heartbeat.*%s.*Heartbeat shutdown complete",
"Pat:Logd_stopped" : "%s logd:.*Exiting write process",
"Pat:They_stopped" : "%s crmd:.*LOST:.* %s ",
"Pat:They_dead" : "node %s.*: is dead",
"Pat:TransitionComplete" : "Transition status: Complete: complete",
"Pat:ChildKilled" : "%s heartbeat.*%s.*killed by signal 9",
"Pat:ChildRespawn" : "%s heartbeat.*Respawning client.*%s",
"Pat:ChildExit" : "ERROR: Client .* exited with return code",
# Bad news Regexes. Should never occur.
"BadRegexes" : (
r"ERROR:",
r"CRIT:",
r"Shutting down\.",
r"Forcing shutdown\.",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r", exiting\.",
r"WARN.*Ignoring HA message.*vote.*not in our membership list",
r"pengine.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r"No need to invoke the TE",
r"global_timer_callback:",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
),
})
if self.Env["DoBSC"]:
del self["Pat:They_stopped"]
del self["Pat:Logd_stopped"]
self.Env["use_logd"] = 0
self._finalConditions()
self.check_transitions = 0
self.check_elections = 0
self.CIBsync = {}
self.CibFactory = ConfigFactory(self)
self.cib = self.CibFactory.createConfig(self.Env["Schema"])
def errorstoignore(self):
# At some point implement a more elegant solution that
# also produces a report at the end
'''Return the list of errors which are known to be noisy and should be ignored'''
if 1:
return [
"ERROR: crm_abort: crm_glib_handler: ",
"ERROR: Message hist queue is filling up",
"stonithd: .*CRIT: external_hostlist: 'vmware gethosts' returned an empty hostlist",
"stonithd: .*ERROR: Could not list nodes for stonith RA external/vmware.",
"pengine: Preventing .* from re-starting",
]
return []
def install_config(self, node):
if not self.ns.WaitForNodeToComeUp(node):
self.log("Node %s is not up." % node)
return None
if not self.CIBsync.has_key(node) and self.Env["ClobberCIB"] == 1:
self.CIBsync[node] = 1
self.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
# Only install the CIB on the first node, all the other ones will pick it up from there
if self.cib_installed == 1:
return None
self.cib_installed = 1
if self.Env["CIBfilename"] == None:
self.debug("Installing Generated CIB on node %s" %(node))
self.cib.install(node)
else:
self.log("Installing CIB (%s) on node %s" %(self.Env["CIBfilename"], node))
if 0 != self.rsh.cp(self.Env["CIBfilename"], "root@" + (self["CIBfile"]%node)):
raise ValueError("Can not scp file to %s %d"%(node))
self.rsh(node, "chown "+CTSvars.CRM_DAEMON_USER+" "+CTSvars.CRM_CONFIG_DIR+"/cib.xml")
def prepare(self):
'''Finish the Initialization process. Prepare to test...'''
self.partitions_expected = 1
for node in self.Env["nodes"]:
self.ShouldBeStatus[node] = ""
self.unisolate_node(node)
self.StataCM(node)
def test_node_CM(self, node):
'''Report the status of the cluster manager on a given node'''
watchpats = [ ]
watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
watchpats.append(self["Pat:Slave_started"]%node)
watchpats.append(self["Pat:Master_started"]%node)
idle_watch = CTS.LogWatcher(self["LogFileName"], watchpats)
idle_watch.setwatch()
out = self.rsh(node, self["StatusCmd"]%node, 1)
self.debug("Node %s status: '%s'" %(node, out))
if not out or string.find(out, 'ok') < 0:
if self.ShouldBeStatus[node] == "up":
self.log(
"Node status for %s is %s but we think it should be %s"
%(node, "down", self.ShouldBeStatus[node]))
self.ShouldBeStatus[node]="down"
return 0
if self.ShouldBeStatus[node] == "down":
self.log(
"Node status for %s is %s but we think it should be %s: %s"
%(node, "up", self.ShouldBeStatus[node], out))
self.ShouldBeStatus[node]="up"
# check the output first - because syslog-ng loses messages
if string.find(out, 'S_NOT_DC') != -1:
# Up and stable
return 2
if string.find(out, 'S_IDLE') != -1:
# Up and stable
return 2
# fall back to syslog-ng and wait
if not idle_watch.look():
# just up
self.debug("Warn: Node %s is unstable: %s" %(node, out))
return 1
# Up and stable
return 2
# Is the node up or is the node down
def StataCM(self, node):
'''Report the status of the cluster manager on a given node'''
if self.test_node_CM(node) > 0:
return 1
return None
# Being up and being stable is not the same question...
def node_stable(self, node):
'''Report the status of the cluster manager on a given node'''
if self.test_node_CM(node) == 2:
return 1
self.log("Warn: Node %s not stable" %(node))
return None
def partition_stable(self, nodes, timeout=None):
watchpats = [ ]
watchpats.append("Current ping state: S_IDLE")
watchpats.append(self["Pat:DC_IDLE"])
self.debug("Waiting for cluster stability...")
if timeout == None:
timeout = self["DeadTime"]
idle_watch = CTS.LogWatcher(self["LogFileName"], watchpats, timeout)
idle_watch.setwatch()
any_up = 0
for node in self.Env["nodes"]:
# have each node dump its current state
if self.ShouldBeStatus[node] == "up":
self.rsh(node, self["StatusCmd"] %node, 1)
any_up = 1
if any_up == 0:
self.debug("Cluster is inactive")
return 1
ret = idle_watch.look()
while ret:
self.debug(ret)
for node in nodes:
if re.search(node, ret):
return 1
ret = idle_watch.look()
self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout))
return None
def cluster_stable(self, timeout=None):
partitions = self.find_partitions()
for partition in partitions:
if not self.partition_stable(partition, timeout):
return None
return 1
def is_node_dc(self, node, status_line=None):
rc = 0
if not status_line:
status_line = self.rsh(node, self["StatusCmd"]%node, 1)
if not status_line:
rc = 0
elif string.find(status_line, 'S_IDLE') != -1:
rc = 1
elif string.find(status_line, 'S_INTEGRATION') != -1:
rc = 1
elif string.find(status_line, 'S_FINALIZE_JOIN') != -1:
rc = 1
elif string.find(status_line, 'S_POLICY_ENGINE') != -1:
rc = 1
elif string.find(status_line, 'S_TRANSITION_ENGINE') != -1:
rc = 1
return rc
def active_resources(self, node):
# [SM].* {node} matches Started, Slave, Master
# Stopped won't be matched as it won't include {node}
(rc, output) = self.rsh(node, """crm_resource -c""", None)
resources = []
for line in output:
if re.search("^Resource", line):
tmp = AuditResource(self, line)
if tmp.type == "primitive" and tmp.host == node:
resources.append(tmp.id)
return resources
def ResourceOp(self, resource, op, node, interval=0, app="lrmadmin"):
'''
Execute an operation on a resource
'''
cmd = self["ExecuteRscOp"] % (app, resource, op, interval)
(rc, lines) = self.rsh(node, cmd, None)
if rc == 127:
self.log("Command '%s' failed. Binary not installed?" % cmd)
for line in lines:
self.log("Output: "+line)
return rc
def ResourceLocation(self, rid):
ResourceNodes = []
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
dummy = 0
rc = self.ResourceOp(rid, "monitor", node)
# Strange error codes from remote_py
# 65024 == not installed
# 2048 == 8
# 1792 == 7
# 0 == 0
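# (Most likely os.wait()-style statuses with the exit code in the
#  high byte: 7 -> 7<<8 == 1792, 8 -> 8<<8 == 2048, 254 -> 254<<8 == 65024.)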
if rc == 127:
dummy = 1
elif rc == 254 or rc == 65024:
dummy = 1
#self.debug("%s is not installed on %s: %d" % (rid, node, rc))
elif rc == 0 or rc == 2048 or rc == 8:
ResourceNodes.append(node)
elif rc == 7 or rc == 1792:
dummy = 1
#self.debug("%s is not running on %s: %d" % (rid, node, rc))
else:
# not active on this node?
self.log("Unknown rc code for %s on %s: %d" % (rid, node, rc))
return ResourceNodes
def find_partitions(self):
ccm_partitions = []
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
partition = self.rsh(node, self["ParitionCmd"], 1)
if not partition:
self.log("no partition details for %s" %node)
elif len(partition) > 2:
partition = partition[:-1]
found=0
for a_partition in ccm_partitions:
if partition == a_partition:
found = 1
if found == 0:
self.debug("Adding partition from %s: %s" %(node, partition))
ccm_partitions.append(partition)
else:
self.debug("Partition '%s' from %s is consistent with existing entries" %(partition, node))
else:
self.log("bad partition details for %s" %node)
else:
self.debug("Node %s is down... skipping" %node)
return ccm_partitions
def HasQuorum(self, node_list):
# If we are auditing a partition, then one side will
# have quorum and the other not.
# So the caller needs to tell us which we are checking
# If no value for node_list is specified... assume all nodes
if not node_list:
node_list = self.Env["nodes"]
for node in node_list:
if self.ShouldBeStatus[node] == "up":
quorum = self.rsh(node, self["QuorumCmd"], 1)
if string.find(quorum, "1") != -1:
return 1
elif string.find(quorum, "0") != -1:
return 0
else:
self.log("WARN: Unexpected quorum test result from "+ node +":"+ quorum)
return 0
def Components(self):
complist = []
common_ignore = [
"Pending action:",
"ERROR: crm_log_message_adv:",
"ERROR: MSG: No message to dump",
"pending LRM operations at shutdown",
"Lost connection to the CIB service",
"Connection to the CIB terminated...",
"Sending message to CIB service FAILED",
"crmd: .*Action A_RECOVER .* not supported",
"ERROR: stonithd_op_result_ready: not signed on",
"pingd: .*ERROR: send_update: Could not send update",
"send_ipc_message: IPC Channel to .* is not connected",
"unconfirmed_actions: Waiting on .* unconfirmed actions",
"cib_native_msgready: Message pending on command channel",
"crmd:.*do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd",
"verify_stopped: Resource .* was active at shutdown. You may ignore this error if it is unmanaged.",
]
stonith_ignore = [
"ERROR: stonithd_signon: ",
"update_failcount: Updating failcount for child_DoFencing",
"ERROR: te_connect_stonith: Sign-in failed: triggered a retry",
]
stonith_ignore.extend(common_ignore)
ccm_ignore = [
"ERROR: get_channel_token: No reply message - disconnected"
]
ccm_ignore.extend(common_ignore)
ccm = Process(self, "ccm", triggersreboot=self.fastfail, pats = [
"State transition S_IDLE",
"CCM connection appears to have failed",
"crmd: .*Action A_RECOVER .* not supported",
"crmd: .*Input I_TERMINATE from do_recover",
"Exiting to recover from CCM connection failure",
"crmd:.*do_exit: Could not recover from internal error",
"crmd: .*I_ERROR.*(ccm_dispatch|crmd_cib_connection_destroy)",
"crmd .*exited with return code 2.",
"attrd .*exited with return code 1.",
"cib .*exited with return code 2.",
# Not if it was fenced
# "A new node joined the cluster",
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# "Executing .* fencing operation",
# "tengine_stonith_callback: .*result=0",
# "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE",
# "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
"State transition S_STARTING -> S_PENDING",
], badnews_ignore = common_ignore)
cib = Process(self, "cib", triggersreboot=self.fastfail, pats = [
"State transition S_IDLE",
"Lost connection to the CIB service",
"Connection to the CIB terminated...",
"crmd: .*Input I_TERMINATE from do_recover",
"crmd: .*I_ERROR.*crmd_cib_connection_destroy",
"crmd:.*do_exit: Could not recover from internal error",
"crmd .*exited with return code 2.",
"attrd .*exited with return code 1.",
], badnews_ignore = common_ignore)
lrmd = Process(self, "lrmd", triggersreboot=self.fastfail, pats = [
"State transition S_IDLE",
"LRM Connection failed",
"crmd: .*I_ERROR.*lrm_connection_destroy",
"State transition S_STARTING -> S_PENDING",
"crmd: .*Input I_TERMINATE from do_recover",
"crmd:.*do_exit: Could not recover from internal error",
"crmd .*exited with return code 2.",
], badnews_ignore = common_ignore)
crmd = Process(self, "crmd", triggersreboot=self.fastfail, pats = [
# "WARN: determine_online_status: Node .* is unclean",
# "Scheduling Node .* for STONITH",
# "Executing .* fencing operation",
# "tengine_stonith_callback: .*result=0",
"State transition .* S_IDLE",
"State transition S_STARTING -> S_PENDING",
], badnews_ignore = common_ignore)
pengine = Process(self, "pengine", triggersreboot=self.fastfail, pats = [
"State transition S_IDLE",
"crmd .*exited with return code 2.",
"crmd: .*Input I_TERMINATE from do_recover",
"crmd: .*do_exit: Could not recover from internal error",
"crmd: .*CRIT: pe_connection_destroy: Connection to the Policy Engine failed",
"crmd: .*I_ERROR.*save_cib_contents",
"crmd .*exited with return code 2.",
], badnews_ignore = common_ignore, dc_only=1)
if self.Env["DoFencing"] == 1 :
complist.append(Process(self, "stoniths", triggersreboot=self.fastfail, dc_pats = [
"crmd: .*CRIT: tengine_stonith_connection_destroy: Fencing daemon connection failed",
"Attempting connection to fencing daemon",
"te_connect_stonith: Connected",
], badnews_ignore = stonith_ignore))
if self.fastfail == 0:
ccm.pats.extend([
"attrd .* exited with return code 1",
"ERROR: Respawning client .*attrd",
"cib .* exited with return code 2",
"ERROR: Respawning client .*cib",
"crmd .* exited with return code 2",
"ERROR: Respawning client .*crmd"
])
cib.pats.extend([
"attrd .* exited with return code 1",
"ERROR: Respawning client .*attrd",
"crmd .* exited with return code 2",
"ERROR: Respawning client .*crmd"
])
lrmd.pats.extend([
"crmd .* exited with return code 2",
"ERROR: Respawning client .*crmd"
])
pengine.pats.extend([
"ERROR: Respawning client .*crmd"
])
complist.append(ccm)
complist.append(cib)
complist.append(lrmd)
complist.append(crmd)
complist.append(pengine)
return complist
def NodeUUID(self, node):
lines = self.rsh(node, self["UUIDQueryCmd"], 1)
for line in lines:
self.debug("UUIDLine:"+ line)
m = re.search(r'%s.+\((.+)\)' % node, line)
if m:
return m.group(1)
return ""
def StandbyStatus(self, node):
out=self.rsh(node, self["StandbyQueryCmd"]%node, 1)
if not out:
return "off"
out = out[:-1]
self.debug("Standby result: "+out)
return out
# status == "on" : Enter Standby mode
# status == "off": Enter Active mode
def SetStandbyMode(self, node, status):
current_status = self.StandbyStatus(node)
cmd = self["StandbyCmd"] % (node, status)
ret = self.rsh(node, cmd)
return True
#######################################################################
#
# A little test code...
#
# Which you are advised to completely ignore...
#
#######################################################################
if __name__ == '__main__':
pass
diff --git a/cts/CTS.py b/cts/CTS.py
index 20043d5784..e1dfb96c44 100755
--- a/cts/CTS.py
+++ b/cts/CTS.py
@@ -1,1201 +1,1201 @@
'''CTS: Cluster Testing System: Main module
Classes related to testing high-availability clusters...
'''
__copyright__='''
Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
Licensed under the GNU GPL.
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import types, string, select, sys, time, re, os, struct, signal
import base64, pickle, binascii
from UserDict import UserDict
from syslog import *
from subprocess import Popen,PIPE
-from CTSvars import *
+from cts.CTSvars import *
class RemoteExec:
'''This is an abstract remote execution class. It runs a command on another
machine - somehow. The somehow is up to us. This particular
class uses ssh.
Most of the work is done by fork/exec of ssh or scp.
'''
def __init__(self, Env=None):
self.Env = Env
# -n: no stdin, -x: no X11
self.Command = "ssh -l root -n -x"
# -B: batch mode, -q: no stats (quiet)
self.CpCommand = "scp -B -q"
self.OurNode=string.lower(os.uname()[1])
def enable_qarsh(self):
# http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/
self.log("Using QARSH for connections to cluster nodes")
- self.Command = "qarsh -l root HOME=/root"
+ self.Command = "qarsh -l root"
self.CpCommand = "qacp"
def _fixcmd(self, cmd):
return re.sub("\'", "'\\''", cmd)
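# The substitution rewrites each embedded single quote as '\'' so the
# command survives the outer single quotes added by _cmd() below, e.g.
#   _fixcmd("echo 'hi'")  ->  echo '\''hi'\''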
def _cmd(self, *args):
'''Compute the string that will run the given command on the
given remote system'''
args= args[0]
sysname = args[0]
command = args[1]
#print "sysname: %s, us: %s" % (sysname, self.OurNode)
if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost":
ret = self._fixcmd(command)
else:
ret = self.Command + " " + sysname + " '" + self._fixcmd(command) + "'"
#print ("About to run %s\n" % ret)
return ret
def log(self, args):
if not self.Env:
print (args)
else:
self.Env.log(args)
def debug(self, args):
if not self.Env:
print (args)
else:
self.Env.debug(args)
def __call__(self, node, command, stdout=0, blocking=1):
'''Run the given command on the given remote system
If you call this class like a function, this is the function that gets
called. It just runs it roughly as though it were a system() call
on the remote machine. The first argument is name of the machine to
run it on.
'''
rc = 0
result = None
if not blocking:
proc = Popen(self._cmd([node, command]),
stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)
self.debug("cmd: async: target=%s, rc=%d: %s" % (node, proc.pid, command))
if proc.pid > 0:
return 0
return -1
proc = Popen(self._cmd([node, command]),
stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)
if stdout == 1:
result = proc.stdout.readline()
else:
result = proc.stdout.readlines()
proc.stdout.close()
rc = proc.wait()
self.debug("cmd: target=%s, rc=%d: %s" % (node, rc, command))
if stdout == 1:
return result
if proc.stderr:
errors = proc.stderr.readlines()
proc.stderr.close()
for err in errors:
self.debug("cmd: stderr: %s" % err)
if stdout == 0:
if result:
for line in result:
self.debug("cmd: stdout: %s" % line)
return rc
return (rc, result)
def cp(self, *args):
'''Perform a remote copy'''
cpstring = self.CpCommand
for arg in args:
cpstring = cpstring + " \'" + arg + "\'"
rc = os.system(cpstring)
self.debug("cmd: rc=%d: %s" % (rc, cpstring))
return rc
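# Illustrative usage, not part of the patch; the return shape follows the
# stdout/blocking flags handled in __call__() above:
#
#   rsh = RemoteExec()
#   rc = rsh("node1", "uptime")                     # stdout=0: exit code only
#   line = rsh("node1", "uname -n", 1)              # stdout=1: first output line
#   (rc, lines) = rsh("node1", "crm_mon -1", None)  # stdout=None: (rc, all lines)
#   rsh("node1", "logger hello", blocking=0)        # async, returns immediately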
class LogWatcher:
'''This class watches logs for messages that fit certain regular
expressions. Watching logs for events isn't the ideal way
to do business, but it's better than nothing :-)
On the other hand, this class is really pretty cool ;-)
The way you use this class is as follows:
Construct a LogWatcher object
Call setwatch() when you want to start watching the log
Call look() to scan the log looking for the patterns
'''
def __init__(self, log, regexes, timeout=10, debug=None):
'''This is the constructor for the LogWatcher class. It takes a
log name to watch, and a list of regular expressions to watch for.
'''
# Validate our arguments. Better sooner than later ;-)
for regex in regexes:
assert re.compile(regex)
self.regexes = regexes
self.filename = log
self.debug=debug
self.whichmatch = -1
self.unmatched = None
if self.debug:
print "Debug now on for for log", log
self.Timeout = int(timeout)
self.returnonlymatch = None
if not os.access(log, os.R_OK):
raise ValueError("File [" + log + "] not accessible (r)")
def setwatch(self, frombeginning=None):
'''Mark the place to start watching the log from.
'''
self.file = open(self.filename, "r")
self.size = os.path.getsize(self.filename)
if not frombeginning:
self.file.seek(0, 2) # 2 means seek to EOF
def ReturnOnlyMatch(self, onlymatch=1):
'''Make look() return only the given match group, not the whole line.
'''
self.returnonlymatch = onlymatch
def look(self, timeout=None):
'''Examine the log looking for the given patterns.
It starts looking from the place marked by setwatch().
This function looks in the file in the fashion of tail -f.
It properly recovers from log file truncation, but not from
removing and recreating the log. It would be nice if it
recovered from this as well :-)
We return the first line which matches any of our patterns.
'''
last_line=None
first_line=None
if timeout == None: timeout = self.Timeout
done=time.time()+timeout+1
if self.debug:
print "starting search: timeout=%d" % timeout
for regex in self.regexes:
print "Looking for regex: ", regex
while (timeout <= 0 or time.time() <= done):
newsize=os.path.getsize(self.filename)
if self.debug > 4: print "newsize = %d" % newsize
if newsize < self.size:
# Somebody truncated the log!
if self.debug: print "Log truncated!"
self.setwatch(frombeginning=1)
continue
if newsize > self.file.tell():
line=self.file.readline()
if self.debug > 2: print "Looking at line:", line
if line:
last_line=line
if not first_line:
first_line=line
if self.debug: print "First line: "+ line
which=-1
for regex in self.regexes:
which=which+1
if self.debug > 3: print "Comparing line to ", regex
#matchobj = re.search(string.lower(regex), string.lower(line))
matchobj = re.search(regex, line)
if matchobj:
self.whichmatch=which
if self.returnonlymatch:
return matchobj.group(self.returnonlymatch)
else:
if self.debug: print "Returning line"
return line
newsize=os.path.getsize(self.filename)
if self.file.tell() == newsize:
if timeout > 0:
time.sleep(0.025)
else:
if self.debug: print "End of file"
if self.debug: print "Last line: "+last_line
return None
if self.debug: print "Timeout"
if self.debug: print "Last line: "+last_line
return None
def lookforall(self, timeout=None, allow_multiple_matches=None):
'''Examine the log looking for ALL of the given patterns.
It starts looking from the place marked by setwatch().
We return when the timeout is reached, or when we have found
ALL of the regexes that were part of the watch
'''
if timeout == None: timeout = self.Timeout
save_regexes = self.regexes
returnresult = []
while (len(self.regexes) > 0):
oneresult = self.look(timeout)
if not oneresult:
self.unmatched = self.regexes
self.matched = returnresult
self.regexes = save_regexes
return None
returnresult.append(oneresult)
if not allow_multiple_matches:
del self.regexes[self.whichmatch]
else:
# Allow multiple regexes to match a single line
tmp_regexes = self.regexes
self.regexes = []
which = 0
for regex in tmp_regexes:
matchobj = re.search(regex, oneresult)
if not matchobj:
self.regexes.append(regex)
self.unmatched = None
self.matched = returnresult
self.regexes = save_regexes
return returnresult
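# Illustrative usage, not part of the patch:
#
#   watch = LogWatcher("/var/log/messages", ["pattern1", "pattern2"], timeout=60)
#   watch.setwatch()                  # seek to EOF; only new lines are scanned
#   # ... provoke the event being tested ...
#   if watch.lookforall() is None:    # None on timeout
#       print "Unmatched:", watch.unmatched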
class NodeStatus:
def __init__(self, Env):
self.Env = Env
def IsNodeBooted(self, node):
'''Return TRUE if the given node is booted (responds to pings)'''
return self.Env.rsh("localhost", "ping -nq -c1 -w1 %s" % node) == 0
def IsSshdUp(self, node):
#return self.rsh(node, "true") == 0;
rc = self.Env.rsh(node, "true")
return rc == 0
def WaitForNodeToComeUp(self, node, Timeout=300):
'''Return TRUE when given node comes up, or None/FALSE if timeout'''
timeout=Timeout
anytimeouts=0
while timeout > 0:
if self.IsNodeBooted(node) and self.IsSshdUp(node):
if anytimeouts:
# Fudge to wait for the system to finish coming up
time.sleep(30)
self.Env.debug("Node %s now up" % node)
return 1
time.sleep(30)
if (not anytimeouts):
self.Env.debug("Waiting for node %s to come up" % node)
anytimeouts=1
timeout = timeout - 1
self.Env.log("%s did not come up within %d tries" % (node, Timeout))
answer = raw_input('Continue? [nY]')
if answer and answer == "n":
raise ValueError("%s did not come up within %d tries" % (node, Timeout))
def WaitForAllNodesToComeUp(self, nodes, timeout=300):
'''Return TRUE when all nodes come up, or FALSE if timeout'''
for node in nodes:
if not self.WaitForNodeToComeUp(node, timeout):
return None
return 1
class ClusterManager(UserDict):
'''The Cluster Manager class.
This is a subclass of the Python dictionary class.
(this is because it contains lots of {name,value} pairs,
not because its behavior is terribly similar to a
dictionary in other ways.)
This is an abstract class which implements high-level
operations on the cluster and/or its cluster managers.
Actual cluster manager classes are subclassed from this type.
One of the things we do is track the state we think every node should
be in.
'''
def __InitialConditions(self):
#if os.geteuid() != 0:
# raise ValueError("Must Be Root!")
None
def _finalConditions(self):
for key in self.keys():
if self[key] == None:
raise ValueError("Improper derivation: self[" + key
+ "] must be overridden by subclass.")
def __init__(self, Environment, randseed=None):
self.Env = Environment
self.__InitialConditions()
self.clear_cache = 0
self.TestLoggingLevel=0
self.data = {
"up" : "up", # Status meaning up
"down" : "down", # Status meaning down
"StonithCmd" : "stonith -t baytech -p '10.10.10.100 admin admin' %s",
"DeadTime" : 30, # Max time to detect dead node...
"StartTime" : 90, # Max time to start up
#
# These next values need to be overridden in the derived class.
#
"Name" : None,
"StartCmd" : None,
"StopCmd" : None,
"StatusCmd" : None,
#"RereadCmd" : None,
"BreakCommCmd" : None,
"FixCommCmd" : None,
#"TestConfigDir" : None,
"LogFileName" : None,
#"Pat:Master_started" : None,
#"Pat:Slave_started" : None,
"Pat:We_stopped" : None,
"Pat:They_stopped" : None,
"BadRegexes" : None, # A set of "bad news" regexes
# to apply to the log
}
self.rsh = self.Env.rsh
self.ShouldBeStatus={}
self.OurNode=string.lower(os.uname()[1])
self.ShouldBeStatus={}
self.ns = NodeStatus(self.Env)
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return []
def log(self, args):
self.Env.log(args)
def debug(self, args):
self.Env.debug(args)
def prepare(self):
'''Finish the Initialization process. Prepare to test...'''
for node in self.Env["nodes"]:
if self.StataCM(node):
self.ShouldBeStatus[node]="up"
else:
self.ShouldBeStatus[node]="down"
self.unisolate_node(node)
def upcount(self):
'''How many nodes are up?'''
count=0
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node]=="up":
count=count+1
return count
def install_config(self, node):
return None
def clear_all_caches(self):
if self.clear_cache:
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "down":
self.debug("Removing cache file on: "+node)
self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
else:
self.debug("NOT Removing cache file on: "+node)
def StartaCM(self, node):
'''Start up the cluster manager on a given node'''
self.debug("Starting %s on node %s" %(self["Name"], node))
ret = 1
if not self.ShouldBeStatus.has_key(node):
self.ShouldBeStatus[node] = "down"
if self.ShouldBeStatus[node] != "down":
return 1
patterns = []
# Technically we should always be able to notice ourselves starting
patterns.append(self["Pat:Local_started"] % node)
if self.upcount() == 0:
patterns.append(self["Pat:Master_started"] % node)
else:
patterns.append(self["Pat:Slave_started"] % node)
watch = LogWatcher(
self["LogFileName"], patterns, timeout=self["StartTime"]+10)
watch.setwatch()
self.install_config(node)
self.ShouldBeStatus[node] = "any"
if self.StataCM(node) and self.cluster_stable(self["DeadTime"]):
self.log ("%s was already started" %(node))
return 1
# Clear out the host cache so autojoin can be exercised
if self.clear_cache:
self.debug("Removing cache file on: "+node)
self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
if not(self.Env["valgrind-tests"]):
startCmd = self["StartCmd"]
else:
if self.Env["valgrind-prefix"]:
prefix = self.Env["valgrind-prefix"]
else:
prefix = "cts"
startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"])
if self.rsh(node, startCmd) != 0:
self.log ("Warn: Start command failed on node %s" %(node))
return None
self.ShouldBeStatus[node]="up"
watch_result = watch.lookforall()
if watch.unmatched:
for regex in watch.unmatched:
self.log ("Warn: Startup pattern not found: %s" %(regex))
if watch_result:
#self.debug("Found match: "+ repr(watch_result))
self.cluster_stable(self["DeadTime"])
return 1
if self.StataCM(node) and self.cluster_stable(self["DeadTime"]):
return 1
self.log ("Warn: Start failed for node %s" %(node))
return None
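# Illustrative, not part of the patch (the flag values below are made up):
# with valgrind-tests set, startCmd expands to something like
#   G_SLICE=always-malloc HA_VALGRIND_ENABLED='crmd' \
#   VALGRIND_OPTS='--leak-check=full --log-file=/tmp/cts-%p.valgrind' \
#   /etc/init.d/heartbeat start > /dev/null 2>&1
# where valgrind substitutes each daemon's PID for %p in the log file name.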
def StartaCMnoBlock(self, node):
'''Start up the cluster manager on a given node in non-blocking mode'''
self.debug("Starting %s on node %s" %(self["Name"], node))
# Clear out the host cache so autojoin can be exercised
if self.clear_cache:
self.debug("Removing cache file on: "+node)
self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
if not(self.Env["valgrind-tests"]):
startCmd = self["StartCmd"]
else:
if self.Env["valgrind-prefix"]:
prefix = self.Env["valgrind-prefix"]
else:
prefix = "cts"
startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"])
self.rsh(node, startCmd, blocking=0)
self.ShouldBeStatus[node]="up"
return 1
def StopaCM(self, node):
'''Stop the cluster manager on a given node'''
self.debug("Stopping %s on node %s" %(self["Name"], node))
if self.ShouldBeStatus[node] != "up":
return 1
if self.rsh(node, self["StopCmd"]) == 0:
self.ShouldBeStatus[node]="down"
self.cluster_stable(self["DeadTime"])
return 1
else:
self.log ("Could not stop %s on node %s" %(self["Name"], node))
return None
def StopaCMnoBlock(self, node):
'''Stop the cluster manager on a given node in non-blocking mode'''
self.debug("Stopping %s on node %s" %(self["Name"], node))
self.rsh(node, self["StopCmd"], blocking=0)
self.ShouldBeStatus[node]="down"
return 1
def cluster_stable(self, timeout = None):
time.sleep(self["StableTime"])
return 1
def node_stable(self, node):
return 1
def RereadCM(self, node):
'''Force the cluster manager on a given node to reread its config
This may be a no-op on certain cluster managers.
'''
rc=self.rsh(node, self["RereadCmd"])
if rc == 0:
return 1
else:
self.log ("Could not force %s on node %s to reread its config"
% (self["Name"], node))
return None
def StataCM(self, node):
'''Report the status of the cluster manager on a given node'''
out=self.rsh(node, self["StatusCmd"], 1)
ret= (string.find(out, 'stopped') == -1)
try:
if ret:
if self.ShouldBeStatus[node] == "down":
self.log(
"Node status for %s is %s but we think it should be %s"
% (node, "up", self.ShouldBeStatus[node]))
else:
if self.ShouldBeStatus[node] == "up":
self.log(
"Node status for %s is %s but we think it should be %s"
% (node, "down", self.ShouldBeStatus[node]))
except KeyError: pass
if ret: self.ShouldBeStatus[node]="up"
else: self.ShouldBeStatus[node]="down"
return ret
def startall(self, nodelist=None):
'''Start the cluster manager on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
ret = 1
map = {}
if not nodelist:
nodelist=self.Env["nodes"]
for node in nodelist:
if self.ShouldBeStatus[node] == "down":
if not self.StartaCM(node):
ret = 0
return ret
def stopall(self, nodelist=None):
'''Stop the cluster managers on every node in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
ret = 1
map = {}
if not nodelist:
nodelist=self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
if not self.StopaCM(node):
ret = 0
return ret
def rereadall(self, nodelist=None):
'''Force the cluster managers on every node in the cluster
to reread their config files. We can do it on a subset of the
cluster if nodelist is not None.
'''
map = {}
if not nodelist:
nodelist=self.Env["nodes"]
for node in self.Env["nodes"]:
if self.ShouldBeStatus[node] == "up":
self.RereadCM(node)
def statall(self, nodelist=None):
'''Return the status of the cluster managers in the cluster.
We can do it on a subset of the cluster if nodelist is not None.
'''
result={}
if not nodelist:
nodelist=self.Env["nodes"]
for node in nodelist:
if self.StataCM(node):
result[node] = "up"
else:
result[node] = "down"
return result
def isolate_node(self, target, nodes=None):
'''isolate the communication between the nodes'''
if not nodes:
nodes = self.Env["nodes"]
for node in nodes:
if node != target:
rc = self.rsh(target, self["BreakCommCmd"] % node)
if rc != 0:
self.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
return None
else:
self.debug("Communication cut between %s and %s" % (target, node))
return 1
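# Illustrative, not part of the patch: with the crm_lha command table above,
# isolate_node("n1") runs on n1 one
#   iptables -A INPUT -s <peer> -j DROP
# per other node, and unisolate_node() later removes the rules from both
# sides with the matching iptables -D command.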
def unisolate_node(self, target, nodes=None):
'''fix the communication between the nodes'''
if not nodes:
nodes = self.Env["nodes"]
for node in nodes:
if node != target:
restored = 0
# Limit the amount of time we have asynchronous connectivity for
# Restore both sides as simultaneously as possible
self.rsh(target, self["FixCommCmd"] % node, blocking=0)
self.rsh(node, self["FixCommCmd"] % target, blocking=0)
self.debug("Communication restored between %s and %s" % (target, node))
def reducecomm_node(self,node):
'''reduce the communication between the nodes'''
rc = self.rsh(node, self["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"]))
if rc == 0:
return 1
else:
self.log("Could not reduce the communication between the nodes from node: %s" % node)
return None
def restorecomm_node(self,node):
'''restore the saved communication between the nodes'''
rc = 0
if float(self.Env["XmitLoss"])!=0 or float(self.Env["RecvLoss"])!=0 :
rc = self.rsh(node, self["RestoreCommCmd"]);
if rc == 0:
return 1
else:
self.log("Could not restore the communication between the nodes from node: %s" % node)
return None
def HasQuorum(self, node_list):
"Return TRUE if the cluster currently has quorum"
# If we are auditing a partition, then one side will
# have quorum and the other not.
# So the caller needs to tell us which we are checking
# If no value for node_list is specified... assume all nodes
raise ValueError("Abstract Class member (HasQuorum)")
def Components(self):
raise ValueError("Abstract Class member (Components)")
def oprofileStart(self, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileStart(n)
elif node in self.Env["oprofile"]:
self.debug("Enabling oprofile on %s" % node)
self.rsh(node, "opcontrol --init")
self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
self.rsh(node, "opcontrol --start")
self.rsh(node, "opcontrol --reset")
def oprofileSave(self, test, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileSave(test, n)
elif node in self.Env["oprofile"]:
self.rsh(node, "opcontrol --dump")
self.rsh(node, "opcontrol --save=cts.%d" % test)
# Read back with: opreport -l session:cts.0 image:/usr/lib/heartbeat/c*
if None:
self.rsh(node, "opcontrol --reset")
else:
self.oprofileStop(node)
self.oprofileStart(node)
def oprofileStop(self, node=None):
if not node:
for n in self.Env["oprofile"]:
self.oprofileStop(n)
elif node in self.Env["oprofile"]:
self.debug("Stopping oprofile on %s" % node)
self.rsh(node, "opcontrol --reset")
self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
class Resource:
'''
This is an HA resource (not a resource group).
A resource group is just an ordered list of Resource objects.
'''
def __init__(self, cm, rsctype=None, instance=None):
self.CM = cm
self.ResourceType = rsctype
self.Instance = instance
self.needs_quorum = 1
def Type(self):
return self.ResourceType
def Instance(self, nodename):
return self.Instance
def IsRunningOn(self, nodename):
'''
This member function returns true if our resource is running
on the given node in the cluster.
It is analogous to the "status" operation on SystemV init scripts and
heartbeat scripts. FailSafe calls it the "exclusive" operation.
'''
raise ValueError("Abstract Class member (IsRunningOn)")
return None
def IsWorkingCorrectly(self, nodename):
'''
This member function returns true if our resource is operating
correctly on the given node in the cluster.
Heartbeat does not require this operation, but it might be called
the Monitor operation, which is what FailSafe calls it.
For remotely monitorable resources (like IP addresses), they *should*
be monitored remotely for testing.
'''
raise ValueError("Abstract Class member (IsWorkingCorrectly)")
return None
def Start(self, nodename):
'''
This member function starts or activates the resource.
'''
raise ValueError("Abstract Class member (Start)")
return None
def Stop(self, nodename):
'''
This member function stops or deactivates the resource.
'''
raise ValueError("Abstract Class member (Stop)")
return None
def __repr__(self):
if (self.Instance and len(self.Instance) > 1):
return "{" + self.ResourceType + "::" + self.Instance + "}"
else:
return "{" + self.ResourceType + "}"
class Component:
def kill(self, node):
None
class Process(Component):
def __init__(self, cm, name, process=None, dc_only=0, pats=[], dc_pats=[], badnews_ignore=[], triggersreboot=0):
self.name = str(name)
self.dc_only = dc_only
self.pats = pats
self.dc_pats = dc_pats
self.CM = cm
self.badnews_ignore = badnews_ignore
self.triggersreboot = triggersreboot
if process:
self.proc = str(process)
else:
self.proc = str(name)
self.KillCmd = "killall -9 " + self.proc
def kill(self, node):
if self.CM.rsh(node, self.KillCmd) != 0:
self.CM.log ("ERROR: Kill %s failed on node %s" %(self.name,node))
return None
return 1
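# For example, Process(cm, "crmd") (a hypothetical instance) builds the
# command "killall -9 crmd" and runs it on the target node in kill().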
class ScenarioComponent:
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
'''Return TRUE if the current ScenarioComponent is applicable
in the given LabEnvironment given to the constructor.
'''
raise ValueError("Abstract Class member (IsApplicable)")
def SetUp(self, CM):
'''Set up the given ScenarioComponent'''
raise ValueError("Abstract Class member (Setup)")
def TearDown(self, CM):
'''Tear down (undo) the given ScenarioComponent'''
raise ValueError("Abstract Class member (Setup)")
class Scenario:
(
'''The basic idea of a scenario is that of an ordered list of
ScenarioComponent objects. Each ScenarioComponent is SetUp() in turn,
and then after the tests have been run, they are torn down using TearDown()
(in reverse order).
A Scenario is applicable to a particular cluster manager iff each
ScenarioComponent is applicable.
A partially set up scenario is torn down if it fails during setup.
''')
def __init__(self, Components):
"Initialize the Scenario from the list of ScenarioComponents"
for comp in Components:
if not issubclass(comp.__class__, ScenarioComponent):
raise ValueError("Init value must be subclass of"
" ScenarioComponent")
self.Components = Components
def IsApplicable(self):
(
'''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()
'''
)
for comp in self.Components:
if not comp.IsApplicable():
return None
return 1
def SetUp(self, CM):
'''Set up the Scenario. Return TRUE on success.'''
j=0
while j < len(self.Components):
if not self.Components[j].SetUp(CM):
# OOPS! We failed. Tear partial setups down.
CM.log("Tearing down partial setup")
self.TearDown(CM, j)
return None
j=j+1
return 1
def TearDown(self, CM, max=None):
'''Tear Down the Scenario - in reverse order.'''
if max == None:
max = len(self.Components)-1
j=max
while j >= 0:
self.Components[j].TearDown(CM)
j=j-1
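# A minimal usage sketch, mirroring how CTSlab.py builds its scenario:
#   scenario = Scenario([InitClusterManager(Environment), PacketLoss(Environment)])
#   if scenario.SetUp(cm):
#       ... run the tests ...
#       scenario.TearDown(cm)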
class InitClusterManager(ScenarioComponent):
(
'''InitClusterManager is the most basic of ScenarioComponents.
This ScenarioComponent simply starts the cluster manager on all the nodes.
It is fairly robust as it waits for all nodes to come up before starting
as they might have been rebooted or crashed for some reason beforehand.
''')
def __init__(self, Env):
pass
def IsApplicable(self):
'''InitClusterManager is so generic it is always Applicable'''
return 1
def SetUp(self, CM):
'''Basic Cluster Manager startup. Start everything'''
CM.prepare()
# Clear out the cobwebs ;-)
self.TearDown(CM)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all nodes.")
return CM.startall()
def TearDown(self, CM):
'''Stop the cluster manager on all the nodes (undo the setup)'''
# Stop the cluster manager everywhere
CM.log("Stopping Cluster Manager on all nodes")
return CM.stopall()
class PingFest(ScenarioComponent):
(
'''PingFest does a flood ping to each node in the cluster from the test machine.
If the LabEnvironment Parameter PingSize is set, it will be used as the size
of ping packet requested (via the -s option). If it is not set, it defaults
to 1024 bytes.
According to the manual page for ping:
Outputs packets as fast as they come back or one hundred times per
second, whichever is more. For every ECHO_REQUEST sent a period ``.''
is printed, while for every ECHO_REPLY received a backspace is printed.
This provides a rapid display of how many packets are being dropped.
Only the super-user may use this option. This can be very hard on a
network and should be used with caution.
''' )
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
'''PingFests are always applicable ;-)
'''
return 1
def SetUp(self, CM):
'''Start the PingFest!'''
self.PingSize=1024
if CM.Env.has_key("PingSize"):
self.PingSize=CM.Env["PingSize"]
CM.log("Starting %d byte flood pings" % self.PingSize)
self.PingPids=[]
for node in CM.Env["nodes"]:
self.PingPids.append(self._pingchild(node))
CM.log("Ping PIDs: " + repr(self.PingPids))
return 1
def TearDown(self, CM):
'''Stop it right now! My ears are pinging!!'''
for pid in self.PingPids:
if pid != None:
CM.log("Stopping ping process %d" % pid)
os.kill(pid, signal.SIGKILL)
def _pingchild(self, node):
Args = ["ping", "-qfn", "-s", str(self.PingSize), node]
sys.stdin.flush()
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid < 0:
self.Env.log("Cannot fork ping child")
return None
if pid > 0:
return pid
# Otherwise, we're the child process.
os.execvp("ping", Args)
self.Env.log("Cannot execvp ping: " + repr(Args))
sys.exit(1)
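# With the default PingSize, each child ends up executing something like
# "ping -qfn -s 1024 <node>" until TearDown() kills it with SIGKILL.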
class PacketLoss(ScenarioComponent):
(
'''
It would be useful to do some testing of CTS with a modest amount of packet loss
enabled - so we could see that everything runs like it should with a certain
amount of packet loss present.
''')
def IsApplicable(self):
'''always Applicable'''
return 1
def SetUp(self, CM):
'''Reduce the reliability of communications'''
if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
return 1
for node in CM.Env["nodes"]:
CM.reducecomm_node(node)
CM.log("Reduce the reliability of communications")
return 1
def TearDown(self, CM):
'''Fix the reliability of communications'''
if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
return 1
for node in CM.Env["nodes"]:
CM.unisolate_node(node)
CM.log("Fix the reliability of communications")
class BasicSanityCheck(ScenarioComponent):
(
'''
''')
def IsApplicable(self):
return self.Env["DoBSC"]
def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
self.TearDown(CM)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on BSC node(s).")
return CM.startall()
def TearDown(self, CM):
CM.log("Stopping Cluster Manager on BSC node(s).")
return CM.stopall()
class Benchmark(ScenarioComponent):
(
'''
''')
def IsApplicable(self):
return self.Env["benchmark"]
def SetUp(self, CM):
CM.prepare()
# Clear out the cobwebs
self.TearDown(CM)
# Now start the Cluster Manager on all the nodes.
CM.log("Starting Cluster Manager on all node(s).")
return CM.startall()
def TearDown(self, CM):
CM.log("Stopping Cluster Manager on all node(s).")
return CM.stopall()
class RollingUpgrade(ScenarioComponent):
(
'''
Test a rolling upgrade between two versions of the stack
''')
def __init__(self, Env):
self.Env = Env
def IsApplicable(self):
if not self.Env["rpm-dir"]:
return None
if not self.Env["current-version"]:
return None
if not self.Env["previous-version"]:
return None
return 1
def install(self, node, version):
target_dir = "/tmp/rpm-%s" % version
src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
self.CM.rsh(node, "mkdir -p %s" % target_dir)
self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir))
rc = self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir))
# ScenarioComponent has no success() helper; report based on the rpm exit code
if rc != 0:
return None
return 1
def upgrade(self, node):
return self.install(node, self.CM.Env["current-version"])
def downgrade(self, node):
return self.install(node, self.CM.Env["previous-version"])
def SetUp(self, CM):
self.CM = CM # install() and downgrade() below rely on self.CM
CM.prepare()
# Clear out the cobwebs
CM.stopall()
CM.log("Downgrading all nodes to %s." % self.Env["previous-version"])
for node in self.Env["nodes"]:
if not self.downgrade(node):
CM.log("Couldn't downgrade %s" % node)
return None
return 1
def TearDown(self, CM):
self.CM = CM # upgrade() below relies on self.CM
# Stop everything
CM.log("Stopping Cluster Manager on Upgrade nodes.")
CM.stopall()
CM.log("Upgrading all nodes to %s." % self.Env["current-version"])
for node in self.Env["nodes"]:
if not self.upgrade(node):
CM.log("Couldn't upgrade %s" % node)
return None
return 1
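# The expected layout (assumed from src_dir above) is one directory of
# packages per version under rpm-dir, e.g. <rpm-dir>/<version>/*.rpm.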
diff --git a/cts/CTSaudits.py b/cts/CTSaudits.py
index d00ce92703..d2dc3b8152 100755
--- a/cts/CTSaudits.py
+++ b/cts/CTSaudits.py
@@ -1,751 +1,751 @@
'''CTS: Cluster Testing System: Audit module
'''
__copyright__='''
Copyright (C) 2000, 2001,2005 Alan Robertson <alanr@unix.sh>
Licensed under the GNU GPL.
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import time, os, string, re
-import CTS
+from cts import CTS
class ClusterAudit:
def __init__(self, cm):
self.CM = cm
def __call__(self):
raise ValueError("Abstract Class member (__call__)")
def is_applicable(self):
'''Return TRUE if we are applicable in the current test configuration'''
raise ValueError("Abstract Class member (is_applicable)")
return 1
def log(self, args):
self.CM.log("audit: %s" % args)
def debug(self, args):
self.CM.debug("audit: %s" % args)
def name(self):
raise ValueError("Abstract Class member (name)")
AllAuditClasses = [ ]
class LogAudit(ClusterAudit):
def name(self):
return "LogAudit"
def __init__(self, cm):
self.CM = cm
def RestartClusterLogging(self, nodes=None):
if not nodes:
nodes = self.CM.Env["nodes"]
self.CM.log("Restarting logging on: %s" % repr(nodes))
for node in nodes:
cmd=self.CM.Env["logrestartcmd"]
if self.CM.rsh(node, cmd, blocking=0) != 0:
self.CM.log ("ERROR: Cannot restart logging on %s [%s failed]" % (node, cmd))
def TestLogging(self):
patterns= []
prefix="Test message from"
for node in self.CM.Env["nodes"]:
# Look for the node name in two places to make sure
# that syslog is logging with the correct hostname
patterns.append("%s.*%s %s" % (node, prefix, node))
watch = CTS.LogWatcher(self.CM.Env["LogFileName"], patterns, 60)
watch.setwatch()
for node in self.CM.Env["nodes"]:
cmd="logger -p %s.info %s %s" % (self.CM.Env["SyslogFacility"], prefix, node)
if self.CM.rsh(node, cmd, blocking=0) != 0:
self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node))
watch_result = watch.lookforall()
if watch.unmatched:
for regex in watch.unmatched:
self.CM.log ("Test message [%s] not found in logs." % (regex))
return 0
return 1
def __call__(self):
max=3
attempt=0
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])
while attempt <= max and self.TestLogging() == 0:
attempt = attempt + 1
self.RestartClusterLogging()
time.sleep(60*attempt)
if attempt > max:
self.CM.log("ERROR: Cluster logging unrecoverable.")
return 0
return 1
def is_applicable(self):
if self.CM.Env["DoBSC"]:
return 0
return 1
class DiskAudit(ClusterAudit):
def name(self):
return "DiskspaceAudit"
def __init__(self, cm):
self.CM = cm
def __call__(self):
result=1
dfcmd="df -k /var/log | tail -1 | tr -s ' ' | cut -d' ' -f2"
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])
for node in self.CM.Env["nodes"]:
dfout=self.CM.rsh(node, dfcmd, 1)
if not dfout:
self.CM.log ("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node))
else:
try:
idfout = int(dfout)
except (ValueError, TypeError):
self.CM.log("Warning: df output from %s was invalid [%s]" % (node, dfout))
else:
if idfout == 0:
self.CM.log("CRIT: Completely out of log disk space on %s" % node)
result=None
elif idfout <= 1000:
self.CM.log("WARN: Low on log disk space (%d Mbytes) on %s" % (idfout, node))
return result
def is_applicable(self):
if self.CM.Env["DoBSC"]:
return 0
return 1
class AuditResource:
def __init__(self, cm, line):
fields = line.split()
self.CM = cm
self.line = line
self.type = fields[1]
self.id = fields[2]
self.clone_id = fields[3]
self.parent = fields[4]
self.rprovider = fields[5]
self.rclass = fields[6]
self.rtype = fields[7]
self.host = fields[8]
self.needs_quorum = fields[9]
self.flags = int(fields[10])
self.flags_s = fields[11]
if self.parent == "NA":
self.parent = None
def unique(self):
if self.flags & int("0x00000020", 16):
return 1
return 0
def orphan(self):
if self.flags & int("0x00000001", 16):
return 1
return 0
def managed(self):
if self.flags & int("0x00000002", 16):
return 1
return 0
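# A hypothetical "crm_resource -c" line, laid out per the fields parsed
# in __init__ (format assumed, values invented for illustration):
#   Resource primitive rsc_ip1 0 NA heartbeat ocf IPaddr node1 1 2 managed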
class AuditConstraint:
def __init__(self, cm, line):
fields = line.split()
self.CM = cm
self.line = line
self.type = fields[1]
self.id = fields[2]
self.rsc = fields[3]
self.target = fields[4]
self.score = fields[5]
self.rsc_role = fields[6]
self.target_role = fields[7]
if self.rsc_role == "NA":
self.rsc_role = None
if self.target_role == "NA":
self.target_role = None
class PrimitiveAudit(ClusterAudit):
def name(self):
return "PrimitiveAudit"
def __init__(self, cm):
self.CM = cm
def doResourceAudit(self, resource):
rc=1
active = self.CM.ResourceLocation(resource.id)
if len(active) == 1:
if self.CM.HasQuorum(None):
self.debug("Resource %s active on %s" % (resource.id, repr(active)))
elif resource.needs_quorum == 1:
self.CM.log("Resource %s active without quorum: %s"
% (resource.id, repr(active)))
rc=0
elif not resource.managed():
self.CM.log("Resource %s not managed. Active on %s"
% (resource.id, repr(active)))
elif not resource.unique():
# TODO: Figure out a clever way to actually audit these resource types
if len(active) > 1:
self.debug("Non-unique resource %s is active on: %s"
% (resource.id, repr(active)))
else:
self.debug("Non-unique resource %s is not active" % resource.id)
elif len(active) > 1:
self.CM.log("Resource %s is active multiple times: %s"
% (resource.id, repr(active)))
rc=0
elif resource.orphan():
self.debug("Resource %s is an inactive orphan" % resource.id)
elif len(self.inactive_nodes) == 0:
self.CM.log("WARN: Resource %s not served anywhere" % resource.id)
rc=0
elif self.CM.Env["warn-inactive"] == 1:
if self.CM.HasQuorum(None) or not resource.needs_quorum:
self.CM.log("WARN: Resource %s not served anywhere (Inactive nodes: %s)"
% (resource.id, repr(self.inactive_nodes)))
else:
self.debug("Resource %s not served anywhere (Inactive nodes: %s)"
% (resource.id, repr(self.inactive_nodes)))
elif self.CM.HasQuorum(None) or not resource.needs_quorum:
self.debug("Resource %s not served anywhere (Inactive nodes: %s)"
% (resource.id, repr(self.inactive_nodes)))
return rc
def setup(self):
self.target = None
self.resources = []
self.constraints = []
self.active_nodes = []
self.inactive_nodes = []
self.debug("Do Audit %s"%self.name())
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.active_nodes.append(node)
else:
self.inactive_nodes.append(node)
for node in self.CM.Env["nodes"]:
if self.target == None and self.CM.ShouldBeStatus[node] == "up":
self.target = node
if not self.target:
# TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource
# with CIB_file=/path/to/cib.xml even when the cluster isn't running
self.debug("No nodes active - skipping %s" % self.name())
return 0
(rc, lines) = self.CM.rsh(self.target, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
self.resources.append(AuditResource(self.CM, line))
elif re.search("^Constraint", line):
self.constraints.append(AuditConstraint(self.CM, line))
else:
self.CM.log("Unknown entry: %s" % line);
return 1
def __call__(self):
rc = 1
if not self.setup():
return 1
for resource in self.resources:
if resource.type == "primitive":
if self.doResourceAudit(resource) == 0:
rc = 0
return rc
def is_applicable(self):
if self.CM["Name"] == "crm-lha":
return 1
if self.CM["Name"] == "crm-ais":
return 1
return 0
class GroupAudit(PrimitiveAudit):
def name(self):
return "GroupAudit"
def __call__(self):
rc = 1
if not self.setup():
return 1
for group in self.resources:
if group.type == "group":
first_match = 1
group_location = None
for child in self.resources:
if child.parent == group.id:
nodes = self.CM.ResourceLocation(child.id)
if first_match and len(nodes) > 0:
group_location = nodes[0]
first_match = 0
if len(nodes) > 1:
rc = 0
self.CM.log("Child %s of %s is active more than once: %s"
% (child.id, group.id, repr(nodes)))
elif len(nodes) == 0:
# Groups are allowed to be partially active
# However we do need to make sure later children aren't running
group_location = None
self.debug("Child %s of %s is stopped" % (child.id, group.id))
elif nodes[0] != group_location:
rc = 0
self.CM.log("Child %s of %s is active on the wrong node (%s) expected %s"
% (child.id, group.id, nodes[0], group_location))
else:
self.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0]))
return rc
class CloneAudit(PrimitiveAudit):
def name(self):
return "CloneAudit"
def __call__(self):
rc = 1
if not self.setup():
return 1
for clone in self.resources:
if clone.type == "clone":
for child in self.resources:
if child.parent == clone.id and child.type == "primitive":
self.debug("Checking child %s of %s..." % (child.id, clone.id))
# Check max and node_max
# Obtain with:
# crm_resource -g clone_max --meta -r child.id
# crm_resource -g clone_node_max --meta -r child.id
return rc
class ColocationAudit(PrimitiveAudit):
def name(self):
return "ColocationAudit"
def crm_location(self, resource):
(rc, lines) = self.CM.rsh(self.target, "crm_resource -W -r %s -Q"%resource, None)
hosts = []
if rc == 0:
for line in lines:
fields = line.split()
hosts.append(fields[0])
return hosts
def __call__(self):
rc = 1
if not self.setup():
return 1
for coloc in self.constraints:
if coloc.type == "rsc_colocation":
source = self.crm_location(coloc.rsc)
target = self.crm_location(coloc.target)
if len(source) == 0:
self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc))
else:
for node in source:
if not node in target:
rc = 0
self.CM.log("Colocation audit (%s): %s running on %s (not in %s)"
% (coloc.id, coloc.rsc, node, repr(target)))
else:
self.debug("Colocation audit (%s): %s running on %s (in %s)"
% (coloc.id, coloc.rsc, node, repr(target)))
return rc
class CrmdStateAudit(ClusterAudit):
def __init__(self, cm):
self.CM = cm
self.Stats = {"calls":0
, "success":0
, "failure":0
, "skipped":0
, "auditfail":0}
def has_key(self, key):
return self.Stats.has_key(key)
def __setitem__(self, key, value):
self.Stats[key] = value
def __getitem__(self, key):
return self.Stats[key]
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
def __call__(self):
passed = 1
up_are_down = 0
down_are_up = 0
unstable_list = []
self.debug("Do Audit %s"%self.name())
for node in self.CM.Env["nodes"]:
should_be = self.CM.ShouldBeStatus[node]
rc = self.CM.test_node_CM(node)
if rc > 0:
if should_be == "down":
down_are_up = down_are_up + 1
if rc == 1:
unstable_list.append(node)
elif should_be == "up":
up_are_down = up_are_down + 1
if len(unstable_list) > 0:
passed = 0
self.CM.log("Cluster is not stable: %d (of %d): %s"
%(len(unstable_list), self.CM.upcount(), repr(unstable_list)))
if up_are_down > 0:
passed = 0
self.CM.log("%d (of %d) nodes expected to be up were down."
%(up_are_down, len(self.CM.Env["nodes"])))
if down_are_up > 0:
passed = 0
self.CM.log("%d (of %d) nodes expected to be down were up."
%(down_are_up, len(self.CM.Env["nodes"])))
return passed
def name(self):
return "CrmdStateAudit"
def is_applicable(self):
if self.CM["Name"] == "crm-lha":
return 1
if self.CM["Name"] == "crm-ais":
return 1
return 0
class CIBAudit(ClusterAudit):
def __init__(self, cm):
self.CM = cm
self.Stats = {"calls":0
, "success":0
, "failure":0
, "skipped":0
, "auditfail":0}
def has_key(self, key):
return self.Stats.has_key(key)
def __setitem__(self, key, value):
self.Stats[key] = value
def __getitem__(self, key):
return self.Stats[key]
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
def __call__(self):
self.debug("Do Audit %s"%self.name())
passed = 1
ccm_partitions = self.CM.find_partitions()
if len(ccm_partitions) == 0:
self.debug("\tNo partitions to audit")
return 1
for partition in ccm_partitions:
self.debug("\tAuditing CIB consistency for: %s" %partition)
partition_passed = 0
if self.audit_cib_contents(partition) == 0:
passed = 0
return passed
def audit_cib_contents(self, hostlist):
passed = 1
node0 = None
node0_xml = None
partition_hosts = hostlist.split()
for node in partition_hosts:
node_xml = self.store_remote_cib(node, node0)
if node_xml == None:
self.CM.log("Could not perform audit: No configuration from %s" % node)
passed = 0
elif node0 == None:
node0 = node
node0_xml = node_xml
elif node0_xml == None:
self.CM.log("Could not perform audit: No configuration from %s" % node0)
passed = 0
else:
(rc, result) = self.CM.rsh(
node0, "crm_diff -VV -cf --new %s --original %s" % (node_xml, node0_xml), None)
if rc != 0:
self.CM.log("Diff between %s and %s failed: %d" % (node0_xml, node_xml, rc))
passed = 0
for line in result:
if not re.search("<diff/>", line):
passed = 0
self.debug("CibDiff[%s-%s]: %s" % (node0, node, line))
else:
self.debug("CibDiff[%s-%s] Ignoring: %s" % (node0, node, line))
# self.CM.rsh(node0, "rm -f %s" % node_xml)
# self.CM.rsh(node0, "rm -f %s" % node0_xml)
return passed
def store_remote_cib(self, node, target):
combined = ""
filename="/tmp/ctsaudit.%s.xml" % node
if not target:
target = node
(rc, lines) = self.CM.rsh(node, self.CM["CibQuery"], None)
if rc != 0:
self.CM.log("Could not retrieve configuration")
return None
self.CM.rsh("localhost", "rm -f %s" % filename)
for line in lines:
self.CM.rsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename))
if self.CM.rsh.cp(filename, "root@%s:%s" % (target, filename)) != 0:
self.CM.log("Could not store configuration")
return None
return filename
def name(self):
return "CibAudit"
def is_applicable(self):
if self.CM["Name"] == "crm-lha":
return 1
if self.CM["Name"] == "crm-ais":
return 1
return 0
class PartitionAudit(ClusterAudit):
def __init__(self, cm):
self.CM = cm
self.Stats = {"calls":0
, "success":0
, "failure":0
, "skipped":0
, "auditfail":0}
self.NodeEpoche={}
self.NodeState={}
self.NodeQuorum={}
def has_key(self, key):
return self.Stats.has_key(key)
def __setitem__(self, key, value):
self.Stats[key] = value
def __getitem__(self, key):
return self.Stats[key]
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
def __call__(self):
self.debug("Do Audit %s"%self.name())
passed = 1
ccm_partitions = self.CM.find_partitions()
if ccm_partitions == None or len(ccm_partitions) == 0:
return 1
if len(ccm_partitions) != self.CM.partitions_expected:
self.CM.log("ERROR: %d cluster partitions detected:" %len(ccm_partitions))
passed = 0
for partition in ccm_partitions:
self.CM.log("\t %s" %partition)
for partition in ccm_partitions:
partition_passed = 0
if self.audit_partition(partition) == 0:
passed = 0
return passed
def trim_string(self, avalue):
if not avalue:
return None
if len(avalue) > 1:
return avalue[:-1]
def trim2int(self, avalue):
if not avalue:
return None
if len(avalue) > 1:
return int(avalue[:-1])
def audit_partition(self, partition):
passed = 1
dc_found = []
dc_allowed_list = []
lowest_epoche = None
node_list = partition.split()
self.debug("Auditing partition: %s" %(partition))
for node in node_list:
if self.CM.ShouldBeStatus[node] != "up":
self.CM.log("Warn: Node %s appeared out of nowhere" %(node))
self.CM.ShouldBeStatus[node] = "up"
# not in itself a reason to fail the audit (not what we're
# checking for in this audit)
self.NodeState[node] = self.CM.rsh(node, self.CM["StatusCmd"]%node, 1)
self.NodeEpoche[node] = self.CM.rsh(node, self.CM["EpocheCmd"], 1)
self.NodeQuorum[node] = self.CM.rsh(node, self.CM["QuorumCmd"], 1)
self.debug("Node %s: %s - %s - %s." %(node, self.NodeState[node], self.NodeEpoche[node], self.NodeQuorum[node]))
self.NodeState[node] = self.trim_string(self.NodeState[node])
self.NodeEpoche[node] = self.trim2int(self.NodeEpoche[node])
self.NodeQuorum[node] = self.trim_string(self.NodeQuorum[node])
if not self.NodeEpoche[node]:
self.CM.log("Warn: Node %s dissappeared: cant determin epoche" %(node))
self.CM.ShouldBeStatus[node] = "down"
# not in itself a reason to fail the audit (not what we're
# checking for in this audit)
elif lowest_epoche == None or self.NodeEpoche[node] < lowest_epoche:
lowest_epoche = self.NodeEpoche[node]
if not lowest_epoche:
self.CM.log("Lowest epoche not determined in %s" % (partition))
passed = 0
for node in node_list:
if self.CM.ShouldBeStatus[node] == "up":
if self.CM.is_node_dc(node, self.NodeState[node]):
dc_found.append(node)
if self.NodeEpoche[node] == lowest_epoche:
self.debug("%s: OK" % node)
elif not self.NodeEpoche[node]:
self.debug("Check on %s ignored: no node epoche" % node)
elif not lowest_epoche:
self.debug("Check on %s ignored: no lowest epoche" % node)
else:
self.CM.log("DC %s is not the oldest node (%d vs. %d)"
%(node, self.NodeEpoche[node], lowest_epoche))
passed = 0
if len(dc_found) == 0:
self.CM.log("DC not found on any of the %d allowed nodes: %s (of %s)"
%(len(dc_allowed_list), str(dc_allowed_list), str(node_list)))
elif len(dc_found) > 1:
self.CM.log("%d DCs (%s) found in cluster partition: %s"
%(len(dc_found), str(dc_found), str(node_list)))
passed = 0
if passed == 0:
for node in node_list:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.log("epoche %s : %s"
%(self.NodeEpoche[node], self.NodeState[node]))
return passed
def name(self):
return "PartitionAudit"
def is_applicable(self):
if self.CM["Name"] == "crm-lha":
return 1
if self.CM["Name"] == "crm-ais":
return 1
return 0
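# Register the audits to run after each test, in the order listed below.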
AllAuditClasses.append(DiskAudit)
AllAuditClasses.append(LogAudit)
AllAuditClasses.append(CrmdStateAudit)
AllAuditClasses.append(PartitionAudit)
AllAuditClasses.append(PrimitiveAudit)
AllAuditClasses.append(GroupAudit)
AllAuditClasses.append(CloneAudit)
AllAuditClasses.append(ColocationAudit)
AllAuditClasses.append(CIBAudit)
def AuditList(cm):
result = []
for auditclass in AllAuditClasses:
result.append(auditclass(cm))
return result
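# Typical usage (as in CTSlab.py): Audits = AuditList(cm) builds one
# instance of every registered audit class for the given cluster manager.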
diff --git a/cts/CTSlab.py b/cts/CTSlab.py
index d72777bdf6..a708d67674 100755
--- a/cts/CTSlab.py
+++ b/cts/CTSlab.py
@@ -1,724 +1,731 @@
#!/usr/bin/python
'''CTS: Cluster Testing System: Lab environment module
'''
__copyright__='''
Copyright (C) 2001,2005 Alan Robertson <alanr@unix.sh>
Licensed under the GNU GPL.
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from UserDict import UserDict
import sys, time, types, string, syslog, random, os, string, signal, traceback
-from CTSvars import *
-from CTS import ClusterManager, RemoteExec
-from CTStests import BSC_AddResource
from socket import gethostbyname_ex
-from CM_ais import *
-from CM_lha import crm_lha
+
+sys.path.append("..") # So that things work from the source directory
+
+try:
+ from cts.CTSvars import *
+ from cts.CTS import ClusterManager, RemoteExec
+ from cts.CTStests import BSC_AddResource
+ from cts.CM_ais import *
+ from cts.CM_lha import crm_lha
+ from cts.CTSaudits import AuditList
+ from cts.CTStests import TestList, RandomTests, AllTests, BenchTests, BenchTestList
+ from cts.CTS import Scenario, InitClusterManager, PingFest, PacketLoss, BasicSanityCheck, Benchmark
+
+except ImportError:
+ sys.stderr.write("abort: couldn't find cts libraries in [%s]\n" %
+ ' '.join(sys.path))
+ sys.stderr.write("(check your install and PYTHONPATH)\n")
+ sys.exit(-1)
tests = None
cm = None
old_handler = None
DefaultFacility = "daemon"
def sig_handler(signum, frame) :
if cm != None:
cm.log("Interrupted by signal %d"%signum)
if signum == 10 and tests != None :
tests.summarize()
if signum == 15 :
sys.exit(1)
class Logger:
TimeFormat = "%b %d %H:%M:%S\t"
def __call__(self, lines):
raise ValueError("Abstract class member (__call__)")
def write(self, line):
return self(line.rstrip())
def writelines(self, lines):
for s in lines:
self.write(s)
return 1
def flush(self):
return 1
def isatty(self):
return None
class SysLog(Logger):
# http://docs.python.org/lib/module-syslog.html
defaultsource="CTS"
map = {
"kernel": syslog.LOG_KERN,
"user": syslog.LOG_USER,
"mail": syslog.LOG_MAIL,
"daemon": syslog.LOG_DAEMON,
"auth": syslog.LOG_AUTH,
"lpr": syslog.LOG_LPR,
"news": syslog.LOG_NEWS,
"uucp": syslog.LOG_UUCP,
"cron": syslog.LOG_CRON,
"local0": syslog.LOG_LOCAL0,
"local1": syslog.LOG_LOCAL1,
"local2": syslog.LOG_LOCAL2,
"local3": syslog.LOG_LOCAL3,
"local4": syslog.LOG_LOCAL4,
"local5": syslog.LOG_LOCAL5,
"local6": syslog.LOG_LOCAL6,
"local7": syslog.LOG_LOCAL7,
}
def __init__(self, labinfo):
if labinfo.has_key("syslogsource"):
self.source=labinfo["syslogsource"]
else:
self.source=SysLog.defaultsource
self.facility=DefaultFacility
if labinfo.has_key("SyslogFacility") \
and labinfo["SyslogFacility"]:
if SysLog.map.has_key(labinfo["SyslogFacility"]):
self.facility=labinfo["SyslogFacility"]
else:
raise ValueError("%s: bad syslog facility"%labinfo["SyslogFacility"])
self.facility=SysLog.map[self.facility]
syslog.openlog(self.source, 0, self.facility)
def setfacility(self, facility):
self.facility = facility
if SysLog.map.has_key(self.facility):
self.facility=SysLog.map[self.facility]
syslog.closelog()
syslog.openlog(self.source, 0, self.facility)
def __call__(self, lines):
if isinstance(lines, types.StringType):
syslog.syslog(lines)
else:
for line in lines:
syslog.syslog(line)
def name(self):
return "Syslog"
class StdErrLog(Logger):
def __init__(self, labinfo):
pass
def __call__(self, lines):
t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
if isinstance(lines, types.StringType):
sys.__stderr__.writelines([t, lines, "\n"])
else:
for line in lines:
sys.__stderr__.writelines([t, line, "\n"])
sys.__stderr__.flush()
def name(self):
return "StdErrLog"
class FileLog(Logger):
def __init__(self, labinfo, filename=None):
if filename == None:
filename=labinfo["LogFileName"]
self.logfile=filename
- import os
self.hostname = os.uname()[1]+" "
self.source = "CTS: "
def __call__(self, lines):
fd = open(self.logfile, "a")
t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
if isinstance(lines, types.StringType):
fd.writelines([t, self.hostname, self.source, lines, "\n"])
else:
for line in lines:
fd.writelines([t, self.hostname, self.source, line, "\n"])
fd.close()
def name(self):
return "FileLog"
class CtsLab(UserDict):
'''This class defines the Lab Environment for the Cluster Test System.
It defines those things which are expected to change from test
environment to test environment for the same cluster manager.
It is where you define the set of nodes that are in your test lab
what kind of reset mechanism you use, etc.
This class is derived from a UserDict because we hold many
different parameters of different kinds, and this provides
a uniform and extensible interface useful for any kind of
communication between the user/administrator/tester and CTS.
At this point in time, it is the intent of this class to model static
configuration and/or environmental data about the environment which
doesn't change as the tests proceed.
Well-known names (keys) are an important concept in this class.
The HasMinimalKeys member function knows the minimal set of
well-known names for the class.
The following names are standard (well-known) at this time:
nodes An array of the nodes in the cluster
reset A ResetMechanism object
logger An array of objects that log strings...
CMclass The type of ClusterManager we are running
(This is a class object, not a class instance)
RandSeed Random seed. It is a triple of bytes. (optional)
The CTS code ignores names it doesn't know about/need.
The individual tests have access to this information, and it is
perfectly acceptable to provide hints, tweaks, fine-tuning
directions or other information to the tests through this mechanism.
'''
def __init__(self):
self.data = {}
self.rsh = RemoteExec(self)
self.RandomGen = random.Random()
# Get a random seed for the random number generator.
self["LogFileName"] = "/var/log/messages"
self["SyslogFacility"] = None
self["DoStonith"] = 1
self["DoStandby"] = 1
self["DoFencing"] = 1
self["XmitLoss"] = "0.0"
self["RecvLoss"] = "0.0"
self["IPBase"] = "127.0.0.10"
self["ClobberCIB"] = 0
self["CIBfilename"] = None
self["CIBResource"] = 0
self["DoBSC"] = 0
self["use_logd"] = 0
self["oprofile"] = []
self["warn-inactive"] = 0
self["ListTests"] = 0
self["benchmark"] = 0
self["CMclass"] = crm_whitetank
self["logrestartcmd"] = "/etc/init.d/syslog-ng restart 2>&1 > /dev/null"
self["Schema"] = "pacemaker-0.6"
self["Stack"] = "openais"
self["stonith-type"] = "external/ssh"
self["stonith-params"] = "hostlist=all,livedangerously=yes"
self["at-boot"] = 1 # Does the cluster software start automatically when the node boot
self["logger"] = ([StdErrLog(self)])
self["loop-minutes"] = 60
self["valgrind-prefix"] = None
self["valgrind-procs"] = "cib crmd attrd pengine"
self["valgrind-opts"] = """--leak-check=full --show-reachable=yes --trace-children=no --num-callers=25 --gen-suppressions=all --suppressions="""+CTSvars.CTS_home+"""/cts.supp"""
self["experimental-tests"] = 0
self["valgrind-tests"] = 0
self["unsafe-tests"] = 1
self["loop-tests"] = 1
self["all-once"] = 0
self.SeedRandom()
def SeedRandom(self, seed=None):
if not seed:
seed = int(time.time())
if self.has_key("RandSeed"):
self.log("New random seed is: " + str(seed))
else:
self.log("Random seed is: " + str(seed))
self["RandSeed"] = seed
self.RandomGen.seed(str(seed))
def HasMinimalKeys(self):
'Return TRUE if our object has the minimal set of keys/values in it'
result = 1
for key in self.MinimalKeys:
if not self.has_key(key):
result = None
return result
def log(self, args):
"Log using each of the supplied logging methods"
for logfcn in self._logfunctions:
logfcn(string.strip(args))
def debug(self, args):
"Log using each of the supplied logging methods"
for logfcn in self._logfunctions:
if logfcn.name() != "StdErrLog":
logfcn("debug: %s" % string.strip(args))
def __setitem__(self, key, value):
'''Since this function gets called whenever we modify the
dictionary (object), we can (and do) validate those keys that we
know how to validate. For the most part, we know how to validate
the "MinimalKeys" elements.
'''
#
# List of nodes in the system
#
if key == "nodes":
self.Nodes = {}
for node in value:
# I don't think I need the IP address, etc. but this validates
# the node name against /etc/hosts and/or DNS, so it's a
# GoodThing(tm).
try:
self.Nodes[node] = gethostbyname_ex(node)
except:
print node+" not found in DNS... aborting"
raise
#
# List of Logging Mechanism(s)
#
elif key == "logger":
if len(value) < 1:
raise ValueError("Must have at least one logging mechanism")
for logger in value:
if not callable(logger):
raise ValueError("'logger' elements must be callable")
self._logfunctions = value
#
# Cluster Manager Class
#
elif key == "CMclass":
if not issubclass(value, ClusterManager):
raise ValueError("'CMclass' must be a subclass of"
" ClusterManager")
#
# Initial Random seed...
#
#elif key == "RandSeed":
# if len(value) != 3:
# raise ValueError("'Randseed' must be a 3-element list/tuple")
# for elem in value:
# if not isinstance(elem, types.IntType):
# raise ValueError("'Randseed' list must all be ints")
self.data[key] = value
def IsValidNode(self, node):
'Return TRUE if the given node is valid'
return self.Nodes.has_key(node)
def __CheckNode(self, node):
"Raise a ValueError if the given node isn't valid"
if not self.IsValidNode(node):
raise ValueError("Invalid node [%s] in CheckNode" % node)
def RandomNode(self):
'''Choose a random node from the cluster'''
return self.RandomGen.choice(self["nodes"])
def usage(arg):
print "Illegal argument " + arg
print "usage: " + sys.argv[0] +" [options] number-of-iterations"
print "\nCommon options: "
print "\t [--at-boot (1|0)], does the cluster software start at boot time"
print "\t [--nodes 'node list'], list of cluster nodes separated by whitespace"
print "\t [--limit-nodes max], only use the first 'max' cluster nodes supplied with --nodes"
print "\t [--stack (heartbeat|ais)], which cluster stack is installed"
print "\t [--logfile path], where should the test software look for logs from cluster nodes"
print "\t [--syslog-facility name], which syslog facility should the test software log to"
print "\t [--choose testcase-name], run only the named test"
print "\t [--list-tests], list the valid tests"
print "\t [--benchmark], add the timing information"
print "\t "
print "Options for release testing: "
print "\t [--clobber-cib | -c ] Erase any existing configuration"
print "\t [--populate-resources | -r] Generate a sample configuration"
print "\t [--test-ip-base ip] Offset for generated IP address resources"
print "\t "
print "Additional (less common) options: "
print "\t [--trunc (truncate logfile before starting)]"
print "\t [--xmit-loss lost-rate(0.0-1.0)]"
print "\t [--recv-loss lost-rate(0.0-1.0)]"
print "\t [--standby (1 | 0 | yes | no)]"
print "\t [--fencing (1 | 0 | yes | no)]"
print "\t [--stonith (1 | 0 | yes | no)]"
print "\t [--stonith-type type]"
print "\t [--stonith-args name=value]"
print "\t [--bsc]"
print "\t [--once], run all valid tests once"
print "\t [--no-loop-tests], dont run looping/time-based tests"
print "\t [--no-unsafe-tests], dont run tests that are unsafe for use with ocfs2/drbd"
print "\t [--valgrind-tests], include tests using valgrind"
print "\t [--experimental-tests], include experimental tests"
print "\t [--oprofile 'node list'], list of cluster nodes to run oprofile on]"
print "\t [--qarsh] Use the QARSH backdoor to access nodes instead of SSH"
print "\t [--seed random_seed]"
print "\t [--set option=value]"
sys.exit(1)
#
# A little test code...
#
if __name__ == '__main__':
-
- from CTSaudits import AuditList
- from CTStests import TestList,RandomTests,AllTests,BenchTests,BenchTestList
- from CTS import Scenario, InitClusterManager, PingFest, PacketLoss, BasicSanityCheck, Benchmark
-
Environment = CtsLab()
NumIter = 0
Version = 1
LimitNodes = 0
TestCase = None
TruncateLog = 0
ListTests = 0
HaveSeed = 0
node_list = ''
#
# The values of the rest of the parameters are now properly derived from
# the configuration files.
#
# Stonith is configurable because it's slow, I have a few machines which
# don't reboot very reliably, and it can do mild damage to your machine if
# you're using a real power switch.
#
# Standby is configurable because the test is very heartbeat specific
# and I haven't written the code to set it properly yet. Patches are
# being accepted...
# Set the signal handler
signal.signal(15, sig_handler)
signal.signal(10, sig_handler)
# Process arguments...
skipthis=None
args=sys.argv[1:]
for i in range(0, len(args)):
if skipthis:
skipthis=None
continue
elif args[i] == "-l" or args[i] == "--limit-nodes":
skipthis=1
LimitNodes = int(args[i+1])
elif args[i] == "-r" or args[i] == "--populate-resources":
Environment["CIBResource"] = 1
elif args[i] == "-L" or args[i] == "--logfile":
skipthis=1
Environment["LogFileName"] = args[i+1]
elif args[i] == "--test-ip-base":
skipthis=1
Environment["IPBase"] = args[i+1]
elif args[i] == "--oprofile":
skipthis=1
Environment["oprofile"] = args[i+1].split(' ')
elif args[i] == "--trunc":
Environment["TruncateLog"]=1
elif args[i] == "--list-tests":
Environment["ListTests"]=1
elif args[i] == "--benchmark":
Environment["benchmark"]=1
elif args[i] == "--bsc":
Environment["DoBSC"] = 1
elif args[i] == "--qarsh":
Environment.rsh.enable_qarsh()
elif args[i] == "--fencing":
skipthis=1
if args[i+1] == "1" or args[i+1] == "yes":
Environment["DoFencing"] = 1
elif args[i+1] == "0" or args[i+1] == "no":
Environment["DoFencing"] = 0
else:
usage(args[i+1])
elif args[i] == "--stonith":
skipthis=1
if args[i+1] == "1" or args[i+1] == "yes":
Environment["DoStonith"]=1
elif args[i+1] == "0" or args[i+1] == "no":
Environment["DoStonith"]=0
else:
usage(args[i+1])
elif args[i] == "--stonith-type":
Environment["stonith-type"] = args[i+1]
skipthis=1
elif args[i] == "--stonith-args":
Environment["stonith-params"] = args[i+1]
skipthis=1
elif args[i] == "--standby":
skipthis=1
if args[i+1] == "1" or args[i+1] == "yes":
Environment["DoStandby"] = 1
elif args[i+1] == "0" or args[i+1] == "no":
Environment["DoStandby"] = 0
else:
usage(args[i+1])
elif args[i] == "--clobber-cib" or args[i] == "-c":
Environment["ClobberCIB"] = 1
elif args[i] == "--cib-filename":
skipthis=1
Environment["CIBfilename"] = args[i+1]
elif args[i] == "--xmit-loss":
try:
float(args[i+1])
except ValueError:
print ("--xmit-loss parameter should be float")
usage(args[i+1])
skipthis=1
Environment["XmitLoss"] = args[i+1]
elif args[i] == "--recv-loss":
try:
float(args[i+1])
except ValueError:
print ("--recv-loss parameter should be float")
usage(args[i+1])
skipthis=1
Environment["RecvLoss"] = args[i+1]
elif args[i] == "--choose":
skipthis=1
TestCase = args[i+1]
elif args[i] == "--nodes":
skipthis=1
node_list = args[i+1].split(' ')
elif args[i] == "--syslog-facility" or args[i] == "--facility":
skipthis=1
Environment["SyslogFacility"] = args[i+1]
elif args[i] == "--seed":
skipthis=1
Environment.SeedRandom(args[i+1])
elif args[i] == "--warn-inactive":
Environment["warn-inactive"] = 1
elif args[i] == "--schema":
skipthis=1
Environment["Schema"] = args[i+1]
elif args[i] == "--ais":
Environment["Stack"] = "openais"
elif args[i] == "--at-boot" or args[i] == "--cluster-starts-at-boot":
skipthis=1
if args[i+1] == "1" or args[i+1] == "yes":
Environment["at-boot"] = 1
elif args[i+1] == "0" or args[i+1] == "no":
Environment["at-boot"] = 0
else:
usage(args[i+1])
elif args[i] == "--heartbeat" or args[i] == "--lha":
Environment["Stack"] = "heartbeat"
elif args[i] == "--hae":
Environment["Stack"] = "openais"
Environment["Schema"] = "hae"
elif args[i] == "--stack":
Environment["Stack"] = args[i+1]
skipthis=1
elif args[i] == "--once":
Environment["all-once"] = 1
elif args[i] == "--valgrind-tests":
Environment["valgrind-tests"] = 1
elif args[i] == "--no-loop-tests":
Environment["loop-tests"] = 0
elif args[i] == "--no-unsafe-tests":
Environment["unsafe-tests"] = 0
elif args[i] == "--experimental-tests":
Environment["experimental-tests"] = 1
elif args[i] == "--set":
skipthis=1
(name, value) = args[i+1].split('=')
Environment[name] = value
else:
try:
NumIter=int(args[i])
except ValueError:
usage(args[i])
Environment["loop-minutes"] = int(Environment["loop-minutes"])
if Environment["DoBSC"]:
NumIter = 2
LimitNodes = 1
Environment["ClobberCIB"] = 1
Environment["CIBResource"] = 0
Environment["logger"].append(FileLog(Environment))
else:
Environment["logger"].append(SysLog(Environment))
if Environment["Stack"] == "heartbeat" or Environment["Stack"] == "lha":
Environment["Stack"] = "heartbeat"
Environment['CMclass'] = crm_lha
elif Environment["Stack"] == "openais" or Environment["Stack"] == "ais" or Environment["Stack"] == "whitetank":
Environment["Stack"] = "openais (whitetank)"
Environment['CMclass'] = crm_whitetank
Environment["use_logd"] = 0
elif Environment["Stack"] == "corosync" or Environment["Stack"] == "cs" or Environment["Stack"] == "flatiron":
Environment["Stack"] = "corosync (flatiron)"
Environment['CMclass'] = crm_flatiron
Environment["use_logd"] = 0
else:
print "Unknown stack: "+Environment["Stack"]
sys.exit(1)
if len(node_list) < 1:
print "No nodes specified!"
sys.exit(1)
if LimitNodes > 0:
if len(node_list) > LimitNodes:
print("Limiting the number of nodes configured=%d (max=%d)"
%(len(node_list), LimitNodes))
while len(node_list) > LimitNodes:
node_list.pop(len(node_list)-1)
Environment["nodes"] = node_list
# Create the Cluster Manager object
cm = Environment['CMclass'](Environment)
Audits = AuditList(cm)
Tests = []
# Your basic start up the world type of test scenario...
# Scenario selection
if Environment["DoBSC"]:
scenario = Scenario([ BasicSanityCheck(Environment) ])
elif Environment["benchmark"]:
scenario = Scenario([ Benchmark(Environment) ])
else:
scenario = Scenario(
[ InitClusterManager(Environment), PacketLoss(Environment)])
#scenario = Scenario(
#[ InitClusterManager(Environment)
#, PingFest(Environment)])
if Environment["ListTests"] == 1 :
Tests = TestList(cm, Audits)
cm.log("Total %d tests"%len(Tests))
for test in Tests :
cm.log(str(test.name));
sys.exit(0)
if TruncateLog:
cm.log("Truncating %s" % LogFile)
lf = open(LogFile, "w");
if lf != None:
lf.truncate(0)
lf.close()
keys = []
for key in Environment.keys():
keys.append(key)
keys.sort()
for key in keys:
cm.debug("Environment["+key+"]:\t"+str(Environment[key]))
cm.log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TESTS ")
cm.log("System log files: %s" % Environment["LogFileName"])
cm.log("Stack: %s" % Environment["Stack"])
cm.log("Schema: %s" % Environment["Schema"])
cm.log("Random Seed: %s" % Environment["RandSeed"])
cm.log("Enable Stonith: %d" % Environment["DoStonith"])
cm.log("Enable Fencing: %d" % Environment["DoFencing"])
cm.log("Enable Standby: %d" % Environment["DoStandby"])
cm.log("Enable Resources: %d" % Environment["CIBResource"])
cm.ns.WaitForAllNodesToComeUp(Environment["nodes"])
cm.log("Cluster nodes: ")
for node in Environment["nodes"]:
cm.log(" * %s" % (node))
if Environment["DoBSC"]:
test = BSC_AddResource(cm)
Tests.append(test)
elif Environment["benchmark"]:
Tests = BenchTestList(cm, Audits)
elif TestCase != None:
for test in TestList(cm, Audits):
if test.name == TestCase:
Tests.append(test)
if Tests == []:
usage("--choose: No applicable/valid tests chosen")
else:
Tests = TestList(cm, Audits)
if Environment["benchmark"]:
Environment.ScenarioTests = BenchTests(scenario, cm, Tests, Audits)
elif Environment["all-once"] or NumIter == 0:
Environment.ScenarioTests = AllTests(scenario, cm, Tests, Audits)
else:
Environment.ScenarioTests = RandomTests(scenario, cm, Tests, Audits)
try :
overall, detailed = Environment.ScenarioTests.run(NumIter)
except :
cm.Env.log("Exception by %s" % sys.exc_info()[0])
for logmethod in Environment["logger"]:
traceback.print_exc(50, logmethod)
Environment.ScenarioTests.summarize()
if Environment.ScenarioTests.Stats["failure"] > 0:
sys.exit(Environment.ScenarioTests.Stats["failure"])
elif Environment.ScenarioTests.Stats["success"] != NumIter:
cm.Env.log("No failure count but success != requested iterations")
sys.exit(1)
diff --git a/cts/CTStests.py b/cts/CTStests.py
index e4f6aac398..6ff06db10c 100644
--- a/cts/CTStests.py
+++ b/cts/CTStests.py
@@ -1,2415 +1,2414 @@
'''CTS: Cluster Testing System: Tests module
There are a few things we want to do here:
'''
__copyright__='''
Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
Licensed under the GNU GPL.
Add ResourceRecover testcase Zhao Kai <zhaokai@cn.ibm.com>
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# SPECIAL NOTE:
#
# Tests may NOT implement any cluster-manager-specific code in them.
# EXTEND the ClusterManager object to provide the base capabilities
# the test needs if you need to do something that the current CM classes
# do not. Otherwise you screw up the whole point of the object structure
# in CTS.
#
# Thank you.
#
-import CTS
-import CTSaudits
import time, os, re, types, string, tempfile, sys
-from CTSaudits import *
from stat import *
+from cts import CTS
+from cts.CTSaudits import *
# List of all class objects for tests which we ought to
# consider running.
class AllTests:
'''
A collection of tests which are run at random.
'''
def __init__(self, scenario, cm, tests, Audits):
self.CM = cm
self.Env = cm.Env
self.Scenario = scenario
self.Tests = []
self.Audits = []
self.ns=CTS.NodeStatus(self.Env)
self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
self.IndividualStats= {}
for audit in Audits:
if not issubclass(audit.__class__, ClusterAudit):
raise ValueError("Init value must be a subclass of ClusterAudit")
if audit.is_applicable():
self.Audits.append(audit)
for test in tests:
if not issubclass(test.__class__, CTSTest):
raise ValueError("Init value must be a subclass of CTSTest")
if test.is_applicable():
self.Tests.append(test)
if not scenario.IsApplicable():
raise ValueError("Scenario not applicable in"
" given Environment")
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
def audit(self, BadNews, test):
errcount=0
BadNewsDebug=0
#BadNews.debug=1
ignorelist = []
ignorelist.append(" CTS: ")
ignorelist.append("BadNews:")
ignorelist.extend(self.CM.errorstoignore())
if test:
ignorelist.extend(test.errorstoignore())
while errcount < 1000:
if BadNewsDebug: print "Looking for BadNews"
match=BadNews.look(0)
if match:
if BadNewsDebug: print "BadNews found: "+match
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
if BadNewsDebug: print "Ignoring based on pattern: ("+ignore+")"
add_err = 0
if add_err == 1:
self.CM.log("BadNews: " + match)
self.incr("BadNews")
errcount=errcount+1
else:
break
else:
answer = raw_input('Big problems. Continue? [nY]')
if answer and answer == "n":
self.CM.log("Shutting down.")
self.CM.stopall()
self.summarize()
raise ValueError("Looks like we hit a BadNews jackpot!")
for audit in self.Audits:
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
self.incr("auditfail")
if test:
test.incr("auditfail")
def summarize(self):
self.CM.log("****************")
self.CM.log("Overall Results:" + repr(self.Stats))
self.CM.log("****************")
stat_filter = {
"calls":0,
"failure":0,
"skipped":0,
"auditfail":0,
}
self.CM.log("Test Summary")
for test in self.Tests:
for key in stat_filter.keys():
stat_filter[key] = test.Stats[key]
self.CM.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
self.CM.debug("Detailed Results")
for test in self.Tests:
self.CM.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))
self.CM.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
def test_loop(self, BadNews, max):
testcount=1
self.CM.log("Executing all tests once")
for test in self.Tests:
if self.run_test(BadNews, test, testcount):
testcount += 1
return testcount
def run_test(self, BadNews, test, testcount):
nodechoice = self.Env.RandomNode()
ret = 1
where = ""
did_run = 0
self.CM.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) +"["+ ("%d" % testcount).rjust(3) +"]")
starttime = test.set_timer()
if not test.setup(nodechoice):
self.CM.log("Setup failed")
ret = 0
elif not test.canrunnow(nodechoice):
self.CM.log("Skipped")
test.skipped()
else:
did_run = 1
ret = test(nodechoice)
if not test.teardown(nodechoice):
self.CM.log("Teardown failed")
ret = 0
stoptime=time.time()
self.CM.oprofileSave(testcount)
elapsed_time = stoptime - starttime
test_time = stoptime - test.get_timer()
if not test.has_key("min_time"):
test["elapsed_time"] = elapsed_time
test["min_time"] = test_time
test["max_time"] = test_time
else:
test["elapsed_time"] = test["elapsed_time"] + elapsed_time
if test_time < test["min_time"]:
test["min_time"] = test_time
if test_time > test["max_time"]:
test["max_time"] = test_time
if ret:
self.incr("success")
test.log_timer()
else:
self.incr("failure")
self.CM.statall()
did_run = 1 # Force the test count to be incremented anyway so test extraction works
self.audit(BadNews, test)
return did_run
def run(self, max=1):
(
'''
Set up the given scenario, then run the selected tests at
random for the selected number of iterations.
''')
BadNews=CTS.LogWatcher(self.CM["LogFileName"], self.CM["BadRegexes"]
, timeout=0)
BadNews.setwatch()
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"])
self.CM.oprofileStop()
self.CM.oprofileStart()
if not self.CM.Env["DoBSC"]:
audit = LogAudit(self.CM)
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
return (None, None)
else:
self.CM.log("Audit " + audit.name() + " passed.")
audit = DiskAudit(self.CM)
if not audit():
self.CM.log("Audit " + audit.name() + " FAILED.")
return (None, None)
else:
self.CM.log("Audit " + audit.name() + " passed.")
if not self.Scenario.SetUp(self.CM):
return (None, None)
self.CM.oprofileSave(0)
time.sleep(30)
# This makes sure everything is stabilized before starting...
self.audit(BadNews, None)
testcount = self.test_loop(BadNews, max)
self.Scenario.TearDown(self.CM)
self.CM.oprofileSave(testcount)
self.CM.oprofileStop()
self.audit(BadNews, None)
for test in self.Tests:
self.IndividualStats[test.name] = test.Stats
return self.Stats, self.IndividualStats
class RandomTests(AllTests):
def test_loop(self, BadNews, max):
testcount=1
self.CM.log("Executing tests at random")
while testcount <= max:
test = self.Env.RandomGen.choice(self.Tests)
if self.run_test(BadNews, test, testcount):
testcount += 1
return testcount
class BenchTests(AllTests):
'''
Nothing (yet) here.
'''
AllTestClasses = [ ]
class CTSTest:
'''
A Cluster test.
We implement the basic set of properties and behaviors for a generic
cluster test.
Cluster tests track their own statistics.
We keep each of the kinds of counts we track as separate {name,value}
pairs.
'''
def __init__(self, cm):
#self.name="the unnamed test"
self.Stats = {"calls":0
, "success":0
, "failure":0
, "skipped":0
, "auditfail":0}
# if not issubclass(cm.__class__, ClusterManager):
# raise ValueError("Must be a ClusterManager object")
self.CM = cm
self.Audits = []
self.timeout=120
self.passed = 1
self.is_loop = 0
self.is_unsafe = 0
self.is_experimental = 0
self.is_valgrind = 0
self.benchmark = 0 # which tests to benchmark
self.timer = {} # timers
def has_key(self, key):
return self.Stats.has_key(key)
def __setitem__(self, key, value):
self.Stats[key] = value
def __getitem__(self, key):
return self.Stats[key]
def log_mark(self, msg):
self.CM.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
return
def get_timer(self,key = "test"):
try: return self.timer[key]
except: return 0
def set_timer(self,key = "test"):
self.timer[key] = time.time()
return self.timer[key]
def log_timer(self,key = "test"):
elapsed = 0
if key in self.timer:
elapsed = time.time() - self.timer[key]
s = key == "test" and self.name or "%s:%s" %(self.name,key)
self.CM.debug("%s runtime: %.2f" % (s, elapsed))
del self.timer[key]
return elapsed
def incr(self, name):
'''Increment (or initialize) the value associated with the given name'''
if not self.Stats.has_key(name):
self.Stats[name]=0
self.Stats[name] = self.Stats[name]+1
# Reset the test passed boolean
if name == "calls":
self.passed = 1
def failure(self, reason="none"):
'''Increment the failure count'''
self.passed = 0
self.incr("failure")
self.CM.log(("Test %s" % self.name).ljust(35) +" FAILED: %s" % reason)
return None
def success(self):
'''Increment the success count'''
self.incr("success")
return 1
def skipped(self):
'''Increment the skipped count'''
self.incr("skipped")
return 1
def __call__(self, node):
'''Perform the given test'''
raise ValueError("Abstract Class member (__call__)")
self.incr("calls")
return self.failure()
def audit(self):
passed = 1
if len(self.Audits) > 0:
for audit in self.Audits:
if not audit():
self.CM.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
self.incr("auditfail")
passed = 0
return passed
def setup(self, node):
'''Setup the given test'''
return self.success()
def teardown(self, node):
'''Tear down the given test'''
return self.success()
def local_badnews(self, prefix, watch, local_ignore=[]):
errcount = 0
if not prefix:
prefix = "LocalBadNews:"
ignorelist = []
ignorelist.append(" CTS: ")
ignorelist.append(prefix)
ignorelist.extend(local_ignore)
while errcount < 100:
match=watch.look(0)
if match:
add_err = 1
for ignore in ignorelist:
if add_err == 1 and re.search(ignore, match):
add_err = 0
if add_err == 1:
self.CM.log(prefix + " " + match)
errcount=errcount+1
else:
break
else:
self.CM.log("Too many errors!")
return errcount
def is_applicable(self):
return self.is_applicable_common()
def is_applicable_common(self):
'''Return TRUE if we are applicable in the current test configuration'''
#raise ValueError("Abstract Class member (is_applicable)")
if self.is_loop and not self.CM.Env["loop-tests"]:
return 0
elif self.is_unsafe and not self.CM.Env["unsafe-tests"]:
return 0
elif self.is_valgrind and not self.CM.Env["valgrind-tests"]:
return 0
elif self.is_experimental and not self.CM.Env["experimental-tests"]:
return 0
return 1
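# Example (hypothetical): a long-running test would set "self.is_loop = 1"
# in its __init__ and would then be skipped unless the environment enables
# Env["loop-tests"]; is_unsafe, is_valgrind and is_experimental gate on
# their corresponding Env switches in the same way.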
def find_ocfs2_resources(self, node):
self.r_o2cb = None
self.r_ocfs2 = []
(rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rtype == "o2cb" and r.parent != "NA":
self.CM.debug("Found o2cb: %s" % self.r_o2cb)
self.r_o2cb = r.parent
if re.search("^Constraint", line):
c = AuditConstraint(self.CM, line)
if c.type == "rsc_colocation" and c.target == self.r_o2cb:
self.r_ocfs2.append(c.rsc)
self.CM.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
return len(self.r_ocfs2)
def canrunnow(self, node):
'''Return TRUE if we can meaningfully run right now'''
return 1
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return []
###################################################################
class StopTest(CTSTest):
###################################################################
'''Stop (deactivate) the cluster manager on a node'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="Stop"
def __call__(self, node):
'''Perform the 'stop' test. '''
self.incr("calls")
if self.CM.ShouldBeStatus[node] != "up":
return self.skipped()
patterns = []
# Technically we should always be able to notice ourselves stopping
patterns.append(self.CM["Pat:We_stopped"] % node)
#if self.CM.Env["use_logd"]:
# patterns.append(self.CM["Pat:Logd_stopped"] % node)
# Any active node needs to notice this one left
# NOTE: This won't work if we have multiple partitions
for other in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[other] == "up" and other != node:
patterns.append(self.CM["Pat:They_stopped"] %(other, node))
#self.debug("Checking %s will notice %s left"%(other, node))
watch = CTS.LogWatcher(
self.CM["LogFileName"], patterns, self.CM["DeadTime"])
watch.setwatch()
if node == self.CM.OurNode:
self.incr("us")
else:
if self.CM.upcount() <= 1:
self.incr("all")
else:
self.incr("them")
self.CM.StopaCM(node)
watch_result = watch.lookforall()
failreason=None
UnmatchedList = "||"
if watch.unmatched:
(rc, output) = self.CM.rsh(node, "/bin/ps axf", None)
for line in output:
self.CM.debug(line)
for regex in watch.unmatched:
self.CM.log ("ERROR: Shutdown pattern not found: %s" % (regex))
UnmatchedList += regex + "||"
failreason="Missing shutdown pattern"
self.CM.cluster_stable(self.CM["DeadTime"])
if not watch.unmatched or self.CM.upcount() == 0:
return self.success()
if len(watch.unmatched) >= self.CM.upcount():
return self.failure("no match against (%s)" % UnmatchedList)
if failreason is None:
return self.success()
else:
return self.failure(failreason)
#
# We don't register StopTest because it's better when called by
# another test...
#
###################################################################
class StartTest(CTSTest):
###################################################################
'''Start (activate) the cluster manager on a node'''
def __init__(self, cm, debug=None):
CTSTest.__init__(self,cm)
self.name="start"
self.debug = debug
def __call__(self, node):
'''Perform the 'start' test. '''
self.incr("calls")
if self.CM.upcount() == 0:
self.incr("us")
else:
self.incr("them")
if self.CM.ShouldBeStatus[node] != "down":
return self.skipped()
elif self.CM.StartaCM(node):
return self.success()
else:
return self.failure("Startup %s on node %s failed"
%(self.CM["Name"], node))
#
# We don't register StartTest because it's better when called by
# another test...
#
###################################################################
class FlipTest(CTSTest):
###################################################################
'''If it's running, stop it. If it's stopped, start it.
Overthrow the status quo...
'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Flip"
self.start = StartTest(cm)
self.stop = StopTest(cm)
def __call__(self, node):
'''Perform the 'Flip' test. '''
self.incr("calls")
if self.CM.ShouldBeStatus[node] == "up":
self.incr("stopped")
ret = self.stop(node)
type="up->down"
# Give the cluster time to recognize it's gone...
time.sleep(self.CM["StableTime"])
elif self.CM.ShouldBeStatus[node] == "down":
self.incr("started")
ret = self.start(node)
type="down->up"
else:
return self.skipped()
self.incr(type)
if ret:
return self.success()
else:
return self.failure("%s failure" % type)
# Register FlipTest as a good test to run
AllTestClasses.append(FlipTest)
###################################################################
class RestartTest(CTSTest):
###################################################################
'''Stop and restart a node'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Restart"
self.start = StartTest(cm)
self.stop = StopTest(cm)
self.benchmark = 1
def __call__(self, node):
'''Perform the 'restart' test. '''
self.incr("calls")
self.incr("node:" + node)
ret1 = 1
if self.CM.StataCM(node):
self.incr("WasStopped")
if not self.start(node):
return self.failure("start (setup) failure: "+node)
self.set_timer()
if not self.stop(node):
return self.failure("stop failure: "+node)
if not self.start(node):
return self.failure("start failure: "+node)
return self.success()
# Register RestartTest as a good test to run
AllTestClasses.append(RestartTest)
###################################################################
class StonithdTest(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="Stonithd"
self.startall = SimulStartLite(cm)
self.benchmark = 1
def __call__(self, node):
self.incr("calls")
if len(self.CM.Env["nodes"]) < 2:
return self.skipped()
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
watchpats = []
watchpats.append("Forcing node %s to be terminated" % node)
watchpats.append("Scheduling Node %s for STONITH" % node)
watchpats.append("Executing .* fencing operation")
watchpats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node)
if not self.CM.is_node_dc(node):
# Won't be found if the DC is shot (and there's no equivalent message from stonithd)
watchpats.append("tengine_stonith_callback: .*: OK ")
# TODO else: look for the notification on a peer once implemented
if self.CM.Env["at-boot"] == 0:
self.CM.debug("Expecting %s to stay down" % node)
self.CM.ShouldBeStatus[node]="down"
else:
self.CM.debug("Expecting %s to come up again %d" % (node, self.CM.Env["at-boot"]))
watchpats.append("%s crmd: .* S_STARTING -> S_PENDING" % node)
watchpats.append("%s crmd: .* S_PENDING -> S_NOT_DC" % node)
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats,
self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
watch.setwatch()
self.CM.rsh(node, "crm_attribute --node %s --type status --attr-name terminate --attr-value true" % node)
self.set_timer("fence")
matched = watch.lookforall()
self.log_timer("fence")
self.set_timer("reform")
if matched:
self.CM.debug("Found: "+ repr(matched))
else:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
self.CM.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.CM.debug("Waiting STONITHd node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self.CM.cluster_stable(self.CM["StartTime"])
if not matched:
return self.failure("Didn't find all expected patterns")
elif not is_stable:
return self.failure("Cluster did not become stable")
self.log_timer("reform")
return self.success()
def errorstoignore(self):
return [ "Executing .* fencing operation" ]
def is_applicable(self):
if not self.is_applicable_common():
return 0
if self.CM.Env.has_key("DoStonith"):
return self.CM.Env["DoStonith"]
return 1
AllTestClasses.append(StonithdTest)
###################################################################
class StartOnebyOne(CTSTest):
###################################################################
'''Start all the nodes ~ one by one'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="StartOnebyOne"
self.stopall = SimulStopLite(cm)
self.start = StartTest(cm)
self.ns=CTS.NodeStatus(cm.Env)
def __call__(self, dummy):
'''Perform the 'StartOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Test setup failed")
failed=[]
self.set_timer()
for node in self.CM.Env["nodes"]:
if not self.start(node):
failed.append(node)
if len(failed) > 0:
return self.failure("Some node failed to start: " + repr(failed))
return self.success()
# Register StartOnebyOne as a good test to run
AllTestClasses.append(StartOnebyOne)
###################################################################
class SimulStart(CTSTest):
###################################################################
'''Start all the nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStart"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
def __call__(self, dummy):
'''Perform the 'SimulStart' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return self.failure("Setup failed")
self.CM.clear_all_caches()
if not self.startall(None):
return self.failure("Startall failed")
return self.success()
# Register SimulStart as a good test to run
AllTestClasses.append(SimulStart)
###################################################################
class SimulStop(CTSTest):
###################################################################
'''Stop all the nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStop"
self.startall = SimulStartLite(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, dummy):
'''Perform the 'SimulStop' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
if not self.stopall(None):
return self.failure("Stopall failed")
return self.success()
# Register SimulStop as a good test to run
AllTestClasses.append(SimulStop)
###################################################################
class StopOnebyOne(CTSTest):
###################################################################
'''Stop all the nodes in order'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="StopOnebyOne"
self.startall = SimulStartLite(cm)
self.stop = StopTest(cm)
def __call__(self, dummy):
'''Perform the 'StopOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
failed=[]
self.set_timer()
for node in self.CM.Env["nodes"]:
if not self.stop(node):
failed.append(node)
if len(failed) > 0:
return self.failure("Some node failed to stop: " + repr(failed))
self.CM.clear_all_caches()
return self.success()
# Register StopOnebyOne as a good test to run
AllTestClasses.append(StopOnebyOne)
###################################################################
class RestartOnebyOne(CTSTest):
###################################################################
'''Restart all the nodes in order'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="RestartOnebyOne"
self.startall = SimulStartLite(cm)
def __call__(self, dummy):
'''Perform the 'RestartOnebyOne' test. '''
self.incr("calls")
# We ignore the "node" parameter...
# Start up all the nodes...
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
did_fail=[]
self.set_timer()
self.restart = RestartTest(self.CM)
for node in self.CM.Env["nodes"]:
if not self.restart(node):
did_fail.append(node)
if did_fail:
return self.failure("Could not restart %d nodes: %s"
%(len(did_fail), repr(did_fail)))
return self.success()
# Register RestartOnebyOne as a good test to run
AllTestClasses.append(RestartOnebyOne)
###################################################################
class PartialStart(CTSTest):
###################################################################
'''Start a node - but tell it to stop before it finishes starting up'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="PartialStart"
self.startall = SimulStartLite(cm)
self.stopall = SimulStopLite(cm)
#self.is_unsafe = 1
def __call__(self, node):
'''Perform the 'PartialStart' test. '''
self.incr("calls")
ret = self.stopall(None)
if not ret:
return self.failure("Setup failed")
# FIXME! This should use the CM class to get the pattern
# then it would be applicable in general
watchpats = []
watchpats.append("Starting crmd")
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats,
timeout=self.CM["DeadTime"]+10)
watch.setwatch()
self.CM.StartaCMnoBlock(node)
ret = watch.lookforall()
if not ret:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
return self.failure("Setup of %s failed" % node)
ret = self.stopall(None)
if not ret:
return self.failure("%s did not stop in time" % node)
return self.success()
# Register PartialStart as a good test to run
AllTestClasses.append(PartialStart)
#######################################################################
class StandbyTest(CTSTest):
#######################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Standby"
self.benchmark = 1
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
# make sure the node is active
# set the node to standby mode
# check resources; none should be running on the node
# set the node to active mode
# check resources; resources should have been migrated back (SHOULD THEY?)
def __call__(self, node):
self.incr("calls")
ret=self.startall(None)
if not ret:
return self.failure("Start all nodes failed")
self.CM.debug("Make sure node %s is active" % node)
if self.CM.StandbyStatus(node) != "off":
if not self.CM.SetStandbyMode(node, "off"):
return self.failure("can't set node %s to active mode" % node)
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "off":
return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
self.CM.debug("Getting resources running on node %s" % node)
rsc_on_node = self.CM.active_resources(node)
self.CM.debug("Setting node %s to standby mode" % node)
if not self.CM.SetStandbyMode(node, "on"):
return self.failure("can't set node %s to standby mode" % node)
self.set_timer("on")
time.sleep(1) # Allow time for the update to be applied and cause something
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "on":
return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
self.log_timer("on")
self.CM.debug("Checking resources")
bad_run = self.CM.active_resources(node)
if len(bad_run) > 0:
rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
self.CM.debug("Setting node %s to active mode" % node)
self.CM.SetStandbyMode(node, "off")
return rc
self.CM.debug("Setting node %s to active mode" % node)
if not self.CM.SetStandbyMode(node, "off"):
return self.failure("can't set node %s to active mode" % node)
self.set_timer("off")
self.CM.cluster_stable()
status = self.CM.StandbyStatus(node)
if status != "off":
return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
self.log_timer("off")
return self.success()
AllTestClasses.append(StandbyTest)
#######################################################################
class ValgrindTest(CTSTest):
#######################################################################
'''Check for memory leaks'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Valgrind"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
self.is_valgrind = 1
self.is_loop = 1
def setup(self, node):
self.incr("calls")
ret=self.stopall(None)
if not ret:
return self.failure("Stop all nodes failed")
# Enable valgrind
self.logPat = "/tmp/%s-*.valgrind" % self.name
self.CM.Env["valgrind-prefix"] = self.name
self.CM.rsh(node, "rm -f %s" % self.logPat, None)
ret=self.startall(None)
if not ret:
return self.failure("Start all nodes failed")
for node in self.CM.Env["nodes"]:
(rc, output) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
for line in output:
self.CM.debug(line)
return self.success()
def teardown(self, node):
# Disable valgrind
self.CM.Env["valgrind-prefix"] = None
# Return all nodes to normal
ret=self.stopall(None)
if not ret:
return self.failure("Stop all nodes failed")
return self.success()
def find_leaks(self):
# Check for leaks
leaked = []
self.stop = StopTest(self.CM)
for node in self.CM.Env["nodes"]:
(rc, ps_out) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
rc = self.stop(node)
if not rc:
self.failure("Couldn't shut down %s" % node)
rc = self.CM.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e ERROR.*SUMMARY:.*[1-9].*errors %s" % self.logPat, 0)
if rc != 1:
leaked.append(node)
self.failure("Valgrind errors detected on %s" % node)
for line in ps_out:
self.CM.log(line)
(rc, output) = self.CM.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logPat, None)
for line in output:
self.CM.log(line)
(rc, output) = self.CM.rsh(node, "cat %s" % self.logPat, None)
for line in output:
self.CM.debug(line)
self.CM.rsh(node, "rm -f %s" % self.logPat, None)
return leaked
def __call__(self, node):
leaked = self.find_leaks()
if len(leaked) > 0:
return self.failure("Nodes %s leaked" % repr(leaked))
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [ """cib:.*readCibXmlFile:""", """HA_VALGRIND_ENABLED""" ]
#######################################################################
class StandbyLoopTest(ValgrindTest):
#######################################################################
'''Check for memory leaks by cycling a node in and out of standby for the configured loop-minutes'''
def __init__(self, cm):
ValgrindTest.__init__(self,cm)
self.name="StandbyLoop"
def __call__(self, node):
lpc = 0
delay = 2
failed = 0
done=time.time() + self.CM.Env["loop-minutes"]*60
while time.time() <= done and not failed:
lpc = lpc + 1
time.sleep(delay)
if not self.CM.SetStandbyMode(node, "on"):
self.failure("can't set node %s to standby mode" % node)
failed = lpc
time.sleep(delay)
if not self.CM.SetStandbyMode(node, "off"):
self.failure("can't set node %s to active mode" % node)
failed = lpc
leaked = self.find_leaks()
if failed:
return self.failure("Iteration %d failed" % failed)
elif len(leaked) > 0:
return self.failure("Nodes %s leaked" % repr(leaked))
return self.success()
AllTestClasses.append(StandbyLoopTest)
##############################################################################
class BandwidthTest(CTSTest):
##############################################################################
# Tests should not be cluster-manager-specific
# If you need to find out cluster manager configuration to do this, then
# it should be added to the generic cluster manager API.
'''Test the bandwidth which heartbeat uses'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name = "Bandwidth"
self.start = StartTest(cm)
self.__setitem__("min",0)
self.__setitem__("max",0)
self.__setitem__("totalbandwidth",0)
self.tempfile = tempfile.mktemp(".cts")
self.startall = SimulStartLite(cm)
def __call__(self, node):
'''Perform the Bandwidth test'''
self.incr("calls")
if self.CM.upcount()<1:
return self.skipped()
Path = self.CM.InternalCommConfig()
if "ip" not in Path["mediatype"]:
return self.skipped()
port = Path["port"][0]
port = int(port)
ret = self.startall(None)
if not ret:
return self.failure("Test setup failed")
time.sleep(5) # We get extra messages right after startup.
fstmpfile = "/var/run/band_estimate"
dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
% (port, fstmpfile)
rc = self.CM.rsh(node, dumpcmd)
if rc == 0:
farfile = "root@%s:%s" % (node, fstmpfile)
self.CM.rsh.cp(farfile, self.tempfile)
Bandwidth = self.countbandwidth(self.tempfile)
if not Bandwidth:
self.CM.log("Could not compute bandwidth.")
return self.success()
intband = int(Bandwidth + 0.5)
self.CM.log("...bandwidth: %d bits/sec" % intband)
self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
if self.Stats["min"] == 0:
self.Stats["min"] = Bandwidth
if Bandwidth > self.Stats["max"]:
self.Stats["max"] = Bandwidth
if Bandwidth < self.Stats["min"]:
self.Stats["min"] = Bandwidth
self.CM.rsh(node, "rm -f %s" % fstmpfile)
os.unlink(self.tempfile)
return self.success()
else:
return self.failure("no response from tcpdump command [%d]!" % rc)
def countbandwidth(self, file):
fp = open(file, "r")
fp.seek(0)
count = 0
sum = 0
while 1:
line = fp.readline()
if not line:
return None
if re.search("udp",line) or re.search("UDP,", line):
count=count+1
linesplit = string.split(line," ")
for j in range(len(linesplit)-1):
if linesplit[j]=="udp": break
if linesplit[j]=="length:": break
try:
sum = sum + int(linesplit[j+1])
except ValueError:
self.CM.log("Invalid tcpdump line: %s" % line)
return None
T1 = linesplit[0]
timesplit = string.split(T1,":")
time2split = string.split(timesplit[2],".")
time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
break
while count < 100:
line = fp.readline()
if not line:
return None
if re.search("udp",line) or re.search("UDP,", line):
count = count+1
linessplit = string.split(line," ")
for j in range(len(linessplit)-1):
if linessplit[j] =="udp": break
if linesplit[j]=="length:": break
try:
sum=int(linessplit[j+1])+sum
except ValueError:
self.CM.log("Invalid tcpdump line: %s" % line)
return None
T2 = linessplit[0]
timesplit = string.split(T2,":")
time2split = string.split(timesplit[2],".")
time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
elapsed = time2-time1
if (elapsed <= 0):
return 0
return (sum*8)/elapsed
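# countbandwidth() expects tcpdump lines roughly of the form (illustrative,
# the exact format varies with the tcpdump version):
#   12:34:56.789012 IP 10.0.0.1.1234 > 10.0.0.2.694: UDP, length 48
# i.e. an HH:MM:SS.usec timestamp in the first field and a packet length
# after the "udp" or "length:" token; it sums ~100 packets and divides
# the total bits by the elapsed time between the first and last packet.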
def is_applicable(self):
'''BandwidthTest is never applicable'''
return 0
AllTestClasses.append(BandwidthTest)
###################################################################
class ResourceRecover(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="ResourceRecover"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.max=30
self.rid=None
#self.is_unsafe = 1
self.benchmark = 1
# these are the values used for the new LRM API call
self.action = "asyncmon"
self.interval = 0
def __call__(self, node):
'''Perform the 'ResourceRecover' test. '''
self.incr("calls")
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
resourcelist = self.CM.active_resources(node)
# if there are no active resources, skip this test
if len(resourcelist)==0:
self.CM.log("No active resources on %s" % node)
return self.skipped()
self.rid = self.CM.Env.RandomGen.choice(resourcelist)
rsc = None
(rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
tmp = AuditResource(self.CM, line)
if tmp.id == self.rid:
rsc = tmp
# Handle anonymous clones that get renamed
self.rid = rsc.clone_id
break
if not rsc:
return self.failure("Could not find %s in the resource list" % self.rid)
self.CM.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id))
pats = []
pats.append("Updating failcount for %s on .* after .* %s"
% (self.rid, self.action))
if rsc.managed():
pats.append("crmd:.* Performing .* op=%s_stop_0" % self.rid)
if rsc.unique():
pats.append("crmd:.* Performing .* op=%s_start_0" % self.rid)
pats.append("crmd:.* LRM operation %s_start_0.*confirmed.*ok" % self.rid)
else:
# Anonymous clones may get restarted with a different clone number
pats.append("crmd:.* Performing .* op=.*_start_0")
pats.append("crmd:.* LRM operation .*_start_0.*confirmed.*ok")
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
self.CM.rsh(node, "crm_resource -F -r %s -H %s &>/dev/null" % (self.rid, node))
self.set_timer("recover")
watch.lookforall()
self.log_timer("recover")
self.CM.cluster_stable()
recovered=self.CM.ResourceLocation(self.rid)
if watch.unmatched:
return self.failure("Patterns not found: %s" % repr(watch.unmatched))
elif rsc.unique() and len(recovered) > 1:
return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))
elif len(recovered) > 0:
self.CM.debug("%s is running on: %s" %(self.rid, repr(recovered)))
elif rsc.managed():
return self.failure("%s was not recovered and is inactive" % self.rid)
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [ """Updating failcount for %s""" % self.rid,
"""Unknown operation: fail""",
"""ERROR: sending stonithRA op to stonithd failed.""",
"""ERROR: process_lrm_event: LRM operation %s_%s_%d""" % (self.rid, self.action, self.interval),
"""ERROR: process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval),
]
AllTestClasses.append(ResourceRecover)
###################################################################
class ComponentFail(CTSTest):
###################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="ComponentFail"
self.startall = SimulStartLite(cm)
self.complist = cm.Components()
self.patterns = []
self.okerrpatterns = []
self.is_unsafe = 1
def __call__(self, node):
'''Perform the 'ComponentFail' test. '''
self.incr("calls")
self.patterns = []
self.okerrpatterns = []
# start all nodes
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
if not self.CM.cluster_stable(self.CM["StableTime"]):
return self.failure("Setup failed - unstable")
node_is_dc = self.CM.is_node_dc(node, None)
# select a component to kill
chosen = self.CM.Env.RandomGen.choice(self.complist)
while chosen.dc_only == 1 and node_is_dc == 0:
chosen = self.CM.Env.RandomGen.choice(self.complist)
self.CM.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
self.incr(chosen.name)
if chosen.name != "aisexec":
if self.CM["Name"] != "crm-lha" or chosen.name != "pengine":
self.patterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
self.patterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
self.patterns.extend(chosen.pats)
if node_is_dc:
self.patterns.extend(chosen.dc_pats)
# In an ideal world, this next stuff should be in the "chosen" object as a member function
if self.CM["Name"] == "crm-lha" and chosen.triggersreboot:
# Make sure the node goes down and then comes back up if it should reboot...
for other in self.CM.Env["nodes"]:
if other != node:
self.patterns.append(self.CM["Pat:They_stopped"] %(other, node))
self.patterns.append(self.CM["Pat:Slave_started"] % node)
self.patterns.append(self.CM["Pat:Local_started"] % node)
if chosen.dc_only:
# Sometimes these will be in the log, and sometimes they won't...
self.okerrpatterns.append("%s crmd:.*Process %s:.* exited" %(node, chosen.name))
self.okerrpatterns.append("%s crmd:.*I_ERROR.*crmdManagedChildDied" %node)
self.okerrpatterns.append("%s crmd:.*The %s subsystem terminated unexpectedly" %(node, chosen.name))
self.okerrpatterns.append("ERROR: Client .* exited with return code")
else:
# Sometimes this won't be in the log...
self.okerrpatterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
self.okerrpatterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
self.okerrpatterns.append(self.CM["Pat:ChildExit"])
# supply a copy so self.patterns doesn't end up empty
tmpPats = []
tmpPats.extend(self.patterns)
self.patterns.extend(chosen.badnews_ignore)
# Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
stonithPats = []
stonithPats.append("stonith-ng:.*Operation .* for host '%s' with device .* returned: 0" % node)
stonith = CTS.LogWatcher(self.CM["LogFileName"], stonithPats, 0)
stonith.setwatch()
# set the watch for stable
watch = CTS.LogWatcher(
self.CM["LogFileName"], tmpPats,
self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
watch.setwatch()
# kill the component
chosen.kill(node)
# check to see if Heartbeat noticed
matched = watch.lookforall(allow_multiple_matches=1)
if matched:
self.CM.debug("Found: "+ repr(matched))
else:
self.CM.log("Patterns not found: " + repr(watch.unmatched))
if self.CM.Env["at-boot"] == 0:
self.CM.debug("Checking if %s was shot" % node)
shot = stonith.look(60)
if shot:
self.CM.debug("Found: "+ repr(shot))
self.CM.ShouldBeStatus[node]="down"
self.CM.debug("Waiting for the cluster to recover")
self.CM.cluster_stable()
self.CM.debug("Waiting for any STONITHd node to come back up")
self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self.CM.cluster_stable(self.CM["StartTime"])
if not matched:
return self.failure("Didn't find all expected patterns")
elif not is_stable:
return self.failure("Cluster did not become stable")
return self.success()
def errorstoignore(self):
'''Return list of errors which should be ignored'''
# Note that okerrpatterns refers to the last time we ran this test
# The good news is that this works fine for us...
self.okerrpatterns.extend(self.patterns)
return self.okerrpatterns
AllTestClasses.append(ComponentFail)
####################################################################
class SplitBrainTest(CTSTest):
####################################################################
'''Test split-brain handling: break the communication path between the nodes,
then check whether both resulting partitions try to take over the resources'''
def __init__(self,cm):
CTSTest.__init__(self,cm)
self.name = "SplitBrain"
self.start = StartTest(cm)
self.startall = SimulStartLite(cm)
self.is_experimental = 1
def isolate_partition(self, partition):
other_nodes = []
other_nodes.extend(self.CM.Env["nodes"])
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]) + " from " +repr(partition))
if len(other_nodes) == 0:
return 1
self.CM.debug("Creating partition: " + repr(partition))
self.CM.debug("Everyone else: " + repr(other_nodes))
for node in partition:
if not self.CM.isolate_node(node, other_nodes):
self.CM.log("Could not isolate %s" % node)
return 0
return 1
def heal_partition(self, partition):
other_nodes = []
other_nodes.extend(self.CM.Env["nodes"])
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]))
if len(other_nodes) == 0:
return 1
self.CM.debug("Healing partition: " + repr(partition))
self.CM.debug("Everyone else: " + repr(other_nodes))
for node in partition:
self.CM.unisolate_node(node, other_nodes)
def __call__(self, node):
'''Perform split-brain test'''
self.incr("calls")
self.passed = 1
partitions = {}
ret = self.startall(None)
if not ret:
return self.failure("Setup failed")
while 1:
# Retry until we get multiple partitions
partitions = {}
p_max = len(self.CM.Env["nodes"])
for node in self.CM.Env["nodes"]:
p = self.CM.Env.RandomGen.randint(1, p_max)
if not partitions.has_key(p):
partitions[p]= []
partitions[p].append(node)
p_max = len(partitions.keys())
if p_max > 1:
break
# else, try again
self.CM.debug("Created %d partitions" % p_max)
for key in partitions.keys():
self.CM.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
# Disabling STONITH to reduce test complexity for now
self.CM.rsh(node, "crm_attribute -n stonith-enabled -v false")
for key in partitions.keys():
self.isolate_partition(partitions[key])
count = 30
while count > 0:
if len(self.CM.find_partitions()) != p_max:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Expected partitions were not created")
# Target number of partitions formed - wait for stability
if not self.CM.cluster_stable():
self.failure("Partitioned cluster not stable")
# Now audit the cluster state
self.CM.partitions_expected = p_max
if not self.audit():
self.failure("Audits failed")
self.CM.partitions_expected = 1
# And heal them again
for key in partitions.keys():
self.heal_partition(partitions[key])
# Wait for a single partition to form
count = 30
while count > 0:
if len(self.CM.find_partitions()) != 1:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not reform")
# Wait for it to have the right number of members
count = 30
while count > 0:
members = []
partitions = self.CM.find_partitions()
if len(partitions) > 0:
members = partitions[0].split()
if len(members) != len(self.CM.Env["nodes"]):
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not completely reform")
# Wait up to 20 minutes - the delay is preferable to trying
# to continue in a messed-up state
if not self.CM.cluster_stable(1200):
self.failure("Reformed cluster not stable")
answer = raw_input('Continue? [nY]')
if answer and answer == "n":
raise ValueError("Reformed cluster not stable")
# Turn fencing back on
if self.CM.Env["DoStonith"]:
self.CM.rsh(node, "crm_attribute -D -n stonith-enabled")
self.CM.cluster_stable()
if self.passed:
return self.success()
return self.failure("See previous errors")
def errorstoignore(self):
'''Return list of errors which are 'normal' and should be ignored'''
return [
"Another DC detected:",
"ERROR: attrd_cib_callback: .*Application of an update diff failed",
"crmd_ha_msg_callback:.*not in our membership list",
"CRIT:.*node.*returning after partition",
]
def is_applicable(self):
if not self.is_applicable_common():
return 0
return len(self.CM.Env["nodes"]) > 2
AllTestClasses.append(SplitBrainTest)
####################################################################
class Reattach(CTSTest):
####################################################################
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="Reattach"
self.startall = SimulStartLite(cm)
self.restart1 = RestartTest(cm)
self.stopall = SimulStopLite(cm)
self.is_unsafe = 0 # Handled by canrunnow()
def setup(self, node):
return self.startall(None)
def canrunnow(self, node):
'''Return TRUE if we can meaningfully run right now'''
if self.find_ocfs2_resources(node):
self.CM.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
return 0
return 1
def __call__(self, node):
self.incr("calls")
pats = []
managed = CTS.LogWatcher(self.CM["LogFileName"], ["is-managed-default"], timeout=60)
managed.setwatch()
self.CM.debug("Disable resource management")
self.CM.rsh(node, "crm_attribute -n is-managed-default -v false")
if not managed.lookforall():
self.CM.log("Patterns not found: " + repr(managed.unmatched))
return self.failure("Resource management not disabled")
pats = []
pats.append("crmd:.*Performing.*_stop_0")
pats.append("crmd:.*Performing.*_start_0")
pats.append("crmd:.*Performing.*_promote_0")
pats.append("crmd:.*Performing.*_demote_0")
pats.append("crmd:.*Performing.*_migrate_.*_0")
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
self.CM.debug("Shutting down the cluster")
ret = self.stopall(None)
if not ret:
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
return self.failure("Couldn't shut down the cluster")
self.CM.debug("Bringing the cluster back up")
ret = self.startall(None)
if not ret:
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
return self.failure("Couldn't restart the cluster")
if self.local_badnews("ResourceActivity:", watch):
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
return self.failure("Resources stopped or started during cluster restart")
watch = CTS.LogWatcher(self.CM["LogFileName"], pats, timeout=60)
watch.setwatch()
managed = CTS.LogWatcher(self.CM["LogFileName"], ["is-managed-default"], timeout=60)
managed.setwatch()
self.CM.debug("Re-enable resource management")
self.CM.rsh(node, "crm_attribute -D -n is-managed-default")
if not managed.lookforall():
self.CM.log("Patterns not found: " + repr(managed.unmatched))
return self.failure("Resource management not enabled")
self.CM.cluster_stable()
# Ignore actions for STONITH resources
ignore = []
(rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rclass == "stonith":
self.CM.debug("Ignoring: crmd:.*Performing.*op=%s_.*_0" % r.id)
ignore.append("crmd:.*Performing.*op=%s_.*_0" % r.id)
if self.local_badnews("ResourceActivity:", watch, ignore):
return self.failure("Resources stopped or started after resource management was re-enabled")
return ret
def errorstoignore(self):
'''Return list of errors which should be ignored'''
return [
"You may ignore this error if it is unmanaged.",
"pingd: .*ERROR: send_ipc_message:",
"pingd: .*ERROR: send_update:",
"lrmd: .*ERROR: notify_client:",
]
def is_applicable(self):
if self.CM["Name"] == "crm-lha":
return None
return 1
AllTestClasses.append(Reattach)
####################################################################
class SpecialTest1(CTSTest):
####################################################################
'''Set up a custom test to cause quorum failure issues for Andrew'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SpecialTest1"
self.startall = SimulStartLite(cm)
self.restart1 = RestartTest(cm)
self.stopall = SimulStopLite(cm)
def __call__(self, node):
'''Perform the 'SpecialTest1' test for Andrew. '''
self.incr("calls")
# Shut down all the nodes...
ret = self.stopall(None)
if not ret:
return ret
# Start the selected node
ret = self.restart1(node)
if not ret:
return ret
# Start all remaining nodes
ret = self.startall(None)
return ret
AllTestClasses.append(SpecialTest1)
####################################################################
class HAETest(CTSTest):
####################################################################
'''Base class for tests of the HAE (HA Extension) services: DLM, o2cb and OCFS2'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="HAETest"
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
self.is_loop = 1
def setup(self, node):
# Start all remaining nodes
ret = self.startall(None)
if not ret:
return self.failure("Couldn't start all nodes")
return self.success()
def teardown(self, node):
# Stop everything
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
return self.success()
def wait_on_state(self, node, resource, expected_clones, attempts=240):
while attempts > 0:
active=0
(rc, lines) = self.CM.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None)
# Hack until crm_resource does the right thing
if rc == 0 and lines:
active = len(lines)
if len(lines) == expected_clones:
return 1
elif rc == 1:
self.CM.debug("Resource %s is still inactive" % resource)
elif rc == 234:
self.CM.log("Unknown resource %s" % resource)
return 0
elif rc == 246:
self.CM.log("Cluster is inactive")
return 0
elif rc != 0:
self.CM.log("Call to crm_resource failed, rc=%d" % rc)
return 0
else:
self.CM.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones))
attempts -= 1
time.sleep(1)
return 0
def find_dlm(self, node):
self.r_dlm = None
(rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self.CM, line)
if r.rtype == "controld" and r.parent != "NA":
self.CM.debug("Found dlm: %s" % self.r_dlm)
self.r_dlm = r.parent
return 1
return 0
def find_hae_resources(self, node):
self.r_dlm = None
self.r_o2cb = None
self.r_ocfs2 = []
if self.find_dlm(node):
self.find_ocfs2_resources(node)
def is_applicable(self):
if not self.is_applicable_common():
return 0
if self.CM.Env["Schema"] == "hae":
return 1
return None
####################################################################
class HAERoleTest(HAETest):
####################################################################
'''Lars' mount/unmount test for the HA extension. '''
def __init__(self, cm):
HAETest.__init__(self,cm)
self.name="HAERoleTest"
def change_state(self, node, resource, target):
rc = self.CM.rsh(node, "crm_resource -r %s -p target-role -v %s --meta" % (resource, target))
return rc
def __call__(self, node):
self.incr("calls")
lpc = 0
failed = 0
delay = 2
done=time.time() + self.CM.Env["loop-minutes"]*60
self.find_hae_resources(node)
clone_max = len(self.CM.Env["nodes"])
while time.time() <= done and not failed:
lpc = lpc + 1
self.change_state(node, self.r_dlm, "Stopped")
if not self.wait_on_state(node, self.r_dlm, 0):
self.failure("%s did not go down correctly" % self.r_dlm)
failed = lpc
self.change_state(node, self.r_dlm, "Started")
if not self.wait_on_state(node, self.r_dlm, clone_max):
self.failure("%s did not come up correctly" % self.r_dlm)
failed = lpc
if not self.wait_on_state(node, self.r_o2cb, clone_max):
self.failure("%s did not come up correctly" % self.r_o2cb)
failed = lpc
for fs in self.r_ocfs2:
if not self.wait_on_state(node, fs, clone_max):
self.failure("%s did not come up correctly" % fs)
failed = lpc
if failed:
return self.failure("iteration %d failed" % failed)
return self.success()
AllTestClasses.append(HAERoleTest)
####################################################################
class HAEStandbyTest(HAETest):
####################################################################
'''Put a node in and out of standby and verify that the HAE clone resources stop and start accordingly'''
def __init__(self, cm):
HAETest.__init__(self,cm)
self.name="HAEStandbyTest"
def change_state(self, node, resource, target):
rc = self.CM.rsh(node, "crm_standby -l reboot -v %s" % (target))
return rc
def __call__(self, node):
self.incr("calls")
lpc = 0
failed = 0
done=time.time() + self.CM.Env["loop-minutes"]*60
self.find_hae_resources(node)
clone_max = len(self.CM.Env["nodes"])
while time.time() <= done and not failed:
lpc = lpc + 1
self.change_state(node, self.r_dlm, "true")
if not self.wait_on_state(node, self.r_dlm, clone_max-1):
self.failure("%s did not go down correctly" % self.r_dlm)
failed = lpc
self.change_state(node, self.r_dlm, "false")
if not self.wait_on_state(node, self.r_dlm, clone_max):
self.failure("%s did not come up correctly" % self.r_dlm)
failed = lpc
if not self.wait_on_state(node, self.r_o2cb, clone_max):
self.failure("%s did not come up correctly" % self.r_o2cb)
failed = lpc
for fs in self.r_ocfs2:
if not self.wait_on_state(node, fs, clone_max):
self.failure("%s did not come up correctly" % fs)
failed = lpc
if failed:
return self.failure("iteration %d failed" % failed)
return self.success()
AllTestClasses.append(HAEStandbyTest)
###################################################################
class NearQuorumPointTest(CTSTest):
###################################################################
'''
This test brings larger clusters near the quorum point (50%).
In addition, it will test doing starts and stops at the same time.
Here is how I think it should work:
- loop over the nodes and decide randomly which will be up and which
will be down. Use a 50% probability for each of up/down.
- figure out what to do to get into that state from the current state
- in parallel, bring up those going up and bring those going down.
'''
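# Worked example (illustrative): with nodes = [n1, n2, n3, n4] the random
# draw might yield startset = [n1, n4] and stopset = [n2, n3]; the test
# then starts and stops those sets in parallel and waits for the
# corresponding "started"/"stopped" log patterns.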
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="NearQuorumPoint"
def __call__(self, dummy):
'''Perform the 'NearQuorumPoint' test. '''
self.incr("calls")
startset = []
stopset = []
#decide what to do with each node
for node in self.CM.Env["nodes"]:
action = self.CM.Env.RandomGen.choice(["start","stop"])
#action = self.CM.Env.RandomGen.choice(["start","stop","no change"])
if action == "start" :
startset.append(node)
elif action == "stop" :
stopset.append(node)
self.CM.debug("start nodes:" + repr(startset))
self.CM.debug("stop nodes:" + repr(stopset))
#add search patterns
watchpats = [ ]
for node in stopset:
if self.CM.ShouldBeStatus[node] == "up":
watchpats.append(self.CM["Pat:We_stopped"] % node)
for node in startset:
if self.CM.ShouldBeStatus[node] == "down":
#watchpats.append(self.CM["Pat:Slave_started"] % node)
watchpats.append(self.CM["Pat:Local_started"] % node)
else:
for stopping in stopset:
if self.CM.ShouldBeStatus[stopping] == "up":
watchpats.append(self.CM["Pat:They_stopped"] % (node, stopping))
if len(watchpats) == 0:
return self.skipped()
if len(startset) != 0:
watchpats.append(self.CM["Pat:DC_IDLE"])
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
#begin actions
for node in stopset:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.StopaCMnoBlock(node)
for node in startset:
if self.CM.ShouldBeStatus[node] == "down":
self.CM.StartaCMnoBlock(node)
#get the result
if watch.lookforall():
self.CM.cluster_stable()
return self.success()
self.CM.log("Warn: Patterns not found: " + repr(watch.unmatched))
#get the "bad" nodes
upnodes = []
for node in stopset:
if self.CM.StataCM(node) == 1:
upnodes.append(node)
downnodes = []
for node in startset:
if self.CM.StataCM(node) == 0:
downnodes.append(node)
if upnodes == [] and downnodes == []:
self.CM.cluster_stable()
# Make sure they're completely down with no residue
for node in stopset:
self.CM.rsh(node, self.CM["StopCmd"])
return self.success()
if len(upnodes) > 0:
self.CM.log("Warn: Unstoppable nodes: " + repr(upnodes))
if len(downnodes) > 0:
self.CM.log("Warn: Unstartable nodes: " + repr(downnodes))
return self.failure()
AllTestClasses.append(NearQuorumPointTest)
###################################################################
class RollingUpgradeTest(CTSTest):
###################################################################
'''Perform a rolling upgrade of the cluster'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="RollingUpgrade"
self.start = StartTest(cm)
self.stop = StopTest(cm)
self.stopall = SimulStopLite(cm)
self.startall = SimulStartLite(cm)
def setup(self, node):
# Start all remaining nodes
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
for node in self.CM.Env["nodes"]:
if not self.downgrade(node, None):
return self.failure("Couldn't downgrade %s" % node)
ret = self.startall(None)
if not ret:
return self.failure("Couldn't start all nodes")
return self.success()
def teardown(self, node):
# Stop everything
ret = self.stopall(None)
if not ret:
return self.failure("Couldn't stop all nodes")
for node in self.CM.Env["nodes"]:
if not self.upgrade(node, None):
return self.failure("Couldn't upgrade %s" % node)
return self.success()
def install(self, node, version, start=1, flags="--force"):
target_dir = "/tmp/rpm-%s" % version
src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
self.CM.log("Installing %s on %s with %s" % (version, node, flags))
if not self.stop(node):
return self.failure("stop failure: "+node)
rc = self.CM.rsh(node, "mkdir -p %s" % target_dir)
rc = self.CM.rsh(node, "rm -f %s/*.rpm" % target_dir)
(rc, lines) = self.CM.rsh(node, "ls -1 %s/*.rpm" % src_dir, None)
for line in lines:
line = line[:-1]
rc = self.CM.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir))
rc = self.CM.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))
if start and not self.start(node):
return self.failure("start failure: "+node)
return self.success()
def upgrade(self, node, start=1):
return self.install(node, self.CM.Env["current-version"], start)
def downgrade(self, node, start=1):
return self.install(node, self.CM.Env["previous-version"], start, "--force --nodeps")
def __call__(self, node):
'''Perform the 'Rolling Upgrade' test. '''
self.incr("calls")
for node in self.CM.Env["nodes"]:
if not self.upgrade(node):
return self.failure("Couldn't upgrade %s" % node)
self.CM.cluster_stable()
return self.success()
def is_applicable(self):
if not self.is_applicable_common():
return None
if not self.CM.Env.has_key("rpm-dir"):
return None
if not self.CM.Env.has_key("current-version"):
return None
if not self.CM.Env.has_key("previous-version"):
return None
return 1
# Register RollingUpgradeTest as a good test to run
AllTestClasses.append(RollingUpgradeTest)
###################################################################
class BSC_AddResource(CTSTest):
###################################################################
'''Add a resource to the cluster'''
def __init__(self, cm):
CTSTest.__init__(self, cm)
self.name="AddResource"
self.resource_offset = 0
self.cib_cmd="""cibadmin -C -o %s -X '%s' """
def __call__(self, node):
self.incr("calls")
self.resource_offset = self.resource_offset + 1
r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
start_pat = "crmd.*%s_start_0.*confirmed.*ok"
patterns = []
patterns.append(start_pat % r_id)
watch = CTS.LogWatcher(
self.CM["LogFileName"], patterns, self.CM["DeadTime"])
watch.setwatch()
fields = string.split(self.CM.Env["IPBase"], '.')
fields[3] = str(int(fields[3])+1)
ip = string.join(fields, '.')
self.CM.Env["IPBase"] = ip
if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
return self.failure("Make resource %s failed" % r_id)
failed = 0
watch_result = watch.lookforall()
if watch.unmatched:
for regex in watch.unmatched:
self.CM.log ("Warn: Pattern not found: %s" % (regex))
failed = 1
if failed:
return self.failure("Resource pattern(s) not found")
if not self.CM.cluster_stable(self.CM["DeadTime"]):
return self.failure("Unstable cluster")
return self.success()
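# Note on the IP allocation above (illustrative): each call bumps the last
# octet of Env["IPBase"], so a base of "10.0.0.10" yields resources on
# 10.0.0.11, 10.0.0.12, ... across successive calls.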
def make_ip_resource(self, node, id, rclass, type, ip):
self.CM.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node))
rsc_xml="""
<primitive id="%s" class="%s" type="%s" provider="heartbeat">
<instance_attributes id="%s"><attributes>
<nvpair id="%s" name="ip" value="%s"/>
</attributes></instance_attributes>
</primitive>""" % (id, rclass, type, id, id, ip)
node_constraint="""
<rsc_location id="run_%s" rsc="%s">
<rule id="pref_run_%s" score="100">
<expression id="%s_loc_expr" attribute="#uname" operation="eq" value="%s"/>
</rule>
</rsc_location>""" % (id, id, id, id, node)
rc = 0
(rc, lines) = self.CM.rsh(node, self.cib_cmd % ("constraints", node_constraint), None)
if rc != 0:
self.CM.log("Constraint creation failed: %d" % rc)
return None
(rc, lines) = self.CM.rsh(node, self.cib_cmd % ("resources", rsc_xml), None)
if rc != 0:
self.CM.log("Resource creation failed: %d" % rc)
return None
return 1
def is_applicable(self):
if self.CM.Env["DoBSC"]:
return 1
return None
###################################################################
class SimulStopLite(CTSTest):
###################################################################
'''Stop any active nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStopLite"
def __call__(self, dummy):
'''Perform the 'SimulStopLite' setup work. '''
self.incr("calls")
self.CM.debug("Setup: " + self.name)
# We ignore the "node" parameter...
watchpats = [ ]
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.incr("WasStarted")
watchpats.append(self.CM["Pat:We_stopped"] % node)
#if self.CM.Env["use_logd"]:
# watchpats.append(self.CM["Pat:Logd_stopped"] % node)
if len(watchpats) == 0:
self.CM.clear_all_caches()
return self.success()
# Stop all the nodes - at about the same time...
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
self.set_timer()
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "up":
self.CM.StopaCMnoBlock(node)
if watch.lookforall():
self.CM.clear_all_caches()
# Make sure they're completely down with no residue
for node in self.CM.Env["nodes"]:
self.CM.rsh(node, self.CM["StopCmd"])
return self.success()
did_fail=0
up_nodes = []
for node in self.CM.Env["nodes"]:
if self.CM.StataCM(node) == 1:
did_fail=1
up_nodes.append(node)
if did_fail:
return self.failure("Active nodes exist: " + repr(up_nodes))
self.CM.log("Warn: All nodes stopped but CTS didnt detect: "
+ repr(watch.unmatched))
self.CM.clear_all_caches()
return self.failure("Missing log message: "+repr(watch.unmatched))
def is_applicable(self):
'''SimulStopLite is a setup test and never applicable'''
return 0
###################################################################
class SimulStartLite(CTSTest):
###################################################################
'''Start any stopped nodes ~ simultaneously'''
def __init__(self, cm):
CTSTest.__init__(self,cm)
self.name="SimulStartLite"
def __call__(self, dummy):
'''Perform the 'SimulStartLite' setup work. '''
self.incr("calls")
self.CM.debug("Setup: " + self.name)
# We ignore the "node" parameter...
watchpats = [ ]
uppat = self.CM["Pat:Slave_started"]
if self.CM.upcount() == 0:
uppat = self.CM["Pat:Local_started"]
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "down":
self.incr("WasStopped")
watchpats.append(uppat % node)
if len(watchpats) == 0:
return self.success()
watchpats.append(self.CM["Pat:DC_IDLE"])
# Start all the nodes - at about the same time...
watch = CTS.LogWatcher(self.CM["LogFileName"], watchpats
, timeout=self.CM["DeadTime"]+10)
watch.setwatch()
self.set_timer()
for node in self.CM.Env["nodes"]:
if self.CM.ShouldBeStatus[node] == "down":
self.CM.StartaCMnoBlock(node)
if watch.lookforall():
for attempt in (1, 2, 3, 4, 5):
if self.CM.cluster_stable():
return self.success()
return self.failure("Cluster did not stabilize")
did_fail=0
unstable = []
for node in self.CM.Env["nodes"]:
if self.CM.StataCM(node) == 0:
did_fail=1
unstable.append(node)
if did_fail:
return self.failure("Unstarted nodes exist: " + repr(unstable))
unstable = []
for node in self.CM.Env["nodes"]:
if not self.CM.node_stable(node):
did_fail=1
unstable.append(node)
if did_fail:
return self.failure("Unstable cluster nodes exist: "
+ repr(unstable))
self.CM.log("ERROR: All nodes started but CTS didnt detect: "
+ repr(watch.unmatched))
return self.failure()
def is_applicable(self):
'''SimulStartLite is a setup test and never applicable'''
return 0
def TestList(cm, audits):
result = []
for testclass in AllTestClasses:
bound_test = testclass(cm)
if bound_test.is_applicable():
bound_test.Audits = audits
result.append(bound_test)
return result
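# Typical (illustrative) driver usage: build the list of applicable tests
# and run each one against a random node:
#   tests = TestList(cm, audits)
#   for test in tests:
#       test(cm.Env.RandomGen.choice(cm.Env["nodes"]))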
def BenchTestList(cm, audits):
all = TestList(cm, audits)
result = []
for test in all:
if test.benchmark:
result.append(test)
return result
# vim:ts=4:sw=4:et:
diff --git a/cts/CTSvars.py.in b/cts/CTSvars.py.in
index cb5575b477..c591d81e6c 100755
--- a/cts/CTSvars.py.in
+++ b/cts/CTSvars.py.in
@@ -1,8 +1,8 @@
class CTSvars:
- CTS_home="@datadir@/@PACKAGE@/cts"
+ CTS_home="@datadir@/@PACKAGE@/tests/cts"
CRM_CONFIG_DIR="@CRM_CONFIG_DIR@"
CRM_DAEMON_USER="@CRM_DAEMON_USER@"
CRM_DAEMON_DIR="@CRM_DAEMON_DIR@"
HA_VARLIBHBDIR="@HA_VARLIBHBDIR@"
OCF_ROOT_DIR="@OCF_ROOT_DIR@"
INITDIR="@INITDIR@"
diff --git a/cts/Makefile.am b/cts/Makefile.am
index 72382b291e..d6e83d9d64 100644
--- a/cts/Makefile.am
+++ b/cts/Makefile.am
@@ -1,43 +1,44 @@
#
# heartbeat: Linux-HA heartbeat code
#
# Copyright (C) 2001 Michael Moerz
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
CLEANFILES = LSBDummy
EXTRA_DIST = $(cts_SCRIPTS) $(cts_DATA)
ctsdir = $(datadir)/$(PACKAGE)/tests/cts
+ctslibdir = $(pythondir)/cts
-cts_PYTHON = CTSvars.py \
+ctslib_PYTHON = __init__.py \
+ CTSvars.py \
CM_lha.py \
CM_ais.py \
CTS.py \
CTSaudits.py \
- CTSlab.py \
CTStests.py \
- extracttests.py \
- OCFIPraTest.py \
CIB.py
cts_DATA = README cts.supp
-cts_SCRIPTS = \
+cts_SCRIPTS = CTSlab.py \
+ extracttests.py \
cluster_test \
- LSBDummy
+ LSBDummy \
+ OCFIPraTest.py
diff --git a/cts/OCFIPraTest.py b/cts/OCFIPraTest.py
index ca09677de1..bb0747291e 100755
--- a/cts/OCFIPraTest.py
+++ b/cts/OCFIPraTest.py
@@ -1,176 +1,176 @@
#!/usr/bin/python
'''OCF IPaddr/IPaddr2 Resource Agent Test'''
__copyright__='''
Author: Huang Zhen <zhenhltc@cn.ibm.com>
Copyright (C) 2004 International Business Machines
Licensed under the GNU GPL.
'''
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import string,sys,struct,os,random,time,syslog
-from CTSvars import *
+from cts.CTSvars import *
def usage():
    print "usage: " + sys.argv[0] \
        + " [-2]" \
        + " [--ipbase|-i first-test-ip]" \
        + " [--ipnum|-n test-ip-num]" \
        + " [--help|-h]" \
        + " [--perform|-p op]" \
        + " [number-of-iterations]"
    sys.exit(1)

def perform_op(ra, ip, op):
    os.environ["OCF_RA_VERSION_MAJOR"] = "1"
    os.environ["OCF_RA_VERSION_MINOR"] = "0"
    os.environ["OCF_ROOT"] = CTSvars.OCF_ROOT_DIR
    os.environ["OCF_RESOURCE_INSTANCE"] = ip
    os.environ["OCF_RESOURCE_TYPE"] = ra
    os.environ["OCF_RESKEY_ip"] = ip
    os.environ["HA_LOGFILE"] = "/dev/null"
    os.environ["HA_LOGFACILITY"] = "local7"
    path = CTSvars.OCF_ROOT_DIR + "/resource.d/heartbeat/" + ra
    return os.spawnvpe(os.P_WAIT, path, [ra, op], os.environ)

def audit(ra, iplist, ipstatus, summary):
    passed = 1
    for ip in iplist:
        ret = perform_op(ra, ip, "monitor")
        if ret != ipstatus[ip]:
            passed = 0
            log("audit: status of %s should be %d but it is %d\t [failure]" %
                (ip, ipstatus[ip], ret))
            ipstatus[ip] = ret
    summary["audit"]["called"] += 1
    if passed:
        summary["audit"]["success"] += 1
    else:
        summary["audit"]["failure"] += 1

def log(towrite):
    t = time.strftime("%Y/%m/%d_%H:%M:%S\t", time.localtime(time.time()))
    logstr = t + " " + str(towrite)
    syslog.syslog(logstr)
    print logstr
if __name__ == '__main__':
    ra = "IPaddr"
    ipbase = "127.0.0.10"
    ipnum = 1
    itnum = 50
    perform = None
    summary = {
        "start": {"called": 0, "success": 0, "failure": 0},
        "stop":  {"called": 0, "success": 0, "failure": 0},
        "audit": {"called": 0, "success": 0, "failure": 0}
    }
    syslog.openlog(sys.argv[0], 0, syslog.LOG_LOCAL7)

    # Process arguments...
    skipthis = None
    args = sys.argv[1:]
    for i in range(0, len(args)):
        if skipthis:
            skipthis = None
            continue
        elif args[i] == "-2":
            ra = "IPaddr2"
        # Accept --ipbase as advertised by usage(), plus the old --ip spelling
        elif args[i] in ("--ipbase", "--ip", "-i"):
            skipthis = 1
            ipbase = args[i+1]
        elif args[i] == "--ipnum" or args[i] == "-n":
            skipthis = 1
            ipnum = int(args[i+1])
        elif args[i] == "--perform" or args[i] == "-p":
            skipthis = 1
            perform = args[i+1]
        elif args[i] == "--help" or args[i] == "-h":
            usage()
        else:
            itnum = int(args[i])

    log("Begin OCF IPaddr/IPaddr2 Test")

    # Generate the test IPs
    iplist = []
    ipstatus = {}
    fields = string.split(ipbase, '.')
    for i in range(0, ipnum):
        ip = string.join(fields, '.')
        iplist.append(ip)
        ipstatus[ip] = perform_op(ra, ip, "monitor")
        fields[3] = str(int(fields[3]) + 1)
    log("Test ip:" + str(iplist))

    # If the user asked for a single operation, perform it and exit
    if perform != None:
        log("Performing operation %s" % perform)
        for ip in iplist:
            perform_op(ra, ip, perform)
        log("Done")
        sys.exit()

    log("RA Type:" + ra)
    log("Test Count:" + str(itnum))

    # Seed the random number generator from /dev/urandom
    f = open("/dev/urandom", "r")
    seed = struct.unpack("BBB", f.read(3))
    f.close()
    #seed = (123, 321, 231)
    rand = random.Random()
    rand.seed(seed[0])
    log("Test Random Seed:" + str(seed))

    #
    # Begin Tests
    log(">>>>>>>>>>>>>>>>>>>>>>>>")
    for i in range(0, itnum):
        ip = rand.choice(iplist)
        if ipstatus[ip] == 0:
            op = "stop"
        elif ipstatus[ip] == 7:
            op = "start"
        else:
            op = rand.choice(["start", "stop"])
        ret = perform_op(ra, ip, op)
        # Update the recorded status: 0 = running, 7 = stopped, 1 = error
        if op == "start" and ret == 0:
            ipstatus[ip] = 0
        elif op == "stop" and ret == 0:
            ipstatus[ip] = 7
        else:
            ipstatus[ip] = 1
        result = ""
        if ret == 0:
            result = "success"
        else:
            result = "failure"
        summary[op]["called"] += 1
        summary[op][result] += 1
        log("%d:%s %s \t[%s]" % (i, op, ip, result))
    audit(ra, iplist, ipstatus, summary)
    log("<<<<<<<<<<<<<<<<<<<<<<<<")
    log("start:\t" + str(summary["start"]))
    log("stop: \t" + str(summary["stop"]))
    log("audit:\t" + str(summary["audit"]))
diff --git a/cts/__init__.py b/cts/__init__.py
new file mode 100644
index 0000000000..feff2bbd39
--- /dev/null
+++ b/cts/__init__.py
@@ -0,0 +1,2 @@
+# This file is required for python packages.
+# It is intentionally empty.
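With this __init__.py installed next to the modules under $(pythondir)/cts, they become importable as a regular package, which is exactly what the new "from cts.CTSvars import *" line in OCFIPraTest.py relies on. A quick interactive check, assuming the package has been installed onto sys.path:

from cts.CTSvars import CTSvars
print CTSvars.OCF_ROOT_DIR    # the @OCF_ROOT_DIR@ value substituted by configure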
diff --git a/doc/Makefile.am b/doc/Makefile.am
index c88229ff28..74108f417a 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,109 +1,117 @@
#
# doc: Pacemaker code
#
# Copyright (C) 2008 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
helpdir = $(datadir)/$(PACKAGE)
ascii = crm_cli.txt crm_fencing.txt
help_DATA = crm_cli.txt
docbook = Pacemaker_Explained
man_MANS = cibadmin.8 crm_resource.8
doc_DATA = README.hb2openais $(ascii) $(generated_docs)
publican_docs =
generated_docs =
XML_FILES := $(wildcard *.xml)
PNG_FILES := $(wildcard images/*.png)
if BUILD_ASCIIDOC
generated_docs += $(ascii:%.txt=%.html)
endif
if BUILD_DOCBOOK
publican_docs += $(docbook)
+generated_docs += index.html
endif
EXTRA_DIST = $(man_MANS) $(docbook:%=%.xml)
index.html:
echo "Building documentation index"
echo "<html><body><p>The following <a href=\"http://www.clusterlabs.org/wiki/Pacemaker\">Pacemaker</a> documentation was generated on `date` from version: $(BUILD_VERSION)</p>" > index.html
echo "<ol>" >> index.html
for doc in $(generated_docs); do \
echo "<li><a href=\"$$doc\">$$doc</a></li>" >> index.html; \
done
if BUILD_DOCBOOK
for book in $(docbook); do \
for lang in `ls -1 $(docbook)/publish`; do \
echo "<li>$$book ($$lang)<ul>" >> index.html; \
find $$book/publish/$$lang -name "*.pdf" -exec echo -n "<li><a href=\"{}\">" \; -exec basename {} \; -exec echo "</a></li>" \; | sed s:$$book/publish/:: >> index.html ; \
find $$book/publish/$$lang -name "*.txt" -exec echo -n "<li><a href=\"{}\">" \; -exec basename {} \; -exec echo "</a></li>" \; | sed s:$$book/publish/:: >> index.html ; \
find $$book/publish/$$lang -name html -exec echo "<li><a href=\"{}/$$book/index.html\">$$book HTML</a></li>" \; | sed s:$$book/publish/:: >> index.html ; \
find $$book/publish/$$lang -name html-single -exec echo "<li><a href=\"{}/$$book/index.html\">$$book HTML (single page)</a></li>" \; | sed s:$$book/publish/:: >> index.html ; \
echo "</ul></li>" >> index.html; \
done; \
done
endif
echo "</ol>" >> index.html
echo "<p>You can find <a href=\"http://www.clusterlabs.org/wiki/Documentation\">additional documentation</a> and details about the Pacemaker project at <a href=\"http://www.clusterlabs.org\">http://www.clusterlabs.org</a></p>" >> index.html
echo "</body></html>" >> index.html
%.html: %.txt
$(ASCIIDOC) --unsafe --backend=xhtml11 $<
%.txt: %/en-US/*.xml
+ if [ -e /usr/share/publican/Common_Content/clusterlabs ]; then \
+ echo "Building ClusterLabs branded documentation set"; \
+ echo "brand: clusterlabs" >> $*/publican.cfg; \
+ fi
cd $* && $(PUBLICAN) build --publish --langs=all --formats=pdf,html,html-single,txt
- cp $*/publish/en-US/Pacemaker/1.0/txt/$*/$@ $@
+ if [ -e /usr/share/publican/Common_Content/clusterlabs ]; then \
+ sed -i.sed s/brand:.*// $*/publican.cfg; \
+ fi
+ cp $*/publish/en-US/Pacemaker/*/txt/$*/$@ $@
if BUILD_DOCBOOK
docbook_txt = $(docbook:%=%.txt)
all-local: $(docbook_txt)
#install-data-local: all-local
-install-data-local: all-local
+install-data-local: all-local
for book in $(docbook); do \
- filelist=`find $$book/publish -print`; \
+ filelist=`find $$book/publish/* -print`; \
for f in $$filelist; do \
p=`echo $$f | sed s:publish/:: | sed s:Pacemaker/::`; \
if [ -d $$f ]; then \
- echo $(INSTALL) -d 775 $(DESTDIR)/$(docdir)/$$p; \
+ $(INSTALL) -d 775 $(DESTDIR)$(docdir)/$$p; \
else \
- echo $(INSTALL) -m 644 $$f $(DESTDIR)/$(docdir)/$$p; \
+ $(INSTALL) -m 644 $$f $(DESTDIR)$(docdir)/$$p; \
fi \
done; \
done
endif
push: all-local index.html
echo Uploading current documentation set to clusterlabs.org
rsync -rtz --progress index.html root@oss.clusterlabs.org:/srv/www/extras/doc/
if BUILD_DOCBOOK
for book in $(docbook); do \
echo Uploading $$book...; \
rsync -rtz --progress --delete $$book/publish/* root@oss.clusterlabs.org:/srv/www/extras/doc/; \
done
endif
clean-local:
-rm -rf $(generated_docs) $(docbook)/tmp $(docbook)/publish
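For readers skimming the make recipe above: a rough Python 2 equivalent of the index.html generation, with an illustrative document list standing in for $(generated_docs):

docs = ["crm_cli.html", "crm_fencing.html"]   # stand-in for $(generated_docs)
out = open("index.html", "w")
out.write("<html><body><ol>\n")
for doc in docs:
    out.write('<li><a href="%s">%s</a></li>\n' % (doc, doc))
out.write("</ol></body></html>\n")
out.close()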
diff --git a/doc/Pacemaker_Explained/publican.cfg b/doc/Pacemaker_Explained/publican.cfg
index 7ca9eaeb26..b7e5260e27 100644
--- a/doc/Pacemaker_Explained/publican.cfg
+++ b/doc/Pacemaker_Explained/publican.cfg
@@ -1,14 +1,13 @@
# Config::Simple 4.59
# Tue Nov 10 22:28:47 2009
docname: Pacemaker_Explained
-version: 1.0
+version: 1.1
xml_lang: en-US
#edition: 1
type: Book
-brand: clusterlabs
product: Pacemaker
chunk_first: 0
chunk_section_depth: 3
generate_section_toc_level: 4
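The hard-coded brand line can be dropped here because the %.txt rule in doc/Makefile.am above now appends it only when the ClusterLabs Publican brand is actually installed. A rough Python 2 sketch of that conditional, with the paths taken from the Makefile diff:

import os
brand = "/usr/share/publican/Common_Content/clusterlabs"   # path checked by the Makefile
cfg = "Pacemaker_Explained/publican.cfg"
if os.path.exists(brand):
    open(cfg, "a").write("brand: clusterlabs\n")
# publican build runs here; afterwards the Makefile strips the line again with sed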
diff --git a/pacemaker.spec b/pacemaker.spec
index fce98e2b50..017015acf5 100644
--- a/pacemaker.spec
+++ b/pacemaker.spec
@@ -1,1290 +1,1349 @@
%global gname haclient
%global uname hacluster
%global pcmk_docdir %{_docdir}/%{name}
-%global specversion 2
-#global upstream_version ee19d8e83c2a
+%global specversion 1
+#global upstream_version tip
%global upstream_prefix pacemaker
# Keep around for when/if required
#global alphatag %{upstream_version}.hg
%global pcmk_release %{?alphatag:0.}%{specversion}%{?alphatag:.%{alphatag}}%{?dist}
-# Compatibility macro wrappers for legacy RPM versions that do not
-# support conditional builds
+# Compatibility macros for distros (fedora) that don't provide Python macros by default
+# Do this instead of trying to conditionally %include %{_rpmconfigdir}/macros.python
+%{!?py_ver: %{expand: %%global py_ver %%(echo `python -c "import sys; print sys.version[:3]"`)}}
+%{!?py_prefix: %{expand: %%global py_prefix %%(echo `python -c "import sys; print sys.prefix"`)}}
+%{!?py_libdir: %{expand: %%global py_libdir %%{expand:%%%%{py_prefix}/lib/python%%%%{py_ver}}}}
+%{!?py_sitedir: %{expand: %%global py_sitedir %%{expand:%%%%{py_libdir}/site-packages}}}
+
+# Compatibility macro wrappers for legacy RPM versions that do not support conditional builds
%{!?bcond_without: %{expand: %%global bcond_without() %%{expand:%%%%{!?_without_%%{1}:%%%%global with_%%{1} 1}}}}
%{!?bcond_with: %{expand: %%global bcond_with() %%{expand:%%%%{?_with_%%{1}:%%%%global with_%%{1} 1}}}}
%{!?with: %{expand: %%global with() %%{expand:%%%%{?with_%%{1}:1}%%%%{!?with_%%{1}:0}}}}
%{!?without: %{expand: %%global without() %%{expand:%%%%{?with_%%{1}:0}%%%%{!?with_%%{1}:1}}}}
# Conditionals
# Invoke "rpmbuild --without <feature>" or "rpmbuild --with <feature>"
# to disable or enable specific features
+
+# Supported cluster stacks, must support at least one
%bcond_without ais
%bcond_without heartbeat
+
# ESMTP is not available in RHEL, only in EPEL. Allow people to build
# the RPM without ESMTP in case they choose not to use EPEL packages
%bcond_without esmtp
+# We generate some docs using Publican, but it's not available everywhere
+%bcond_without publican
+
Name: pacemaker
Summary: Scalable High-Availability cluster resource manager
-Version: 1.0.7
+Version: 1.1.1
Release: %{pcmk_release}
License: GPLv2+ and LGPLv2+
Url: http://www.clusterlabs.org
Group: System Environment/Daemons
Source0: pacemaker.tar.bz2
BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
AutoReqProv: on
Requires(pre): cluster-glue
Requires: resource-agents
Requires: python >= 2.4
Conflicts: heartbeat < 2.99
%if 0%{?fedora} || 0%{?centos} > 4 || 0%{?rhel} > 4
Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version))
BuildRequires: help2man libtool-ltdl-devel
%endif
%if 0%{?suse_version}
# net-snmp-devel on SLES10 does not suck in tcpd-devel automatically
BuildRequires: help2man tcpd-devel
%endif
# Required for core functionality
-BuildRequires: automake autoconf libtool pkgconfig
+BuildRequires: automake autoconf libtool pkgconfig python
BuildRequires: glib2-devel cluster-glue-libs-devel libxml2-devel libxslt-devel
BuildRequires: pkgconfig python-devel gcc-c++ bzip2-devel gnutls-devel pam-devel
# Enables optional functionality
BuildRequires: ncurses-devel net-snmp-devel openssl-devel
BuildRequires: lm_sensors-devel libselinux-devel
%if %{with esmtp}
BuildRequires: libesmtp-devel
%endif
%if %{with ais}
BuildRequires: corosynclib-devel
Requires: corosync
%endif
%if %{with heartbeat}
BuildRequires: heartbeat-devel heartbeat-libs
Requires: heartbeat >= 3.0.0
%endif
+%if %{with publican}
+BuildRequires: publican
+%endif
+
%description
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Linux-HA (Heartbeat) and/or OpenAIS.
It supports "n-node" clusters with significant capabilities for
managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
Available rpmbuild rebuild options:
- --without : heartbeat ais
+ --without : heartbeat ais esmtp publican
%package -n pacemaker-libs
License: GPLv2+ and LGPLv2+
Summary: Libraries used by the Pacemaker cluster resource manager and its clients
Group: System Environment/Daemons
Requires: %{name} = %{version}-%{release}
%description -n pacemaker-libs
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Linux-HA (Heartbeat) and/or OpenAIS.
It supports "n-node" clusters with significant capabilities for
managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
%package -n pacemaker-libs-devel
License: GPLv2+ and LGPLv2+
Summary: Pacemaker development package
Group: Development/Libraries
Requires: %{name}-libs = %{version}-%{release}
Requires: cluster-glue-libs-devel
Obsoletes: libpacemaker3
%if %{with ais}
Requires: corosynclib-devel
%endif
%if %{with heartbeat}
Requires: heartbeat-devel
%endif
%description -n pacemaker-libs-devel
Headers and shared libraries for developing tools for Pacemaker.
Pacemaker is an advanced, scalable High-Availability cluster resource
manager for Linux-HA (Heartbeat) and/or OpenAIS.
It supports "n-node" clusters with significant capabilities for
managing resources and dependencies.
It will run scripts at initialization, when machines go up or down,
when related resources fail and can be configured to periodically check
resource health.
+%package cts
+License: GPLv2+ and LGPLv2+
+Summary: Test framework for cluster-related technologies like Pacemaker
+Group: System Environment/Daemons
+Requires: python
+
+%description cts
+Test framework for cluster-related technologies like Pacemaker
+
+%package doc
+License: GPLv2+ and LGPLv2+
+Summary: Documentation for Pacemaker
+Group: Documentation
+
+%description doc
+Documentation for Pacemaker.
+
+Pacemaker is an advanced, scalable High-Availability cluster resource
+manager for Linux-HA (Heartbeat) and/or OpenAIS.
+
+It supports "n-node" clusters with significant capabilities for
+managing resources and dependencies.
+
+It will run scripts at initialization, when machines go up or down,
+when related resources fail and can be configured to periodically check
+resource health.
+
%prep
%setup -q -n %{upstream_prefix}%{?upstream_version}
%build
./autogen.sh
# RHEL <= 5 does not support --docdir
export docdir=%{pcmk_docdir}
%{configure} --localstatedir=%{_var} --enable-fatal-warnings=no
make %{_smp_mflags} docdir=%{pcmk_docdir}
%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot} docdir=%{pcmk_docdir}
-# Scripts that need should be executable
-chmod a+x %{buildroot}/%{_libdir}/heartbeat/hb2openais-helper.py
-chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py
-chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/OCFIPraTest.py
-chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/extracttests.py
+# Scripts that should be executable
+chmod a+x %{buildroot}%{_libdir}/heartbeat/hb2openais-helper.py
+chmod a+x %{buildroot}%{_datadir}/pacemaker/tests/cts/CTSlab.py
+chmod a+x %{buildroot}%{_datadir}/pacemaker/tests/cts/OCFIPraTest.py
+chmod a+x %{buildroot}%{_datadir}/pacemaker/tests/cts/extracttests.py
# These are not actually scripts
find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.xsl' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.rng' -type f -print0 | xargs -0 chmod a-x
find %{buildroot} -name '*.dtd' -type f -print0 | xargs -0 chmod a-x
-# Dont package static libs or compiled python
+# Don't package static libs
find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f
find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f
-find %{buildroot} -name '*.pyc' -type f -print0 | xargs -0 rm -f
-find %{buildroot} -name '*.pyo' -type f -print0 | xargs -0 rm -f
-
-# install shell modules to site-packages
-(
-cd shell
-python setup.py install --prefix=%{_prefix} --root=%{buildroot}
-)
# Do not package these either
-rm %{buildroot}/%{_libdir}/heartbeat/crm_primitive.py
-%if %{with ais}
-rm %{buildroot}/%{_libdir}/service_crm.so
-%endif
+rm -f %{buildroot}/%{_libdir}/heartbeat/crm_primitive.*
+rm -f %{buildroot}/%{_libdir}/heartbeat/atest
+rm -f %{buildroot}/%{_libdir}/service_crm.so
%clean
rm -rf %{buildroot}
%post -n pacemaker-libs -p /sbin/ldconfig
%postun -n pacemaker-libs -p /sbin/ldconfig
%files
###########################################################
%defattr(-,root,root)
%exclude %{_datadir}/pacemaker/tests
%{_datadir}/pacemaker
%{_datadir}/snmp/mibs/PCMK-MIB.txt
%{_libdir}/heartbeat/*
%{_sbindir}/cibadmin
%{_sbindir}/crm_attribute
%{_sbindir}/crm_diff
%{_sbindir}/crm_failcount
%{_sbindir}/crm_master
%{_sbindir}/crm_mon
%{_sbindir}/crm
%{_sbindir}/crm_resource
%{_sbindir}/crm_standby
%{_sbindir}/crm_verify
%{_sbindir}/crmadmin
%{_sbindir}/iso8601
%{_sbindir}/attrd_updater
%{_sbindir}/ptest
%{_sbindir}/crm_shadow
%{_sbindir}/cibpipe
%{_sbindir}/crm_node
-%{py_sitedir}/*
+%{_sbindir}/crm_simulate
+%{_sbindir}/fence_legacy
+%{_sbindir}/stonith_admin
+%{py_sitedir}/crm
%if %{with heartbeat}
%{_sbindir}/crm_uuid
%else
%exclude %{_sbindir}/crm_uuid
%endif
# Packaged elsewhere
%exclude %{pcmk_docdir}/AUTHORS
%exclude %{pcmk_docdir}/COPYING
%exclude %{pcmk_docdir}/COPYING.LIB
-%doc %{pcmk_docdir}/crm_cli.txt
-%doc %{pcmk_docdir}/crm_fencing.txt
-%doc %{pcmk_docdir}/README.hb2openais
-%doc %{_mandir}/man8/*.8*
%doc COPYING
%doc AUTHORS
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/heartbeat/crm
%dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pengine
%dir %attr (750, %{uname}, %{gname}) %{_var}/run/crm
%dir /usr/lib/ocf
%dir /usr/lib/ocf/resource.d
/usr/lib/ocf/resource.d/pacemaker
%if %{with ais}
%{_libexecdir}/lcrso/pacemaker.lcrso
%endif
%files -n pacemaker-libs
%defattr(-,root,root)
%{_libdir}/libcib.so.*
%{_libdir}/libcrmcommon.so.*
%{_libdir}/libcrmcluster.so.*
%{_libdir}/libpe_status.so.*
%{_libdir}/libpe_rules.so.*
%{_libdir}/libpengine.so.*
%{_libdir}/libtransitioner.so.*
%{_libdir}/libstonithd.so.*
%doc COPYING.LIB
%doc AUTHORS
+%files doc
+%doc %{_mandir}/man8/*.8*
+%doc %{pcmk_docdir}/crm_cli.txt
+%doc %{pcmk_docdir}/crm_fencing.txt
+%doc %{pcmk_docdir}/README.hb2openais
+%if %{with publican}
+%doc %{pcmk_docdir}/index.html
+%doc %{pcmk_docdir}/Pacemaker_Explained
+%endif
+
+%files cts
+%{py_sitedir}/cts
+%{_datadir}/pacemaker/tests/cts
+%doc COPYING.LIB
+%doc AUTHORS
+
%files -n pacemaker-libs-devel
%defattr(-,root,root)
+%exclude %{_datadir}/pacemaker/tests/cts
+%{_datadir}/pacemaker/tests
%{_includedir}/pacemaker
-%{_includedir}/heartbeat/fencing
%{_libdir}/*.so
-%{_datadir}/pacemaker/tests
%doc COPYING.LIB
%doc AUTHORS
%changelog
+* Tue Feb 16 2010 Andrew Beekhof <andrew@beekhof.net> - 1.1.1-1
+- First public release of Pacemaker 1.1
+- Package reference documentation in a doc subpackage
+- Move cts into a subpackage so that it can be easily consumed by others
+- Update source tarball to revision: 17d9cd4ee29f
+ + New stonith daemon that supports global notifications
+ + Service placement influenced by the physical resources
+ + A new tool for simulating failures and the cluster's reaction to them
+ + Ability to serialize an otherwise unrelated set of resource actions (e.g. Xen migrations)
+
* Tue Jan 19 2010 Andrew Beekhof <andrew@beekhof.net> - 1.0.7-2
- Rebuild for corosync 1.2.0
* Mon Jan 18 2010 Andrew Beekhof <andrew@beekhof.net> - 1.0.7-1
- Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip
- Statistics:
Changesets: 193
Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-)
- Changes since 1.0.5-4
+ High: PE: Bug 2213 - Ensure groups process location constraints so that clone-node-max works for cloned groups
+ High: PE: Bug lf#2153 - non-clones should not restart when clones stop/start on other nodes
+ High: PE: Bug lf#2209 - Clone ordering should be able to prevent startup of dependent clones
+ High: PE: Bug lf#2216 - Correctly identify the state of anonymous clones when deciding when to probe
+ High: PE: Bug lf#2225 - Operations that require fencing should wait for 'stonith_complete' not 'all_stopped'.
+ High: PE: Bug lf#2225 - Prevent clone peers from stopping while another instance is (potentially) being fenced
+ High: PE: Correctly anti-colocate with a group
+ High: PE: Correctly unpack ordering constraints for resource sets to avoid graph loops
+ High: Tools: crm: load help from crm_cli.txt
+ High: Tools: crm: resource sets (bnc#550923)
+ High: Tools: crm: support for comments (LF 2221)
+ High: Tools: crm: support for description attribute in resources/operations (bnc#548690)
+ High: Tools: hb2openais: add EVMS2 CSM processing (and other changes) (bnc#548093)
+ High: Tools: hb2openais: do not allow empty rules, clones, or groups (LF 2215)
+ High: Tools: hb2openais: refuse to convert pure EVMS volumes
+ High: cib: Ensure the loop for login message terminates
+ High: cib: Finally fix reliability of receiving large messages over remote plaintext connections
+ High: cib: Fix remote notifications
+ High: cib: For remote connections, default to CRM_DAEMON_USER since that's the only one that the cib can validate the password for using PAM
+ High: cib: Remote plaintext - Retry sending parts of the message that did not fit the first time
+ High: crmd: Ensure batch-limit is correctly enforced
+ High: crmd: Ensure we have the latest status after a transition abort
+ High (bnc#547579,547582): Tools: crm: status section editing support
+ High: shell: Add allow-migrate as allowed meta-attribute (bnc#539968)
+ Medium: Build: Do not automatically add -L/lib, it could cause 64-bit arches to break
+ Medium: PE: Bug lf#2206 - rsc_order constraints always use score at the top level
+ Medium: PE: Only complain about target-role=master for non m/s resources
+ Medium: PE: Prevent non-multistate resources from being promoted through target-role
+ Medium: PE: Provide a default action for resource-set ordering
+ Medium: PE: Silently fix requires=fencing for stonith resources so that it can be set in op_defaults
+ Medium: Tools: Bug lf#2286 - Allow the shell to accept template parameters on the command line
+ Medium: Tools: Bug lf#2307 - Provide a way to determine the nodeid of past cluster members
+ Medium: Tools: crm: add update method to template apply (LF 2289)
+ Medium: Tools: crm: direct RA interface for ocf class resource agents (LF 2270)
+ Medium: Tools: crm: direct RA interface for stonith class resource agents (LF 2270)
+ Medium: Tools: crm: do not add score which does not exist
+ Medium: Tools: crm: do not consider warnings as errors (LF 2274)
+ Medium: Tools: crm: do not remove sets which contain id-ref attribute (LF 2304)
+ Medium: Tools: crm: drop empty attributes elements
+ Medium: Tools: crm: exclude locations when testing for pathological constraints (LF 2300)
+ Medium: Tools: crm: fix exit code on single shot commands
+ Medium: Tools: crm: fix node delete (LF 2305)
+ Medium: Tools: crm: implement -F (--force) option
+ Medium: Tools: crm: rename status to cibstatus (LF 2236)
+ Medium: Tools: crm: revisit configure commit
+ Medium: Tools: crm: stay in crm if user specified level only (LF 2286)
+ Medium: Tools: crm: verify changes on exit from the configure level
+ Medium: ais: Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf
+ Medium: cib: Clean up logic for receiving remote messages
+ Medium: cib: Create valid notification control messages
+ Medium: cib: Indicate where the remote connection came from
+ Medium: cib: Send password prompt to stderr so that stdout can be redirected
+ Medium: cts: Fix rsh handling when stdout is not required
+ Medium: doc: Fill in the section on removing a node from an AIS-based cluster
+ Medium: doc: Update the docs to reflect the 0.6/1.0 rolling upgrade problem
+ Medium: doc: Use Publican for docbook based documentation
+ Medium: fencing: stonithd: add metadata for stonithd instance attributes (and support in the shell)
+ Medium: fencing: stonithd: ignore case when comparing host names (LF 2292)
+ Medium: tools: Make crm_mon functional with remote connections
+ Medium: xml: Add stopped as a supported role for operations
+ Medium: xml: Bug bnc#552713 - Treat node unames as text fields not IDs
+ Medium: xml: Bug lf#2215 - Create an always-true expression for empty rules when upgrading from 0.6
* Thu Oct 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-4
- Include the fixes from CoroSync integration testing
- Move the resource templates - they are not documentation
- Ensure documentation is placed in a standard location
- Exclude documentation that is included elsewhere in the package
- Update the tarball from upstream to version ee19d8e83c2a
+ High: cib: Correctly clean up when both plaintext and tls remote ports are requested
+ High: PE: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisons
+ High: PE: Bug lf#2197 - Allow master instance placement to be influenced by colocation constraints
+ High: PE: Make sure promote/demote pseudo actions are created correctly
+ High: PE: Prevent target-role from promoting more than master-max instances
+ High: ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage
+ High: ais: Prevent deadlock - don't try to release IPC message if the connection failed
+ High: cib: For validation errors, send back the full CIB so the client can display the errors
+ High: cib: Prevent use-after-free for remote plaintext connections
+ High: crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat
* Wed Oct 13 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-3
- Update the tarball from upstream to version 38cd629e5c3c
+ High: Core: Bug lf#2169 - Allow dtd/schema validation to be disabled
+ High: PE: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change
+ High: PE: Bug lf#2170 - stop-all-resources option had no effect
+ High: PE: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which can not
+ High: PE: Disable resource management if stonith-enabled=true and no stonith resources are defined
+ High: PE: do not include master score if it would prevent allocation
+ High: ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms)
+ High: ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync
+ High: ais: Gracefully handle changes to the AIS nodeid
+ High: crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE
+ High: crmd: Prevent use-after-free with LOG_DEBUG_3
+ Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672)
+ Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm
+ Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild
+ Medium: PE: Bug lf#2178 - Indicate unmanaged clones
+ Medium: PE: Bug lf#2180 - Include node information for all failed ops
+ Medium: PE: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint
+ Medium: PE: Correctly log resources that would like to start but can not
+ Medium: PE: Stop ptest from logging to syslog
+ Medium: ais: Include version details in plugin name
+ Medium: crmd: Requery the resource metadata after every start operation
* Fri Aug 21 2009 Tomas Mraz <tmraz@redhat.com> - 1.0.5-2.1
- rebuilt with new openssl
* Wed Aug 19 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-2
- Add versioned perl dependency as specified by
https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl
- No longer remove RPATH data, it prevents us finding libperl.so and no other
libraries were being hardcoded
- Compile in support for heartbeat
- Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements
depending on which stacks are supported
* Mon Aug 17 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-1
- Add dependency on resource-agents
- Use the version of the configure macro that supplies --prefix, --libdir, etc
- Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final)
+ High: Tools: crm_resource - Advertise --move instead of --migrate
+ Medium: Extra: New node connectivity RA that uses system ping and attrd_updater
+ Medium: crmd: Note that dc-deadtime can be used to mask the brokenness of some switches
* Tue Aug 11 2009 Ville Skyttä <ville.skytta@iki.fi> - 1.0.5-0.7.c9120a53a6ae.hg
- Use bzipped upstream tarball.
* Wed Jul 29 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.6.c9120a53a6ae.hg
- Add back missing build auto* dependencies
- Minor cleanups to the install directive
* Tue Jul 28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.5.c9120a53a6ae.hg
- Add a leading zero to the revision when alphatag is used
* Tue Jul 28 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.5-0.4.c9120a53a6ae.hg
- Incorporate the feedback from the cluster-glue review
- Realistically, the version is a 1.0.5 pre-release
- Use the global directive instead of define for variables
- Use the haclient/hacluster group/user instead of daemon
- Use the _configure macro
- Fix install dependencies
* Fri Jul 24 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-3
- Initial Fedora checkin
- Include an AUTHORS and license file in each package
- Change the library package name to pacemaker-libs to be more
Fedora compliant
- Remove execute permissions from xml related files
- Reference the new cluster-glue devel package name
- Update the tarball from upstream to version c9120a53a6ae
+ High: PE: Only prevent migration if the clone dependency is stopping/starting on the target node
+ High: PE: Bug 2160 - Dont shuffle clones due to colocation
+ High: PE: New implementation of the resource migration (not stop/start) logic
+ Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options
+ Medium: PE: Prevent use-of-NULL in find_first_action()
* Tue Jul 14 2009 Andrew Beekhof <andrew@beekhof.net> - 1.0.4-2
- Reference authors from the project AUTHORS file instead of listing in description
- Change Source0 to reference the Mercurial repo
- Cleaned up the summaries and descriptions
- Incorporate the results of Fedora package self-review
* Thu Jun 04 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.4-1
- Update source tarball to revision: 1d87d3e0fc7f (stable-1.0)
- Statistics:
Changesets: 209
Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-)
- Changes since Pacemaker-1.0.3
+ High (bnc#488291): ais: do not rely on byte endianness on ptr cast
+ High (bnc#507255): Tools: crm: delete rsc/op_defaults (these meta_attributes are killing me)
+ High (bnc#507255): Tools: crm: import properly rsc/op_defaults
+ High (LF 2114): Tools: crm: add support for operation instance attributes
+ High: ais: Bug lf#2126 - Message replies cannot be routed to transient clients
+ High: ais: Fix compilation for the latest Corosync API (v1719)
+ High: attrd: Do not perform all updates as complete refreshes
+ High: cib: Fix huge memory leak affecting heartbeat-based clusters
+ High: Core: Allow xpath queries to match attributes
+ High: Core: Generate the help text directly from a tool options struct
+ High: Core: Handle differences in 0.6 messaging format
+ High: crmd: Bug lf#2120 - All transient node attribute updates need to go via attrd
+ High: crmd: Correctly calculate how long an FSA action took to avoid spamming the logs with errors
+ High: crmd: Fix another large memory leak affecting Heartbeat based clusters
+ High: lha: Restore compatibility with older versions
+ High: PE: Bug bnc#495687 - Filesystem is not notified of successful STONITH under some conditions
+ High: PE: Make running a cluster with STONITH enabled but no STONITH resources an error and provide details on resolutions
+ High: PE: Prevent use-of-NULL when using resource ordering sets
+ High: PE: Provide inter-notification ordering guarantees
+ High: PE: Rewrite the notification code to be understandable and extendable
+ High: Tools: attrd - Prevent race condition resulting in the cluster forgetting the node wishes to shut down
+ High: Tools: crm: regression tests
+ High: Tools: crm_mon - Fix smtp notifications
+ High: Tools: crm_resource - Repair the ability to query meta attributes
+ Low: Build: Bug lf#2105 - Debian package should contain pacemaker doc and crm templates
+ Medium (bnc#507255): Tools: crm: handle empty rsc/op_defaults properly
+ Medium (bnc#507255): Tools: crm: use the right obj_type when creating objects from xml nodes
+ Medium (LF 2107): Tools: crm: revisit exit codes in configure
+ Medium: cib: Do not bother validating updates that only affect the status section
+ Medium: Core: Include supported stacks in version information
+ Medium: crmd: Record in the CIB, the cluster infrastructure being used
+ Medium: cts: Do not combine crm_standby arguments - the wrapper can not process them
+ Medium: cts: Fix the CIBAudit class
+ Medium: Extra: Refresh showscores script from Dominik
+ Medium: PE: Build a statically linked version of ptest
+ Medium: PE: Correctly log the actions for resources that are being recovered
+ Medium: PE: Correctly log the occurrence of promotion events
+ Medium: PE: Implement node health based on a patch from Mark Hamzy
+ Medium: Tools: Add examples to help text outputs
+ Medium: Tools: crm: catch syntax errors for configure load
+ Medium: Tools: crm: implement erasing nodes in configure erase
+ Medium: Tools: crm: work with parents only when managing xml objects
+ Medium: Tools: crm_mon - Add option to run custom notification program on resource operations (Patch by Dominik Klein)
+ Medium: Tools: crm_resource - Allow --cleanup to function on complex resources and cluster-wide
+ Medium: Tools: haresource2cib.py - Patch from horms to fix conversion error
+ Medium: Tools: Include stack information in crm_mon output
+ Medium: Tools: Two new options (--stack,--constraints) to crm_resource for querying how a resource is configured
* Wed Apr 08 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.3-1
- Update source tarball to revision: b133b3f19797 (stable-1.0) tip
- Statistics:
Changesets: 383
Diff: 329 files changed, 15471 insertions(+), 15119 deletions(-)
- Changes since Pacemaker-1.0.2
+ Added tag SLE11-HAE-GMC for changeset 9196be9830c2
+ High: ais plugin: Fix quorum calculation (bnc#487003)
+ High: ais: Another memory leak fix in error path
+ High: ais: Bug bnc#482847, bnc#482905 - Force a clean exit of OpenAIS once Pacemaker has finished unloading
+ High: ais: Bug bnc#486858 - Fix update_member() to prevent spamming clients with membership events containing no changes
+ High: ais: Centralize all quorum calculations in the ais plugin and allow expected votes to be configured in the cib
+ High: ais: Correctly handle a return value of zero from openais_dispatch_recv()
+ High: ais: Disable logging to a file
+ High: ais: Fix memory leak in error path
+ High: ais: IPC messages are only in scope until a response is sent
+ High: All signal handlers used with CL_SIGNAL() need to be as minimal as possible
+ High: cib: Bug bnc#482885 - Simplify CIB disk-writes to prevent data loss. Required a change to the backup filename format
+ High: cib: crmd: Revert part of 9782ab035003. Complex shutdown routines need G_main_add_SignalHandler to avoid race conditions
+ High: crm: Avoid infinite loop during crm configure edit (bnc#480327)
+ High: crmd: Avoid a race condition by waiting for the attrd update to trigger a transition automatically
+ High: crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly
+ High: crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly (verified)
+ High: crmd: Bug bnc#489063 - Ensure the DC is always unset after we 'lose' an election
+ High: crmd: Bug BSC#479543 - Correctly find the migration source for timed out migrate_from actions
+ High: crmd: Call crm_peer_init() before we start the FSA - prevents a race condition when used with Heartbeat
+ High: crmd: Erasing the status section should not be forced to the local node
+ High: crmd: Fix memory leak in cib notification processing code
+ High: crmd: Fix memory leak in transition graph processing
+ High: crmd: Fix memory leaks found by valgrind
+ High: crmd: More memory leaks fixes found by valgrind
+ High: fencing: stonithd: is_heartbeat_cluster is a no-no if there is no heartbeat support
+ High: PE: Bug bnc#466788 - Exclude nodes that can not run resources
+ High: PE: Bug bnc#466788 - Make colocation based on node attributes work
+ High: PE: Bug BNC#478687 - Do not crash when clone-max is 0
+ High: PE: Bug bnc#488721 - Fix id-ref expansion for clones, the doc-root for clone children is not the cib root
+ High: PE: Bug bnc#490418 - Correctly determine node state for nodes wishing to be terminated
+ High: PE: Bug LF#2087 - Correctly parse the state of anonymous clones that have multiple instances on a given node
+ High: PE: Bug lf#2089 - Meta attributes are not inherited by clone children
+ High: PE: Bug lf#2091 - Correctly restart modified resources that were found active by a probe
+ High: PE: Bug lf#2094 - Fix probe ordering for cloned groups
+ High: PE: Bug LF:2075 - Fix large pingd memory leaks
+ High: PE: Correctly attach orphaned clone children to their parent
+ High: PE: Correctly handle terminate node attributes that are set to the output from time()
+ High: PE: Ensure orphaned clone members are hooked up to the parent when clone-max=0
+ High: PE: Fix memory leak in LogActions
+ High: PE: Fix the determination of whether a group is active
+ High: PE: Look up the correct promotion preference for anonymous masters
+ High: PE: Simplify handling of start failures by changing the default migration-threshold to INFINITY
+ High: PE: The ordered option for clones no longer causes extra start/stop operations
+ High: RA: Bug bnc#490641 - Shut down dlm_controld with -TERM instead of -KILL
+ High: RA: pingd: Set default ping interval to 1 instead of 0 seconds
+ High: Resources: pingd - Correctly tell the ping daemon to shut down
+ High: Tools: Bug bnc#483365 - Ensure the command from cluster_test includes a value for --log-facility
+ High: Tools: cli: fix and improve delete command
+ High: Tools: crm: add and implement templates
+ High: Tools: crm: add support for command aliases and some common commands (i.e. cd,exit)
+ High: Tools: crm: create top configuration nodes if they are missing
+ High: Tools: crm: fix parsing attributes for rules (broken by the previous changeset)
+ High: Tools: crm: new ra set of commands
+ High: Tools: crm: resource agents information management
+ High: Tools: crm: rsc/op_defaults
+ High: Tools: crm: support for no value attribute in nvpairs
+ High: Tools: crm: the new configure monitor command
+ High: Tools: crm: the new configure node command
+ High: Tools: crm_mon - Prevent use-of-NULL when summarizing an orphan
+ High: Tools: hb2openais: create clvmd clone for respawn evmsd in ha.cf
+ High: Tools: hb2openais: fix a serious recursion bug in xml node processing
+ High: Tools: hb2openais: fix ocfs2 processing
+ High: Tools: pingd - prevent double free of getaddrinfo() output in error path
+ High: Tools: The default re-ping interval for pingd should be 1s not 1ms
+ Medium (bnc#479049): Tools: crm: add validation of resource type for the configure primitive command
+ Medium (bnc#479050): Tools: crm: add help for RA parameters in tab completion
+ Medium (bnc#479050): Tools: crm: add tab completion for primitive params/meta/op
+ Medium (bnc#479050): Tools: crm: reimplement cluster properties completion
+ Medium (bnc#486968): Tools: crm: listnodes function requires no parameters (do not mix completion with other stuff)
+ Medium: ais: Remove the ugly hack for dampening AIS membership changes
+ Medium: cib: Fix memory leaks by using mainloop_add_signal
+ Medium: cib: Move more logging to the debug level (was info)
+ Medium: cib: Overhaul the processing of synchronous replies
+ Medium: Core: Add library functions for instructing the cluster to terminate nodes
+ Medium: crmd: Add new expected-quorum-votes option
+ Medium: crmd: Allow up to 5 retries when an attrd update fails
+ Medium: crmd: Automatically detect and use new values for crm_config options
+ Medium: crmd: Bug bnc#490426 - Escalated shutdowns stall when there are pending resource operations
+ Medium: crmd: Clean up and optimize the DC election algorithm
+ Medium: crmd: Fix memory leak in shutdown
+ Medium: crmd: Fix memory leaks spotted by Valgrind
+ Medium: crmd: Ignore join messages from hosts other than our DC
+ Medium: crmd: Limit the scope of resource updates to the status section
+ Medium: crmd: Prevent the crmd from being respawned if it's told to shut down when it did not ask to be
+ Medium: crmd: Re-check the election status after membership events
+ Medium: crmd: Send resource updates via the local CIB during elections
+ Medium: PE: Bug bnc#491441 - crm_mon does not display operations returning 'uninstalled' correctly
+ Medium: PE: Bug lf#2101 - For location constraints, role=Slave is equivalent to role=Started
+ Medium: PE: Clean up the API - removed ->children() and renamed ->find_child() to find_rsc()
+ Medium: PE: Compress the display of healthy anonymous clones
+ Medium: PE: Correctly log the actions for resources that are being recovered
+ Medium: PE: Determine a promotion score for complex resources
+ Medium: PE: Ensure clones always have a value for globally-unique
+ Medium: PE: Prevent orphan clones from being allocated
+ Medium: RA: controld: Return proper exit code for stop op.
+ Medium: Tools: Bug bnc#482558 - Fix logging test in cluster_test
+ Medium: Tools: Bug bnc#482828 - Fix quoting in cluster_test logging setup
+ Medium: Tools: Bug bnc#482840 - Include directory path to CTSlab.py
+ Medium: Tools: crm: add more user input checks
+ Medium: Tools: crm: do not check resource status if we are working with a shadow
+ Medium: Tools: crm: fix id-refs and allow reference to top objects (i.e. primitive)
+ Medium: Tools: crm: ignore comments in the CIB
+ Medium: Tools: crm: multiple column output would not work with small lists
+ Medium: Tools: crm: refuse to delete running resources
+ Medium: Tools: crm: rudimentary if-else for templates
+ Medium: Tools: crm: Start/stop clones via target-role.
+ Medium: Tools: crm_mon - Compress the node status for healthy and offline nodes
+ Medium: Tools: crm_shadow - Return 0/cib_ok when --create-empty succeeds
+ Medium: Tools: crm_shadow - Support -e, the short form of --create-empty
+ Medium: Tools: Make attrd quieter
+ Medium: Tools: pingd - Avoid using various clplumbing functions as they seem to leak
+ Medium: Tools: Reduce pingd logging
* Mon Feb 16 2009 Andrew Beekhof <abeekhof@suse.de> - 1.0.2-1
- Update source tarball to revision: d232d19daeb9 (stable-1.0) tip
- Statistics:
Changesets: 441
Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-)
- Changes since Pacemaker-1.0.1
+ High (bnc#450815): Tools: crm cli: do not generate id for the operations tag
+ High: ais: Add support for the new AIS IPC layer
+ High: ais: Always set header.error to the correct default: SA_AIS_OK
+ High: ais: Bug BNC#456243 - Ensure the membership cache always contains an entry for the local node
+ High: ais: Bug BNC:456208 - Prevent deadlocks by not logging in the child process before exec()
+ High: ais: By default, disable support for the WIP openais IPC patch
+ High: ais: Detect and handle situations where ais and the crm disagree on the node name
+ High: ais: Ensure crm_peer_seq is updated after a membership update
+ High: ais: Make sure all IPC header fields are set to sane defaults
+ High: ais: Repair and streamline service load now that whitetank startup functions correctly
+ High: build: create and install doc files
+ High: cib: Allow clients without mainloop to connect to the cib
+ High: cib: CID:18 - Fix use-of-NULL in cib_perform_op
+ High: cib: CID:18 - Repair errors introduced in b5a18704477b - Fix use-of-NULL in cib_perform_op
+ High: cib: Ensure diffs contain the correct values of admin_epoch
+ High: cib: Fix four moderately sized memory leaks detected by Valgrind
+ High: Core: CID:10 - Prevent indexing into an array of schemas with a negative value
+ High: Core: CID:13 - Fix memory leak in log_data_element
+ High: Core: CID:15 - Fix memory leak in crm_get_peer
+ High: Core: CID:6 - Fix use-of-NULL in copy_ha_msg_input
+ High: Core: Fix crash in the membership code preventing node shutdown
+ High: Core: Fix more memory leaks found by valgrind
+ High: Core: Prevent unterminated strings after decompression
+ High: crmd: Bug BNC:467995 - Delay marking STONITH operations complete until STONITH tells us so
+ High: crmd: Bug LF:1962 - Do not NACK peers because they are not (yet) in our membership. Just ignore them.
+ High: crmd: Bug LF:2010 - Ensure fencing cib updates create the node_state entry if needed to prevent re-fencing during cluster startup
+ High: crmd: Correctly handle reconnections to attrd
+ High: crmd: Ensure updates for lost migrate operations indicate which node it tried to migrate to
+ High: crmd: If there are no nodes to finalize, start an election.
+ High: crmd: If there are no nodes to welcome, start an election.
+ High: crmd: Prevent node attribute loss by detecting attrd disconnections immediately
+ High: crmd: Prevent node re-probe loops by ensuring mandatory actions always complete
+ High: PE: Bug 2005 - Fix startup ordering of cloned stonith groups
+ High: PE: Bug 2006 - Correctly reprobe cloned groups
+ High: PE: Bug BNC:465484 - Fix the no-quorum-policy=suicide option
+ High: PE: Bug LF:1996 - Correctly process disabled monitor operations
+ High: PE: CID:19 - Fix use-of-NULL in determine_online_status
+ High: PE: Clones now default to globally-unique=false
+ High: PE: Correctly calculate the number of available nodes for the clone to use
+ High: PE: Only shoot online nodes with no-quorum-policy=suicide
+ High: PE: Prevent on-fail settings being ignored after a resource is successfully stopped
+ High: PE: Prevent use-of-NULL for failed migrate actions in process_rsc_state()
+ High: PE: Remove an optimization for the terminate node attribute that caused the cluster to block indefinitely
+ High: PE: Repair the ability to colocate based on node attributes other than uname
+ High: PE: Start the correct monitor operation for unmanaged masters
+ High: stonith: CID:3 - Fix another case of exceptionally poor error handling by the original stonith developers
+ High: stonith: CID:5 - Checking for NULL and then dereferencing it anyway is an interesting approach to error handling
+ High: stonithd: Sending IPC to the cluster is a privileged operation
+ High: stonithd: wrong checks for shmid (0 is a valid id)
+ High: Tools: attrd - Correctly determine when an attribute has stopped changing and should be committed to the CIB
+ High: Tools: Bug 2003 - pingd does not correctly detect failures when the interface is down
+ High: Tools: Bug 2003 - pingd does not correctly handle node-down events on multi-NIC systems
+ High: Tools: Bug 2021 - pingd does not detect sequence wrapping correctly, incorrectly reports nodes offline
+ High: Tools: Bug BNC:468066 - Do not use the result of uname() when it's no longer in scope
+ High: Tools: Bug BNC:473265 - crm_resource -L dumps core
+ High: Tools: Bug LF:2001 - Transient node attributes should be set via attrd
+ High: Tools: Bug LF:2036 - crm_resource cannot set/get parameters for cloned resources
+ High: Tools: Bug LF:2046 - Node attribute updates are lost because attrd can take too long to start
+ High: Tools: Cause the correct clone instance to be failed with crm_resource -F
+ High: Tools: cluster_test - Allow the user to select a stack and fix CTS invocation
+ High: Tools: crm cli: allow rename only if the resource is stopped
+ High: Tools: crm cli: catch system errors on file operations
+ High: Tools: crm cli: completion for ids in configure
+ High: Tools: crm cli: drop '-rsc' from attributes for order constraint
+ High: Tools: crm cli: exit with an appropriate exit code
+ High: Tools: crm cli: fix wrong order of action and resource in order constraint
+ High: Tools: crm cli: fix wrong exit code
+ High: Tools: crm cli: improve handling of cib attributes
+ High: Tools: crm cli: new command: configure rename
+ High: Tools: crm cli: new command: configure upgrade
+ High: Tools: crm cli: new command: node delete
+ High: Tools: crm cli: prevent key errors on missing cib attributes
+ High: Tools: crm cli: print long help for help topics
+ High: Tools: crm cli: return on syntax error when parsing score
+ High: Tools: crm cli: rsc_location can be without nvpairs
+ High: Tools: crm cli: short node preference location constraint
+ High: Tools: crm cli: sometimes, on errors, level would change on single shot use
+ High: Tools: crm cli: syntax: drop a bunch of commas (remains of help tables conversion)
+ High: Tools: crm cli: verify user input for sanity
+ High: Tools: crm: find expressions within rules (do not always skip xml nodes due to used id)
+ High: Tools: crm_master should not define a set id now that attrd is used. Defining one can break lookups
+ High: Tools: crm_mon - Use the OID assigned to the project by IANA for SNMP traps
+ Medium (bnc#445622): Tools: crm cli: improve the node show command and drop node status
+ Medium (LF 2009): stonithd: improve timeouts for remote fencing
+ Medium: ais: Allow dead peers to be removed from membership calculations
+ Medium: ais: Pass node deletion events on to clients
+ Medium: ais: Sanitize ipc usage
+ Medium: ais: Supply the node uname in addition to the id
+ Medium: Build: Clean up configure to ensure NON_FATAL_CFLAGS is consistent with CFLAGS (ie. includes -g)
+ Medium: Build: Install cluster_test
+ Medium: Build: Use more restrictive CFLAGS and fix the resulting errors
+ Medium: cib: CID:20 - Fix potential use-after-free in cib_native_signon
+ Medium: Core: Bug BNC:474727 - Set a maximum time to wait for IPC messages
+ Medium: Core: CID:12 - Fix memory leak in decode_transition_magic error path
+ Medium: Core: CID:14 - Fix memory leak in calculate_xml_digest error path
+ Medium: Core: CID:16 - Fix memory leak in date_to_string error path
+ Medium: Core: Try to track down the cause of XML parsing errors
+ Medium: crmd: Bug BNC:472473 - Do not wait excessive amounts of time for lost actions
+ Medium: crmd: Bug BNC:472473 - Reduce the transition timeout to action_timeout+network_delay
+ Medium: crmd: Do not fast-track the processing of LRM refreshes when there are pending actions.
+ Medium: crmd: do_dc_join_filter_offer - Check the 'join' message is for the current instance before deciding to NACK peers
+ Medium: crmd: Find option values without having to do a config upgrade
+ Medium: crmd: Implement shutdown using a transient node attribute
+ Medium: crmd: Update the crmd options to use dashes instead of underscores
+ Medium: cts: Add 'cluster reattach' to the suite of automated regression tests
+ Medium: cts: cluster_test - Make some usability enhancements
+ Medium: CTS: cluster_test - suggest a valid port number
+ Medium: CTS: Fix python import order
+ Medium: cts: Implement an automated SplitBrain test
+ Medium: CTS: Remove references to deleted classes
+ Medium: Extra: Resources - Use HA_VARRUN instead of HA_RSCTMP for state files as Heartbeat removes HA_RSCTMP at startup
+ Medium: HB: Bug 1933 - Fake crmd_client_status_callback() calls because HB does not provide them for already running processes
+ Medium: PE: CID:17 - Fix memory leak in find_actions_by_task error path
+ Medium: PE: CID:7,8 - Prevent hypothetical use-of-NULL in LogActions
+ Medium: PE: Defer logging the actions performed on a resource until we have processed ordering constraints
+ Medium: PE: Remove the symmetrical attribute of colocation constraints
+ Medium: Resources: pingd - fix the meta defaults
+ Medium: Resources: Stateful - Add missing meta defaults
+ Medium: stonithd: exit if the pid file cannot be locked
+ Medium: Tools: Allow attrd clients to specify the ID the attribute should be created with
+ Medium: Tools: attrd - Allow attribute updates to be performed from a host's peer
+ Medium: Tools: Bug LF:1994 - Clean up crm_verify return codes
+ Medium: Tools: Change the pingd defaults to ping hosts once every second (instead of 5 times every 10 seconds)
+ Medium: Tools: cibmon - Detect resource operations with a view to providing email/snmp/cim notification
+ Medium: Tools: crm cli: add back symmetrical for order constraints
+ Medium: Tools: crm cli: generate role in location when converting from xml
+ Medium: Tools: crm cli: handle shlex exceptions
+ Medium: Tools: crm cli: keep order of help topics
+ Medium: Tools: crm cli: refine completion for ids in configure
+ Medium: Tools: crm cli: replace inf with INFINITY
+ Medium: Tools: crm cli: streamline cib load and parsing
+ Medium: Tools: crm cli: supply provider only for ocf class primitives
+ Medium: Tools: crm_mon - Add support for sending mail notifications of resource events
+ Medium: Tools: crm_mon - Include the DC version in status summary
+ Medium: Tools: crm_mon - Sanitize startup and option processing
+ Medium: Tools: crm_mon - switch to event-driven updates and add support for sending snmp traps
+ Medium: Tools: crm_shadow - Replace the --locate option with the saner --edit
+ Medium: Tools: hb2openais: do not remove Evmsd resources, but replace them with clvmd
+ Medium: Tools: hb2openais: replace crmadmin with crm_mon
+ Medium: Tools: hb2openais: replace the lsb class with ocf for o2cb
+ Medium: Tools: hb2openais: reuse code
+ Medium: Tools: LF:2029 - Display an error if crm_resource is used to reset the operation history of non-primitive resources
+ Medium: Tools: Make pingd resilient to attrd failures
+ Medium: Tools: pingd - fix the command line switches
+ Medium: Tools: Rename ccm_tool to crm_node
* Tue Nov 18 2008 Andrew Beekhof <abeekhof@suse.de> - 1.0.1-1
- Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip
- Statistics:
Changesets: 170
Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-)
- Changes since Pacemaker-1.0.1
+ High: ais: Allow the crmd to get callbacks whenever a node state changes
+ High: ais: Create an option for starting the mgmtd daemon automatically
+ High: ais: Ensure HA_RSCTMP exists for use by resource agents
+ High: ais: Hook up the openais.conf config logging options
+ High: ais: Zero out the PID of disconnecting clients
+ High: cib: Ensure global updates cause a disk write when appropriate
+ High: Core: Add an extra sanity check to getXpathResults() to prevent segfaults
+ High: Core: Do not redefine __FUNCTION__ unnecessarily
+ High: Core: Repair the ability to have comments in the configuration
+ High: crmd: Bug:1975 - crmd should wait indefinitely for stonith operations to complete
+ High: crmd: Ensure PE processing does not occur for all error cases in do_pe_invoke_callback
+ High: crmd: Requests to the CIB should cause any prior PE calculations to be ignored
+ High: heartbeat: Wait for membership 'up' events before removing stale node status data
+ High: PE: Bug LF:1988 - Ensure recurring operations always have the correct target-rc set
+ High: PE: Bug LF:1988 - For unmanaged resources we need to skip the usual can_run_resources() checks
+ High: PE: Ensure the terminate node attribute is handled correctly
+ High: PE: Fix optional colocation
+ High: PE: Improve the detection of 'new' nodes joining the cluster
+ High: PE: Prevent assert failures in master_color() by ensuring unmanaged masters are always reallocated to their current location
+ High: Tools: crm cli: parser: return False on syntax error and None for comments
+ High: Tools: crm cli: unify template and edit commands
+ High: Tools: crm_shadow - Show more line number information after validation failures
+ High: Tools: hb2openais: add option to upgrade the CIB to v3.0
+ High: Tools: hb2openais: add U option to getopts and update usage
+ High: Tools: hb2openais: backup improved and multiple fixes
+ High: Tools: hb2openais: fix class/provider reversal
+ High: Tools: hb2openais: fix testing
+ High: Tools: hb2openais: move the CIB update to the end
+ High: Tools: hb2openais: update logging and set logfile appropriately
+ High: Tools: LF:1969 - Attrd never sets any properties in the cib
+ High: Tools: Make attrd functional on OpenAIS
+ Medium: ais: Hook up the options for specifying the expected number of nodes and total quorum votes
+ Medium: ais: Look for pacemaker options inside the service block with 'name: pacemaker' instead of creating an additional configuration block (see the sketch after this list)
+ Medium: ais: Provide better feedback when nodes change nodeids (in openais.conf)
+ Medium: cib: Always store cib contents on disk with num_updates=0
+ Medium: cib: Ensure remote access ports are cleaned up on shutdown
+ Medium: crmd: Detect deleted resource operations automatically
+ Medium: crmd: Erase a node's resource operations and transient attributes after a successful STONITH
+ Medium: crmd: Find a more appropriate place to update quorum and refresh attrd attributes
+ Medium: crmd: Fix the handling of unexpected PE exits to ensure the current CIB is stored
+ Medium: crmd: Fix the recording of pending operations in the CIB
+ Medium: crmd: Initiate an attrd refresh _after_ the status section has been fully repopulated
+ Medium: crmd: Only the DC should update quorum in an openais cluster
+ Medium: Ensure meta attributes are used consistently
+ Medium: PE: Allow group and clone level resource attributes
+ Medium: PE: Bug N:437719 - Ensure scores from colocated resources count when allocating groups
+ Medium: PE: Prevent lsb scripts from being used in globally unique clones
+ Medium: PE: Make a best-effort guess at a migration threshold for people with 0.6 configs
+ Medium: Resources: controld - ensure we are part of a clone with globally_unique=false
+ Medium: Tools: attrd - Automatically refresh all attributes after a CIB replace operation
+ Medium: Tools: Bug LF:1985 - crm_mon - Correctly process failed cib queries to allow reconnection after cluster restarts
+ Medium: Tools: Bug LF:1987 - crm_verify incorrectly warns of configuration upgrades for the most recent version
+ Medium: Tools: crm (bnc#441028): check for key error in attributes management
+ Medium: Tools: crm_mon - display the meaning of the operation rc code instead of the status
+ Medium: Tools: crm_mon - Fix the display of timing data
+ Medium: Tools: crm_verify - check that we are being asked to validate a complete config
+ Medium: xml: Relax the restriction on the contents of rsc_location.node
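- Illustration: a hedged sketch of the openais.conf layout implied by the
  'name: pacemaker' entry above; only the block name is taken from this log,
  anything placed inside it is an assumption:
    service {
        name: pacemaker    # pacemaker options (logging, expected votes, ...) now live here
    }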
* Thu Oct 16 2008 Andrew Beekhof <abeekhof@suse.de> - 1.0.0-1
- Update source tarball to revision: 388654dfef8f tip
- Statistics:
Changesets: 261
Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-)
- Changes since f805e1b30103
+ High: add the crm cli program
+ High: ais: Move the service id definition to a common location and make sure it is always used
+ High: build: rename hb2openais.sh to .in and replace paths with vars
+ High: cib: Implement --create for crm_shadow
+ High: cib: Remove dead files
+ High: Core: Allow the expected number of quorum votes to be configurable
+ High: Core: cl_malloc and friends were removed from Heartbeat
+ High: Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault
+ High: hb2openais.sh: improve pingd handling; several bugs fixed
+ High: hb2openais: fix clone creation; replace EVMS strings
+ High: new hb2openais.sh conversion script
+ High: PE: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty)
+ High: PE: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly.
+ High: PE: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures
+ High: PE: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly
+ High: PE: Bug N:420538 - Anti-colocation caused a positive node preference
+ High: PE: Correctly handle unmanaged resources to prevent them from being started elsewhere
+ High: PE: crm_resource - Fix the --migrate command
+ High: PE: Make stonith-enabled default to true and warn if no STONITH resources are found
+ High: PE: Make sure orphaned clone children are created correctly
+ High: PE: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete
+ High: stonithd (LF 1951): fix remote stonith operations
+ High: stonithd: fix handling of timeouts
+ High: stonithd: fix logic for stonith resource priorities
+ High: stonithd: implement the fence-timeout instance attribute
+ High: stonithd: initialize value before reading fence-timeout
+ High: stonithd: set timeouts for fencing ops to the timeout of the start op
+ High: stonithd: stonith rsc priorities (new feature)
+ High: Tools: Add hb2openais - a tool for upgrading a Heartbeat cluster to use OpenAIS instead
+ High: Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations
+ High: Tools: Make pingd functional on Linux
+ High: Update version numbers for 1.0 candidates
+ Medium: ais: Add support for a synchronous call to retrieve the node's nodeid
+ Medium: ais: Use the agreed service number
+ Medium: Build: Reliably detect heartbeat libraries during configure
+ Medium: Build: Supply prototypes for libreplace functions when needed
+ Medium: Build: Teach configure how to find corosync
+ Medium: Core: Provide better feedback if Pacemaker is started by a stack it does not support
+ Medium: crmd: Avoid calling GHashTable functions with NULL
+ Medium: crmd: Delay raising I_ERROR when the PE exits until we have had a chance to save the current CIB
+ Medium: crmd: Hook up the stonith-timeout option to stonithd
+ Medium: crmd: Prevent potential use-of-NULL in global_timer_callback
+ Medium: crmd: Rationalize the logging of graph aborts
+ Medium: PE: Add a stonith_timeout option and remove new options that are better set in rsc_defaults
+ Medium: PE: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute
+ Medium: PE: Bug LF:1950 - Notifications do not contain all documented resource state fields
+ Medium: PE: Bug N:417585 - Do not restart group children whose individual score drops below zero
+ Medium: PE: Detect clients that disconnect before receiving their reply
+ Medium: PE: Implement a true maintenance mode
+ Medium: PE: Implement on-fail=standby for NTT. Derived from a patch by Satomi TANIGUCHI
+ Medium: PE: Print the correct message when stonith is disabled
+ Medium: PE: ptest - check the input is valid before proceeding
+ Medium: PE: Revert group stickiness to the 'old way'
+ Medium: PE: Use the correct attribute for action 'requires' (was prereq)
+ Medium: stonithd: Fix compilation without full heartbeat install
+ Medium: stonithd: exit with better code on empty host list
+ Medium: tools: Add a new regression test for CLI tools
+ Medium: tools: crm_resource - return with non-zero when a resource migration command is invalid
+ Medium: tools: crm_shadow - Allow the admin to start with an empty CIB (and no cluster connection)
+ Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema
* Mon Sep 22 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.3-1
- Update source tarball to revision: 33e677ab7764+ tip
- Statistics:
Changesets: 133
Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-)
- Changes since f805e1b30103
+ High: Tools: add the crm cli program
+ High: Core: cl_malloc and friends were removed from Heartbeat
+ High: Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault
+ High: new hb2openais.sh conversion script
+ High: PE: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty)
+ High: PE: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly.
+ High: PE: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures
+ High: PE: Bug LF:1955 - Unmanaged mode prevents master resources from being allocated correctly
+ High: PE: Bug N:420538 - Anti-colocation caused a positive node preference
+ High: PE: Correctly handle unmanaged resources to prevent them from being started elsewhere
+ High: PE: crm_resource - Fix the --migrate command
+ High: PE: Make stonith-enabled default to true and warn if no STONITH resources are found
+ High: PE: Make sure orphaned clone children are created correctly
+ High: PE: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete
+ High: stonithd (LF 1951): fix remote stonith operations
+ High: Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations
+ Medium: ais: Add support for a synchronous call to retrieve the node's nodeid
+ Medium: ais: Use the agreed service number
+ Medium: PE: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute
+ Medium: PE: Bug LF:1950 - Notifications do not contain all documented resource state fields
+ Medium: PE: Bug N:417585 - Do not restart group children whose individual score drops below zero
+ Medium: PE: Implement a true maintenance mode
+ Medium: PE: Print the correct message when stonith is disabled
+ Medium: stonithd: exit with better code on empty host list
+ Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema
* Wed Aug 20 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.1-1
- Update source tarball to revision: f805e1b30103+ tip
- Statistics:
Changesets: 184
Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-)
- Changes since 0.7.0-19
+ Fix compilation when GNUTLS isn't found
+ High: admin: Fix use-after-free in crm_mon
+ High: Build: Remove testing code that prevented heartbeat-only builds
+ High: cib: Use single quotes so that the xpath queries for nvpairs will succeed
+ High: crmd: Always connect to stonithd when the TE starts and ensure we notice if it dies
+ High: crmd: Correctly handle a dead PE process
+ High: crmd: Make sure async-failures cause the failcount to be incremented
+ High: PE: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes
+ High: PE: Parse resource ordering sets correctly
+ High: PE: Prevent use-of-NULL - order->rsc_rh will not always be non-NULL
+ High: PE: Unpack colocation sets correctly
+ High: Tools: crm_mon - Prevent use-of-NULL for orphaned resources
+ Medium: ais: Add support for a synchronous call to retrieve the node's nodeid
+ Medium: ais: Allow transient clients to receive membership updates
+ Medium: ais: Avoid double-free in error path
+ Medium: ais: Include in the membership nodes for which we have not determined their hostname
+ Medium: ais: Spawn the PE from the ais plugin instead of the crmd
+ Medium: cib: By default, new configurations use the latest schema
+ Medium: cib: Clean up the CIB if it was already disconnected
+ Medium: cib: Only increment num_updates if something actually changed
+ Medium: cib: Prevent use-after-free in client after abnormal termination of the CIB
+ Medium: Core: Fix memory leak in xpath searches
+ Medium: Core: Get more details regarding parser errors
+ Medium: Core: Repair expand_plus_plus - do not call char2score on unexpanded values
+ Medium: Core: Switch to the libxml2 parser - it's significantly faster
+ Medium: Core: Use a libxml2 library function for xml -> text conversion
+ Medium: crmd: Asynchronous failure actions have no parameters
+ Medium: crmd: Avoid calling glib functions with NULL
+ Medium: crmd: Do not allow an election to promote a node from S_STARTING
+ Medium: crmd: Do not vote if we have not completed the local startup
+ Medium: crmd: Fix te_update_diff() now that get_object_root() functions differently
+ Medium: crmd: Fix the lrmd xpath expressions to not contain quotes
+ Medium: crmd: If we get a join offer during an election, better restart the election
+ Medium: crmd: No further processing is needed when using the LRM's API call for failing resources
+ Medium: crmd: Only update have-quorum if the value changed
+ Medium: crmd: Repair the input validation logic in do_te_invoke
+ Medium: cts: CIBs can no longer contain comments
+ Medium: cts: Enable a bunch of tests that were incorrectly disabled
+ Medium: cts: The libxml2 parser won't allow v1 resources to use integers as parameter names
+ Medium: Do not use the cluster UID and GID directly. Look them up based on the configured value of HA_CCMUSER
+ Medium: Fix compilation when heartbeat is not supported
+ Medium: PE: Allow groups to be involved in optional ordering constraints
+ Medium: PE: Allow sets of operations to be reused by multiple resources
+ Medium: PE: Bug LF:1941 - Mark extra clone instances as orphans and do not show inactive ones
+ Medium: PE: Determine the correct migration-threshold during resource expansion
+ Medium: PE: Implement no-quorum-policy=suicide (FATE #303619)
+ Medium: pengine: Clean up resources after stopping old copies of the PE
+ Medium: pengine: Teach the PE how to stop old copies of itself
+ Medium: Tools: Backport hb_report updates
+ Medium: Tools: cib_shadow - On create, spawn a new shell with CIB_shadow and PS1 set accordingly (see the sketch after this list)
+ Medium: Tools: Rename cib_shadow to crm_shadow
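- Illustration: a minimal, hedged sketch of the shadow workflow noted above;
  only --create and the CIB_shadow/PS1 behaviour are taken from this log, the
  instance name "test" is hypothetical:
    crm_shadow --create test    # spawns a sub-shell with CIB_shadow=test and a matching PS1
    # edit the shadow copy with the usual CLI tools, then commit or discard it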
* Fri Jul 18 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.0-19
- Update source tarball to revision: 007c3a1c50f5 (unstable) tip
- Statistics:
Changesets: 108
Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-)
- Changes added since unstable-0.7
+ High: admin: Fix use-after-free in crm_mon
+ High: ais: Change the tag for the ais plugin to "pacemaker" (used in openais.conf)
+ High: ais: Log terminated processes as an error
+ High: cib: Performance - Reorganize things to avoid calculating the XML diff twice
+ High: PE: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes
+ High: PE: Fix memory leak in action2xml
+ High: PE: Make OCF_ERR_ARGS a node-level error rather than a cluster-level one
+ High: PE: Properly handle clones that are not installed on all nodes
+ Medium: admin: cibadmin - Show any validation errors if the upgrade failed
+ Medium: admin: cib_shadow - Implement --locate to display the underlying filename
+ Medium: admin: cib_shadow - Implement a --diff option
+ Medium: admin: cib_shadow - Implement a --switch option
+ Medium: admin: crm_resource - create more compact constraints that do not use lifetime (which is deprecated)
+ Medium: ais: Approximate born_on for OpenAIS based clusters
+ Medium: cib: Remove do_id_check, it is a poor substitute for ID validation by a schema
+ Medium: cib: Skip construction of pre-notify messages if no-one wants one
+ Medium: Core: Attempt to streamline some key functions to increase performance
+ Medium: Core: Clean up XML parser after validation
+ Medium: crmd: Detect and optimize the CRM's behavior when processing diffs of an LRM refresh
+ Medium: Fix memory leaks when resetting the name of an XML object
+ Medium: PE: Prefer the current location if it is one of a group of nodes with the same (highest) score
* Wed Jun 25 2008 Andrew Beekhof <abeekhof@suse.de> - 0.7.0-1
- Update source tarball to revision: bde0c7db74fb tip
- Statistics:
Changesets: 439
Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-)
- Changes added since stable-0.6
+ High: A new tool for setting up and invoking CTS
+ High: Admin: All tools now use --node (-N) for specifying node unames
+ High: Admin: All tools now use --xml-file (-x) and --xml-text (-X) for specifying where to find XML blobs
+ High: cib: Cleanup the API - remove redundant input fields
+ High: cib: Implement CIB_shadow - a facility for making and testing changes before uploading them to the cluster
+ High: cib: Make registering per-op callbacks an API call and rename (for clarity) the API call for requesting notifications
+ High: Core: Add a facility for automatically upgrading old configurations
+ High: Core: Adopt libxml2 as the XML processing library - all external clients need to be recompiled
+ High: Core: Allow sending TLS messages larger than the MTU
+ High: Core: Fix parsing of time-only ISO dates
+ High: Core: Smarter handling of XML values containing quotes
+ High: Core: XML memory corruption - catch, and handle, cases where we are overwriting an attribute value with itself
+ High: Core: The xml ID type does not allow UUIDs that start with a number
+ High: Core: Implement XPath based versions of query/delete/replace/modify
+ High: Core: Remove some HA2.0.(3,4) compatibility code
+ High: crmd: Overhaul the detection of nodes that are starting vs. failed
+ High: PE: Bug LF:1459 - Allow failures to expire
+ High: PE: Have the PE do non-persistent configuration upgrades before performing calculations
+ High: PE: Replace failure-stickiness with a simple 'migration-threshold'
+ High: TE: Simplify the design by folding the tengine process into the crmd
+ Medium: Admin: Bug LF:1438 - Allow the list of all/active resource operations to be queried by crm_resource
+ Medium: Admin: Bug LF:1708 - crm_resource should print a warning if an attribute is already set as a meta attribute
+ Medium: Admin: Bug LF:1883 - crm_mon should display fail-count and operation history
+ Medium: Admin: Bug LF:1883 - crm_mon should display operation timing data
+ Medium: Admin: Bug N:371785 - crm_resource -C does not also clean up fail-count attributes
+ Medium: Admin: crm_mon - include timing data for failed actions
+ Medium: ais: Read options from the environment since objdb is not completely usable yet
+ Medium: cib: Add sections for op_defaults and rsc_defaults
+ Medium: cib: Better matching notification callbacks (for detecting duplicates and removal)
+ Medium: cib: Bug LF:1348 - Allow rules and attribute sets to be referenced for use in other objects
+ Medium: cib: Bug LF:1918 - By default, all cib calls now time out after 30s
+ Medium: cib: Detect updates that decrease the version tuple
+ Medium: cib: Implement a client-side operation timeout - Requires LHA update
+ Medium: cib: Implement callbacks and async notifications for remote connections
+ Medium: cib: Make cib->cmds->update() an alias for modify at the API level (also implemented in cibadmin)
+ Medium: cib: Mark the CIB as disconnected if the IPC connection is terminated
+ Medium: cib: New call option 'cib_can_create' which can be passed to modify actions - allows the object to be created if it does not exist yet
+ Medium: cib: Reimplement get|set|delete attributes using XPath
+ Medium: cib: Remove some useless parts of the API
+ Medium: cib: Remove the 'attributes' scaffolding from the new format
+ Medium: cib: Implement the ability for clients to connect to remote servers
+ Medium: Core: Add support for validating xml against RelaxNG schemas
+ Medium: Core: Allow more than one item to be modified/deleted in XPath based operations
+ Medium: Core: Fix the sort_pairs function for creating sorted xml objects
+ Medium: Core: iso8601 - Implement subtract_duration and fix subtract_time
+ Medium: Core: Reduce the amount of xml copying occurring
+ Medium: Core: Support value='value+=N' XML updates (in addition to value='value++'; see the sketch after this list)
+ Medium: crmd: Add support for lrm_ops->fail_rsc if its available
+ Medium: crmd: HB - watch link status for node leaving events
+ Medium: crmd: Bug LF:1924 - Improved handling of lrmd disconnects and shutdowns
+ Medium: crmd: Do not wait for actions with a start_delay over 5 minutes. Confirm them immediately
+ Medium: PE: Bug LF:1328 - Do not fence nodes in clusters without managed resources
+ Medium: PE: Bug LF:1461 - Give transient node attributes (in <status/>) preference over persistent ones (in <nodes/>)
+ Medium: PE: Bug LF:1884, Bug LF:1885 - Implement N:M ordering and colocation constraints
+ Medium: PE: Bug LF:1886 - Create a resource and operation 'defaults' config section
+ Medium: PE: Bug LF:1892 - Allow recurring actions to be triggered at known times
+ Medium: PE: Bug LF:1926 - Probes should complete before stop actions are invoked
+ Medium: PE: Fix handling of standby when it is set as a transient attribute
+ Medium: PE: Implement a global 'stop-all-resources' option
+ Medium: PE: Implement cibpipe, a tool for performing/simulating config changes "offline"
+ Medium: PE: We do not allow colocation with specific clone instances
+ Medium: Tools: pingd - Implement a stack-independent version of pingd
+ Medium: xml: Ship an xslt for upgrading from 0.6 to 0.7
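- Illustration: a hedged sketch of the additive-update syntax noted above,
  combined with the --xml-text convention from this release; the nvpair id and
  name are hypothetical:
    cibadmin --modify --xml-text '<nvpair id="my-count" name="count" value="value++"/>'
    cibadmin --modify --xml-text '<nvpair id="my-count" name="count" value="value+=5"/>'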
* Thu Jun 19 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.5-1
- Update source tarball to revision: b9fe723d1ac5 tip
- Statistics:
Changesets: 48
Diff: 37 files changed, 1204 insertions(+), 234 deletions(-)
- Changes since Pacemaker-0.6.4
+ High: Admin: Repair the ability to delete failcounts
+ High: ais: Audit IPC handling between the AIS plugin and CRM processes
+ High: ais: Have the plugin create needed /var/lib directories
+ High: ais: Make sure the sync and async connections are assigned correctly (not swapped)
+ High: cib: Correctly detect configuration changes - num_updates does not count
+ High: PE: Apply stickiness values to the whole group, not the individual resources
+ High: PE: Bug N:385265 - Ensure groups are migrated instead of remaining partially active on the current node
+ High: PE: Bug N:396293 - Enforce mandatory group restarts due to ordering constraints
+ High: PE: Correctly recover master instances found active on more than one node
+ High: PE: Fix memory leaks reported by Valgrind
+ Medium: Admin: crm_mon - Misc improvements from Satomi Taniguchi
+ Medium: Bug LF:1900 - Resource stickiness should not allow placement in asynchronous clusters
+ Medium: crmd: Ensure joins are completed promptly when a node taking part dies
+ Medium: PE: Avoid clone instance shuffling in more cases
+ Medium: PE: Bug LF:1906 - Remove an optimization in native_merge_weights() causing group scores to behave erratically
+ Medium: PE: Make use of target_rc data to correctly process resource operations
+ Medium: PE: Prevent a possible use of NULL in sort_clone_instance()
+ Medium: TE: Include target rc in the transition key - used to correctly determine operation failure
* Thu May 22 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.4-1
- Update source tarball to revision: 226d8e356924 tip
- Statistics:
Changesets: 55
Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-)
- Changes since Pacemaker-0.6.3
+ High: crmd: Bug LF:1881 LF:1882 - Overhaul the logic for operation cancellation and deletion
+ High: crmd: Bug LF:1894 - Make sure cancelled recurring operations are cleaned out from the CIB
+ High: PE: Bug N:387749 - Colocation with clones causes unnecessary clone instance shuffling
+ High: PE: Ensure 'master' monitor actions are cancelled _before_ we demote the resource
+ High: PE: Fix assert failure leading to core dump - make sure variable is properly initialized
+ High: PE: Make sure 'slave' monitoring happens after the resource has been demoted
+ High: PE: Prevent failure stickiness underflows (where too many failures become a _positive_ preference)
+ Medium: Admin: crm_mon - Only complain if the output file could not be opened
+ Medium: Common: filter_action_parameters - enable legacy handling only for older versions
+ Medium: PE: Bug N:385265 - The failure stickiness of group children is ignored until it reaches -INFINITY
+ Medium: PE: Implement master and clone colocation by excluding nodes rather than setting a node's score to INFINITY (similar to cs: 756afc42dc51)
+ Medium: TE: Bug LF:1875 - Correctly find actions to cancel when their node leaves the cluster
* Wed Apr 23 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.3-1
- Update source tarball to revision: fd8904c9bc67 tip
- Statistics:
Changesets: 117
Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-)
- Changes since Pacemaker-0.6.2
+ High: Admin: Bug LF:1848 - crm_resource - Pass set name and id to delete_resource_attr() in the correct order
+ High: Build: SNMP has been moved to the management/pygui project
+ High: crmd: Bug LF1837 - Unmanaged resources prevent crmd from shutting down
+ High: crmd: Prevent use-after-free in lrm interface code (Patch based on work by Keisuke MORI)
+ High: PE: Allow the cluster to make progress by not retrying failed demote actions
+ High: PE: Anti-colocation with slave should not prevent master colocation
+ High: PE: Bug LF 1768 - Wait more often for STONITH ops to complete before starting resources
+ High: PE: Bug LF1836 - Allow is-managed-default=false to be overridden by individual resources
+ High: PE: Bug LF185 - Prevent pointless master/slave instance shuffling by ignoring the master-pref of stopped instances
+ High: PE: Bug N-191176 - Implement interleaved ordering for clone-to-clone scenarios
+ High: PE: Bug N-347004 - Ensure clone notifications are always sent when an instance is stopped/started
+ High: PE: Bug N-347004 - Ensure notification ordering is correct for interleaved clones
+ High: PE: Bug PM-11 - Directly link probe_complete to starting clone instances
+ High: PE: Bug PM1 - Fix setting failcounts when applied to complex resources
+ High: PE: Bug PM12, LF1648 - Extensive revision of group ordering
+ High: PE: Bug PM7 - Ensure masters are always demoted before they are stopped
+ High: PE: Create probes after allocation to allow smarter handling of anonymous clones
+ High: PE: Do not prioritize clone instances that must be moved
+ High: PE: Fix error in previous commit that allowed more than the required number of masters to be promoted
+ High: PE: Group start ordering fixes
+ High: PE: Implement promote/demote ordering for cloned groups
+ High: TE: Repair failcount updates
+ High: TE: Use the correct offset when updating failcount
+ Medium: Admin: Add a summary output that can be easily parsed by CTS for audit purposes
+ Medium: Build: Make configure fail if bz2 or libxml2 are not present
+ Medium: Build: Re-instate a better default for LCRSODIR
+ Medium: CIB: Bug LF-1861 - Filter irrelevant error status from synchronous CIB clients
+ Medium: Core: Bug 1849 - Invalid conversion of ordinal leap year to Gregorian date
+ Medium: Core: Drop compatibility code for 2.0.4 and 2.0.5 clusters
+ Medium: crmd: Bug LF-1860 - Automatically cancel recurring ops before demote and promote operations (not only stops)
+ Medium: crmd: Save the current CIB contents if we detect the PE crashed
+ Medium: PE: Bug LF:1866 - Fix version check when applying compatibility handling for failed start operations
+ Medium: PE: Bug LF:1866 - Restore the ability to have start failures not be fatal
+ Medium: PE: Bug PM1 - Failcount applies to all instances of non-unique clone
+ Medium: PE: Correctly set the state of partially active master/slave groups
+ Medium: PE: Do not claim to be stopping an already stopped orphan
+ Medium: PE: Ensure implies_left ordering constraints are always effective
+ Medium: PE: Indicate each resource's 'promotion' score
+ Medium: PE: Prevent a possible use-of-NULL
+ Medium: PE: Reprocess the current action if it changed (so that any prior dependencies are updated)
+ Medium: TE: Bug LF-1859 - Wait for fail-count updates to complete before terminating the transition
+ Medium: TE: Bug LF:1859 - Do not abort graphs due to our own failcount updates
+ Medium: TE: Bug LF:1859 - Prevent the TE from interrupting itself
* Thu Feb 14 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.2-1
- Update source tarball to revision: 28b1a8c1868b tip
- Statistics:
Changesets: 11
Diff: 7 files changed, 58 insertions(+), 18 deletions(-)
- Changes since Pacemaker-0.6.1
+ haresources2cib.py: set default-action-timeout to the default (20s)
+ haresources2cib.py: update ra parameters lists
+ Medium: SNMP: Allow the snmp subagent to be built (patch from MATSUDA, Daiki)
+ Medium: Tools: Make sure the autoconf variables in haresources2cib are expanded
* Tue Feb 12 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.1-1
- Update source tarball to revision: e7152d1be933 tip
- Statistics:
Changesets: 25
Diff: 37 files changed, 1323 insertions(+), 227 deletions(-)
- Changes since Pacemaker-0.6.0
+ High: CIB: Ensure changes to top-level attributes (like admin_epoch) cause a disk write
+ High: CIB: Ensure the archived file hits the disk before returning
+ High: CIB: Repair the ability to do 'atomic increment' updates (value="value++")
+ High: crmd: Bug #7 - Connecting to the crmd immediately after startup causes use-of-NULL
+ Medium: CIB: Mask cib_diff_resync results from the caller - they do not need to know
+ Medium: crmd: Delay starting the IPC server until we are fully functional
+ Medium: CTS: Fix the startup patterns
+ Medium: PE: Bug 1820 - Allow the first resource in a group to be migrated
+ Medium: PE: Bug 1820 - Check the colocation dependencies of resources to be migrated
* Mon Jan 14 2008 Andrew Beekhof <abeekhof@suse.de> - 0.6.0-2
- This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat.
- For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in
the new pacemaker-pygui project. Build dependencies prevent them from being
included in Heartbeat (since the built-in CRM is no longer supported) and,
being non-core components, are not included with Pacemaker.
- Update source tarball to revision: c94b92d550cf
- Statistics:
Changesets: 347
Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-)
- Test hardware:
+ 6-node vmware cluster (sles10-sp1/256Mb/vmware stonith) on a single host (opensuse10.3/2Gb/2.66Ghz Quad Core2)
+ 7-node EMC Centera cluster (sles10/512Mb/2Ghz Xeon/ssh stonith)
- Notes: Heartbeat Stack
+ All testing was performed with STONITH enabled
+ The CRM was enabled using the "crm respawn" directive
- Notes: OpenAIS Stack
+ This release contains a preview of support for the OpenAIS cluster stack
+ The current release of the OpenAIS project is missing two important
patches that we require. OpenAIS packages containing these patches are
available for most major distributions at:
http://download.opensuse.org/repositories/server:/ha-clustering
+ The OpenAIS stack is not currently recommended for use in clusters that
have shared data as STONITH support is not yet implemented
+ pingd is not yet available for use with the OpenAIS stack
+ 3 significant OpenAIS issues were found during testing of 4 and 6 node
clusters. We are actively working together with the OpenAIS project to
get these resolved.
- Pending bugs encountered during testing:
+ OpenAIS #1736 - Openais membership took 20s to stabilize
+ Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match
+ OpenAIS #1793 - Assertion failure in memb_state_gather_enter()
+ OpenAIS #1796 - Cluster message corruption
- Changes since Heartbeat-2.1.2-24
+ High: Add OpenAIS support
+ High: Admin: crm_uuid - Look in the right place for Heartbeat UUID files
+ High: admin: Exit and indicate a problem if the crmd exits while crmadmin is performing a query
+ High: cib: Fix CIB_OP_UPDATE calls that modify the whole CIB
+ High: cib: Fix compilation when supporting the heartbeat stack
+ High: cib: Fix memory leaks caused by the switch to get_message_xml()
+ High: cib: HA_VALGRIND_ENABLED needs to be set _and_ set to 1|yes|true
+ High: cib: Use get_message_xml() in preference to cl_get_struct()
+ High: cib: Use the return value from call to write() in cib_send_plaintext()
+ High: Core: ccm nodes can legitimately have a node id of 0
+ High: Core: Fix peer-process tracking for the Heartbeat stack
+ High: Core: Heartbeat does not send status notifications for nodes that were already part of the cluster. Fake them instead
+ High: CRM: Add children to HA_Messages such that the field name matches F_XML_TAGNAME
+ High: crm: Adopt a more flexible approach to enabling Valgrind
+ High: crm: Fix compilation when bzip2 is not installed
+ High: CRM: Future-proof get_message_xml()
+ High: crmd: Filter election responses based on time not FSA state
+ High: crmd: Handle all possible peer states in crmd_ha_status_callback()
+ High: crmd: Make sure the current date/time is set - prevents use-of-NULL when evaluating rules
+ High: crmd: Relax an assertion regarding ccm membership instances
+ High: crmd: Use (node->processes&crm_proc_ais) to accurately update the CIB after replace operations
+ High: crmd: Heartbeat: Accurately record peer client status
+ High: PE: Bug 1777 - Allow colocation with a resource in the Stopped state
+ High: PE: Bug 1822 - Prevent use-of-NULL in PromoteRsc()
+ High: PE: Implement three recovery policies based on op_status and op_rc
+ High: PE: Parse fail-count correctly (it may be set to INFINITY)
+ High: PE: Prevent graph-loop when stonith agents need to be moved around before a STONITH op
+ High: PE: Prevent graph-loops when two operations have the same name+interval
+ High: te: Cancel active timers when destroying graphs
+ High: TE: Ensure failcount is set correctly for failed stops/starts
+ High: TE: Update failcount for operations that time out
+ Medium: admin: Prevent hang in crm_mon -1 when there is no cib connection - Patch from Junko IKEDA
+ Medium: cib: Require --force|-f when performing potentially dangerous commands with cibadmin
+ Medium: cib: Tweak the shutdown code
+ Medium: Common: Only count peer processes of active nodes
+ Medium: Core: Create generic cluster sign-in method
+ Medium: core: Fix compilation when Heartbeat support is disabled
+ Medium: Core: General cleanup for supporting two stacks
+ Medium: Core: iso8601 - Support parsing of time-only strings
+ Medium: core: Isolate more code that is only needed when SUPPORT_HEARTBEAT is enabled
+ Medium: crm: Improved logging of errors in the XML parser
+ Medium: crmd: Fix potential use-of-NULL in string comparison
+ Medium: crmd: Reimplement synchronizing of CIB queries and updates when invoking the PE
+ Medium: crm_mon: Indicate when a node is both in standby mode and offline
+ Medium: PE: Bug 1822 - Do not try to promote groups if not all of the group is active
+ Medium: PE: on_fail=nothing is an alias for 'ignore' not 'restart'
+ Medium: PE: Prevent a potential use-of-NULL in cron_range_satisfied()
+ snmp subagent: fix a problem with displaying an unmanaged group
+ snmp subagent: use the syslog setting
+ snmp: v2 support (thanks to Keisuke MORI)
+ snmp_subagent - made it not complain about some things if shutting down
* Mon Dec 10 2007 Andrew Beekhof <abeekhof@suse.de> - 0.6.0-1
- Initial opensuse package check-in
diff --git a/shell/Makefile.am b/shell/Makefile.am
index b44fdc76df..37ca79201d 100644
--- a/shell/Makefile.am
+++ b/shell/Makefile.am
@@ -1,26 +1,26 @@
#
# doc: Pacemaker code
#
# Copyright (C) 2008 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
sbin_SCRIPTS = crm
-EXTRA_DIST = crm setup.py.in README.install
+EXTRA_DIST = crm
SUBDIRS = templates regression modules
diff --git a/shell/README.install b/shell/README.install
deleted file mode 100644
index 8d2fe52153..0000000000
--- a/shell/README.install
+++ /dev/null
@@ -1,7 +0,0 @@
-Run from your favourite packager something like this:
-
- python setup.py install --prefix=$prefix --root=$rootdir
- python%{py_ver} %{py_libdir}/compileall.py -d %{py_site}/ \
- $RPM_BUILD_ROOT/%{py_sitedir}
-
-The above may be used in the RPM spec file.
diff --git a/shell/modules/Makefile.am b/shell/modules/Makefile.am
index 31923f1483..a73a174fd8 100644
--- a/shell/modules/Makefile.am
+++ b/shell/modules/Makefile.am
@@ -1,27 +1,44 @@
#
# doc: Pacemaker code
#
# Copyright (C) 2008 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
-modules = cache.py cibconfig.py cibstatus.py clidisplay.py help.py.in \
- levels.py main.py msg.py parse.py ra.py.in singletonmixin.py \
- template.py term.py ui.py.in userprefs.py utils.py vars.py.in \
- xmlutil.py
+modules = __init__.py \
+ cache.py \
+ cibconfig.py \
+ cibstatus.py \
+ clidisplay.py \
+ help.py \
+ levels.py \
+ main.py \
+ msg.py \
+ parse.py \
+ ra.py \
+ singletonmixin.py \
+ template.py \
+ term.py \
+ ui.py \
+ userprefs.py \
+ utils.py \
+ vars.py \
+ xmlutil.py
-EXTRA_DIST = $(modules)
+shelllibdir = $(pythondir)/crm
+
+shelllib_PYTHON = $(modules)
diff --git a/shell/setup.py.in b/shell/setup.py.in
deleted file mode 100644
index 15b633433f..0000000000
--- a/shell/setup.py.in
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-#
-from distutils.core import setup
-packages = ['crm']
-
-setup(name='crm',
- version='@PACKAGE_VERSION@',
- author='Dejan Muhamedagic',
- author_email='dejan@hello-penguin.com',
- url='http://www.clusterlabs.org/',
- description='Pacemaker text based user interface (management and configuration)',
- license='GNU GPL',
- packages=packages,
- package_dir={'crm': 'modules'}
- )
diff --git a/tools/Makefile.am b/tools/Makefile.am
index d90ad8e68f..4bf73ff09c 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -1,147 +1,147 @@
#
# Copyright (C) 2004-2009 Andrew Beekhof
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
MAINTAINERCLEANFILES = Makefile.in
INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include \
-I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
COMMONLIBS = \
$(top_builddir)/lib/common/libcrmcommon.la \
$(top_builddir)/lib/cib/libcib.la \
$(CURSESLIBS) $(CLUSTERLIBS)
headerdir = $(pkgincludedir)/crm
header_HEADERS = attrd.h
EXTRA_DIST = $(sbin_SCRIPTS)
halibdir = $(CRM_DAEMON_DIR)
-halib_SCRIPTS = haresources2cib.py hb2openais.sh
+halib_SCRIPTS = hb2openais.sh
halib_PROGRAMS = attrd pingd
halib_PYTHON = crm_primitive.py hb2openais-helper.py
sbin_PROGRAMS = crm_simulate crmadmin cibadmin crm_node crm_attribute crm_resource crm_verify \
crm_uuid crm_shadow attrd_updater crm_diff crm_mon iso8601
testdir = $(datadir)/$(PACKAGE)/tests/cli
test_SCRIPTS = regression.sh
test_DATA = regression.exp
if BUILD_SERVICELOG
sbin_PROGRAMS += notifyServicelogEvent
endif
if BUILD_OPENIPMI_SERICELOG
sbin_PROGRAMS += ipmiservicelogd
endif
if BUILD_HELP
man8_MANS = $(sbin_PROGRAMS:%=%.8)
endif
sbin_SCRIPTS = crm_standby crm_master crm_failcount
#sbin_SCRIPTS = crm crm_standby crm_master crm_failcount
## SOURCES
#noinst_HEADERS = config.h control.h crmd.h
noinst_HEADERS =
crmadmin_SOURCES = crmadmin.c
crmadmin_LDADD = $(COMMONLIBS) $(CLUSTERLIBS) \
$(top_builddir)/lib/pengine/libpe_status.la
crm_uuid_SOURCES = crm_uuid.c
crm_uuid_LDADD = $(top_builddir)/lib/common/libcrmcluster.la
cibadmin_SOURCES = cibadmin.c
cibadmin_LDADD = $(COMMONLIBS)
crm_shadow_SOURCES = cib_shadow.c
crm_shadow_LDADD = $(COMMONLIBS)
crm_node_SOURCES = ccm_epoche.c
crm_node_LDADD = $(COMMONLIBS) $(CLUSTERLIBS) \
$(top_builddir)/lib/common/libcrmcluster.la
crm_simulate_SOURCES = crm_inject.c
crm_simulate_CFLAGS = -I$(top_srcdir)/pengine
crm_simulate_LDADD = $(COMMONLIBS) \
$(top_builddir)/lib/pengine/libpe_status.la \
$(top_builddir)/pengine/libpengine.la \
$(top_builddir)/lib/cib/libcib.la \
$(top_builddir)/lib/transition/libtransitioner.la
crm_diff_SOURCES = xml_diff.c
crm_diff_LDADD = $(COMMONLIBS)
crm_mon_SOURCES = crm_mon.c
crm_mon_LDADD = $(COMMONLIBS) $(SNMPLIBS) $(ESMTPLIBS) -llrm \
$(top_builddir)/lib/pengine/libpe_status.la
# Arguments could be made that this should live in crm/pengine
crm_verify_SOURCES = crm_verify.c
crm_verify_LDADD = $(COMMONLIBS) \
$(top_builddir)/lib/pengine/libpe_status.la \
$(top_builddir)/pengine/libpengine.la
crm_attribute_SOURCES = crm_attribute.c
crm_attribute_LDADD = $(COMMONLIBS)
crm_resource_SOURCES = crm_resource.c
crm_resource_LDADD = $(COMMONLIBS) \
$(top_builddir)/lib/pengine/libpe_rules.la \
$(top_builddir)/lib/pengine/libpe_status.la \
$(top_builddir)/pengine/libpengine.la
iso8601_SOURCES = test.iso8601.c
iso8601_LDADD = $(COMMONLIBS)
attrd_SOURCES = attrd.c
attrd_LDADD = $(COMMONLIBS) $(top_builddir)/lib/common/libcrmcluster.la
pingd_SOURCES = pingd.c
pingd_LDADD = $(COMMONLIBS)
attrd_updater_SOURCES = attrd_updater.c
attrd_updater_LDADD = $(COMMONLIBS)
if BUILD_SERVICELOG
notifyServicelogEvent_SOURCES = notifyServicelogEvent.c
notifyServicelogEvent_CFLAGS = `pkg-config --cflags servicelog-1`
notifyServicelogEvent_LDFLAGS = `pkg-config --libs servicelog-1` $(top_builddir)/lib/common/libcrmcommon.la
endif
if BUILD_OPENIPMI_SERICELOG
ipmiservicelogd_SOURCES = ipmiservicelogd.c
ipmiservicelogd_CFLAGS = `pkg-config --cflags OpenIPMI OpenIPMIposix servicelog-1`
ipmiservicelogd_LDFLAGS = `pkg-config --libs OpenIPMI OpenIPMIposix servicelog-1` $(top_builddir)/lib/common/libcrmcommon.la
endif
%.8: %
echo Creating $@
help2man --output $@ --no-info --section 8 --name "Part of the Pacemaker cluster resource manager" $(top_builddir)/tools/$<
clean-generic:
rm -f *.log *.debug *.xml *~
install-exec-local:
uninstall-local:
.PHONY: install-exec-hook
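# Illustration (hedged): given the %.8 pattern rule above, a single man page
# can be regenerated by hand; the target below is an example, not documented:
#	make crm_mon.8   # runs help2man against the freshly built crm_mon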
diff --git a/tools/haresources2cib.py.in b/tools/haresources2cib.py.in
deleted file mode 100755
index 3026c9df21..0000000000
--- a/tools/haresources2cib.py.in
+++ /dev/null
@@ -1,519 +0,0 @@
-#!@PYTHON@
-
-'''haresources2cib.py.in, convert the haresources file of heartbeat 1.x
- to cib.xml for heartbeat 2.x
-'''
-
-__copyright__='''
-Author: Huang Zhen <zhenhltc@cn.ibm.com>
-Copyright (C) 2005 International Business Machines
-'''
-
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import sys,string,os
-import xml.dom.minidom
-
-using_ocf = 1
-using_mon = 1
-enable_stonith = False
-
-ocf_ra_setting = {
- "apache" :{"params":["configfile","httpd","port","statusurl","options","testregex"],"time":["120s","60s"]},
- "IPaddr" :{"params":["ip","cidr_netmask","nic","broadcast","iflabel","lvs_support","local_stop_script","local_start_script","ARP_INTERVAL_MS","ARP_REPEAT","ARP_BACKGROUND","ARP_NETMASK"],"time":["5s","5s"]},
- "IPaddr2" :{"params":["ip","cidr_netmask","nic","broadcast","iflabel","lvs_support","mac","clusterip_hash","arp_interval","arp_count","arp_bg","arp_mac"],"time":["5s","5s"]},
- "db2" :{"params":["instance","admin"],"time":["120s","60s"]},
- "oracle" :{"params":["sid","home","user","ipcrm"],"time":["120s","60s"]},
- "oralsnr" :{"params":["sid","home","user","listener"],"time":["120s","60s"]},
- "AudibleAlarm":{"params":["nodelist"],"time":["120s","60s"]},
- "Delay" :{"params":["startdelay","stopdelay","mondelay"],"time":["120s","60s"]},
- "Filesystem":{"params":["device","directory","fstype","options","ocfs2_cluster","ocfs2_configfs"],"time":["120s","60s"]},
- "ICP" :{"params":["driveid","device"],"time":["120s","60s"]},
- "IPsrcaddr" :{"params":["ipaddress"],"time":["5s","5s"]},
- "IPv6addr" :{"params":["ipv6addr"],"time":["5s","5s"]},
- "LinuxSCSI" :{"params":["scsi"],"time":["120s","60s"]},
- "LVM" :{"params":["volgrpname"],"time":["120s","60s"]},
- "MailTo" :{"params":["email","subject"],"time":["120s","60s"]},
- "portblock" :{"params":["protocol","portno","action"],"time":["120s","60s"]},
- "Raid1" :{"params":["raidconf","raiddev","homehost"],"time":["120s","60s"]},
- "ServeRAID" :{"params":["serveraid","mergegroup"],"time":["120s","60s"]},
- "WinPopup" :{"params":["hostfile"],"time":["120s","60s"]},
- "Xinetd" :{"params":["service"],"time":["120s","60s"]},
- "ClusterMon" :{"params":["user","update","extra_options","pidfile","htmlfile"],"time":["120s","60s"]},
- "Dummy" :{"params":["state"],"time":["120s","60s"]},
- "EvmsSCC" :{"params":[],"time":["120s","60s"]},
- "Evmsd" :{"params":[],"time":["120s","60s"]},
- "ManageRAID" :{"params":["raidname"],"time":["120s","60s"]},
- "ManageVE" :{"params":["veid"],"time":["120s","60s"]},
- "Pure-FTPd" :{"params":["script","conffile","daemon_type","pidfile"],"time":["120s","60s"]},
- "SAPDatabase" :{"params":["SID","DIR_EXECUTABLE","DBTYPE","NETSERVICENAME","DBJ2EE_ONLY","DIR_BOOTSTRAP","DIR_SECSTORE"],"time":["120s","60s"]},
- "SAPInstance" :{"params":["InstanceName","DIR_EXECUTABLE","DIR_PROFILE","START_PROFILE"],"time":["120s","60s"]},
- "SendArp" :{"params":["ip","nic"],"time":["120s","60s"]},
- "Stateful" :{"params":["state"],"time":["120s","60s"]},
- "SysInfo" :{"params":["pidfile","delay"],"time":["120s","60s"]},
- "VIPArip" :{"params":["ip","nic"],"time":["120s","60s"]},
- "WAS" :{"params":["config","port"],"time":["120s","60s"]},
- "WAS6" :{"params":["profile"],"time":["120s","60s"]},
- "Xen" :{"params":["xmfile"],"time":["120s","60s"]},
- "drbd" :{"params":["drbd_resource","drbdconf","clone_overrides_hostname","clone_max","clone_node_max","master_max","master_node_max"],"time":["120s","60s"]},
- "eDir88" :{"params":["eDir_config_file","eDir_monitor_ldap","eDir_monitor_idm","eDir_jvm_initial_heap","eDir_jvm_max_heap","eDir_jvm_options"],"time":["120s","60s"]},
- "ids" :{"params":["informixdir","informixserver","onconfig","dbname","sqltestquery"],"time":["120s","60s"]},
- "iscsi" :{"params":["portal","target","discovery_type","iscsiadm"],"time":["120s","60s"]},
- "mysql" :{"params":["binary","config","datadir","user","group","log","pid","socket","test_table","test_user","test_passwd","enable_creation","additional_parameters"],"time":["120s","60s"]},
- "o2cb" :{"params":["netdev","port","ocfs2_cluster"],"time":["120s","60s"]},
- "pgsql" :{"params":["pgctl","start_opt","ctl_opt","psql","pgdata","pgdba","pghost","pgport","pgdb","logfile","stop_escalate"],"time":["120s","60s"]},
- "pingd" :{"params":["pidfile","user","dampen","set","name","section","multiplier","host_list"],"time":["120s","60s"]},
- "rsyncd" :{"params":["binpath","conffile","bwlimit"],"time":["120s","60s"]},
- "tomcat" :{"params":["tomcat_name","script_log","tomcat_stop_timeout","tomcat_suspend_trialcount","tomcat_user","statusurl","java_home","catalina_home","catalina_pid"],"time":["120s","60s"]}
- }
-
-config_defaults = [
- "symmetric-cluster=true",
- "no-quorum-policy=stop",
- "default-resource-stickiness=0",
- "default-resource-failure-stickiness=0",
- "stonith-enabled=false",
- "stonith-action=reboot",
- "startup-fencing=true",
- "stop-orphan-resources=true",
- "stop-orphan-actions=true",
- "remove-after-stop=false",
- "short-resource-names=true",
- "transition-idle-timeout=5min",
- "default-action-timeout=20s",
- "is-managed-default=true",
- "cluster-delay=60s",
- "pe-error-series-max=-1",
- "pe-warn-series-max=-1",
- "pe-input-series-max=-1",
-]
-
-def create_cib() :
- doc = xml.dom.minidom.Document()
- cib = doc.createElement("cib")
- doc.appendChild(cib)
-
- configuration = doc.createElement("configuration")
- cib.appendChild(configuration)
- cib.setAttribute("admin_epoch","0")
- cib.setAttribute("epoch","0")
- cib.setAttribute("num_updates","0")
-
- crm_config = doc.createElement("crm_config")
- configuration.appendChild(crm_config)
-
- option_set = doc.createElement("cluster_property_set")
- option_set.setAttribute("id","cib-bootstrap-options")
- crm_config.appendChild(option_set)
-
- attr_list = doc.createElement("attributes")
- option_set.appendChild(attr_list)
-
- for option in config_defaults:
- option_details = string.split(option, '=')
- nvpair = doc.createElement("nvpair")
- attr_list.appendChild(nvpair)
- nvpair.setAttribute("id", "cib-bootstrap-options-"+option_details[0])
- nvpair.setAttribute("name", option_details[0])
- nvpair.setAttribute("value", option_details[1])
- if option_details[0] == "stonith-enabled" and enable_stonith:
- nvpair.setAttribute("value", "true")
-
-
- configuration.appendChild(doc.createElement("nodes"))
- resources = doc.createElement("resources")
- configuration.appendChild(resources)
- constraints = doc.createElement("constraints")
- configuration.appendChild(constraints)
-
- status = doc.createElement("status")
- cib.appendChild(status)
-
- return doc, resources, constraints
-
-def get_ra_class(type) :
- if os.path.exists("@sysconfdir@/ha.d/resource.d"+"/"+type) :
- return "heartbeat"
- if os.path.exists("@INITDIR@"+"/"+type) :
- return "lsb"
- sys.stderr.write(type+" is an unknown Resource Agent. " \
- "Please refer to http://www.linux-ha.org/ResourceAgent\n")
- return "UNKNOWN"
-
-def cib_resource(doc,user_res_id,index, rsc):
-
- id, type, params = None, None, None
-
- #if no parameters in rsc, like "apache", "192.168.0.11"
- if string.find(rsc, "::") == -1 :
- #if there is a IP address in rsc, like "192.168.0.11"
- if len(string.split(string.split(rsc,'/')[0],'.')) == 4 :
- type = "IPaddr"
- params = [rsc]
- #no IP address, like "apache"
- else :
- type = rsc
- #else there have "::" in rsc,
- #like "IPaddr::192.168.0.11", "IPaddr2::192.168.0.11"
- else :
- if string.find(rsc, "IPaddr") == 0 :
- type = string.split(rsc, "::")[0]
- params = [string.split(rsc, "::")[1]]
- else :
- fields = string.split(rsc,"::")
- type = fields[0]
- params = fields[1:]
-
- if user_res_id == "" :
- if type == "IPaddr" :
- id = type+"_"+string.replace(string.split(params[0],'/')[0],'.','_')
- else :
- id = type+"_"+str(index)
- else :
- id = user_res_id
-
- resource = doc.createElement("primitive")
- resource.setAttribute("id",id)
- resource.setAttribute("type",type)
- if using_ocf and type in ocf_ra_setting:
- resource.setAttribute("class","ocf")
- else :
- ra_class = get_ra_class(type)
- resource.setAttribute("class",ra_class)
- resource.setAttribute("provider","heartbeat")
-
- if using_mon :
- operations = doc.createElement("operations")
- resource.appendChild(operations)
- mon_op = doc.createElement("op")
- operations.appendChild(mon_op)
- mon_op.setAttribute("id", id + "_mon")
- mon_op.setAttribute("name","monitor")
- interval = "120s"
- timeout = "60s"
- if using_ocf and type in ocf_ra_setting :
- interval = ocf_ra_setting[type]["time"][0]
- timeout = ocf_ra_setting[type]["time"][1]
- mon_op.setAttribute("interval", interval)
- mon_op.setAttribute("timeout", timeout)
-
- if params != None and len(params) != 0:
- instance_attributes = doc.createElement("instance_attributes")
- instance_attributes.setAttribute("id", id + "_inst_attr")
- resource.appendChild(instance_attributes)
- attributes = doc.createElement("attributes")
- instance_attributes.appendChild(attributes)
- if using_ocf and type in ocf_ra_setting :
- if type in ["IPaddr", "IPaddr2"] :
- unsort = string.split(params[0], "/")
- params = [None, None, None, None]
- #ip
- params[0] = unsort[0]
- for param in unsort[1:] :
- if len(string.split(param, ".")) == 4 :
- #broadcast
- params[3] = param
- break
- try :
- int(param)
- #netmask bits
- params[2] = param
- except ValueError:
- #nic
- params[1] = param
- for i in range(0,len(params)) :
- if params[i] == None :
- continue
- nvpair = doc.createElement("nvpair")
- name = ocf_ra_setting[type]["params"][i]
- nvpair.setAttribute("id",id + "_attr_" + str(i))
- nvpair.setAttribute("name",name)
- if i == len(ocf_ra_setting[type]["params"]) - 1 :
- nvpair.setAttribute("value",string.join(params[i:]))
- attributes.appendChild(nvpair)
- break
- else :
- nvpair.setAttribute("value",str(params[i]))
- attributes.appendChild(nvpair)
- else :
- i = 1
- for param in params :
- nvpair = doc.createElement("nvpair")
- nvpair.setAttribute("id",id + "_attr_" + str(i))
- nvpair.setAttribute("name",str(i))
- nvpair.setAttribute("value",str(param))
- attributes.appendChild(nvpair)
- i += 1
- return id, resource
-
-def cib_rsc_order(doc, args):
- rsc_order = doc.createElement("rsc_order")
- rsc_order.setAttribute("id","rsc_order_"+args[0]+"_"+args[3])
- rsc_order.setAttribute("from",args[0])
- rsc_order.setAttribute("action",args[1])
- rsc_order.setAttribute("type",args[2])
- rsc_order.setAttribute("to",args[3])
- rsc_order.setAttribute("symmetrical","true")
- return rsc_order
-
-def cib_rsc_colocation(doc, args):
- if len(args) != 3 :
- sys.stderr.write("rsc_colocation: bad usage\n")
- return
- rsc_colocation = doc.createElement("rsc_colocation")
- rsc_colocation.setAttribute("id","rsc_colocation_"+args[0]+"_"+args[1])
- rsc_colocation.setAttribute("from",args[0])
- rsc_colocation.setAttribute("to",args[1])
- rsc_colocation.setAttribute("score",args[2])
- return rsc_colocation
-
-def cib_rsc_location(doc, id, node):
- rsc_location = doc.createElement("rsc_location")
- rsc_location.setAttribute("id","rsc_location_"+id)
- rsc_location.setAttribute("rsc",id)
- rule = doc.createElement("rule")
- rule.setAttribute("id","prefered_location_"+id)
- rule.setAttribute("score","100")
- rsc_location.appendChild(rule)
- expression = doc.createElement("expression")
- expression.setAttribute("id","prefered_location_"+id+"_expr")
- expression.setAttribute("attribute","#uname")
- expression.setAttribute("operation","eq")
- expression.setAttribute("value", node)
- rule.appendChild(expression)
- return rsc_location
-
-def cib_resource_group(doc, id):
- resource_group = doc.createElement("group")
- resource_group.setAttribute("id",id)
- return resource_group
-
-def cib_resource_clone(doc, id, clone_max, clone_node_max):
- resource_clone = doc.createElement("clone")
- resource_clone.setAttribute("id",id)
- instance_attributes = doc.createElement("instance_attributes")
- instance_attributes.setAttribute("id", id + "_inst_attr")
- resource_clone.appendChild(instance_attributes)
- attributes = doc.createElement("attributes")
- instance_attributes.appendChild(attributes)
- nvpair = doc.createElement("nvpair")
- nvpair.setAttribute("id",id + "_attr_1")
- nvpair.setAttribute("name", "clone_max")
- nvpair.setAttribute("value", str(clone_max))
- attributes.appendChild(nvpair)
- nvpair = doc.createElement("nvpair")
- nvpair.setAttribute("id",id + "_attr_2")
- nvpair.setAttribute("name", "clone_node_max")
- nvpair.setAttribute("value", str(clone_node_max))
- attributes.appendChild(nvpair)
- return resource_clone
-
-def add_resource(cib, user_res_id, index, node, rsc):
-    id, resource = cib_resource(cib[0], user_res_id, index, rsc)
-    cib[1].appendChild(resource)
-    rsc_location = cib_rsc_location(cib[0], id, node)
-    cib[2].appendChild(rsc_location)
-
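-# Add a multi-resource haresources line as a <group>, with a single location constraint for the whole group.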
-def add_resource_group(cib, user_res_id, index, node, rscs):
-    if user_res_id == "" :
-        groupid = "group_" + str(group_index)
-    else :
-        groupid = user_res_id
-    resource_group = cib_resource_group(cib[0], groupid)
-    cib[1].appendChild(resource_group)
-    for rsc in rscs :
-        rid, resource = cib_resource(cib[0], "", index, rsc)
-        resource_group.appendChild(resource)
-        index += 1
-    rsc_location = cib_rsc_location(cib[0], groupid, node)
-    cib[2].appendChild(rsc_location)
-
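-# Wrap a single resource in a <clone> (clone_max = number of cluster nodes, clone_node_max = 1).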
-def add_clone(cib, user_res_id, index, rsc) :
-    if user_res_id == "" :
-        cloneid = "clone_" + str(index)
-    else :
-        cloneid = user_res_id
-    clone = cib_resource_clone(cib[0], cloneid, node_num, 1)
-    cib[1].appendChild(clone)
-    id, resource = cib_resource(cib[0], "", index, rsc)
-    clone.appendChild(resource)
-
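-# Clone a stonith resource across all nodes; used when the stonith_host directive names "*".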
-def add_stonith_clone(cib, index, params, node_num) :
-    clone = cib_resource_clone(cib[0], "clone_" + str(index), node_num, 1)
-    cib[1].appendChild(clone)
-    id, stonith = cib_stonith(cib[0], index, params)
-    clone.appendChild(stonith)
-
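-# Build a stonith primitive, optionally with monitor/start operations; attribute names
-# are read from the output of "stonith -n -t <type>".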
-def cib_stonith(doc, index, params):
-    id = "stonith_" + str(index)
-    resource = doc.createElement("primitive")
-    resource.setAttribute("id", id)
-    resource.setAttribute("type", params[1])
-    resource.setAttribute("class", "stonith")
-    resource.setAttribute("provider", "heartbeat")
-    if using_mon :
-        operations = doc.createElement("operations")
-        resource.appendChild(operations)
-        mon_op = doc.createElement("op")
-        operations.appendChild(mon_op)
-        mon_op.setAttribute("id", id + "_mon")
-        mon_op.setAttribute("name", "monitor")
-        mon_op.setAttribute("interval", "5s")
-        mon_op.setAttribute("timeout", "20s")
-        mon_op.setAttribute("prereq", "nothing")
-        start_op = doc.createElement("op")
-        operations.appendChild(start_op)
-        start_op.setAttribute("id", id + "_start")
-        start_op.setAttribute("name", "start")
-        start_op.setAttribute("timeout", "20s")
-        start_op.setAttribute("prereq", "nothing")
-    if len(params) > 2 :
-        instance_attributes = doc.createElement("instance_attributes")
-        instance_attributes.setAttribute("id", id + "_inst_attr")
-        resource.appendChild(instance_attributes)
-        attributes = doc.createElement("attributes")
-        instance_attributes.appendChild(attributes)
-        names = string.split(os.popen("stonith -n -t " + params[1]).readline())
-        for i in range(2, len(params)) :
-            nvpair = doc.createElement("nvpair")
-            nvpair.setAttribute("id", id + "_attr_" + str(i))
-            nvpair.setAttribute("name", names[i-2])
-            nvpair.setAttribute("value", params[i])
-            attributes.appendChild(nvpair)
-
-    return id, resource
-
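-# Convert one "stonith_host" directive from ha.cf: "*" clones the device on every node,
-# anything else pins it to the named node.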
-def add_stonith_host(cib, stonith_host, index, node_num) :
-    params = string.split(stonith_host)[1:]
-    if params[0] == "*" :
-        add_stonith_clone(cib, index, params, node_num)
-    else :
-        id, resource = cib_stonith(cib[0], index, params)
-        cib[1].appendChild(resource)
-        stonith_location = cib_rsc_location(cib[0], id, params[0])
-        cib[2].appendChild(stonith_location)
-
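-# Return every non-comment line of the config that starts with the given directive.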
-def get_directive_list(config, directive) :
-    directive_list = []
-    for line in config :
-        line = line.lstrip()
-        if len(line) == 0 or line[0] == "#" :
-            continue
-        if string.split(line)[0] == directive :
-            directive_list.append(line)
-    return directive_list
-
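-# Entry point: parse options, read ha.cf and haresources, and write the generated CIB
-# to cib.xml (or stdout).  Comment directives embedded in haresources are honoured:
-#   #rsc_order <from> <action> <type> <to>
-#   #rsc_colocation <from> <to> <score>
-# and a trailing "#name" or "#clone:name" token names (or clones) the resource.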
-if __name__ == "__main__" :
-    resource_file = "@sysconfdir@/ha.d/haresources"
-    config_file = "@sysconfdir@/ha.d/ha.cf"
-    target_file = "@CRM_CONFIG_DIR@/cib.xml"
-    to_stdout = False
-
-    # Process arguments...
-    skipthis = None
-    args = sys.argv[1:]
-    for i in range(0, len(args)) :
-        if skipthis :
-            skipthis = None
-            continue
-        elif args[i] == "--no-ocf" :
-            using_ocf = 0
-        elif args[i] == "--stdout" :
-            to_stdout = True
-        elif args[i] == "--no-monitor" :
-            using_mon = 0
-        elif args[i] == "--config" or args[i] == "-c" :
-            skipthis = True
-            config_file = args[i+1]
-        elif args[i] == "--help" or args[i] == "-h" :
-            print "usage: " + sys.argv[0] \
-                + " [--no-ocf]" \
-                + " [--stdout]" \
-                + " [--no-monitor]" \
-                + " [--config|-c configfile]" \
-                + " [--help|-h]" \
-                + " [resourcefile]"
-            sys.exit(1)
-        else:
-            resource_file = args[i]
-
-    config = open(config_file, "r").readlines()
-
-    node_list = get_directive_list(config, "node")
-    node_num = 0
-    for nodes in node_list :
-        node_num += len(string.split(nodes)) - 1
-    stonith_host_list = get_directive_list(config, "stonith_host")
-
-    if len(stonith_host_list) != 0 :
-        enable_stonith = True
-
-    file = open(resource_file, "r")
-    cib = create_cib()
-    pre_line = ""
-    id_index = 1
-    group_index = 1
-    for line in file.readlines() :
-        line = string.strip(line)
-        if len(line) == 0 :
-            continue
-        if line[0] == '#' and string.find(line, "#rsc_order") != 0 and string.find(line, "#rsc_colocation") != 0 :
-            continue
-        if line[-1] == '\\' :
-            pre_line += line[:-1] + " "
-            continue
-        else :
-            line = pre_line + line
-            pre_line = ""
-        fields = string.split(line)
-        if fields[len(fields)-1][0] == '#' :
-            user_res_id = fields[len(fields)-1][1:]
-            fields.pop()
-        else :
-            user_res_id = ""
-        if fields[0] == "#rsc_order" and len(fields) == 5 :
-            rsc_order = cib_rsc_order(cib[0], fields[1:])
-            cib[2].appendChild(rsc_order)
-        elif fields[0] == "#rsc_colocation" and len(fields) == 4 :
-            rsc_colocation = cib_rsc_colocation(cib[0], fields[1:])
-            cib[2].appendChild(rsc_colocation)
-        elif string.find(user_res_id, "clone:") == 0 :
-            add_clone(cib, user_res_id[len("clone:"):], group_index, fields[1])
-            group_index += 10
-            id_index += 1
-        elif len(fields) == 2 :
-            add_resource(cib, user_res_id, id_index, fields[0], fields[1])
-            id_index += 1
-        elif len(fields) > 2 :
-            add_resource_group(cib, user_res_id, group_index, fields[0], fields[1:])
-            group_index += 10
-            id_index += len(fields)
-        else :
-            sys.stderr.write("cannot parse this line: " + line + "\n")
-    if enable_stonith :
-        for stonith_host in stonith_host_list :
-            add_stonith_host(cib, stonith_host, id_index, node_num)
-            id_index += 1
-    if to_stdout :
-        print cib[0].toprettyxml()
-    elif os.access(target_file, os.F_OK) or os.access(target_file + ".sig", os.F_OK) :
-        sys.stderr.write("cib.xml or cib.xml.sig exists in @CRM_CONFIG_DIR@\n")
-        sys.stderr.write("please remove them before generating a new cib.xml\n")
-    else :
-        cib_file = open(target_file, "w")
-        cib_file.write(cib[0].toprettyxml())
-        cib_file.close()
-        os.system("chown @CRM_DAEMON_USER@.@CRM_DAEMON_GROUP@ " + target_file)
