diff --git a/configure.ac b/configure.ac index 6bba1a525b..3a0c30a18f 100644 --- a/configure.ac +++ b/configure.ac @@ -1,1475 +1,1481 @@ dnl dnl autoconf for Pacemaker dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.53) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services AC_INIT(pacemaker, 1.0.7, pacemaker@oss.clusterlabs.org) CRM_DTD_VERSION="1.0" PKG_FEATURES="" HB_PKG=heartbeat AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except lha_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/crm_config.h) ALL_LINGUAS="en fr" AC_ARG_WITH(version, [ --with-version=version Override package version (if you're a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION) AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version) dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from dnl normal compilation. 
When a failure occurs, it will then display the full dnl command line dnl Wrap in m4_ifdef to avoid breaking on older platforms m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES]) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". AM_PROG_CC_C_O AC_PROG_CC_STDC AC_LIBTOOL_DLOPEN dnl Enable dlopen support... AC_LIBLTDL_CONVENIENCE dnl make libltdl a convenience lib AC_PROG_LIBTOOL AC_C_STRINGIZE AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="$@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([int main(){return 0;}] ,[RC=0; AC_MSG_RESULT(yes)],[RC=1; AC_MSG_RESULT(no)]) return $RC } extract_header_define() { AC_MSG_CHECKING(for $2 in $1) Cfile=/tmp/extract_define.$2.${$} printf "#include <stdio.h>\n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` AC_MSG_RESULT($value) printf $value rm -f ${Cfile}.c ${Cfile} } dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi force GCC to compile to ANSI/ANSI standard for older compilers.
[default=yes]]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]]) AC_ARG_ENABLE([pretty], [ --enable-pretty Pretty-print compiler output unless there is an error [default=no]]) AC_ARG_ENABLE([quiet], [ --enable-quiet Supress make output unless there is an error [default=no]]) AC_ARG_ENABLE([thread-safe], [ --enable-thread-safe Enable some client libraries to be thread safe. [default=no]]) AC_ARG_ENABLE([bundled-ltdl], [ --enable-bundled-ltdl Configure, build and install the standalone ltdl library bundled with ${PACKAGE} [default=no]]) LTDL_LIBS="" AC_ARG_WITH(ais, [ --with-ais Support the OpenAIS messaging and membership layer ], [ SUPPORT_AIS=$withval ], [ SUPPORT_AIS=try ], ) AC_ARG_WITH(heartbeat, [ --with-heartbeat Support the Heartbeat messaging and membership layer ], [ SUPPORT_HEARTBEAT=$withval ], [ SUPPORT_HEARTBEAT=try ], ) AC_ARG_WITH(snmp, [ --with-snmp Support the SNMP protocol ], [ SUPPORT_SNMP=$withval ], [ SUPPORT_SNMP=try ], ) AC_ARG_WITH(esmtp, [ --with-esmtp Support the sending mail notifications with the esmtp library ], [ SUPPORT_ESMTP=$withval ], [ SUPPORT_ESMTP=try ], ) AISPREFIX="" AC_ARG_WITH(ais-prefix, [ --with-ais-prefix=DIR Prefix used when OpenAIS was installed [$prefix]], [ AISPREFIX=$withval ], [ AISPREFIX=$prefix ]) LCRSODIR="" AC_ARG_WITH(lcrso-dir, [ --with-lcrso-dir=DIR OpenAIS lcrso files. 
], [ LCRSODIR="$withval" ]) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]], [ INITDIR="$withval" ]) dnl =============================================== dnl General Processing dnl =============================================== AC_SUBST(HB_PKG) INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Heartbeat, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac AC_MSG_NOTICE(Sanitizing ais_prefix: ${AISPREFIX}) case $AISPREFIX in dnl For consistency with Heartbeat, map NONE->$prefix NONE) AISPREFIX=$prefix;; prefix) AISPREFIX=$prefix;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR);; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in dnl For consistency with Heartbeat, map NONE->$prefix *prefix*|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we dont end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} #docdir=${datadir}/doc/packages/${PACKAGE} fi AC_SUBST(docdir) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
case "$host_os" in *bsd*) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) CFLAGS="$CFLAGS -I${prefix}/include" ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac dnl Eventually remove this CFLAGS="$CFLAGS -I${prefix}/include/heartbeat" AC_SUBST(INIT_EXT) AC_DEFINE_UNQUOTED(HA_LOG_FACILITY, LOG_DAEMON, Default logging facility) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) case "$host_cpu" in s390x)U64T="%lu";; *64*) U64T="%lu";; *) U64T="%llu";; esac AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) AC_CHECK_HEADERS(hb_config.h) AC_CHECK_HEADERS(glue_config.h) GLUE_HEADER=none if test "$ac_cv_header_glue_config_h" = "yes"; then GLUE_HEADER=glue_config.h elif test "$ac_cv_header_hb_config_h" = "yes"; then GLUE_HEADER=hb_config.h else AC_MSG_FAILURE(Core development headers were not found) fi dnl Variables needed for substitution CRM_DTD_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DTD_DIRECTORY,"$CRM_DTD_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_DTD_DIRECTORY) AC_DEFINE_UNQUOTED(CRM_DTD_VERSION,"$CRM_DTD_VERSION", Current version of the Pacemaker Relax-NG Schema) AC_SUBST(CRM_DTD_VERSION) CRM_DAEMON_USER=`extract_header_define $GLUE_HEADER HA_CCMUSER` AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) CRM_DAEMON_GROUP=`extract_header_define $GLUE_HEADER HA_APIGROUP` AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_STATE_DIR=${localstatedir}/run/crm 
AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets) AC_SUBST(CRM_STATE_DIR) PE_STATE_DIR="${localstatedir}/lib/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs) AC_SUBST(PE_STATE_DIR) dnl Eventually move out of the heartbeat dir tree and create compatability code CRM_CONFIG_DIR="${localstatedir}/lib/heartbeat/crm" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep CIB configuration files) AC_SUBST(CRM_CONFIG_DIR) dnl Eventually move out of the heartbeat dir tree and create symlinks when needed CRM_DAEMON_DIR=`extract_header_define $GLUE_HEADER HA_LIBHBDIR` AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) dnl Needed so that the AIS plugin can clear out the directory as Heartbeat does HA_STATE_DIR=`extract_header_define $GLUE_HEADER HA_VARRUNDIR` AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where Heartbeat keeps state files and sockets) AC_SUBST(HA_STATE_DIR) dnl Needed for the location of hostcache in CTS.py HA_VARLIBHBDIR=`extract_header_define $GLUE_HEADER HA_VARLIBHBDIR` AC_SUBST(HA_VARLIBHBDIR) AC_DEFINE_UNQUOTED(UUID_FILE,"$localstatedir/lib/heartbeat/hb_uuid", Location of Heartbeat's UUID file) OCF_ROOT_DIR=`extract_header_define $GLUE_HEADER OCF_ROOT_DIR` if test "X$OCF_ROOT_DIR" = X; then AC_MSG_ERROR(Could not locate OCF directory) fi AC_SUBST(OCF_ROOT_DIR) OCF_RA_DIR=`extract_header_define $GLUE_HEADER OCF_RA_DIR` AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) dnl Extract this value from glue_config.h once we no longer support anything else STONITH_PLUGIN_DIR="$libdir/stonith/plugins/stonith/" AC_DEFINE_UNQUOTED(STONITH_PLUGIN_DIR,"$STONITH_PLUGIN_DIR", Location for Stonith plugins) AC_SUBST(STONITH_PLUGIN_DIR) AC_PATH_PROGS(HG, hg false) AC_MSG_CHECKING(build version) BUILD_VERSION=unknown if test -f $srcdir/.hg_archival.txt; then BUILD_VERSION=`cat 
$srcdir/.hg_archival.txt | awk '/node:/ { print $2 }'` elif test -x $HG -a -d .hg; then BUILD_VERSION=`$HG id -itb` if test $? != 0; then BUILD_VERSION=unknown fi fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_MSG_RESULT($BUILD_VERSION) AC_SUBST(BUILD_VERSION) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) AM_PATH_PYTHON AC_CHECK_PROGS(MAKE, gmake make) AC_PATH_PROGS(HTML2TXT, lynx w3m) AC_PATH_PROGS(HELP2MAN, help2man) AC_PATH_PROGS(POD2MAN, pod2man, pod2man) AC_PATH_PROGS(ASCIIDOC, asciidoc) AC_PATH_PROGS(PUBLICAN, publican) AC_PATH_PROGS(FOP, fop) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(HG, hg, /bin/false) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PKGCONFIG, pkg-config) AC_PATH_PROGS(XML2CONFIG, xml2-config) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PKG_FEATURES="$PKG_FEATURES manpages" fi AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"") if test x"${ASCIIDOC}" != x""; then PKG_FEATURES="$PKG_FEATURES asciidoc" fi AM_CONDITIONAL(BUILD_DOCBOOK, test x"${PUBLICAN}" != x"") if test x"${PUBLICAN}" != x""; then PKG_FEATURES="$PKG_FEATURES publican" fi dnl =============================================== dnl Libraries dnl
=============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_LIB(uuid, uuid_parse) dnl e2fsprogs AC_CHECK_LIB(uuid, uuid_create) dnl ossp if test x"${PKGCONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi dnl dnl On many systems libcrypto is needed when linking against libsnmp. dnl Check to see if it exists, and if so use it. dnl AC_CHECK_LIB(crypto, CRYPTO_free, CRYPTOLIB="-lcrypto",) AC_SUBST(CRYPTOLIB) if test "x${enable_thread_safe}" = "xyes"; then GPKGNAME="gthread-2.0" else GPKGNAME="glib-2.0" fi if $PKGCONFIG --exists $GPKGNAME then GLIBCONFIG="$PKGCONFIG $GPKGNAME" else set -x echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH $PKGCONFIG --exists $GPKGNAME; echo $? $PKGCONFIG --cflags $GPKGNAME; echo $? $PKGCONFIG $GPKGNAME; echo $? set +x AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE}) fi AC_MSG_RESULT(using $GLIBCONFIG) # # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi dnl dnl Check for location of gettext dnl dnl On at least Solaris 2.x, where it is in libc, specifying lintl causes dnl grief. Ensure minimal result, not the sum of all possibilities. dnl And do libc first. 
dnl Known examples: dnl c: Linux, Solaris 2.6+ dnl intl: BSD, AIX AC_CHECK_LIB(c, gettext) if test x$ac_cv_lib_c_gettext != xyes; then AC_CHECK_LIB(intl, gettext) fi if test x$ac_cv_lib_c_gettext != xyes -a x$ac_cv_lib_intl_gettext != xyes; then AC_MSG_ERROR(You need gettext installed in order to build ${PACKAGE}) fi if test "X$GLIBCONFIG" != X; then AC_MSG_CHECKING(for special glib includes: ) GLIBHEAD=`$GLIBCONFIG --cflags` AC_MSG_RESULT($GLIBHEAD) CPPFLAGS="$CPPFLAGS $GLIBHEAD" AC_MSG_CHECKING(for glib library flags) GLIBLIB=`$GLIBCONFIG --libs` AC_MSG_RESULT($GLIBLIB) LIBS="$LIBS $GLIBLIB" fi dnl ======================================================================== dnl Headers dnl ======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(asm/types.h) AC_CHECK_HEADERS(assert.h) AC_CHECK_HEADERS(auth-client.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(fcntl.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/errqueue.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pam/pam_appl.h) AC_CHECK_HEADERS(pthread.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(security/pam_appl.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/poll.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/select.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/timeb.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/uio.h) 
AC_CHECK_HEADERS(sys/un.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) AC_CHECK_HEADERS(winsock.h) dnl These headers need prerequisits before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) dnl AC_CHECK_HEADERS(netinet/icmp6.h) dnl AC_CHECK_HEADERS(netinet/ip6.h) dnl AC_CHECK_HEADERS(netinet/ip_icmp.h) AC_MSG_CHECKING(for special libxml2 includes) if test "x$XML2CONFIG" = "x"; then AC_MSG_ERROR(libxml2 config not found) else XML2HEAD="`$XML2CONFIG --cflags`" AC_MSG_RESULT($XML2HEAD) AC_CHECK_LIB(xml2, xmlReadMemory) AC_CHECK_LIB(xslt, xsltApplyStylesheet) fi CPPFLAGS="$CPPFLAGS $XML2HEAD" AC_CHECK_HEADERS(libxml/xpath.h) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(The libxml developement headers were not found) fi if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(The libxslt developement headers were not found) fi dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include ]]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(g_log_set_default_handler) AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) dnl ======================================================================== dnl ltdl dnl ======================================================================== AC_CHECK_LIB(ltdl, lt_dlopen, [LTDL_foo=1]) if test "x${enable_bundled_ltdl}" = "xyes"; then if test $ac_cv_lib_ltdl_lt_dlopen = yes; then AC_MSG_NOTICE([Disabling usage of installed ltdl]) fi ac_cv_lib_ltdl_lt_dlopen=no fi LIBLTDL_DIR="" if test $ac_cv_lib_ltdl_lt_dlopen != yes ; then AC_MSG_NOTICE([Installing local ltdl]) LIBLTDL_DIR=libltdl ( cd 
$srcdir ; $TAR -xvf libltdl.tar ) if test "$?" -ne 0; then AC_MSG_ERROR([$TAR of libltdl.tar in $srcdir failed]) fi AC_CONFIG_SUBDIRS(libltdl) else LIBS="$LIBS -lltdl" AC_MSG_NOTICE([Using installed ltdl]) INCLTDL="" LIBLTDL="" fi AC_SUBST(INCLTDL) AC_SUBST(LIBLTDL) AC_SUBST(LIBLTDL_DIR) dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurse takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)] ) fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)] ) fi dnl Only look for non-n-library if there was no n-library. 
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)] ) fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)] ) fi if test "x$CURSESLIBS" != "x"; then PKG_FEATURES="$PKG_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then AC_MSG_CHECKING(whether printw() requires argument of "const char *") ac_save_LIBS=$LIBS LIBS="$CURSESLIBS $LIBS" ac_save_CFLAGS=$CFLAGS CFLAGS="-Wcast-qual -Werror" AC_LINK_IFELSE( [AC_LANG_PROGRAM( [ #if defined(HAVE_CURSES_H) # include #elif defined(HAVE_NCURSES_H) # include #endif ], [printw((const char *)"Test");] )], [ac_cv_compatible_printw=yes], [ac_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT([$ac_cv_compatible_printw]) if test "$ac_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. 
You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) dnl ======================================================================== dnl Cluster infrastructure - Heartbeat dnl ======================================================================== dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the depenancies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) if test x"$ac_cv_lib_plumb_G_main_add_IPC_Channel" != x"yes"; then AC_MSG_FAILURE(Core Heartbeat utility libraries not found: $ac_cv_lib_plumb_G_main_add_IPC_Channel) fi dnl Compatability checks AC_CHECK_FUNCS(msgfromIPC_timeout) AC_CHECK_MEMBERS([struct lrm_ops.fail_rsc],,,[[#include ]]) dnl ======================================================================== dnl Cluster stack - Heartbeat dnl ======================================================================== case $SUPPORT_HEARTBEAT in 1|yes|true) AC_CHECK_LIB(hbclient, ll_cluster_new, [SUPPORT_HEARTBEAT=1], [AC_MSG_FAILURE(Unable to support Heartbeat: client libraries not found)]);; try) AC_CHECK_LIB(hbclient, ll_cluster_new, [SUPPORT_HEARTBEAT=1], [SUPPORT_HEARTBEAT=0]);; *) SUPPORT_HEARTBEAT=0;; esac AM_CONDITIONAL(BUILD_HEARTBEAT_SUPPORT, test $SUPPORT_HEARTBEAT = 1) AC_DEFINE_UNQUOTED(SUPPORT_HEARTBEAT, $SUPPORT_HEARTBEAT, Support the Heartbeat messaging and membership layer) dnl ======================================================================== dnl Cluster stack - OpenAIS dnl ======================================================================== AISLIB="" dnl Normalize the values case $SUPPORT_AIS in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_AIS=no;; esac AC_MSG_CHECKING(for native AIS) AISMSGLIB="" AIS_VERSION="none" 
COROSYNC_PKG="$PKGCONFIG libcoroipcc" if test $SUPPORT_AIS = no; then AC_MSG_RESULT(no... not requested.) else AC_MSG_RESULT($SUPPORT_AIS, with '$AISPREFIX') AC_CHECK_HEADERS(openais/saAis.h) AC_CHECK_HEADERS(corosync/coroipcc.h) $COROSYNC_PKG --exists if test $? = 0; then AIS_VERSION="corosync" elif test "$ac_cv_header_openais_saAis_h" = "yes"; then AIS_VERSION="whitetank" else aisreason="Whitetank headers not found" fi fi if test $AIS_VERSION != "none"; then AC_MSG_CHECKING(for OpenAIS branch) AC_MSG_RESULT($AIS_VERSION) fi if test $AIS_VERSION = "corosync"; then if test "$ac_cv_header_corosync_coroipcc_h" != "yes"; then AIS_VERSION="none" aisreason="Corosync headers not found" fi saveLIBS="$LIBS" LIBS="$LIBS `$COROSYNC_PKG --libs-only-L`" AC_CHECK_LIB(coroipcc, coroipcc_msg_send_reply_receive, []) LIBS="$saveLIBS" if test $ac_cv_lib_coroipcc_coroipcc_msg_send_reply_receive != yes; then AC_MSG_RESULT(Cannot locate AIS messaging library) aisreason="requred Corosync libraries not found" AIS_VERSION="none" fi fi dnl continue? if test $AIS_VERSION = "whitetank"; then dnl Find it in lib, lib64, or wherever it wants to live... AC_MSG_CHECKING(location of OpenAIS libraries) dnl CoroSync location alib=`ls ${AISPREFIX}/*/libcpg.so | head -n 1` if test -z "$alib"; then dnl Whitetank location alib=`ls ${AISPREFIX}/*/*/libcpg.so | head -n 1` fi AISLIB=`dirname $alib` AC_MSG_RESULT($AISLIB) if test "x$AISLIB" = "x"; then AC_MSG_WARN(Use --with-ais-prefix to specify the prefix OpenAIS was installed with) aisreason="library directory not found" AIS_VERSION="none" elif test ! -d "$AISLIB"; then AC_MSG_WARN(Use --with-ais-prefix to specify the prefix OpenAIS was installed with) aisreason="specified library directory does not exist" AIS_VERSION="none" fi fi dnl continue? 
if test $AIS_VERSION = "whitetank"; then AC_MSG_CHECKING(location of OpenAIS plugins) if test -z "$LCRSODIR"; then LCRSODIR="$libexecdir/lcrso" alib=`ls ${AISPREFIX}/*/lcrso/objdb.lcrso | head -n 1` LCRSODIR=`dirname $alib` fi AC_MSG_RESULT($LCRSODIR) if test "x$LCRSODIR" = "x"; then AC_MSG_RESULT(Invalid. Please specify the correct location with --with-lcrso-dir) aisreason="plugin directory not found" AIS_VERSION="none" elif test ! -d "$LCRSODIR"; then AC_MSG_RESULT(Invalid. Please specify the correct location with --with-lcrso-dir) aisreason="specified plugin directory does not exist" AIS_VERSION="none" fi fi dnl continue? if test $AIS_VERSION = "whitetank"; then dnl Don't add the messaging library to LIBS since most daemons don't need/use it saveLIBS="$LIBS" LIBS="$LIBS -L${AISLIB} -R${AISLIB}" AC_CHECK_LIB(SaMsg, saSendReceiveReply, []) AC_CHECK_LIB(SaMsg, openais_msg_send_reply_receive, []) if test $ac_cv_lib_SaMsg_openais_msg_send_reply_receive = yes; then : OpenAIS elif test $ac_cv_lib_SaMsg_saSendReceiveReply = yes; then : OpenAIS AC_DEFINE_UNQUOTED(TRADITIONAL_AIS_IPC, 1, "Use the 'old' AIS IPC interface") else AC_MSG_RESULT(Cannot locate AIS messaging library) aisreason="requred libraries not found" AIS_VERSION="none" fi LIBS="$saveLIBS" fi SUPPORT_AIS=1 case $AIS_VERSION in corosync) AC_DEFINE_UNQUOTED(AIS_COROSYNC, 1, "AIS target is the corosync series") LCRSODIR=`$PKGCONFIG corosync --variable=lcrsodir` CFLAGS="$CFLAGS `$COROSYNC_PKG --cflags`" AISMSGLIB=`$COROSYNC_PKG --libs` ;; whitetank) AC_DEFINE_UNQUOTED(AIS_WHITETANK, 1, "AIS target is the whitetank series") CFLAGS="$CFLAGS -I$AISPREFIX/include/openais" AISMSGLIB="-L${AISLIB} -R${AISLIB} -lSaMsg" ;; none) SUPPORT_AIS=0 if test "x$aisreason" != x; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support OpenAIS: $aisreason) else AC_MSG_FAILURE(Unable to support OpenAIS: $aisreason) fi fi ;; *) AC_MSG_FAILURE(Unknown OpenAIS branch: $AIS_VERSION);; esac AC_DEFINE_UNQUOTED(SUPPORT_AIS, 
$SUPPORT_AIS, Support the OpenAIS messaging and membership layer) AM_CONDITIONAL(BUILD_AIS_SUPPORT, test $SUPPORT_AIS = 1) dnl dnl Cluster stack - Sanity dnl STACKS="" CLUSTERLIBS="" if test $SUPPORT_HEARTBEAT = 1; then STACKS="$STACKS heartbeat" CLUSTERLIBS="$CLUSTERLIBS -lhbclient -lccmclient" fi if test $SUPPORT_AIS = 1; then STACKS="$STACKS $AIS_VERSION" CLUSTERLIBS="$CLUSTERLIBS ${AISMSGLIB}" else AISPREFIX="" LCRSODIR="$libdir" fi PKG_FEATURES="$PKG_FEATURES$STACKS" AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must choose at least one cluster stack to support) fi AC_MSG_RESULT($STACKS) AC_SUBST(CLUSTERLIBS) AC_SUBST(LCRSODIR) dnl ======================================================================== dnl SNMP dnl ======================================================================== case $SUPPORT_SNMP in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_SNMP=no;; esac SNMPLIB="" AC_MSG_CHECKING(for snmp support) if test $SUPPORT_SNMP = no; then AC_MSG_RESULT(no... not requested.) SUPPORT_SNMP=0 else SNMPCONFIG="" AC_MSG_RESULT($SUPPORT_SNMP) AC_CHECK_HEADERS(net-snmp/net-snmp-config.h) if test "x${ac_cv_header_net_snmp_net_snmp_config_h}" != "xyes"; then SUPPORT_SNMP="no" fi if test $SUPPORT_SNMP != no; then AC_PATH_PROGS(SNMPCONFIG, net-snmp-config) if test "X${SNMPCONFIG}" = "X"; then AC_MSG_RESULT(You need the net_snmp development package to continue.) 
SUPPORT_SNMP=no fi fi if test $SUPPORT_SNMP != no; then AC_MSG_CHECKING(for special snmp libraries) SNMPLIBS=`$SNMPCONFIG --agent-libs` AC_MSG_RESULT($SNMPLIBS) fi if test $SUPPORT_SNMP != no; then savedLibs=$LIBS LIBS="$LIBS $SNMPLIBS" AC_CHECK_FUNCS(netsnmp_transport_open_client) if test $ac_cv_func_netsnmp_transport_open_client != yes; then SUPPORT_SNMP=no fi LIBS=$savedLibs fi if test $SUPPORT_SNMP = no; then SUPPORT_SNMP=0 if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support SNMP) else AC_MSG_FAILURE(Unable to support SNMP) fi else SUPPORT_SNMP=1 fi fi if test $SUPPORT_SNMP = 1; then PKG_FEATURES="$PKG_FEATURES snmp" fi AC_SUBST(SNMPLIBS) AM_CONDITIONAL(ENABLE_SNMP, test "$SUPPORT_SNMP" = "1") AC_DEFINE_UNQUOTED(ENABLE_SNMP, $SUPPORT_SNMP, Build in support for sending SNMP traps) dnl ======================================================================== dnl ESMTP dnl ======================================================================== case $SUPPORT_ESMTP in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_ESMTP=no;; esac ESMTPLIB="" AC_MSG_CHECKING(for esmtp support) if test $SUPPORT_ESMTP = no; then AC_MSG_RESULT(no... not requested.) SUPPORT_ESMTP=0 else ESMTPCONFIG="" AC_MSG_RESULT($SUPPORT_ESMTP) AC_CHECK_HEADERS(libesmtp.h) if test "x${ac_cv_header_libesmtp_h}" != "xyes"; then SUPPORT_ESMTP="no" fi if test $SUPPORT_ESMTP != no; then AC_PATH_PROGS(ESMTPCONFIG, libesmtp-config) if test "X${ESMTPCONFIG}" = "X"; then AC_MSG_RESULT(You need the libesmtp development package to continue.)
SUPPORT_ESMTP=no fi fi if test $SUPPORT_ESMTP != no; then AC_MSG_CHECKING(for special esmtp libraries) ESMTPLIBS=`$ESMTPCONFIG --libs | tr '\n' ' '` AC_MSG_RESULT($ESMTPLIBS) fi if test $SUPPORT_ESMTP = no; then SUPPORT_ESMTP=0 if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ESMTP) else AC_MSG_FAILURE(Unable to support ESMTP) fi else SUPPORT_ESMTP=1 fi fi if test $SUPPORT_ESMTP = 1; then PKG_FEATURES="$PKG_FEATURES libesmtp" fi AC_SUBST(ESMTPLIBS) AM_CONDITIONAL(ENABLE_ESMTP, test "$SUPPORT_ESMTP" = "1") AC_DEFINE_UNQUOTED(ENABLE_ESMTP, $SUPPORT_ESMTP, Build in support for sending mail notifications with ESMTP) dnl ======================================================================== dnl GnuTLS dnl ======================================================================== AC_CHECK_HEADERS(gnutls/gnutls.h) AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) dnl GNUTLS library: Attempt to determine by 'libgnutls-config' program. dnl If no 'libgnutls-config', try traditional autoconf means. 
dnl Locate GnuTLS flags via libgnutls-config when available; otherwise fall
dnl back to a plain library check.
AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config)
if test -n "$LIBGNUTLS_CONFIG"; then
	AC_MSG_CHECKING(for gnutls header flags)
	GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`";
	AC_MSG_RESULT($GNUTLSHEAD)
	AC_MSG_CHECKING(for gnutls library flags)
	GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`";
	AC_MSG_RESULT($GNUTLSLIBS)
else
	AC_CHECK_LIB(gnutls, gnutls_init)
fi
AC_SUBST(GNUTLSHEAD)
AC_SUBST(GNUTLSLIBS)

dnl ========================================================================
dnl System Health
dnl ========================================================================

dnl Check if servicelog development package is installed
SERVICELOG=servicelog-1
SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG packages)
if $PKGCONFIG --exists $SERVICELOG
then
    SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")

dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
dnl Use one consistent spelling of the result variable: the original code
dnl initialized OPENIPMI_SERVICELOG_EXISTS but set/printed/tested the
dnl misspelled OPENIPMI_SERICELOG_EXISTS, so the "no" default was never
dnl reported when the packages were absent.
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages)
if $PKGCONFIG --exists $OPENIPMI $SERVICELOG
then
    OPENIPMI_SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
dnl NOTE(review): the conditional keeps its historical (misspelled) name so
dnl existing Makefile.am references keep working.
AM_CONDITIONAL(BUILD_OPENIPMI_SERICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")

dnl ========================================================================
dnl checks for library functions to replace them
dnl
dnl NoSuchFunctionName:
dnl	is a dummy function which no system supplies.  It is here to make
dnl	the system compile semi-correctly on OpenBSD which doesn't know
dnl	how to create an empty archive
dnl
dnl scandir: Only on BSD.
dnl	System-V systems may have it, but hidden and/or deprecated.
dnl	A replacement function is supplied for it.
dnl
dnl setenv: is some bsdish function that should also be avoided (use
dnl	putenv instead)
dnl	On the other hand, putenv doesn't provide the right API for the
dnl	code and has memory leaks designed in (sigh...)
Fortunately this dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl unsetenv: is some bsdish function that should also be avoided (No dnl replacement) dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolerably-fast replacement function for it. dnl dnl daemon: is a GNU function. The daemon() function is for programs wishing to dnl detach themselves from the controlling terminal and run in the dnl background as system daemon dnl A replacement function is supplied for it. AC_REPLACE_FUNCS(alphasort inet_pton NoSuchFunctionName scandir setenv strerror unsetenv strnlen strndup daemon strlcpy strlcat) dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes.
CC_ERRORS=""
CC_EXTRAS=""

if export | fgrep " CFLAGS=" > /dev/null; then
	export -n CFLAGS || true	# We don't want to bomb out if this fails
fi

dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x.
dnl automake requires every AM_CONDITIONAL to be evaluated on every path
dnl through configure; the original invocation lived inside the GCC-only
dnl branch below, which made non-GCC builds fail with
dnl "conditional GCC_4 was never defined", so it is hoisted here.
dnl Also probe $CC rather than a hard-coded "gcc" binary.
GCC_MAJOR=`$CC -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'`
AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4)

if test "$GCC" != yes; then
	CFLAGS="$CFLAGS -g"
	enable_fatal_warnings=no
else
	CFLAGS="$CFLAGS -ggdb3 -O0"

	# We had to eliminate -Wnested-externs because of libtool changes
	EXTRA_FLAGS="-fgnu89-inline -fstack-protector-all -Wall -Waggregate-return -Wbad-function-cast -Wcast-qual -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat=2 -Wformat-security -Wformat-nonliteral -Winline -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wunsigned-char -Wwrite-strings"

	# Additional warnings it might be nice to enable one day
	#	-Wshadow
	#	-Wunreachable-code

	for j in $EXTRA_FLAGS
	do
		if cc_supports_flag $j
		then
			CC_EXTRAS="$CC_EXTRAS $j"
		fi
	done

	dnl System specific options
	case "$host_os" in
	*linux*|*bsd*)
		if test "${enable_fatal_warnings}" = "unknown"; then
			enable_fatal_warnings=yes
		fi
		;;
	esac

	if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then
		enable_fatal_warnings=yes
	else
		enable_fatal_warnings=no
	fi

	if test "x${enable_ansi}" != xno && cc_supports_flag -std=iso9899:199409 ; then
		AC_MSG_NOTICE(Enabling ANSI Compatibility)
		CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY"
	fi

	AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS})
fi

CFLAGS="$CFLAGS $CC_EXTRAS"

NON_FATAL_CFLAGS="$CFLAGS"
AC_SUBST(NON_FATAL_CFLAGS)

dnl
dnl We reset CFLAGS to include our warnings *after* all function
dnl checking goes on, so that our warning flags don't keep the
dnl AC_*FUNCS() calls above from working.  In particular, -Werror will
dnl *always* cause us troubles if we set it before here.
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff... AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--quiet" QUIET_MAKE_OPTS="--quiet" fi AC_MSG_RESULT(Supress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKE="${MAKE} \$(QUIET_MAKE_OPTS)" AC_SUBST(CC) AC_SUBST(MAKE) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_MAKE_OPTS) AC_SUBST(QUIET_LIBTOOL_OPTS) dnl The Makefiles and shell scripts we output AC_CONFIG_FILES(Makefile \ README \ cts/Makefile \ cts/CTSvars.py \ cts/LSBDummy \ cib/Makefile \ crmd/Makefile \ pengine/Makefile \ debian/Makefile \ doc/Makefile \ doc/cibadmin.8 \ doc/crm_resource.8 \ include/Makefile \ include/crm/Makefile \ include/crm/common/Makefile \ include/crm/pengine/Makefile \ include/fencing/Makefile \ replace/Makefile \ lib/Makefile \ lib/ais/Makefile \ lib/common/Makefile \ lib/cib/Makefile \ lib/pengine/Makefile \ lib/transition/Makefile \ lib/fencing/Makefile \ lib/plugins/Makefile \ lib/plugins/lrm/Makefile \ fencing/Makefile \ fencing/stonithd/Makefile \ fencing/test/Makefile \ fencing/test/STONITHDBasicSanityCheck \ extra/Makefile \ extra/resources/Makefile \ tools/Makefile \ tools/haresources2cib.py \ tools/hb2openais.sh \ tools/crm_primitive.py \ tools/crm \ shell/Makefile \ + shell/setup.py \ shell/templates/Makefile \ shell/regression/Makefile \ shell/regression/regression.sh \ shell/regression/lrmregtest-lsb \ shell/regression/testcases/Makefile \ + shell/modules/Makefile \ + shell/modules/ui.py \ + shell/modules/ra.py 
\ + shell/modules/vars.py \ + shell/modules/help.py \ xml/Makefile \ xml/pacemaker.rng \ xml/resources.rng \ xml/constraints.rng \ xml/rule.rng \ xml/nvset.rng \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PKG_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([ AIS Plugins = ${LCRSODIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Use system LTDL = ${ac_cv_lib_ltdl_lt_dlopen}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff --git a/pacemaker.spec b/pacemaker.spec index 0f0e8ef3aa..f9883aefc6 100644 --- a/pacemaker.spec +++ b/pacemaker.spec @@ -1,1282 +1,1289 @@ %global gname haclient %global uname hacluster %global pcmk_docdir %{_docdir}/%{name} %global specversion 2 #global upstream_version ee19d8e83c2a %global upstream_prefix pacemaker # Keep around for when/if required #global alphatag %{upstream_version}.hg %global pcmk_release %{?alphatag:0.}%{specversion}%{?alphatag:.%{alphatag}}%{?dist} # Compatibility macro wrappers for legacy RPM versions that do not # support conditional builds %{!?bcond_without: %{expand: %%global bcond_without() %%{expand:%%%%{!?_without_%%{1}:%%%%global with_%%{1} 1}}}} %{!?bcond_with: 
%{expand: %%global bcond_with() %%{expand:%%%%{?_with_%%{1}:%%%%global with_%%{1} 1}}}} %{!?with: %{expand: %%global with() %%{expand:%%%%{?with_%%{1}:1}%%%%{!?with_%%{1}:0}}}} %{!?without: %{expand: %%global without() %%{expand:%%%%{?with_%%{1}:0}%%%%{!?with_%%{1}:1}}}} # Conditionals # Invoke "rpmbuild --without " or "rpmbuild --with " # to disable or enable specific features %bcond_without ais %bcond_without heartbeat # ESMTP is not available in RHEL, only in EPEL. Allow people to build # the RPM without ESMTP in case they choose not to use EPEL packages %bcond_without esmtp Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: 1.0.7 Release: %{pcmk_release} License: GPLv2+ and LGPLv2+ Url: http://www.clusterlabs.org Group: System Environment/Daemons Source0: pacemaker.tar.bz2 BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) AutoReqProv: on Requires(pre): cluster-glue Requires: resource-agents python Conflicts: heartbeat < 2.99 %if 0%{?fedora} || 0%{?centos} > 4 || 0%{?rhel} > 4 Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version)) BuildRequires: help2man libtool-ltdl-devel %endif %if 0%{?suse_version} # net-snmp-devel on SLES10 does not suck in tcpd-devel automatically BuildRequires: help2man tcpd-devel %endif # Required for core functionality BuildRequires: automake autoconf libtool pkgconfig BuildRequires: glib2-devel cluster-glue-libs-devel libxml2-devel libxslt-devel BuildRequires: pkgconfig python-devel gcc-c++ bzip2-devel gnutls-devel pam-devel # Enables optional functionality BuildRequires: ncurses-devel net-snmp-devel openssl-devel BuildRequires: lm_sensors-devel libselinux-devel %if %{with esmtp} BuildRequires: libesmtp-devel %endif %if %{with ais} BuildRequires: corosynclib-devel Requires: corosync %endif %if %{with heartbeat} BuildRequires: heartbeat-devel heartbeat-libs Requires: heartbeat >= 3.0.0 %endif %description Pacemaker is an advanced, scalable 
High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or OpenAIS. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. Available rpmbuild rebuild options: --without : heartbeat ais %package -n pacemaker-libs License: GPLv2+ and LGPLv2+ Summary: Libraries used by the Pacemaker cluster resource manager and its clients Group: System Environment/Daemons Requires: %{name} = %{version}-%{release} %description -n pacemaker-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or OpenAIS. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. %package -n pacemaker-libs-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{name}-libs = %{version}-%{release} Requires: cluster-glue-libs-devel Obsoletes: libpacemaker3 %if %{with ais} Requires: corosynclib-devel %endif %if %{with heartbeat} Requires: heartbeat-devel %endif %description -n pacemaker-libs-devel Headers and shared libraries for developing tools for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or OpenAIS. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. 
%prep %setup -q -n %{upstream_prefix}%{?upstream_version} %build ./autogen.sh # RHEL <= 5 does not support --docdir export docdir=%{pcmk_docdir} %{configure} --localstatedir=%{_var} --enable-fatal-warnings=no make %{_smp_mflags} docdir=%{pcmk_docdir} %install rm -rf %{buildroot} make install DESTDIR=%{buildroot} docdir=%{pcmk_docdir} # Scripts that need should be executable chmod a+x %{buildroot}/%{_libdir}/heartbeat/hb2openais-helper.py chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/OCFIPraTest.py chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/extracttests.py # These are not actually scripts find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.xsl' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.rng' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.dtd' -type f -print0 | xargs -0 chmod a-x # Dont package static libs or compiled python find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.pyc' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.pyo' -type f -print0 | xargs -0 rm -f +# install shell modules to site-packages +( +cd shell +python setup.py install --prefix=%{_prefix} --root=%{buildroot} +) + # Do not package these either rm %{buildroot}/%{_libdir}/heartbeat/crm_primitive.py %if %{with ais} rm %{buildroot}/%{_libdir}/service_crm.so %endif %clean rm -rf %{buildroot} %post -n pacemaker-libs -p /sbin/ldconfig %postun -n pacemaker-libs -p /sbin/ldconfig %files ########################################################### %defattr(-,root,root) %exclude %{_datadir}/pacemaker/tests %{_datadir}/pacemaker %{_datadir}/snmp/mibs/PCMK-MIB.txt %{_libdir}/heartbeat/* %{_sbindir}/cibadmin %{_sbindir}/crm_attribute %{_sbindir}/crm_diff %{_sbindir}/crm_failcount %{_sbindir}/crm_master %{_sbindir}/crm_mon 
%{_sbindir}/crm %{_sbindir}/crm_resource %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/attrd_updater %{_sbindir}/ptest %{_sbindir}/crm_shadow %{_sbindir}/cibpipe %{_sbindir}/crm_node +%{py_sitedir}/* %if %{with heartbeat} %{_sbindir}/crm_uuid %else %exclude %{_sbindir}/crm_uuid %endif # Packaged elsewhere %exclude %{pcmk_docdir}/AUTHORS %exclude %{pcmk_docdir}/COPYING %exclude %{pcmk_docdir}/COPYING.LIB %doc %{pcmk_docdir}/crm_cli.txt %doc %{pcmk_docdir}/crm_fencing.txt %doc %{pcmk_docdir}/README.hb2openais %doc %{_mandir}/man8/*.8* %doc COPYING %doc AUTHORS %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/heartbeat/crm %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pengine %dir %attr (750, %{uname}, %{gname}) %{_var}/run/crm %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %if %{with ais} %{_libexecdir}/lcrso/pacemaker.lcrso %endif %files -n pacemaker-libs %defattr(-,root,root) %{_libdir}/libcib.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libcrmcluster.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpengine.so.* %{_libdir}/libtransitioner.so.* %{_libdir}/libstonithd.so.* %doc COPYING.LIB %doc AUTHORS %files -n pacemaker-libs-devel %defattr(-,root,root) %{_includedir}/pacemaker %{_includedir}/heartbeat/fencing %{_libdir}/*.so %{_datadir}/pacemaker/tests %doc COPYING.LIB %doc AUTHORS %changelog * Tue Jan 19 2010 Andrew Beekhof - 1.0.7-2 - Rebuild for corosync 1.2.0 * Mon Jan 18 2010 Andrew Beekhof - 1.0.7-1 - Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip - Statistics: Changesets: 193 Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-) - Changes since 1.0.5-4 + High: PE: Bug 2213 - Ensure groups process location constraints so that clone-node-max works for cloned groups + High: PE: Bug lf#2153 - non-clones should not restart when clones stop/start on other nodes + High: PE: Bug lf#2209 - Clone ordering should be able to 
prevent startup of dependant clones + High: PE: Bug lf#2216 - Correctly identify the state of anonymous clones when deciding when to probe + High: PE: Bug lf#2225 - Operations that require fencing should wait for 'stonith_complete' not 'all_stopped'. + High: PE: Bug lf#2225 - Prevent clone peers from stopping while another is instance is (potentially) being fenced + High: PE: Correctly anti-colocate with a group + High: PE: Correctly unpack ordering constraints for resource sets to avoid graph loops + High: Tools: crm: load help from crm_cli.txt + High: Tools: crm: resource sets (bnc#550923) + High: Tools: crm: support for comments (LF 2221) + High: Tools: crm: support for description attribute in resources/operations (bnc#548690) + High: Tools: hb2openais: add EVMS2 CSM processing (and other changes) (bnc#548093) + High: Tools: hb2openais: do not allow empty rules, clones, or groups (LF 2215) + High: Tools: hb2openais: refuse to convert pure EVMS volumes + High: cib: Ensure the loop for login message terminates + High: cib: Finally fix reliability of receiving large messages over remote plaintext connections + High: cib: Fix remote notifications + High: cib: For remote connections, default to CRM_DAEMON_USER since thats the only one that the cib can validate the password for using PAM + High: cib: Remote plaintext - Retry sending parts of the message that did not fit the first time + High: crmd: Ensure batch-limit is correctly enforced + High: crmd: Ensure we have the latest status after a transition abort + High (bnc#547579,547582): Tools: crm: status section editing support + High: shell: Add allow-migrate as allowed meta-attribute (bnc#539968) + Medium: Build: Do not automatically add -L/lib, it could cause 64-bit arches to break + Medium: PE: Bug lf#2206 - rsc_order constraints always use score at the top level + Medium: PE: Only complain about target-role=master for non m/s resources + Medium: PE: Prevent non-multistate resources from being promoted through 
target-role + Medium: PE: Provide a default action for resource-set ordering + Medium: PE: Silently fix requires=fencing for stonith resources so that it can be set in op_defaults + Medium: Tools: Bug lf#2286 - Allow the shell to accept template parameters on the command line + Medium: Tools: Bug lf#2307 - Provide a way to determin the nodeid of past cluster members + Medium: Tools: crm: add update method to template apply (LF 2289) + Medium: Tools: crm: direct RA interface for ocf class resource agents (LF 2270) + Medium: Tools: crm: direct RA interface for stonith class resource agents (LF 2270) + Medium: Tools: crm: do not add score which does not exist + Medium: Tools: crm: do not consider warnings as errors (LF 2274) + Medium: Tools: crm: do not remove sets which contain id-ref attribute (LF 2304) + Medium: Tools: crm: drop empty attributes elements + Medium: Tools: crm: exclude locations when testing for pathological constraints (LF 2300) + Medium: Tools: crm: fix exit code on single shot commands + Medium: Tools: crm: fix node delete (LF 2305) + Medium: Tools: crm: implement -F (--force) option + Medium: Tools: crm: rename status to cibstatus (LF 2236) + Medium: Tools: crm: revisit configure commit + Medium: Tools: crm: stay in crm if user specified level only (LF 2286) + Medium: Tools: crm: verify changes on exit from the configure level + Medium: ais: Some clients such as gfs_controld want a cluster name, allow one to be specified in corosync.conf + Medium: cib: Clean up logic for receiving remote messages + Medium: cib: Create valid notification control messages + Medium: cib: Indicate where the remote connection came from + Medium: cib: Send password prompt to stderr so that stdout can be redirected + Medium: cts: Fix rsh handling when stdout is not required + Medium: doc: Fill in the section on removing a node from an AIS-based cluster + Medium: doc: Update the docs to reflect the 0.6/1.0 rolling upgrade problem + Medium: doc: Use Publican for docbook 
based documentation + Medium: fencing: stonithd: add metadata for stonithd instance attributes (and support in the shell) + Medium: fencing: stonithd: ignore case when comparing host names (LF 2292) + Medium: tools: Make crm_mon functional with remote connections + Medium: xml: Add stopped as a supported role for operations + Medium: xml: Bug bnc#552713 - Treat node unames as text fields not IDs + Medium: xml: Bug lf#2215 - Create an always-true expression for empty rules when upgrading from 0.6 * Thu Oct 29 2009 Andrew Beekhof - 1.0.5-4 - Include the fixes from CoroSync integration testing - Move the resource templates - they are not documentation - Ensure documentation is placed in a standard location - Exclude documentation that is included elsewhere in the package - Update the tarball from upstream to version ee19d8e83c2a + High: cib: Correctly clean up when both plaintext and tls remote ports are requested + High: PE: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisions + High: PE: Bug lf#2197 - Allow master instances placemaker to be influenced by colocation constraints + High: PE: Make sure promote/demote pseudo actions are created correctly + High: PE: Prevent target-role from promoting more than master-max instances + High: ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage + High: ais: Prevent deadlock - dont try to release IPC message if the connection failed + High: cib: For validation errors, send back the full CIB so the client can display the errors + High: cib: Prevent use-after-free for remote plaintext connections + High: crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat * Wed Oct 13 2009 Andrew Beekhof - 1.0.5-3 - Update the tarball from upstream to version 38cd629e5c3c + High: Core: Bug lf#2169 - Allow dtd/schema validation to be disabled + High: PE: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change + High: PE: Bug lf#2170 - stop-all-resources 
option had no effect + High: PE: Bug lf#2171 - Prevent groups from starting if they depend on a complex resource which can not + High: PE: Disable resource management if stonith-enabled=true and no stonith resources are defined + High: PE: do not include master score if it would prevent allocation + High: ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms) + High: ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync + High: ais: Gracefully handle changes to the AIS nodeid + High: crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE + High: crmd: Prevent use-after-free with LOG_DEBUG_3 + Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672) + Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm + Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild + Medium: PE: Bug lf#2178 - Indicate unmanaged clones + Medium: PE: Bug lf#2180 - Include node information for all failed ops + Medium: PE: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint + Medium: PE: Correctly log resources that would like to start but can not + Medium: PE: Stop ptest from logging to syslog + Medium: ais: Include version details in plugin name + Medium: crmd: Requery the resource metadata after every start operation * Fri Aug 21 2009 Tomas Mraz - 1.0.5-2.1 - rebuilt with new openssl * Wed Aug 19 2009 Andrew Beekhof - 1.0.5-2 - Add versioned perl dependancy as specified by https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl - No longer remove RPATH data, it prevents us finding libperl.so and no other libraries were being hardcoded - Compile in support for heartbeat - Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements depending on which stacks are supported * Mon Aug 17 2009 Andrew Beekhof - 1.0.5-1 - Add dependancy on 
resource-agents - Use the version of the configure macro that supplies --prefix, --libdir, etc - Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final) + High: Tools: crm_resource - Advertise --move instead of --migrate + Medium: Extra: New node connectivity RA that uses system ping and attrd_updater + Medium: crmd: Note that dc-deadtime can be used to mask the brokeness of some switches * Tue Aug 11 2009 Ville Skyttä - 1.0.5-0.7.c9120a53a6ae.hg - Use bzipped upstream tarball. * Wed Jul 29 2009 Andrew Beekhof - 1.0.5-0.6.c9120a53a6ae.hg - Add back missing build auto* dependancies - Minor cleanups to the install directive * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.5.c9120a53a6ae.hg - Add a leading zero to the revision when alphatag is used * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.4.c9120a53a6ae.hg - Incorporate the feedback from the cluster-glue review - Realistically, the version is a 1.0.5 pre-release - Use the global directive instead of define for variables - Use the haclient/hacluster group/user instead of daemon - Use the _configure macro - Fix install dependancies * Fri Jul 24 2009 Andrew Beekhof - 1.0.4-3 - Initial Fedora checkin - Include an AUTHORS and license file in each package - Change the library package name to pacemaker-libs to be more Fedora compliant - Remove execute permissions from xml related files - Reference the new cluster-glue devel package name - Update the tarball from upstream to version c9120a53a6ae + High: PE: Only prevent migration if the clone dependancy is stopping/starting on the target node + High: PE: Bug 2160 - Dont shuffle clones due to colocation + High: PE: New implementation of the resource migration (not stop/start) logic + Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options + Medium: PE: Prevent use-of-NULL in find_first_action() * Tue Jul 14 2009 Andrew Beekhof - 1.0.4-2 - Reference authors from the project AUTHORS file instead of listing in 
description - Change Source0 to reference the Mercurial repo - Cleaned up the summaries and descriptions - Incorporate the results of Fedora package self-review * Thu Jun 04 2009 Andrew Beekhof - 1.0.4-1 - Update source tarball to revision: 1d87d3e0fc7f (stable-1.0) - Statistics: Changesets: 209 Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-) - Changes since Pacemaker-1.0.3 + High (bnc#488291): ais: do not rely on byte endianness on ptr cast + High (bnc#507255): Tools: crm: delete rsc/op_defaults (these meta_attributes are killing me) + High (bnc#507255): Tools: crm: import properly rsc/op_defaults + High (LF 2114): Tools: crm: add support for operation instance attributes + High: ais: Bug lf#2126 - Messages replies cannot be routed to transient clients + High: ais: Fix compilation for the latest Corosync API (v1719) + High: attrd: Do not perform all updates as complete refreshes + High: cib: Fix huge memory leak affecting heartbeat-based clusters + High: Core: Allow xpath queries to match attributes + High: Core: Generate the help text directly from a tool options struct + High: Core: Handle differences in 0.6 messaging format + High: crmd: Bug lf#2120 - All transient node attribute updates need to go via attrd + High: crmd: Correctly calculate how long an FSA action took to avoid spamming the logs with errors + High: crmd: Fix another large memory leak affecting Heartbeat based clusters + High: lha: Restore compatability with older versions + High: PE: Bug bnc#495687 - Filesystem is not notified of successful STONITH under some conditions + High: PE: Make running a cluster with STONITH enabled but no STONITH resources an error and provide details on resolutions + High: PE: Prevent use-ofNULL when using resource ordering sets + High: PE: Provide inter-notification ordering guarantees + High: PE: Rewrite the notification code to be understanable and extendable + High: Tools: attrd - Prevent race condition resulting in the cluster forgetting the node 
wishes to shut down + High: Tools: crm: regression tests + High: Tools: crm_mon - Fix smtp notifications + High: Tools: crm_resource - Repair the ability to query meta attributes + Low Build: Bug lf#2105 - Debian package should contain pacemaker doc and crm templates + Medium (bnc#507255): Tools: crm: handle empty rsc/op_defaults properly + Medium (bnc#507255): Tools: crm: use the right obj_type when creating objects from xml nodes + Medium (LF 2107): Tools: crm: revisit exit codes in configure + Medium: cib: Do not bother validating updates that only affect the status section + Medium: Core: Include supported stacks in version information + Medium: crmd: Record in the CIB, the cluster infrastructure being used + Medium: cts: Do not combine crm_standby arguments - the wrapper ca not process them + Medium: cts: Fix the CIBAusdit class + Medium: Extra: Refresh showscores script from Dominik + Medium: PE: Build a statically linked version of ptest + Medium: PE: Correctly log the actions for resources that are being recovered + Medium: PE: Correctly log the occurance of promotion events + Medium: PE: Implememt node health based on a patch from Mark Hamzy + Medium: Tools: Add examples to help text outputs + Medium: Tools: crm: catch syntax errors for configure load + Medium: Tools: crm: implement erasing nodes in configure erase + Medium: Tools: crm: work with parents only when managing xml objects + Medium: Tools: crm_mon - Add option to run custom notification program on resource operations (Patch by Dominik Klein) + Medium: Tools: crm_resource - Allow --cleanup to function on complex resources and cluster-wide + Medium: Tools: haresource2cib.py - Patch from horms to fix conversion error + Medium: Tools: Include stack information in crm_mon output + Medium: Tools: Two new options (--stack,--constraints) to crm_resource for querying how a resource is configured * Wed Apr 08 2009 Andrew Beekhof - 1.0.3-1 - Update source tarball to revision: b133b3f19797 (stable-1.0) tip 
- Statistics: Changesets: 383 Diff: 329 files changed, 15471 insertions(+), 15119 deletions(-) - Changes since Pacemaker-1.0.2 + Added tag SLE11-HAE-GMC for changeset 9196be9830c2 + High: ais plugin: Fix quorum calculation (bnc#487003) + High: ais: Another memory fix leak in error path + High: ais: Bug bnc#482847, bnc#482905 - Force a clean exit of OpenAIS once Pacemaker has finished unloading + High: ais: Bug bnc#486858 - Fix update_member() to prevent spamming clients with membership events containing no changes + High: ais: Centralize all quorum calculations in the ais plugin and allow expected votes to be configured int he cib + High: ais: Correctly handle a return value of zero from openais_dispatch_recv() + High: ais: Disable logging to a file + High: ais: Fix memory leak in error path + High: ais: IPC messages are only in scope until a response is sent + High: All signal handlers used with CL_SIGNAL() need to be as minimal as possible + High: cib: Bug bnc#482885 - Simplify CIB disk-writes to prevent data loss. Required a change to the backup filename format + High: cib: crmd: Revert part of 9782ab035003. 
Complex shutdown routines need G_main_add_SignalHandler to avoid race conditions + High: crm: Avoid infinite loop during crm configure edit (bnc#480327) + High: crmd: Avoid a race condition by waiting for the attrd update to trigger a transition automatically + High: crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly + High: crmd: Bug bnc#480977 - Prevent extra, partial, shutdown when a node restarts too quickly (verified) + High: crmd: Bug bnc#489063 - Ensure the DC is always unset after we 'lose' an election + High: crmd: Bug BSC#479543 - Correctly find the migration source for timed out migrate_from actions + High: crmd: Call crm_peer_init() before we start the FSA - prevents a race condition when used with Heartbeat + High: crmd: Erasing the status section should not be forced to the local node + High: crmd: Fix memory leak in cib notification processing code + High: crmd: Fix memory leak in transition graph processing + High: crmd: Fix memory leaks found by valgrind + High: crmd: More memory leak fixes found by valgrind + High: fencing: stonithd: is_heartbeat_cluster is a no-no if there is no heartbeat support + High: PE: Bug bnc#466788 - Exclude nodes that can not run resources + High: PE: Bug bnc#466788 - Make colocation based on node attributes work + High: PE: Bug BNC#478687 - Do not crash when clone-max is 0 + High: PE: Bug bnc#488721 - Fix id-ref expansion for clones, the doc-root for clone children is not the cib root + High: PE: Bug bnc#490418 - Correctly determine node state for nodes wishing to be terminated + High: PE: Bug LF#2087 - Correctly parse the state of anonymous clones that have multiple instances on a given node + High: PE: Bug lf#2089 - Meta attributes are not inherited by clone children + High: PE: Bug lf#2091 - Correctly restart modified resources that were found active by a probe + High: PE: Bug lf#2094 - Fix probe ordering for cloned groups + High: PE: Bug LF:2075 - Fix large pingd memory leaks + 
High: PE: Correctly attach orphaned clone children to their parent + High: PE: Correctly handle terminate node attributes that are set to the output from time() + High: PE: Ensure orphaned clone members are hooked up to the parent when clone-max=0 + High: PE: Fix memory leak in LogActions + High: PE: Fix the determination of whether a group is active + High: PE: Look up the correct promotion preference for anonymous masters + High: PE: Simplify handling of start failures by changing the default migration-threshold to INFINITY + High: PE: The ordered option for clones no longer causes extra start/stop operations + High: RA: Bug bnc#490641 - Shut down dlm_controld with -TERM instead of -KILL + High: RA: pingd: Set default ping interval to 1 instead of 0 seconds + High: Resources: pingd - Correctly tell the ping daemon to shut down + High: Tools: Bug bnc#483365 - Ensure the command from cluster_test includes a value for --log-facility + High: Tools: cli: fix and improve delete command + High: Tools: crm: add and implement templates + High: Tools: crm: add support for command aliases and some common commands (i.e. 
cd,exit) + High: Tools: crm: create top configuration nodes if they are missing + High: Tools: crm: fix parsing attributes for rules (broken by the previous changeset) + High: Tools: crm: new ra set of commands + High: Tools: crm: resource agents information management + High: Tools: crm: rsc/op_defaults + High: Tools: crm: support for no value attribute in nvpairs + High: Tools: crm: the new configure monitor command + High: Tools: crm: the new configure node command + High: Tools: crm_mon - Prevent use-of-NULL when summarizing an orphan + High: Tools: hb2openais: create clvmd clone for respawn evmsd in ha.cf + High: Tools: hb2openais: fix a serious recursion bug in xml node processing + High: Tools: hb2openais: fix ocfs2 processing + High: Tools: pingd - prevent double free of getaddrinfo() output in error path + High: Tools: The default re-ping interval for pingd should be 1s not 1ms + Medium (bnc#479049): Tools: crm: add validation of resource type for the configure primitive command + Medium (bnc#479050): Tools: crm: add help for RA parameters in tab completion + Medium (bnc#479050): Tools: crm: add tab completion for primitive params/meta/op + Medium (bnc#479050): Tools: crm: reimplement cluster properties completion + Medium (bnc#486968): Tools: crm: listnodes function requires no parameters (do not mix completion with other stuff) + Medium: ais: Remove the ugly hack for dampening AIS membership changes + Medium: cib: Fix memory leaks by using mainloop_add_signal + Medium: cib: Move more logging to the debug level (was info) + Medium: cib: Overhaul the processing of synchronous replies + Medium: Core: Add library functions for instructing the cluster to terminate nodes + Medium: crmd: Add new expected-quorum-votes option + Medium: crmd: Allow up to 5 retires when an attrd update fails + Medium: crmd: Automatically detect and use new values for crm_config options + Medium: crmd: Bug bnc#490426 - Escalated shutdowns stall when there are pending resource 
operations + Medium: crmd: Clean up and optimize the DC election algorithm + Medium: crmd: Fix memory leak in shutdown + Medium: crmd: Fix memory leaks spotted by Valgrind + Medium: crmd: Ingore join messages from hosts other than our DC + Medium: crmd: Limit the scope of resource updates to the status section + Medium: crmd: Prevent the crmd from being respawned if its told to shut down when it did not ask to be + Medium: crmd: Re-check the election status after membership events + Medium: crmd: Send resource updates via the local CIB during elections + Medium: PE: Bug bnc#491441 - crm_mon does not display operations returning 'uninstalled' correctly + Medium: PE: Bug lf#2101 - For location constraints, role=Slave is equivalent to role=Started + Medium: PE: Clean up the API - removed ->children() and renamed ->find_child() to fine_rsc() + Medium: PE: Compress the display of healthy anonymous clones + Medium: PE: Correctly log the actions for resources that are being recovered + Medium: PE: Determin a promotion score for complex resources + Medium: PE: Ensure clones always have a value for globally-unique + Medium: PE: Prevent orphan clones from being allocated + Medium: RA: controld: Return proper exit code for stop op. + Medium: Tools: Bug bnc#482558 - Fix logging test in cluster_test + Medium: Tools: Bug bnc#482828 - Fix quoting in cluster_test logging setup + Medium: Tools: Bug bnc#482840 - Include directory path to CTSlab.py + Medium: Tools: crm: add more user input checks + Medium: Tools: crm: do not check resource status of we are working with a shadow + Medium: Tools: crm: fix id-refs and allow reference to top objects (i.e. primitive) + Medium: Tools: crm: ignore comments in the CIB + Medium: Tools: crm: multiple column output would not work with small lists + Medium: Tools: crm: refuse to delete running resources + Medium: Tools: crm: rudimentary if-else for templates + Medium: Tools: crm: Start/stop clones via target-role. 
+ Medium: Tools: crm_mon - Compress the node status for healthy and offline nodes + Medium: Tools: crm_shadow - Return 0/cib_ok when --create-empty succeeds + Medium: Tools: crm_shadow - Support -e, the short form of --create-empty + Medium: Tools: Make attrd quieter + Medium: Tools: pingd - Avoid using various clplumbing functions as they seem to leak + Medium: Tools: Reduce pingd logging * Mon Feb 16 2009 Andrew Beekhof - 1.0.2-1 - Update source tarball to revision: d232d19daeb9 (stable-1.0) tip - Statistics: Changesets: 441 Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-) - Changes since Pacemaker-1.0.1 + High (bnc#450815): Tools: crm cli: do not generate id for the operations tag + High: ais: Add support for the new AIS IPC layer + High: ais: Always set header.error to the correct default: SA_AIS_OK + High: ais: Bug BNC#456243 - Ensure the membership cache always contains an entry for the local node + High: ais: Bug BNC:456208 - Prevent deadlocks by not logging in the child process before exec() + High: ais: By default, disable supprt for the WIP openais IPC patch + High: ais: Detect and handle situations where ais and the crm disagree on the node name + High: ais: Ensure crm_peer_seq is updated after a membership update + High: ais: Make sure all IPC header fields are set to sane defaults + High: ais: Repair and streamline service load now that whitetank startup functions correctly + High: build: create and install doc files + High: cib: Allow clients without mainloop to connect to the cib + High: cib: CID:18 - Fix use-of-NULL in cib_perform_op + High: cib: CID:18 - Repair errors introduced in b5a18704477b - Fix use-of-NULL in cib_perform_op + High: cib: Ensure diffs contain the correct values of admin_epoch + High: cib: Fix four moderately sized memory leaks detected by Valgrind + High: Core: CID:10 - Prevent indexing into an array of schemas with a negative value + High: Core: CID:13 - Fix memory leak in log_data_element + High: Core: CID:15 
- Fix memory leak in crm_get_peer + High: Core: CID:6 - Fix use-of-NULL in copy_ha_msg_input + High: Core: Fix crash in the membership code preventing node shutdown + High: Core: Fix more memory leaks found by valgrind + High: Core: Prevent unterminated strings after decompression + High: crmd: Bug BNC:467995 - Delay marking STONITH operations complete until STONITH tells us so + High: crmd: Bug LF:1962 - Do not NACK peers because they are not (yet) in our membership. Just ignore them. + High: crmd: Bug LF:2010 - Ensure fencing cib updates create the node_state entry if needed to prevent re-fencing during cluster startup + High: crmd: Correctly handle reconnections to attrd + High: crmd: Ensure updates for lost migrate operations indicate which node it tried migrating to + High: crmd: If there are no nodes to finalize, start an election. + High: crmd: If there are no nodes to welcome, start an election. + High: crmd: Prevent node attribute loss by detecting attrd disconnections immediately + High: crmd: Prevent node re-probe loops by ensuring mandatory actions always complete + High: PE: Bug 2005 - Fix startup ordering of cloned stonith groups + High: PE: Bug 2006 - Correctly reprobe cloned groups + High: PE: Bug BNC:465484 - Fix the no-quorum-policy=suicide option + High: PE: Bug LF:1996 - Correctly process disabled monitor operations + High: PE: CID:19 - Fix use-of-NULL in determine_online_status + High: PE: Clones now default to globally-unique=false + High: PE: Correctly calculate the number of available nodes for the clone to use + High: PE: Only shoot online nodes with no-quorum-policy=suicide + High: PE: Prevent on-fail settings being ignored after a resource is successfully stopped + High: PE: Prevent use-of-NULL for failed migrate actions in process_rsc_state() + High: PE: Remove an optimization for the terminate node attribute that caused the cluster to block indefinitely + High: PE: Repair the ability to colocate based on node attributes other than uname 
+ High: PE: Start the correct monitor operation for unmanaged masters + High: stonith: CID:3 - Fix another case of exceptionally poor error handling by the original stonith developers + High: stonith: CID:5 - Checking for NULL and then dereferencing it anyway is an interesting approach to error handling + High: stonithd: Sending IPC to the cluster is a privileged operation + High: stonithd: wrong checks for shmid (0 is a valid id) + High: Tools: attrd - Correctly determine when an attribute has stopped changing and should be committed to the CIB + High: Tools: Bug 2003 - pingd does not correctly detect failures when the interface is down + High: Tools: Bug 2003 - pingd does not correctly handle node-down events on multi-NIC systems + High: Tools: Bug 2021 - pingd does not detect sequence wrapping correctly, incorrectly reports nodes offline + High: Tools: Bug BNC:468066 - Do not use the result of uname() when its no longer in scope + High: Tools: Bug BNC:473265 - crm_resource -L dumps core + High: Tools: Bug LF:2001 - Transient node attributes should be set via attrd + High: Tools: Bug LF:2036 - crm_resource cannot set/get parameters for cloned resources + High: Tools: Bug LF:2046 - Node attribute updates are lost because attrd can take too long to start + High: Tools: Cause the correct clone instance to be failed with crm_resource -F + High: Tools: cluster_test - Allow the user to select a stack and fix CTS invocation + High: Tools: crm cli: allow rename only if the resource is stopped + High: Tools: crm cli: catch system errors on file operations + High: Tools: crm cli: completion for ids in configure + High: Tools: crm cli: drop '-rsc' from attributes for order constraint + High: Tools: crm cli: exit with an appropriate exit code + High: Tools: crm cli: fix wrong order of action and resource in order constraint + High: Tools: crm cli: fox wrong exit code + High: Tools: crm cli: improve handling of cib attributes + High: Tools: crm cli: new command: configure 
rename + High: Tools: crm cli: new command: configure upgrade + High: Tools: crm cli: new command: node delete + High: Tools: crm cli: prevent key errors on missing cib attributes + High: Tools: crm cli: print long help for help topics + High: Tools: crm cli: return on syntax error when parsing score + High: Tools: crm cli: rsc_location can be without nvpairs + High: Tools: crm cli: short node preference location constraint + High: Tools: crm cli: sometimes, on errors, level would change on single shot use + High: Tools: crm cli: syntax: drop a bunch of commas (remains of help tables conversion) + High: Tools: crm cli: verify user input for sanity + High: Tools: crm: find expressions within rules (do not always skip xml nodes due to used id) + High: Tools: crm_master should not define a set id now that attrd is used. Defining one can break lookups + High: Tools: crm_mon Use the OID assigned to the project by IANA for SNMP traps + Medium (bnc#445622): Tools: crm cli: improve the node show command and drop node status + Medium (LF 2009): stonithd: improve timeouts for remote fencing + Medium: ais: Allow dead peers to be removed from membership calculations + Medium: ais: Pass node deletion events on to clients + Medium: ais: Sanitize ipc usage + Medium: ais: Supply the node uname in addtion to the id + Medium: Build: Clean up configure to ensure NON_FATAL_CFLAGS is consistent with CFLAGS (ie. 
includes -g) + Medium: Build: Install cluster_test + Medium: Build: Use more restrictive CFLAGS and fix the resulting errors + Medium: cib: CID:20 - Fix potential use-after-free in cib_native_signon + Medium: Core: Bug BNC:474727 - Set a maximum time to wait for IPC messages + Medium: Core: CID:12 - Fix memory leak in decode_transition_magic error path + Medium: Core: CID:14 - Fix memory leak in calculate_xml_digest error path + Medium: Core: CID:16 - Fix memory leak in date_to_string error path + Medium: Core: Try to track down the cause of XML parsing errors + Medium: crmd: Bug BNC:472473 - Do not wait excessive amounts of time for lost actions + Medium: crmd: Bug BNC:472473 - Reduce the transition timeout to action_timeout+network_delay + Medium: crmd: Do not fast-track the processing of LRM refreshes when there are pending actions. + Medium: crmd: do_dc_join_filter_offer - Check the 'join' message is for the current instance before deciding to NACK peers + Medium: crmd: Find option values without having to do a config upgrade + Medium: crmd: Implement shutdown using a transient node attribute + Medium: crmd: Update the crmd options to use dashes instead of underscores + Medium: cts: Add 'cluster reattach' to the suite of automated regression tests + Medium: cts: cluster_test - Make some usability enhancements + Medium: CTS: cluster_test - suggest a valid port number + Medium: CTS: Fix python import order + Medium: cts: Implement an automated SplitBrain test + Medium: CTS: Remove references to deleted classes + Medium: Extra: Resources - Use HA_VARRUN instead of HA_RSCTMP for state files as Heartbeat removes HA_RSCTMP at startup + Medium: HB: Bug 1933 - Fake crmd_client_status_callback() calls because HB does not provide them for already running processes + Medium: PE: CID:17 - Fix memory leak in find_actions_by_task error path + Medium: PE: CID:7,8 - Prevent hypothetical use-of-NULL in LogActions + Medium: PE: Defer logging the actions performed on a resource 
until we have processed ordering constraints + Medium: PE: Remove the symmetrical attribute of colocation constraints + Medium: Resources: pingd - fix the meta defaults + Medium: Resources: Stateful - Add missing meta defaults + Medium: stonithd: exit if we the pid file cannot be locked + Medium: Tools: Allow attrd clients to specify the ID the attribute should be created with + Medium: Tools: attrd - Allow attribute updates to be performed from a hosts peer + Medium: Tools: Bug LF:1994 - Clean up crm_verify return codes + Medium: Tools: Change the pingd defaults to ping hosts once every second (instead of 5 times every 10 seconds) + Medium: Tools: cibmin - Detect resource operations with a view to providing email/snmp/cim notification + Medium: Tools: crm cli: add back symmetrical for order constraints + Medium: Tools: crm cli: generate role in location when converting from xml + Medium: Tools: crm cli: handle shlex exceptions + Medium: Tools: crm cli: keep order of help topics + Medium: Tools: crm cli: refine completion for ids in configure + Medium: Tools: crm cli: replace inf with INFINITY + Medium: Tools: crm cli: streamline cib load and parsing + Medium: Tools: crm cli: supply provider only for ocf class primitives + Medium: Tools: crm_mon - Add support for sending mail notifications of resource events + Medium: Tools: crm_mon - Include the DC version in status summary + Medium: Tools: crm_mon - Sanitize startup and option processing + Medium: Tools: crm_mon - switch to event-driven updates and add support for sending snmp traps + Medium: Tools: crm_shadow - Replace the --locate option with the saner --edit + Medium: Tools: hb2openais: do not remove Evmsd resources, but replace them with clvmd + Medium: Tools: hb2openais: replace crmadmin with crm_mon + Medium: Tools: hb2openais: replace the lsb class with ocf for o2cb + Medium: Tools: hb2openais: reuse code + Medium: Tools: LF:2029 - Display an error if crm_resource is used to reset the operation history of 
non-primitive resources + Medium: Tools: Make pingd resilient to attrd failures + Medium: Tools: pingd - fix the command line switches + Medium: Tools: Rename ccm_tool to crm_node * Tue Nov 18 2008 Andrew Beekhof - 1.0.1-1 - Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip - Statistics: Changesets: 170 Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-) - Changes since Pacemaker-1.0.1 + High: ais: Allow the crmd to get callbacks whenever a node state changes + High: ais: Create an option for starting the mgmtd daemon automatically + High: ais: Ensure HA_RSCTMP exists for use by resource agents + High: ais: Hook up the openais.conf config logging options + High: ais: Zero out the PID of disconnecting clients + High: cib: Ensure global updates cause a disk write when appropriate + High: Core: Add an extra snaity check to getXpathResults() to prevent segfaults + High: Core: Do not redefine __FUNCTION__ unnecessarily + High: Core: Repair the ability to have comments in the configuration + High: crmd: Bug:1975 - crmd should wait indefinitely for stonith operations to complete + High: crmd: Ensure PE processing does not occur for all error cases in do_pe_invoke_callback + High: crmd: Requests to the CIB should cause any prior PE calculations to be ignored + High: heartbeat: Wait for membership 'up' events before removing stale node status data + High: PE: Bug LF:1988 - Ensure recurring operations always have the correct target-rc set + High: PE: Bug LF:1988 - For unmanaged resources we need to skip the usual can_run_resources() checks + High: PE: Ensure the terminate node attribute is handled correctly + High: PE: Fix optional colocation + High: PE: Improve up the detection of 'new' nodes joining the cluster + High: PE: Prevent assert failures in master_color() by ensuring unmanaged masters are always reallocated to their current location + High: Tools: crm cli: parser: return False on syntax error and None for comments + High: Tools: crm cli: 
unify template and edit commands + High: Tools: crm_shadow - Show more line number information after validation failures + High: Tools: hb2openais: add option to upgrade the CIB to v3.0 + High: Tools: hb2openais: add U option to getopts and update usage + High: Tools: hb2openais: backup improved and multiple fixes + High: Tools: hb2openais: fix class/provider reversal + High: Tools: hb2openais: fix testing + High: Tools: hb2openais: move the CIB update to the end + High: Tools: hb2openais: update logging and set logfile appropriately + High: Tools: LF:1969 - Attrd never sets any properties in the cib + High: Tools: Make attrd functional on OpenAIS + Medium: ais: Hook up the options for specifying the expected number of nodes and total quorum votes + Medium: ais: Look for pacemaker options inside the service block with 'name: pacemaker' instead of creating an addtional configuration block + Medium: ais: Provide better feedback when nodes change nodeids (in openais.conf) + Medium: cib: Always store cib contents on disk with num_updates=0 + Medium: cib: Ensure remote access ports are cleaned up on shutdown + Medium: crmd: Detect deleted resource operations automatically + Medium: crmd: Erase a nodes resource operations and transient attributes after a successful STONITH + Medium: crmd: Find a more appropriate place to update quorum and refresh attrd attributes + Medium: crmd: Fix the handling of unexpected PE exits to ensure the current CIB is stored + Medium: crmd: Fix the recording of pending operations in the CIB + Medium: crmd: Initiate an attrd refresh _after_ the status section has been fully repopulated + Medium: crmd: Only the DC should update quorum in an openais cluster + Medium: Ensure meta attributes are used consistantly + Medium: PE: Allow group and clone level resource attributes + Medium: PE: Bug N:437719 - Ensure scores from colocated resources count when allocating groups + Medium: PE: Prevent lsb scripts from being used in globally unique clones + 
Medium: PE: Make a best-effort guess at a migration threshold for people with 0.6 configs + Medium: Resources: controld - ensure we are part of a clone with globally_unique=false + Medium: Tools: attrd - Automatically refresh all attributes after a CIB replace operation + Medium: Tools: Bug LF:1985 - crm_mon - Correctly process failed cib queries to allow reconnection after cluster restarts + Medium: Tools: Bug LF:1987 - crm_verify incorrectly warns of configuration upgrades for the most recent version + Medium: Tools: crm (bnc#441028): check for key error in attributes management + Medium: Tools: crm_mon - display the meaning of the operation rc code instead of the status + Medium: Tools: crm_mon - Fix the display of timing data + Medium: Tools: crm_verify - check that we are being asked to validate a complete config + Medium: xml: Relax the restriction on the contents of rsc_location.node * Thu Oct 16 2008 Andrew Beekhof - 1.0.0-1 - Update source tarball to revision: 388654dfef8f tip - Statistics: Changesets: 261 Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-) - Changes since f805e1b30103 + High: add the crm cli program + High: ais: Move the service id definition to a common location and make sure it is always used + High: build: rename hb2openais.sh to .in and replace paths with vars + High: cib: Implement --create for crm_shadow + High: cib: Remove dead files + High: Core: Allow the expected number of quorum votes to be configurable + High: Core: cl_malloc and friends were removed from Heartbeat + High: Core: Only call xmlCleanupParser() if we parsed anything. 
Doing so unconditionally seems to cause a segfault + High: hb2openais.sh: improve pingd handling; several bugs fixed + High: hb2openais: fix clone creation; replace EVMS strings + High: new hb2openais.sh conversion script + High: PE: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty) + High: PE: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly. + High: PE: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures + High: PE: Bug LF:1955 - Umanaged mode prevents master resources from being allocated correctly + High: PE: Bug N:420538 - Anit-colocation caused a positive node preference + High: PE: Correctly handle unmanaged resources to prevent them from being started elsewhere + High: PE: crm_resource - Fix the --migrate command + High: PE: MAke stonith-enabled default to true and warn if no STONITH resources are found + High: PE: Make sure orphaned clone children are created correctly + High: PE: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete + High: stonithd (LF 1951): fix remote stonith operations + High: stonithd: fix handling of timeouts + High: stonithd: fix logic for stonith resource priorities + High: stonithd: implement the fence-timeout instance attribute + High: stonithd: initialize value before reading fence-timeout + High: stonithd: set timeouts for fencing ops to the timeout of the start op + High: stonithd: stonith rsc priorities (new feature) + High: Tools: Add hb2openais - a tool for upgrading a Heartbeat cluster to use OpenAIS instead + High: Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations + High: Tools: Make pingd functional on Linux + High: Update version numbers for 1.0 candidates + Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid + Medium: ais: Use the agreed service number + Medium: Build: 
Reliably detect heartbeat libraries during configure + Medium: Build: Supply prototypes for libreplace functions when needed + Medium: Build: Teach configure how to find corosync + Medium: Core: Provide better feedback if Pacemaker is started by a stack it does not support + Medium: crmd: Avoid calling GHashTable functions with NULL + Medium: crmd: Delay raising I_ERROR when the PE exits until we have had a chance to save the current CIB + Medium: crmd: Hook up the stonith-timeout option to stonithd + Medium: crmd: Prevent potential use-of-NULL in global_timer_callback + Medium: crmd: Rationalize the logging of graph aborts + Medium: PE: Add a stonith_timeout option and remove new options that are better set in rsc_defaults + Medium: PE: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute + Medium: PE: Bug LF:1950 - Notifications do not contain all documented resource state fields + Medium: PE: Bug N:417585 - Do not restart group children whos individual score drops below zero + Medium: PE: Detect clients that disconnect before receiving their reply + Medium: PE: Implement a true maintenance mode + Medium: PE: Implement on-fail=standby for NTT. 
Derived from a patch by Satomi TANIGUCHI + Medium: PE: Print the correct message when stonith is disabled + Medium: PE: ptest - check the input is valid before proceeding + Medium: PE: Revert group stickiness to the 'old way' + Medium: PE: Use the correct attribute for action 'requires' (was prereq) + Medium: stonithd: Fix compilation without full heartbeat install + Medium: stonithd: exit with better code on empty host list + Medium: tools: Add a new regression test for CLI tools + Medium: tools: crm_resource - return with non-zero when a resource migration command is invalid + Medium: tools: crm_shadow - Allow the admin to start with an empty CIB (and no cluster connection) + Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema * Mon Sep 22 2008 Andrew Beekhof - 0.7.3-1 - Update source tarball to revision: 33e677ab7764+ tip - Statistics: Changesets: 133 Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-) - Changes since f805e1b30103 + High: Tools: add the crm cli program + High: Core: cl_malloc and friends were removed from Heartbeat + High: Core: Only call xmlCleanupParser() if we parsed anything. Doing so unconditionally seems to cause a segfault + High: new hb2openais.sh conversion script + High: PE: Bug LF:1950 - Ensure the current values for all notification variables are always set (even if empty) + High: PE: Bug LF:1955 - Ensure unmanaged masters are unconditionally repromoted to ensure they are monitored correctly. 
+ High: PE: Bug LF:1955 - Fix another case of filtering causing unmanaged master failures + High: PE: Bug LF:1955 - Umanaged mode prevents master resources from being allocated correctly + High: PE: Bug N:420538 - Anit-colocation caused a positive node preference + High: PE: Correctly handle unmanaged resources to prevent them from being started elsewhere + High: PE: crm_resource - Fix the --migrate command + High: PE: MAke stonith-enabled default to true and warn if no STONITH resources are found + High: PE: Make sure orphaned clone children are created correctly + High: PE: Monitors for unmanaged resources do not need to wait for start/promote/demote actions to complete + High: stonithd (LF 1951): fix remote stonith operations + High: Tools: crm_verify - clean up the upgrade logic to prevent crash on invalid configurations + Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid + Medium: ais: Use the agreed service number + Medium: PE: Allow external entities to ask for a node to be shot by creating a terminate=true transient node attribute + Medium: PE: Bug LF:1950 - Notifications do not contain all documented resource state fields + Medium: PE: Bug N:417585 - Do not restart group children whos individual score drops below zero + Medium: PE: Implement a true maintenance mode + Medium: PE: Print the correct message when stonith is disabled + Medium: stonithd: exit with better code on empty host list + Medium: xml: pacemaker-0.7 is now an alias for the 1.0 schema * Wed Aug 20 2008 Andrew Beekhof - 0.7.1-1 - Update source tarball to revision: f805e1b30103+ tip - Statistics: Changesets: 184 Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-) - Changes since 0.7.0-19 + Fix compilation when GNUTLS isnt found + High: admin: Fix use-after-free in crm_mon + High: Build: Remove testing code that prevented heartbeat-only builds + High: cib: Use single quotes so that the xpath queries for nvpairs will succeed + High: crmd: Always connect 
to stonithd when the TE starts and ensure we notice if it dies + High: crmd: Correctly handle a dead PE process + High: crmd: Make sure async-failures cause the failcount to be incrimented + High: PE: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes + High: PE: Parse resource ordering sets correctly + High: PE: Prevent use-of-NULL - order->rsc_rh will not always be non-NULL + High: PE: Unpack colocation sets correctly + High: Tools: crm_mon - Prevent use-of-NULL for orphaned resources + Medium: ais: Add support for a synchronous call to retrieve the nodes nodeid + Medium: ais: Allow transient clients to receive membership updates + Medium: ais: Avoid double-free in error path + Medium: ais: Include in the mebership nodes for which we have not determined their hostname + Medium: ais: Spawn the PE from the ais plugin instead of the crmd + Medium: cib: By default, new configurations use the latest schema + Medium: cib: Clean up the CIB if it was already disconnected + Medium: cib: Only incriment num_updates if something actually changed + Medium: cib: Prevent use-after-free in client after abnormal termination of the CIB + Medium: Core: Fix memory leak in xpath searches + Medium: Core: Get more details regarding parser errors + Medium: Core: Repair expand_plus_plus - do not call char2score on unexpanded values + Medium: Core: Switch to the libxml2 parser - its significantly faster + Medium: Core: Use a libxml2 library function for xml -> text conversion + Medium: crmd: Asynchronous failure actions have no parameters + Medium: crmd: Avoid calling glib functions with NULL + Medium: crmd: Do not allow an election to promote a node from S_STARTING + Medium: crmd: Do not vote if we have not completed the local startup + Medium: crmd: Fix te_update_diff() now that get_object_root() functions differently + Medium: crmd: Fix the lrmd xpath expressions to not contain quotes + Medium: crmd: If we get a join offer during an election, better restart the 
election + Medium: crmd: No further processing is needed when using the LRMs API call for failing resources + Medium: crmd: Only update have-quorum if the value changed + Medium: crmd: Repair the input validation logic in do_te_invoke + Medium: cts: CIBs can no longer contain comments + Medium: cts: Enable a bunch of tests that were incorrectly disabled + Medium: cts: The libxml2 parser wont allow v1 resources to use integers as parameter names + Medium: Do not use the cluster UID and GID directly. Look them up based on the configured value of HA_CCMUSER + Medium: Fix compilation when heartbeat is not supported + Medium: PE: Allow groups to be involved in optional ordering constraints + Medium: PE: Allow sets of operations to be reused by multiple resources + Medium: PE: Bug LF:1941 - Mark extra clone instances as orphans and do not show inactive ones + Medium: PE: Determin the correct migration-threshold during resource expansion + Medium: PE: Implement no-quorum-policy=suicide (FATE #303619) + Medium: pengine: Clean up resources after stopping old copies of the PE + Medium: pengine: Teach the PE how to stop old copies of itself + Medium: Tools: Backport hb_report updates + Medium: Tools: cib_shadow - On create, spawn a new shell with CIB_shadow and PS1 set accordingly + Medium: Tools: Rename cib_shadow to crm_shadow * Fri Jul 18 2008 Andrew Beekhof - 0.7.0-19 - Update source tarball to revision: 007c3a1c50f5 (unstable) tip - Statistics: Changesets: 108 Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-) - Changes added since unstable-0.7 + High: admin: Fix use-after-free in crm_mon + High: ais: Change the tag for the ais plugin to "pacemaker" (used in openais.conf) + High: ais: Log terminated processes as an error + High: cib: Performance - Reorganize things to avoid calculating the XML diff twice + High: PE: Bug LF:1941 - Handle failed clone instance probes when clone-max < #nodes + High: PE: Fix memory leak in action2xml + High: PE: Make OCF_ERR_ARGS 
a node-level error rather than a cluster-level one + High: PE: Properly handle clones that are not installed on all nodes + Medium: admin: cibadmin - Show any validation errors if the upgrade failed + Medium: admin: cib_shadow - Implement --locate to display the underlying filename + Medium: admin: cib_shadow - Implement a --diff option + Medium: admin: cib_shadow - Implement a --switch option + Medium: admin: crm_resource - create more compact constraints that do not use lifetime (which is deprecated) + Medium: ais: Approximate born_on for OpenAIS based clusters + Medium: cib: Remove do_id_check, it is a poor substitute for ID validation by a schema + Medium: cib: Skip construction of pre-notify messages if no-one wants one + Medium: Core: Attempt to streamline some key functions to increase performance + Medium: Core: Clean up XML parser after validation + Medium: crmd: Detect and optimize the CRMs behavior when processing diffs of an LRM refresh + Medium: Fix memory leaks when resetting the name of an XML object + Medium: PE: Prefer the current location if it is one of a group of nodes with the same (highest) score * Wed Jun 25 2008 Andrew Beekhof - 0.7.0-1 - Update source tarball to revision: bde0c7db74fb tip - Statistics: Changesets: 439 Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-) - Changes added since stable-0.6 + High: A new tool for setting up and invoking CTS + High: Admin: All tools now use --node (-N) for specifying node unames + High: Admin: All tools now use --xml-file (-x) and --xml-text (-X) for specifying where to find XML blobs + High: cib: Cleanup the API - remove redundant input fields + High: cib: Implement CIB_shadow - a facility for making and testing changes before uploading them to the cluster + High: cib: Make registering per-op callbacks an API call and renamed (for clarity) the API call for requesting notifications + High: Core: Add a facility for automatically upgrading old configurations + High: Core: Adopt libxml2 
as the XML processing library - all external clients need to be recompiled + High: Core: Allow sending TLS messages larger than the MTU + High: Core: Fix parsing of time-only ISO dates + High: Core: Smarter handling of XML values containing quotes + High: Core: XML memory corruption - catch, and handle, cases where we are overwriting an attribute value with itself + High: Core: The xml ID type does not allow UUIDs that start with a number + High: Core: Implement XPath based versions of query/delete/replace/modify + High: Core: Remove some HA2.0.(3,4) compatability code + High: crmd: Overhaul the detection of nodes that are starting vs. failed + High: PE: Bug LF:1459 - Allow failures to expire + High: PE: Have the PE do non-persistent configuration upgrades before performing calculations + High: PE: Replace failure-stickiness with a simple 'migration-threshold' + High: TE: Simplify the design by folding the tengine process into the crmd + Medium: Admin: Bug LF:1438 - Allow the list of all/active resource operations to be queried by crm_resource + Medium: Admin: Bug LF:1708 - crm_resource should print a warning if an attribute is already set as a meta attribute + Medium: Admin: Bug LF:1883 - crm_mon should display fail-count and operation history + Medium: Admin: Bug LF:1883 - crm_mon should display operation timing data + Medium: Admin: Bug N:371785 - crm_resource -C does not also clean up fail-count attributes + Medium: Admin: crm_mon - include timing data for failed actions + Medium: ais: Read options from the environment since objdb is not completely usable yet + Medium: cib: Add sections for op_defaults and rsc_defaults + Medium: cib: Better matching notification callbacks (for detecting duplicates and removal) + Medium: cib: Bug LF:1348 - Allow rules and attribute sets to be referenced for use in other objects + Medium: cib: BUG LF:1918 - By default, all cib calls now timeout after 30s + Medium: cib: Detect updates that decrease the version tuple + Medium: cib: 
Implement a client-side operation timeout - Requires LHA update + Medium: cib: Implement callbacks and async notifications for remote connections + Medium: cib: Make cib->cmds->update() an alias for modify at the API level (also implemented in cibadmin) + Medium: cib: Mark the CIB as disconnected if the IPC connection is terminated + Medium: cib: New call option 'cib_can_create' which can be passed to modify actions - allows the object to be created if it does not exist yet + Medium: cib: Reimplement get|set|delete attributes using XPath + Medium: cib: Remove some useless parts of the API + Medium: cib: Remove the 'attributes' scaffolding from the new format + Medium: cib: Implement the ability for clients to connect to remote servers + Medium: Core: Add support for validating xml against RelaxNG schemas + Medium: Core: Allow more than one item to be modified/deleted in XPath based operations + Medium: Core: Fix the sort_pairs function for creating sorted xml objects + Medium: Core: iso8601 - Implement subtract_duration and fix subtract_time + Medium: Core: Reduce the amount of xml copying occuring + Medium: Core: Support value='value+=N' XML updates (in addtion to value='value++') + Medium: crmd: Add support for lrm_ops->fail_rsc if its available + Medium: crmd: HB - watch link status for node leaving events + Medium: crmd: Bug LF:1924 - Improved handling of lrmd disconnects and shutdowns + Medium: crmd: Do not wait for actions with a start_delay over 5 minutes. 
Confirm them immediately + Medium: PE: Bug LF:1328 - Do not fencing nodes in clusters without managed resources + Medium: PE: Bug LF:1461 - Give transient node attributes (in ) preference over persistent ones (in ) + Medium: PE: Bug LF:1884, Bug LF:1885 - Implement N:M ordering and colocation constraints + Medium: PE: Bug LF:1886 - Create a resource and operation 'defaults' config section + Medium: PE: Bug LF:1892 - Allow recurring actions to be triggered at known times + Medium: PE: Bug LF:1926 - Probes should complete before stop actions are invoked + Medium: PE: Fix the standby when its set as a transient attribute + Medium: PE: Implement a global 'stop-all-resources' option + Medium: PE: Implement cibpipe, a tool for performing/simulating config changes "offline" + Medium: PE: We do not allow colocation with specific clone instances + Medium: Tools: pingd - Implement a stack-independant version of pingd + Medium: xml: Ship an xslt for upgrading from 0.6 to 0.7 * Thu Jun 19 2008 Andrew Beekhof - 0.6.5-1 - Update source tarball to revision: b9fe723d1ac5 tip - Statistics: Changesets: 48 Diff: 37 files changed, 1204 insertions(+), 234 deletions(-) - Changes since Pacemaker-0.6.4 + High: Admin: Repair the ability to delete failcounts + High: ais: Audit IPC handling between the AIS plugin and CRM processes + High: ais: Have the plugin create needed /var/lib directories + High: ais: Make sure the sync and async connections are assigned correctly (not swapped) + High: cib: Correctly detect configuration changes - num_updates does not count + High: PE: Apply stickiness values to the whole group, not the individual resources + High: PE: Bug N:385265 - Ensure groups are migrated instead of remaining partially active on the current node + High: PE: Bug N:396293 - Enforce manditory group restarts due to ordering constraints + High: PE: Correctly recover master instances found active on more than one node + High: PE: Fix memory leaks reported by Valgrind + Medium: Admin: 
crm_mon - Misc improvements from Satomi Taniguchi + Medium: Bug LF:1900 - Resource stickiness should not allow placement in asynchronous clusters + Medium: crmd: Ensure joins are completed promptly when a node taking part dies + Medium: PE: Avoid clone instance shuffling in more cases + Medium: PE: Bug LF:1906 - Remove an optimization in native_merge_weights() causing group scores to behave eratically + Medium: PE: Make use of target_rc data to correctly process resource operations + Medium: PE: Prevent a possible use of NULL in sort_clone_instance() + Medium: TE: Include target rc in the transition key - used to correctly determin operation failure * Thu May 22 2008 Andrew Beekhof - 0.6.4-1 - Update source tarball to revision: 226d8e356924 tip - Statistics: Changesets: 55 Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-) - Changes since Pacemaker-0.6.3 + High: crmd: Bug LF:1881 LF:1882 - Overhaul the logic for operation cancelation and deletion + High: crmd: Bug LF:1894 - Make sure cancelled recurring operations are cleaned out from the CIB + High: PE: Bug N:387749 - Colocation with clones causes unnecessary clone instance shuffling + High: PE: Ensure 'master' monitor actions are cancelled _before_ we demote the resource + High: PE: Fix assert failure leading to core dump - make sure variable is properly initialized + High: PE: Make sure 'slave' monitoring happens after the resource has been demoted + High: PE: Prevent failure stickiness underflows (where too many failures become a _positive_ preference) + Medium: Admin: crm_mon - Only complain if the output file could not be opened + Medium: Common: filter_action_parameters - enable legacy handling only for older versions + Medium: PE: Bug N:385265 - The failure stickiness of group children is ignored until it reaches -INFINITY + Medium: PE: Implement master and clone colocation by exlcuding nodes rather than setting ones score to INFINITY (similar to cs: 756afc42dc51) + Medium: TE: Bug LF:1875 - 
Correctly find actions to cancel when their node leaves the cluster * Wed Apr 23 2008 Andrew Beekhof - 0.6.3-1 - Update source tarball to revision: fd8904c9bc67 tip - Statistics: Changesets: 117 Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-) - Changes since Pacemaker-0.6.2 + High: Admin: Bug LF:1848 - crm_resource - Pass set name and id to delete_resource_attr() in the correct order + High: Build: SNMP has been moved to the management/pygui project + High: crmd: Bug LF1837 - Unmanaged resources prevent crmd from shutting down + High: crmd: Prevent use-after-free in lrm interface code (Patch based on work by Keisuke MORI) + High: PE: Allow the cluster to make progress by not retrying failed demote actions + High: PE: Anti-colocation with slave should not prevent master colocation + High: PE: Bug LF 1768 - Wait more often for STONITH ops to complete before starting resources + High: PE: Bug LF1836 - Allow is-managed-default=false to be overridden by individual resources + High: PE: Bug LF185 - Prevent pointless master/slave instance shuffling by ignoring the master-pref of stopped instances + High: PE: Bug N-191176 - Implement interleaved ordering for clone-to-clone scenarios + High: PE: Bug N-347004 - Ensure clone notifications are always sent when an instance is stopped/started + High: PE: Bug N-347004 - Include notification ordering is correct for interleaved clones + High: PE: Bug PM-11 - Directly link probe_complete to starting clone instances + High: PE: Bug PM1 - Fix setting failcounts when applied to complex resources + High: PE: Bug PM12, LF1648 - Extensive revision of group ordering + High: PE: Bug PM7 - Ensure masters are always demoted before they are stopped + High: PE: Create probes after allocation to allow smarter handling of anonymous clones + High: PE: Do not prioritize clone instances that must be moved + High: PE: Fix error in previous commit that allowed more than the required number of masters to be promoted + High: PE: Group 
start ordering fixes + High: PE: Implement promote/demote ordering for cloned groups + High: TE: Repair failcount updates + High: TE: Use the correct offset when updating failcount + Medium: Admin: Add a summary output that can be easily parsed by CTS for audit purposes + Medium: Build: Make configure fail if bz2 or libxml2 are not present + Medium: Build: Re-instate a better default for LCRSODIR + Medium: CIB: Bug LF-1861 - Filter irrelvant error status from synchronous CIB clients + Medium: Core: Bug 1849 - Invalid conversion of ordinal leap year to gregorian date + Medium: Core: Drop compataibility code for 2.0.4 and 2.0.5 clusters + Medium: crmd: Bug LF-1860 - Automatically cancel recurring ops before demote and promote operations (not only stops) + Medium: crmd: Save the current CIB contents if we detect the PE crashed + Medium: PE: Bug LF:1866 - Fix version check when applying compatability handling for failed start operations + Medium: PE: Bug LF:1866 - Restore the ability to have start failures not be fatal + Medium: PE: Bug PM1 - Failcount applies to all instances of non-unique clone + Medium: PE: Correctly set the state of partially active master/slave groups + Medium: PE: Do not claim to be stopping an already stopped orphan + Medium: PE: Ensure implies_left ordering constraints are always effective + Medium: PE: Indicate each resources 'promotion' score + Medium: PE: Prevent a possible use-of-NULL + Medium: PE: Reprocess the current action if it changed (so that any prior dependancies are updated) + Medium: TE: Bug LF-1859 - Wait for fail-count updates to complete before terminating the transition + Medium: TE: Bug LF:1859 - Do not abort graphs due to our own failcount updates + Medium: TE: Bug LF:1859 - Prevent the TE from interupting itself * Thu Feb 14 2008 Andrew Beekhof - 0.6.2-1 - Update source tarball to revision: 28b1a8c1868b tip - Statistics: Changesets: 11 Diff: 7 files changed, 58 insertions(+), 18 deletions(-) - Changes since Pacemaker-0.6.1 
+ haresources2cib.py: set default-action-timeout to the default (20s) + haresources2cib.py: update ra parameters lists + Medium: SNMP: Allow the snmp subagent to be built (patch from MATSUDA, Daiki) + Medium: Tools: Make sure the autoconf variables in haresources2cib are expanded * Tue Feb 12 2008 Andrew Beekhof - 0.6.1-1 - Update source tarball to revision: e7152d1be933 tip - Statistics: Changesets: 25 Diff: 37 files changed, 1323 insertions(+), 227 deletions(-) - Changes since Pacemaker-0.6.0 + High: CIB: Ensure changes to top-level attributes (like admin_epoch) cause a disk write + High: CIB: Ensure the archived file hits the disk before returning + High: CIB: Repair the ability to do 'atomic incriment' updates (value="value++") + High: crmd: Bug #7 - Connecting to the crmd immediately after startup causes use-of-NULL + Medium: CIB: Mask cib_diff_resync results from the caller - they do not need to know + Medium: crmd: Delay starting the IPC server until we are fully functional + Medium: CTS: Fix the startup patterns + Medium: PE: Bug 1820 - Allow the first resource in a group to be migrated + Medium: PE: Bug 1820 - Check the colocation dependancies of resources to be migrated * Mon Jan 14 2008 Andrew Beekhof - 0.6.0-2 - This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat. - For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in the new pacemaker-pygui project. Build dependancies prevent them from being included in Heartbeat (since the built-in CRM is no longer supported) and, being non-core components, are not included with Pacemaker. 
- Update source tarball to revision: c94b92d550cf - Statistics: Changesets: 347 Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-) - Test hardware: + 6-node vmware cluster (sles10-sp1/256Mb/vmware stonith) on a single host (opensuse10.3/2Gb/2.66Ghz Quad Core2) + 7-node EMC Centera cluster (sles10/512Mb/2Ghz Xeon/ssh stonith) - Notes: Heartbeat Stack + All testing was performed with STONITH enabled + The CRM was enabled using the "crm respawn" directive - Notes: OpenAIS Stack + This release contains a preview of support for the OpenAIS cluster stack + The current release of the OpenAIS project is missing two important patches that we require. OpenAIS packages containing these patches are available for most major distributions at: http://download.opensuse.org/repositories/server:/ha-clustering + The OpenAIS stack is not currently recommended for use in clusters that have shared data as STONITH support is not yet implimented + pingd is not yet available for use with the OpenAIS stack + 3 significant OpenAIS issues were found during testing of 4 and 6 node clusters. We are activly working together with the OpenAIS project to get these resolved. 
- Pending bugs encountered during testing: + OpenAIS #1736 - Openais membership took 20s to stabilize + Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match + OpenAIS #1793 - Assertion failure in memb_state_gather_enter() + OpenAIS #1796 - Cluster message corruption - Changes since Heartbeat-2.1.2-24 + High: Add OpenAIS support + High: Admin: crm_uuid - Look in the right place for Heartbeat UUID files + High: admin: Exit and indicate a problem if the crmd exits while crmadmin is performing a query + High: cib: Fix CIB_OP_UPDATE calls that modify the whole CIB + High: cib: Fix compilation when supporting the heartbeat stack + High: cib: Fix memory leaks caused by the switch to get_message_xml() + High: cib: HA_VALGRIND_ENABLED needs to be set _and_ set to 1|yes|true + High: cib: Use get_message_xml() in preference to cl_get_struct() + High: cib: Use the return value from call to write() in cib_send_plaintext() + High: Core: ccm nodes can legitimately have a node id of 0 + High: Core: Fix peer-process tracking for the Heartbeat stack + High: Core: Heartbeat does not send status notifications for nodes that were already part of the cluster. 
Fake them instead + High: CRM: Add children to HA_Messages such that the field name matches F_XML_TAGNAME + High: crm: Adopt a more flexible appraoch to enabling Valgrind + High: crm: Fix compilation when bzip2 is not installed + High: CRM: Future-proof get_message_xml() + High: crmd: Filter election responses based on time not FSA state + High: crmd: Handle all possible peer states in crmd_ha_status_callback() + High: crmd: Make sure the current date/time is set - prevents use-of-NULL when evaluating rules + High: crmd: Relax an assertion regrading ccm membership instances + High: crmd: Use (node->processes&crm_proc_ais) to accurately update the CIB after replace operations + High: crmd: Heartbeat: Accurately record peer client status + High: PE: Bug 1777 - Allow colocation with a resource in the Stopped state + High: PE: Bug 1822 - Prevent use-of-NULL in PromoteRsc() + High: PE: Implement three recovery policies based on op_status and op_rc + High: PE: Parse fail-count correctly (it may be set to ININFITY) + High: PE: Prevent graph-loop when stonith agents need to be moved around before a STONITH op + High: PE: Prevent graph-loops when two operations have the same name+interval + High: te: Cancel active timers when destroying graphs + High: TE: Ensure failcount is set correctly for failed stops/starts + High: TE: Update failcount for oeprations that time out + Medium: admin: Prevent hang in crm_mon -1 when there is no cib connection - Patch from Junko IKEDA + Medium: cib: Require --force|-f when performing potentially dangerous commands with cibadmin + Medium: cib: Tweak the shutdown code + Medium: Common: Only count peer processes of active nodes + Medium: Core: Create generic cluster sign-in method + Medium: core: Fix compilation when Heartbeat support is disabled + Medium: Core: General cleanup for supporting two stacks + Medium: Core: iso6601 - Support parsing of time-only strings + Medium: core: Isolate more code that is only needed when SUPPORT_HEARTBEAT is 
enabled + Medium: crm: Improved logging of errors in the XML parser + Medium: crmd: Fix potential use-of-NULL in string comparison + Medium: crmd: Reimpliment syncronizing of CIB queries and updates when invoking the PE + Medium: crm_mon: Indicate when a node is both in standby mode and offline + Medium: PE: Bug 1822 - Do not try an promote groups if not all of it is active + Medium: PE: on_fail=nothing is an alias for 'ignore' not 'restart' + Medium: PE: Prevent a potential use-of-NULL in cron_range_satisfied() + snmp subagent: fix a problem on displaying an unmanaged group + snmp subagent: use the syslog setting + snmp: v2 support (thanks to Keisuke MORI) + snmp_subagent - made it not complain about some things if shutting down * Mon Dec 10 2007 Andrew Beekhof - 0.6.0-1 - Initial opensuse package check-in diff --git a/shell/Makefile.am b/shell/Makefile.am index 531c992dd9..b44fdc76df 100644 --- a/shell/Makefile.am +++ b/shell/Makefile.am @@ -1,22 +1,26 @@ # # doc: Pacemaker code # # Copyright (C) 2008 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# MAINTAINERCLEANFILES = Makefile.in -SUBDIRS = templates regression +sbin_SCRIPTS = crm + +EXTRA_DIST = crm setup.py.in README.install + +SUBDIRS = templates regression modules diff --git a/shell/README.install b/shell/README.install new file mode 100644 index 0000000000..8d2fe52153 --- /dev/null +++ b/shell/README.install @@ -0,0 +1,7 @@ +Run from your favourite packager something like this: + + python setup.py install --prefix=$prefix --root=$rootdir + python%{py_ver} %{py_libdir}/compileall.py -d %{py_site}/ \ + $RPM_BUILD_ROOT/%{py_sitedir} + +The above may be used in the RPM spec file. diff --git a/shell/crm b/shell/crm new file mode 100644 index 0000000000..16ab998db5 --- /dev/null +++ b/shell/crm @@ -0,0 +1,33 @@ +#!/usr/bin/python +# + +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import crm.main + +try: + from crm.main import run +except ImportError: + import sys + sys.stderr.write("abort: couldn't find crm libraries in [%s]\n" % + ' '.join(sys.path)) + sys.stderr.write("(check your install and PYTHONPATH)\n") + sys.exit(-1) + +crm.main.run() +# vim:ts=4:sw=4:et: diff --git a/shell/Makefile.am b/shell/modules/Makefile.am similarity index 77% copy from shell/Makefile.am copy to shell/modules/Makefile.am index 531c992dd9..31923f1483 100644 --- a/shell/Makefile.am +++ b/shell/modules/Makefile.am @@ -1,22 +1,27 @@ # # doc: Pacemaker code # # Copyright (C) 2008 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# MAINTAINERCLEANFILES = Makefile.in -SUBDIRS = templates regression +modules = cache.py cibconfig.py cibstatus.py clidisplay.py help.py.in \ + levels.py main.py msg.py parse.py ra.py.in singletonmixin.py \ + template.py term.py ui.py.in userprefs.py utils.py vars.py.in \ + xmlutil.py + +EXTRA_DIST = $(modules) diff --git a/shell/modules/__init__.py b/shell/modules/__init__.py new file mode 100644 index 0000000000..feff2bbd39 --- /dev/null +++ b/shell/modules/__init__.py @@ -0,0 +1,2 @@ +# This file is required for python packages. +# It is intentionally empty. diff --git a/shell/modules/cache.py b/shell/modules/cache.py new file mode 100644 index 0000000000..5a2ddd4f47 --- /dev/null +++ b/shell/modules/cache.py @@ -0,0 +1,43 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import time +from singletonmixin import Singleton + +class WCache(Singleton): + "Cache stuff. A naive implementation." 
+ def __init__(self): + self.lists = {} + self.stamp = time.time() + self.max_cache_age = 600 # seconds + def is_cached(self,name): + if time.time() - self.stamp > self.max_cache_age: + self.stamp = time.time() + self.clear() + return name in self.lists + def store(self,name,lst): + self.lists[name] = lst + return lst + def retrieve(self,name): + if self.is_cached(name): + return self.lists[name] + else: + return None + def clear(self): + self.lists = {} + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/cibconfig.py b/shell/modules/cibconfig.py new file mode 100644 index 0000000000..78619eeb1b --- /dev/null +++ b/shell/modules/cibconfig.py @@ -0,0 +1,2774 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import sys +import subprocess +import copy +import xml.dom.minidom +import re + +from singletonmixin import Singleton +from userprefs import Options, UserPrefs +from vars import Vars +from utils import * +from xmlutil import * +from msg import * +from parse import CliParser +from clidisplay import CliDisplay +from cibstatus import CibStatus + +def id_in_use(obj_id): + if id_store.is_used(obj_id): + id_used_err(obj_id) + return True + return False + +class IdMgmt(Singleton): + ''' + Make sure that ids are unique. 
+ ''' + def __init__(self): + self._id_store = {} + self.ok = True # error var + def new(self,node,pfx): + ''' + Create a unique id for the xml node. + ''' + name = node.getAttribute("name") + if node.tagName == "nvpair": + node_id = "%s-%s" % (pfx,name) + elif node.tagName == "op": + interval = node.getAttribute("interval") + if interval: + node_id = "%s-%s-%s" % (pfx,name,interval) + else: + node_id = "%s-%s" % (pfx,name) + else: + try: + hint = hints_list[node.tagName] + except: hint = '' + if hint: + node_id = "%s-%s" % (pfx,hint) + else: + node_id = "%s" % pfx + if self.is_used(node_id): + for cnt in range(99): # shouldn't really get here + try_id = "%s-%d" % (node_id,cnt) + if not self.is_used(try_id): + node_id = try_id + break + self.save(node_id) + return node_id + def check_node(self,node,lvl): + node_id = node.getAttribute("id") + if not node_id: + return + if id_in_use(node_id): + self.ok = False + return + def _store_node(self,node,lvl): + self.save(node.getAttribute("id")) + def _drop_node(self,node,lvl): + self.remove(node.getAttribute("id")) + def check_xml(self,node): + self.ok = True + xmltraverse_thin(node,self.check_node) + return self.ok + def store_xml(self,node): + if not self.check_xml(node): + return False + xmltraverse_thin(node,self._store_node) + return True + def remove_xml(self,node): + xmltraverse_thin(node,self._drop_node) + def replace_xml(self,oldnode,newnode): + self.remove_xml(oldnode) + if not self.store_xml(newnode): + self.store_xml(oldnode) + return False + return True + def is_used(self,node_id): + return node_id in self._id_store + def save(self,node_id): + if not node_id: return + self._id_store[node_id] = 1 + def rename(self,old_id,new_id): + if not old_id or not new_id: return + if not self.is_used(old_id): return + if self.is_used(new_id): return + self.remove(old_id) + self.save(new_id) + def remove(self,node_id): + if not node_id: return + try: + del self._id_store[node_id] + except KeyError: + pass + def clear(self): 
+ self._id_store = {} + +def filter_on_tag(nl,tag): + return [node for node in nl if node.tagName == tag] +def nodes(node_list): + return filter_on_tag(node_list,"node") +def primitives(node_list): + return filter_on_tag(node_list,"primitive") +def groups(node_list): + return filter_on_tag(node_list,"group") +def clones(node_list): + return filter_on_tag(node_list,"clone") +def mss(node_list): + return filter_on_tag(node_list,"master") +def constraints(node_list): + return filter_on_tag(node_list,"rsc_location") \ + + filter_on_tag(node_list,"rsc_colocation") \ + + filter_on_tag(node_list,"rsc_order") +def properties(node_list): + return filter_on_tag(node_list,"cluster_property_set") \ + + filter_on_tag(node_list,"rsc_defaults") \ + + filter_on_tag(node_list,"op_defaults") +def processing_sort(nl): + ''' + It's usually important to process cib objects in this order, + i.e. simple objects first. + ''' + return nodes(nl) + primitives(nl) + groups(nl) + mss(nl) + clones(nl) \ + + constraints(nl) + properties(nl) + +def obj_cmp(obj1,obj2): + return cmp(obj1.obj_id,obj2.obj_id) +def filter_on_type(cl,obj_type): + if type(cl[0]) == type([]): + l = [cli_list for cli_list in cl if cli_list[0][0] == obj_type] + l.sort(cmp = cmp) + else: + l = [obj for obj in cl if obj.obj_type == obj_type] + l.sort(cmp = obj_cmp) + return l +def nodes_cli(cl): + return filter_on_type(cl,"node") +def primitives_cli(cl): + return filter_on_type(cl,"primitive") +def groups_cli(cl): + return filter_on_type(cl,"group") +def clones_cli(cl): + return filter_on_type(cl,"clone") +def mss_cli(cl): + return filter_on_type(cl,"ms") + filter_on_type(cl,"master") +def constraints_cli(node_list): + return filter_on_type(node_list,"location") \ + + filter_on_type(node_list,"colocation") \ + + filter_on_type(node_list,"collocation") \ + + filter_on_type(node_list,"order") +def properties_cli(cl): + return filter_on_type(cl,"property") \ + + filter_on_type(cl,"rsc_defaults") \ + + 
filter_on_type(cl,"op_defaults") +def ops_cli(cl): + return filter_on_type(cl,"op") +def processing_sort_cli(cl): + ''' + Return the given list in this order: + nodes, primitives, groups, ms, clones, constraints, rest + Both a list of objects (CibObject) and list of cli + representations accepted. + ''' + return nodes_cli(cl) + primitives_cli(cl) + groups_cli(cl) + mss_cli(cl) + clones_cli(cl) \ + + constraints_cli(cl) + properties_cli(cl) + ops_cli(cl) + +def is_resource_cli(s): + return s in vars.resource_cli_names +def is_constraint_cli(s): + return s in vars.constraint_cli_names + +def referenced_resources_cli(cli_list): + id_list = [] + head = cli_list[0] + obj_type = head[0] + if not is_constraint_cli(obj_type): + return [] + if obj_type == "location": + id_list.append(find_value(head[1],"rsc")) + elif len(cli_list) > 1: # resource sets + for l in cli_list[1][1]: + if l[0] == "resource_ref": + id_list.append(l[1][1]) + elif obj_type == "colocation": + id_list.append(find_value(head[1],"rsc")) + id_list.append(find_value(head[1],"with-rsc")) + elif obj_type == "order": + id_list.append(find_value(head[1],"first")) + id_list.append(find_value(head[1],"then")) + return id_list + +def rename_id(node,old_id,new_id): + if node.getAttribute("id") == old_id: + node.setAttribute("id", new_id) +def rename_rscref_simple(c_obj,old_id,new_id): + c_modified = False + for attr in c_obj.node.attributes.keys(): + if attr in vars.constraint_rsc_refs and \ + c_obj.node.getAttribute(attr) == old_id: + c_obj.node.setAttribute(attr, new_id) + c_obj.updated = True + c_modified = True + return c_modified +def delete_rscref_simple(c_obj,rsc_id): + c_modified = False + for attr in c_obj.node.attributes.keys(): + if attr in vars.constraint_rsc_refs and \ + c_obj.node.getAttribute(attr) == rsc_id: + c_obj.node.removeAttribute(attr) + c_obj.updated = True + c_modified = True + return c_modified +def rset_uniq(c_obj,d): + ''' + Drop duplicate resource references. 
+ ''' + l = [] + for rref in c_obj.node.getElementsByTagName("resource_ref"): + rsc_id = rref.getAttribute("id") + if d[rsc_id] > 1: # drop one + l.append(rref) + d[rsc_id] -= 1 + rmnodes(l) +def delete_rscref_rset(c_obj,rsc_id): + ''' + Drop all reference to rsc_id. + ''' + c_modified = False + l = [] + for rref in c_obj.node.getElementsByTagName("resource_ref"): + if rsc_id == rref.getAttribute("id"): + l.append(rref) + c_obj.updated = True + c_modified = True + rmnodes(l) + l = [] + for rset in c_obj.node.getElementsByTagName("resource_set"): + if len(rset.getElementsByTagName("resource_ref")) == 0: + l.append(rset) + c_obj.updated = True + c_modified = True + rmnodes(l) + return c_modified +def rset_convert(c_obj): + l = c_obj.node.getElementsByTagName("resource_ref") + if len(l) != 2: + return # eh? + c_obj.modified = True + cli = c_obj.repr_cli(format = -1) + newnode = c_obj.cli2node(cli) + if newnode: + c_obj.node.parentNode.replaceChild(newnode,c_obj.node) + c_obj.node.unlink() +def rename_rscref_rset(c_obj,old_id,new_id): + c_modified = False + d = {} + for rref in c_obj.node.getElementsByTagName("resource_ref"): + rsc_id = rref.getAttribute("id") + if rsc_id == old_id: + rref.setAttribute("id", new_id) + rsc_id = new_id + c_obj.updated = True + c_modified = True + if not rsc_id in d: + d[rsc_id] = 0 + else: + d[rsc_id] += 1 + rset_uniq(c_obj,d) + # if only two resource references remained then, to preserve + # sanity, convert it to a simple constraint (sigh) + cnt = 0 + for key in d: + cnt += d[key] + if cnt == 2: + rset_convert(c_obj) + return c_modified +def rename_rscref(c_obj,old_id,new_id): + if rename_rscref_simple(c_obj,old_id,new_id) or \ + rename_rscref_rset(c_obj,old_id,new_id): + err_buf.info("resource references in %s updated" % c_obj.obj_string()) +def delete_rscref(c_obj,rsc_id): + return delete_rscref_simple(c_obj,rsc_id) or \ + delete_rscref_rset(c_obj,rsc_id) +def silly_constraint(c_node,rsc_id): + ''' + Remove a constraint from rsc_id to 
def silly_constraint(c_node,rsc_id):
    '''
    Return True if the constraint is a no-op: one that refers
    from rsc_id to rsc_id itself, or an otherwise invalid one.
    '''
    if c_node.getElementsByTagName("resource_ref"):
        # it's a resource set
        # the resource sets have already been uniq-ed
        return len(c_node.getElementsByTagName("resource_ref")) <= 1
    cnt = 0  # total count of referenced resources have to be at least two
    rsc_cnt = 0
    for attr in c_node.attributes.keys():
        if attr in vars.constraint_rsc_refs:
            cnt += 1
            if c_node.getAttribute(attr) == rsc_id:
                rsc_cnt += 1
    if c_node.tagName == "rsc_location":  # locations are never silly
        return cnt < 1
    else:
        # silly if both endpoints are rsc_id, or fewer than two refs
        return rsc_cnt == 2 or cnt < 2

def show_unrecognized_elems(doc):
    '''
    Warn about any configuration elements in the CIB which we
    don't know how to handle.
    '''
    try:
        conf = doc.getElementsByTagName("configuration")[0]
    except IndexError:
        common_warn("CIB has no configuration element")
        return
    for topnode in conf.childNodes:
        if not is_element(topnode):
            continue
        if is_defaults(topnode):
            continue
        if not topnode.tagName in cib_topnodes:
            # bug fix: this warned with c.tagName, but c is not bound
            # yet in this outer loop (NameError or a stale value from a
            # previous iteration); the offender here is topnode itself
            common_warn("unrecognized CIB element %s" % topnode.tagName)
            continue
        for c in topnode.childNodes:
            if not is_element(c):
                continue
            if not c.tagName in cib_object_map:
                common_warn("unrecognized CIB element %s" % c.tagName)

def get_comments(cli_list):
    '''
    Pop and return the trailing "comments" element of a parsed
    cli list; return [] (and leave the list alone) if there is none.
    '''
    if not cli_list:
        return []
    last = cli_list[len(cli_list)-1]
    try:
        if last[0] == "comments":
            cli_list.pop()
            return last[1]
    except (IndexError, TypeError):
        # last element not shaped like ["comments", [...]]
        pass
    return []
def lines2cli(s):
    '''
    Convert a string into a list of lines. Replace continuation
    characters. Strip white space, left and right. Drop empty lines.
    '''
    result = []
    pending = []
    for raw in s.split('\n'):
        stripped = raw.strip()
        if stripped.endswith('\\'):
            # continuation: strip the backslash and keep accumulating
            pending.append(stripped.rstrip('\\'))
            continue
        pending.append(stripped)
        joined = ''.join(pending).strip()
        if joined:
            result.append(joined)
        pending = []
    if pending:  # in case s ends with backslash
        tail = ''.join(pending)
        if tail:
            result.append(tail)
    return result

#
# object sets (enables operations on sets of elements)
#
def mkset_obj(*args):
    '''
    Factory: a CibObjectSetRaw for "xml" requests, otherwise a
    CibObjectSetCli.
    '''
    if args and args[0] == "xml":
        return CibObjectSetRaw(*args[1:])
    return CibObjectSetCli(*args)

class CibObjectSet(object):
    '''
    Edit or display a set of cib objects.
    repr() for objects representation and
    save() used to store objects into internal structures
    are defined in subclasses.
    '''
    def __init__(self, *args):
        self.obj_list = []
    def _open_url(self,src):
        'Open src as an URL, "-" (stdin), or a local file.'
        import urllib
        try:
            return urllib.urlopen(src)
        except:
            pass
        if src == "-":
            return sys.stdin
        try:
            return open(src)
        except:
            pass
        common_err("could not open %s" % src)
        return False
    def init_aux_lists(self):
        '''
        Before edit, initialize two auxiliary lists which will
        hold a list of objects to be removed and a list of
        objects which were created. Then, we can create a new
        object list which will match the current state of
        affairs, i.e. the object set after the last edit.
        '''
        self.remove_objs = copy.copy(self.obj_list)
        self.add_objs = []
    def recreate_obj_list(self):
        '''
        Recreate obj_list: remove deleted objects and add
        created objects
        '''
        for obj in self.remove_objs:
            self.obj_list.remove(obj)
        self.obj_list += self.add_objs
        for obj in [o for o in self.obj_list if o.invalid]:
            self.obj_list.remove(obj)
+ ''' + tmp = str2tmp(s) + if not tmp: + return False + filehash = hash(s) + rc = False + while True: + if edit_file(tmp) != 0: + break + try: f = open(tmp,'r') + except IOError, msg: + common_err(msg) + break + s = ''.join(f) + f.close() + if hash(s) == filehash: # file unchanged + rc = True + break + if erase: + cib_factory.erase() + if not self.save(s): + if ask("Do you want to edit again?"): + continue + rc = True + break + try: os.unlink(tmp) + except: pass + return rc + def edit(self): + if options.batch: + common_info("edit not allowed in batch mode") + return False + cli_display.set_no_pretty() + s = self.repr() + cli_display.reset_no_pretty() + return self.edit_save(s) + def save_to_file(self,fname): + if fname == "-": + f = sys.stdout + else: + if not options.batch and os.access(fname,os.F_OK): + if not ask("File %s exists. Do you want to overwrite it?"%fname): + return False + try: f = open(fname,"w") + except IOError, msg: + common_err(msg) + return False + rc = True + cli_display.set_no_pretty() + s = self.repr() + cli_display.reset_no_pretty() + if s: + f.write(s) + f.write('\n') + elif self.obj_list: + rc = False + if f != sys.stdout: + f.close() + return rc + def show(self): + s = self.repr() + if not s: + if self.obj_list: # objects could not be displayed + return False + else: + return True + page_string(s) + def import_file(self,method,fname): + if not cib_factory.is_cib_sane(): + return False + if method == "replace": + if options.interactive and cib_factory.has_cib_changed(): + if not ask("This operation will erase all changes. Do you want to proceed?"): + return False + cib_factory.erase() + f = self._open_url(fname) + if not f: + return False + s = ''.join(f) + if f != sys.stdin: + f.close() + return self.save(s) + def repr(self): + ''' + Return a string with objects's representations (either + CLI or XML). 
+ ''' + return '' + def save(self,s): + ''' + For each object: + - try to find a corresponding object in obj_list + - if not found: create new + - if found: replace the object in the obj_list with + the new object + See below for specific implementations. + ''' + pass + def verify2(self): + ''' + Test objects for sanity. This is about semantics. + ''' + rc = 0 + for obj in self.obj_list: + rc |= obj.check_sanity() + return rc + def lookup_cli(self,cli_list): + for obj in self.obj_list: + if obj.matchcli(cli_list): + return obj + def lookup(self,xml_obj_type,obj_id): + for obj in self.obj_list: + if obj.match(xml_obj_type,obj_id): + return obj + def drop_remaining(self): + 'Any remaining objects in obj_list are deleted.' + l = [x.obj_id for x in self.remove_objs] + return cib_factory.delete(*l) + +class CibObjectSetCli(CibObjectSet): + ''' + Edit or display a set of cib objects (using cli notation). + ''' + def __init__(self, *args): + CibObjectSet.__init__(self, *args) + self.obj_list = cib_factory.mkobj_list("cli",*args) + def repr(self): + "Return a string containing cli format of all objects." + if not self.obj_list: + return '' + return '\n'.join(obj.repr_cli() \ + for obj in processing_sort_cli(self.obj_list)) + def process(self,cli_list): + ''' + Create new objects or update existing ones. + ''' + comments = get_comments(cli_list) + obj = self.lookup_cli(cli_list) + if obj: + rc = obj.update_from_cli(cli_list) != False + self.remove_objs.remove(obj) + else: + obj = cib_factory.create_from_cli(cli_list) + rc = obj != None + if rc: + self.add_objs.append(obj) + if rc: + obj.set_comment(comments) + return rc + def save(self,s): + ''' + Save a user supplied cli format configuration. + On errors user is typically asked to review the + configuration (for instance on editting). + + On syntax error (return code 1), no changes are done, but + on semantic errors (return code 2), some changes did take + place so object list must be updated properly. 
+ + Finally, once syntax check passed, there's no way back, + all changes are applied to the current configuration. + + TODO: Implement undo configuration changes. + ''' + l = [] + rc = True + err_buf.start_tmp_lineno() + cp = CliParser() + for cli_text in lines2cli(s): + err_buf.incr_lineno() + cli_list = cp.parse(cli_text) + if cli_list: + l.append(cli_list) + elif cli_list == False: + rc = False + err_buf.stop_tmp_lineno() + # we can't proceed if there was a syntax error, but we + # can ask the user to fix problems + if not rc: + return rc + self.init_aux_lists() + if l: + for cli_list in processing_sort_cli(l): + if self.process(cli_list) == False: + rc = False + if not self.drop_remaining(): + # this is tricky, we don't know what was removed! + # it could happen that the user dropped a resource + # which was running and therefore couldn't be removed + rc = False + self.recreate_obj_list() + return rc + +cib_verify = "crm_verify -V -p" +class CibObjectSetRaw(CibObjectSet): + ''' + Edit or display one or more CIB objects (XML). + ''' + def __init__(self, *args): + CibObjectSet.__init__(self, *args) + self.obj_list = cib_factory.mkobj_list("xml",*args) + def repr(self): + "Return a string containing xml of all objects." + doc = cib_factory.objlist2doc(self.obj_list) + s = doc.toprettyxml(user_prefs.xmlindent) + doc.unlink() + return s + def repr_configure(self): + ''' + Return a string containing xml of configure and its + children. 
+ ''' + doc = cib_factory.objlist2doc(self.obj_list) + conf_node = doc.getElementsByTagName("configuration")[0] + s = conf_node.toprettyxml(user_prefs.xmlindent) + doc.unlink() + return s + def process(self,node): + if not cib_factory.is_cib_sane(): + return False + obj = self.lookup(node.tagName,node.getAttribute("id")) + if obj: + rc = obj.update_from_node(node) != False + self.remove_objs.remove(obj) + else: + new_obj = cib_factory.create_from_node(node) + rc = new_obj != None + if rc: + self.add_objs.append(new_obj) + return rc + def save(self,s): + try: + doc = xml.dom.minidom.parseString(s) + except xml.parsers.expat.ExpatError,msg: + cib_parse_err(msg) + return False + rc = True + sanitize_cib(doc) + show_unrecognized_elems(doc) + newnodes = get_interesting_nodes(doc,[]) + self.init_aux_lists() + if newnodes: + for node in processing_sort(newnodes): + if not self.process(node): + rc = False + if not self.drop_remaining(): + rc = False + doc.unlink() + self.recreate_obj_list() + return rc + def verify(self): + if not self.obj_list: + return True + cli_display.set_no_pretty() + rc = pipe_string(cib_verify,self.repr()) + cli_display.reset_no_pretty() + return rc in (0,1) + def ptest(self, nograph, scores, verbosity): + if not cib_factory.is_cib_sane(): + return False + ptest = "ptest -X -%s" % verbosity.upper() + if scores: + ptest = "%s -s" % ptest + if user_prefs.dotty and not nograph: + fd,tmpfile = mkstemp() + ptest = "%s -D %s" % (ptest,tmpfile) + else: + tmpfile = None + doc = cib_factory.objlist2doc(self.obj_list) + cib = doc.childNodes[0] + status = cib_status.get_status() + if not status: + common_err("no status section found") + return False + cib.appendChild(doc.importNode(status,1)) + pipe_string(ptest,doc.toprettyxml()) + doc.unlink() + if tmpfile: + p = subprocess.Popen("%s %s" % (user_prefs.dotty,tmpfile), shell=True, bufsize=0, stdin=None, stdout=None, stderr=None, close_fds=True) + common_info("starting %s to show transition 
graph"%user_prefs.dotty) + vars.tmpfiles.append(tmpfile) + else: + if not nograph: + common_info("install graphviz to see a transition graph") + return True + +# +# XML generate utilities +# +hints_list = { + "instance_attributes": "instance_attributes", + "meta_attributes": "meta_attributes", + "operations": "ops", + "rule": "rule", + "expression": "expression", +} + +def set_id(node,oldnode,id_hint,id_required = True): + ''' + Set the id attribute for the node. + Procedure: + - if the node already contains "id", keep it + - if the old node contains "id", copy that + - if neither is true, then create a new one using id_hint + (exception: if not id_required, then no new id is generated) + Finally, save the new id in id_store. + ''' + old_id = None + new_id = node.getAttribute("id") + if oldnode and oldnode.getAttribute("id"): + old_id = oldnode.getAttribute("id") + if not new_id: + new_id = old_id + if not new_id: + if id_required: + new_id = id_store.new(node,id_hint) + else: + id_store.save(new_id) + if new_id: + node.setAttribute("id",new_id) + if oldnode and old_id == new_id: + set_id_used_attr(oldnode) + +def mkxmlsimple(e,oldnode,id_hint): + ''' + Create an xml node from the (name,dict) pair. The name is the + name of the element. The dict contains a set of attributes. 
+ ''' + node = cib_factory.createElement(e[0]) + for n,v in e[1]: + if n == "$children": # this one's skipped + continue + if n == "operation": + v = v.lower() + if n.startswith('$'): + n = n.lstrip('$') + if (type(v) != type('') and type(v) != type(u'')) \ + or v: # skip empty strings + node.setAttribute(n,v) + id_ref = node.getAttribute("id-ref") + if id_ref: + id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref) + node.setAttribute("id-ref",id_ref_2) + else: + set_id(node,lookup_node(node,oldnode),id_hint) + return node + +def find_operation(rsc_node,name,interval): + op_node_l = rsc_node.getElementsByTagName("operations") + for ops in op_node_l: + for c in ops.childNodes: + if not is_element(c): + continue + if c.tagName != "op": + continue + if c.getAttribute("name") == name \ + and c.getAttribute("interval") == interval: + return c +def mkxmlnvpairs(e,oldnode,id_hint): + ''' + Create xml from the (name,dict) pair. The name is the name of + the element. The dict contains a set of nvpairs. Stuff such + as instance_attributes. + NB: Other tags not containing nvpairs are fine if the dict is empty. 
+ ''' + node = cib_factory.createElement(e[0]) + match_node = lookup_node(node,oldnode) + #if match_node: + #print "found nvpairs set:",match_node.tagName,match_node.getAttribute("id") + id_ref = find_value(e[1],"$id-ref") + if id_ref: + id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref) + node.setAttribute("id-ref",id_ref_2) + if e[0] != "operations": + return node # id_ref is the only attribute (if not operations) + e[1].remove(["$id-ref",id_ref]) + v = find_value(e[1],"$id") + if v: + node.setAttribute("id",v) + e[1].remove(["$id",v]) + else: + if e[0] == "operations": # operations don't need no id + set_id(node,match_node,id_hint,id_required = False) + else: + set_id(node,match_node,id_hint) + try: + hint = hints_list[e[0]] + except: hint = '' + hint = hint and "%s_%s" % (id_hint,hint) or id_hint + nvpair_pfx = node.getAttribute("id") or hint + for n,v in e[1]: + nvpair = cib_factory.createElement("nvpair") + node.appendChild(nvpair) + nvpair.setAttribute("name",n) + if v != None: + nvpair.setAttribute("value",v) + set_id(nvpair,lookup_node(nvpair,match_node),nvpair_pfx) + return node + +def mkxmlop(e,oldnode,id_hint): + ''' + Create an operation xml from the (name,dict) pair. + ''' + node = cib_factory.createElement(e[0]) + inst_attr = [] + for n,v in e[1]: + if n in vars.req_op_attributes + vars.op_attributes: + node.setAttribute(n,v) + else: + inst_attr.append([n,v]) + tmp = cib_factory.createElement("operations") + oldops = lookup_node(tmp,oldnode) # first find old operations + oldop = lookup_node(node,oldops) + set_id(node,oldop,id_hint) + if inst_attr: + e = ["instance_attributes",inst_attr] + nia = mkxmlnvpairs(e,oldop,node.getAttribute("id")) + node.appendChild(nia) + return node + +def mkxmldate(e,oldnode,id_hint): + ''' + Create a date_expression xml from the (name,dict) pair. 
+ ''' + node = cib_factory.createElement(e[0]) + operation = find_value(e[1],"operation").lower() + node.setAttribute("operation", operation) + old_date = lookup_node(node,oldnode) # first find old date element + set_id(node,old_date,id_hint) + date_spec_attr = [] + for n,v in e[1]: + if n in vars.date_ops or n == "operation": + continue + elif n in vars.in_range_attrs: + node.setAttribute(n,v) + else: + date_spec_attr.append([n,v]) + if not date_spec_attr: + return node + elem = operation == "date_spec" and "date_spec" or "duration" + tmp = cib_factory.createElement(elem) + old_date_spec = lookup_node(tmp,old_date) # first find old date element + set_id(tmp,old_date_spec,id_hint) + for n,v in date_spec_attr: + tmp.setAttribute(n,v) + node.appendChild(tmp) + return node + +def mkxmlrsc_set(e,oldnode,id_hint): + ''' + Create a resource_set xml from the (name,dict) pair. + ''' + node = cib_factory.createElement(e[0]) + old_rsc_set = lookup_node(node,oldnode) # first find old date element + set_id(node,old_rsc_set,id_hint) + for ref in e[1]: + if ref[0] == "resource_ref": + ref_node = cib_factory.createElement(ref[0]) + ref_node.setAttribute(ref[1][0],ref[1][1]) + node.appendChild(ref_node) + elif ref[0] in ("sequential", "action", "role"): + node.setAttribute(ref[0], ref[1]) + return node + +conv_list = { + "params": "instance_attributes", + "meta": "meta_attributes", + "property": "cluster_property_set", + "rsc_defaults": "meta_attributes", + "op_defaults": "meta_attributes", + "attributes": "instance_attributes", + "operations": "operations", + "op": "op", +} +def mkxmlnode(e,oldnode,id_hint): + ''' + Create xml from the (name,dict) pair. The name is the name of + the element. The dict contains either a set of nvpairs or a + set of attributes. The id is either generated or copied if + found in the provided xml. Stuff such as instance_attributes. 
def mkxmlnode(e,oldnode,id_hint):
    '''
    Create xml from the (name,dict) pair. The name is the name of
    the element. The dict contains either a set of nvpairs or a
    set of attributes. The id is either generated or copied if
    found in the provided xml. Stuff such as instance_attributes.
    '''
    # map cli names to xml tag names first
    if e[0] in conv_list:
        e[0] = conv_list[e[0]]
    tag = e[0]
    if tag in ("instance_attributes","meta_attributes","operations","cluster_property_set"):
        return mkxmlnvpairs(e,oldnode,id_hint)
    if tag == "op":
        return mkxmlop(e,oldnode,id_hint)
    if tag == "date_expression":
        return mkxmldate(e,oldnode,id_hint)
    if tag == "resource_set":
        return mkxmlrsc_set(e,oldnode,id_hint)
    return mkxmlsimple(e,oldnode,id_hint)

def new_cib():
    '''
    Create a skeleton CIB document and return the document plus
    its cib, crm_config, rsc_defaults, op_defaults, nodes,
    resources and constraints elements (in that order).
    '''
    doc = xml.dom.minidom.Document()
    cib = doc.createElement("cib")
    doc.appendChild(cib)
    configuration = doc.createElement("configuration")
    cib.appendChild(configuration)
    topnodes = {}
    for tag in ("crm_config","rsc_defaults","op_defaults", \
            "nodes","resources","constraints"):
        topnodes[tag] = doc.createElement(tag)
        configuration.appendChild(topnodes[tag])
    return doc,cib,topnodes["crm_config"],topnodes["rsc_defaults"], \
        topnodes["op_defaults"],topnodes["nodes"], \
        topnodes["resources"],topnodes["constraints"]
+ try: + e = doc.getElementsByTagName(tag)[0] + except: + e = doc.createElement(tag) + conf = doc.getElementsByTagName("configuration")[0] + if conf: + conf.appendChild(e) + else: + return None + return e + +def set_nvpair(set_node,name,value): + n_id = set_node.getAttribute("id") + for c in set_node.childNodes: + if is_element(c) and c.getAttribute("name") == name: + c.setAttribute("value",value) + return + np = cib_factory.createElement("nvpair") + np.setAttribute("name",name) + np.setAttribute("value",value) + new_id = id_store.new(np,n_id) + np.setAttribute("id",new_id) + set_node.appendChild(np) + +def xml_cmp(n, m, show = False): + rc = hash(n.toxml()) == hash(m.toxml()) + if not rc and show and user_prefs.get_debug(): + print "original:",n.toprettyxml() + print "processed:",m.toprettyxml() + return hash(n.toxml()) == hash(m.toxml()) + +# +# CLI format generation utilities (from XML) +# +def cli_format(pl,format): + if format > 0: + return ' \\\n\t'.join(pl) + else: + return ' '.join(pl) +def nvpair_format(n,v): + return v == None and cli_display.attr_name(n) \ + or '%s="%s"'%(cli_display.attr_name(n),cli_display.attr_value(v)) +def cli_pairs(pl): + 'Return a string of name="value" pairs (passed in a list of pairs).' 
def cli_pairs(pl):
    'Return a string of name="value" pairs (passed in a list of pairs).'
    return ' '.join([nvpair_format(n, v) for n, v in pl])

def op2list(node):
    '''
    Split an op element into (action, [[name,value],...]).
    The "name" attribute is the action; "id" is skipped.
    '''
    action = ""
    pl = []
    for attr in node.attributes.keys():
        val = node.getAttribute(attr)
        if attr == "name":
            action = val
        elif attr != "id":  # skip the id
            pl.append([attr, val])
    if not action:
        common_err("op is invalid (no name)")
    return action, pl

def op_instattr(node):
    'Collect nvpairs from the instance_attributes children of an op.'
    pl = []
    for child in node.childNodes:
        if not is_element(child):
            continue
        if child.tagName == "instance_attributes":
            pl += nvpairs2list(child)
        else:
            common_err("only instance_attributes are supported in operations")
    return pl

def cli_op(node):
    'CLI text for a single op element.'
    action, pl = op2list(node)
    if not action:
        return ""
    pl += op_instattr(node)
    return "%s %s %s" % (cli_display.keyword("op"), action, cli_pairs(pl))

def cli_operations(node, format = 1):
    'CLI text for an operations element and its op children.'
    chunks = []
    hdr = ''
    node_id = node.getAttribute("id")
    if node_id:
        hdr = '$id="%s"' % node_id
    idref = node.getAttribute("id-ref")
    if idref:
        hdr = '%s $id-ref="%s"' % (hdr, idref)
    if hdr:
        chunks.append("%s %s" % (cli_display.keyword("operations"), hdr))
    for child in node.childNodes:
        if is_element(child) and child.tagName == "op":
            chunks.append(cli_op(child))
    return cli_format(chunks, format)

def date_exp2cli(node):
    'CLI text for a date_expression element.'
    operation = node.getAttribute("operation")
    parts = [cli_display.keyword("date"), cli_display.keyword(operation)]
    if operation in vars.simple_date_ops:
        # lt bounds the end of the range, the others the start
        if operation == 'lt':
            value = node.getAttribute("end")
        else:
            value = node.getAttribute("start")
        parts.append('"%s"' % cli_display.attr_value(value))
    else:
        if operation == 'in_range':
            for name in vars.in_range_attrs:
                v = node.getAttribute(name)
                if v:
                    parts.append(nvpair_format(name, v))
        for child in node.childNodes:
            if is_element(child) and child.tagName in ("duration", "date_spec"):
                attrs = []
                for name in child.attributes.keys():
                    if name != "id":
                        attrs.append([name, child.getAttribute(name)])
                parts.append(cli_pairs(attrs))
    return ' '.join(parts)

def binary_op_format(op):
    'Render op, highlighting the part after a colon (if any).'
    fields = op.split(':')
    if len(fields) == 2:
        return "%s:%s" % (fields[0], cli_display.keyword(fields[1]))
    return cli_display.keyword(op)
(l[0], cli_display.keyword(l[1])) + else: + return cli_display.keyword(op) +def exp2cli(node): + operation = node.getAttribute("operation") + type = node.getAttribute("type") + if type: + operation = "%s:%s" % (type, operation) + attribute = node.getAttribute("attribute") + value = node.getAttribute("value") + if not value: + return "%s %s" % (binary_op_format(operation),attribute) + else: + return "%s %s %s" % (attribute,binary_op_format(operation),value) +def get_score(node): + score = node.getAttribute("score") + if not score: + score = node.getAttribute("score-attribute") + else: + if score.find("INFINITY") >= 0: + score = score.replace("INFINITY","inf") + return score + ":" +def cli_rule(node): + s = [] + node_id = node.getAttribute("id") + if node_id: + s.append('$id="%s"' % node_id) + else: + idref = node.getAttribute("id-ref") + if idref: + return '$id-ref="%s"' % idref + rsc_role = node.getAttribute("role") + if rsc_role: + s.append('$role="%s"' % rsc_role) + s.append(cli_display.score(get_score(node))) + bool_op = node.getAttribute("boolean-op") + if not bool_op: + bool_op = "and" + exp = [] + for c in node.childNodes: + if not is_element(c): + continue + if c.tagName == "date_expression": + exp.append(date_exp2cli(c)) + elif c.tagName == "expression": + exp.append(exp2cli(c)) + expression = (" %s "%cli_display.keyword(bool_op)).join(exp) + return "%s %s" % (' '.join(s),expression) +def node_head(node): + obj_type = vars.cib_cli_map[node.tagName] + node_id = node.getAttribute("id") + uname = node.getAttribute("uname") + s = cli_display.keyword(obj_type) + if node_id != uname: + s = '%s $id="%s"' % (s, node_id) + s = '%s %s' % (s, cli_display.id(uname)) + type = node.getAttribute("type") + if type != vars.node_default_type: + s = '%s:%s' % (s, type) + return s +def cli_add_description(node,l): + desc = node.getAttribute("description") + if desc: + l.append(nvpair_format("description",desc)) +def primitive_head(node): + obj_type = 
vars.cib_cli_map[node.tagName] + node_id = node.getAttribute("id") + ra_type = node.getAttribute("type") + ra_class = node.getAttribute("class") + ra_provider = node.getAttribute("provider") + s1 = s2 = '' + if ra_class: + s1 = "%s:"%ra_class + if ra_provider: + s2 = "%s:"%ra_provider + s = cli_display.keyword(obj_type) + id = cli_display.id(node_id) + return "%s %s %s" % (s, id, ''.join((s1,s2,ra_type))) + +def cont_head(node): + obj_type = vars.cib_cli_map[node.tagName] + node_id = node.getAttribute("id") + children = [] + for c in node.childNodes: + if not is_element(c): + continue + if (obj_type == "group" and is_primitive(c)) or \ + is_child_rsc(c): + children.append(cli_display.rscref(c.getAttribute("id"))) + elif obj_type in vars.clonems_tags and is_child_rsc(c): + children.append(cli_display.rscref(c.getAttribute("id"))) + s = cli_display.keyword(obj_type) + id = cli_display.id(node_id) + return "%s %s %s" % (s, id, ' '.join(children)) +def location_head(node): + obj_type = vars.cib_cli_map[node.tagName] + node_id = node.getAttribute("id") + rsc = cli_display.rscref(node.getAttribute("rsc")) + s = cli_display.keyword(obj_type) + id = cli_display.id(node_id) + s = "%s %s %s"%(s,id,rsc) + pref_node = node.getAttribute("node") + score = cli_display.score(get_score(node)) + if pref_node: + return "%s %s %s" % (s,score,pref_node) + else: + return s +def mkrscrole(node,n): + rsc = cli_display.rscref(node.getAttribute(n)) + rsc_role = node.getAttribute(n + "-role") + if rsc_role: + return "%s:%s"%(rsc,rsc_role) + else: + return rsc +def mkrscaction(node,n): + rsc = cli_display.rscref(node.getAttribute(n)) + rsc_action = node.getAttribute(n + "-action") + if rsc_action: + return "%s:%s"%(rsc,rsc_action) + else: + return rsc +def rsc_set_constraint(node,obj_type): + col = [] + cnt = 0 + for n in node.getElementsByTagName("resource_set"): + sequential = True + if n.getAttribute("sequential") == "false": + sequential = False + if not sequential: + col.append("(") + 
role = n.getAttribute("role") + action = n.getAttribute("action") + for r in n.getElementsByTagName("resource_ref"): + rsc = cli_display.rscref(r.getAttribute("id")) + q = (obj_type == "colocation") and role or action + col.append(q and "%s:%s"%(rsc,q) or rsc) + cnt += 1 + if not sequential: + col.append(")") + if cnt <= 2: # a degenerate thingie + col.insert(0,"_rsc_set_") + return col +def two_rsc_constraint(node,obj_type): + col = [] + if obj_type == "colocation": + col.append(mkrscrole(node,"rsc")) + col.append(mkrscrole(node,"with-rsc")) + else: + col.append(mkrscaction(node,"first")) + col.append(mkrscaction(node,"then")) + return col +def simple_constraint_head(node): + obj_type = vars.cib_cli_map[node.tagName] + node_id = node.getAttribute("id") + s = cli_display.keyword(obj_type) + id = cli_display.id(node_id) + score = cli_display.score(get_score(node)) + if node.getElementsByTagName("resource_set"): + col = rsc_set_constraint(node,obj_type) + else: + col = two_rsc_constraint(node,obj_type) + symm = node.getAttribute("symmetrical") + if symm: + col.append("symmetrical=%s"%symm) + return "%s %s %s %s" % (s,id,score,' '.join(col)) +# +################################################################ + +# +# cib element classes (CibObject the parent class) +# +class CibObject(object): + ''' + The top level object of the CIB. Resources and constraints. + ''' + state_fmt = "%16s %-8s%-8s%-8s%-8s%-8s%-4s" + def __init__(self,xml_obj_type,obj_id = None): + if not xml_obj_type in cib_object_map: + unsupported_err(xml_obj_type) + return + self.obj_type = cib_object_map[xml_obj_type][0] + self.parent_type = cib_object_map[xml_obj_type][2] + self.xml_obj_type = xml_obj_type + self.origin = "" # where did it originally come from? 
+ self.nocli = False # we don't support this one + self.updated = False # was the object updated + self.invalid = False # the object has been invalidated (removed) + self.moved = False # the object has been moved (from/to a container) + self.recreate = False # constraints to be recreated + self.comment = '' # comment as text + self.parent = None # object superior (group/clone/ms) + self.children = [] # objects inferior + if obj_id: + if not self.mknode(obj_id): + self = None # won't do :( + else: + self.obj_id = None + self.node = None + def dump_state(self): + 'Print object status' + print self.state_fmt % \ + (self.obj_id,self.origin,self.updated,self.moved,self.invalid, \ + self.parent and self.parent.obj_id or "", \ + len(self.children)) + def repr_cli(self,node = None,format = 1): + ''' + CLI representation for the node. Defined in subclasses. + ''' + return '' + def cli2node(self,cli,oldnode = None): + ''' + Convert CLI representation to a DOM node. + Defined in subclasses. + ''' + return None + def cli_format(self,l,format): + ''' + Format and add comment (if any). + ''' + s = cli_format(l,format) + return (self.comment and format >=0) and '\n'.join([self.comment,s]) or s + def set_comment(self,l): + s = '\n'.join(l) + if self.comment != s: + self.comment = s + self.modified = True + def pull_comments(self): + ''' + Collect comments from within this node. Remove them from + the parent and stuff them in self.comments as an array. + ''' + l = [] + cnodes = [x for x in self.node.childNodes if is_comment(x)] + for n in cnodes: + l.append(n.data) + n.parentNode.removeChild(n) + # convert comments from XML node to text. Multiple comments + # are concatenated with '\n'. 
+ if not l: + self.comment = '' + return + s = '\n'.join(l) + l = s.split('\n') + for i in range(len(l)): + if not l[i].startswith('#'): + l[i] = '#%s' % l[i] + self.comment = '\n'.join(l) + def save_xml(self,node): + self.obj_id = node.getAttribute("id") + self.node = node + def mknode(self,obj_id): + if not cib_factory.is_cib_sane(): + return False + if id_in_use(obj_id): + return False + if self.xml_obj_type in vars.defaults_tags: + tag = "meta_attributes" + else: + tag = self.xml_obj_type + self.node = cib_factory.createElement(tag) + self.obj_id = obj_id + self.node.setAttribute("id",self.obj_id) + self.origin = "user" + return True + def mkcopy(self): + ''' + Create a new object with the same obj_id and obj_type + (for the purpose of CibFactory.delete_objects) + ''' + obj_copy = CibObject(self.xml_obj_type) + obj_copy.obj_id = self.obj_id + obj_copy.obj_type = self.obj_type + return obj_copy + def can_be_renamed(self): + ''' + Return False if this object can't be renamed. + ''' + if is_rsc_running(self.obj_id): + common_err("cannot rename a running resource (%s)" % self.obj_id) + return False + if not is_live_cib() and self.node.tagName == "node": + common_err("cannot rename nodes") + return False + return True + def attr_exists(self,attr): + if not attr in self.node.attributes.keys(): + no_attribute_err(attr,self.obj_id) + return False + return True + def cli_use_validate(self): + ''' + Check validity of the object, as we know it. It may + happen that we don't recognize a construct, but that the + object is still valid for the CRM. In that case, the + object is marked as "CLI read only", i.e. we will neither + convert it to CLI nor try to edit it in that format. + + The validation procedure: + we convert xml to cli and then back to xml. If the two + xml representations match then we can understand the xml. 
+ ''' + if not self.node: + return True + if not self.attr_exists("id"): + return False + cli_display.set_no_pretty() + cli_text = self.repr_cli(format = -1) + cli_display.reset_no_pretty() + if not cli_text: + return False + xml2 = self.cli2node(cli_text) + if not xml2: + return False + rc = xml_cmp(self.node, xml2, show = True) + xml2.unlink() + return rc + def check_sanity(self): + ''' + Right now, this is only for primitives. + ''' + return 0 + def matchcli(self,cli_list): + head = cli_list[0] + return self.obj_type == head[0] \ + and self.obj_id == find_value(head[1],"id") + def match(self,xml_obj_type,obj_id): + return self.xml_obj_type == xml_obj_type and self.obj_id == obj_id + def obj_string(self): + return "%s:%s" % (self.obj_type,self.obj_id) + def reset_updated(self): + self.updated = False + self.moved = False + self.recreate = False + for child in self.children: + child.reset_updated() + def propagate_updated(self): + if self.parent: + self.parent.updated = self.updated + self.parent.propagate_updated() + def update_links(self): + ''' + Update the structure links for the object (self.children, + self.parent). Update also the dom nodes, if necessary. + ''' + self.children = [] + if self.obj_type not in vars.container_tags: + return + for c in self.node.childNodes: + if is_child_rsc(c): + child = cib_factory.find_object_for_node(c) + if not child: + missing_obj_err(c) + continue + child.parent = self + self.children.append(child) + if not c.isSameNode(child.node): + rmnode(child.node) + child.node = c + def update_from_cli(self,cli_list): + 'Update ourselves from the cli intermediate.' 
+ if not cib_factory.is_cib_sane(): + return False + if not cib_factory.verify_cli(cli_list): + return False + oldnode = self.node + id_store.remove_xml(oldnode) + newnode = self.cli2node(cli_list) + if not newnode: + id_store.store_xml(oldnode) + return False + if xml_cmp(oldnode,newnode): + newnode.unlink() + return True # the new and the old versions are equal + self.node = newnode + if user_prefs.is_check_always() \ + and self.check_sanity() > 1: + id_store.remove_xml(newnode) + id_store.store_xml(oldnode) + self.node = oldnode + newnode.unlink() + return False + oldnode.parentNode.replaceChild(newnode,oldnode) + cib_factory.adjust_children(self,cli_list) + oldnode.unlink() + self.updated = True + self.propagate_updated() + return True + def update_from_node(self,node): + 'Update ourselves from a doc node.' + if not node: + return False + if not cib_factory.is_cib_sane(): + return False + oldxml = self.node + newxml = node + if xml_cmp(oldxml,newxml): + return True # the new and the old versions are equal + if not id_store.replace_xml(oldxml,newxml): + return False + oldxml.unlink() + self.node = cib_factory.doc.importNode(newxml,1) + cib_factory.topnode[self.parent_type].appendChild(self.node) + self.update_links() + self.updated = True + self.propagate_updated() + def top_parent(self): + '''Return the top parent or self''' + if self.parent: + return self.parent.top_parent() + else: + return self + def find_child_in_node(self,child): + for c in self.node.childNodes: + if not is_element(c): + continue + if c.tagName == child.obj_type and \ + c.getAttribute("id") == child.obj_id: + return c + return None + def filter(self,*args): + "Filter objects." + if not args: + return True + if args[0] == "NOOBJ": + return False + if args[0] == "changed": + return self.updated or self.origin == "user" + return self.obj_id in args + +def mk_cli_list(cli): + 'Sometimes we get a string and sometimes a list.' 
+ if type(cli) == type('') or type(cli) == type(u''): + return CliParser().parse(cli) + else: + return cli + +class CibNode(CibObject): + ''' + Node and node's attributes. + ''' + def repr_cli(self,node = None,format = 1): + ''' + We assume that uname is unique. + ''' + if not node: + node = self.node + l = [] + l.append(node_head(node)) + cli_add_description(node,l) + for c in node.childNodes: + if not is_element(c): + continue + if c.tagName == "instance_attributes": + l.append("%s %s" % \ + (cli_display.keyword("attributes"), \ + cli_pairs(nvpairs2list(c)))) + return self.cli_format(l,format) + def cli2node(self,cli,oldnode = None): + cli_list = mk_cli_list(cli) + if not cli_list: + return None + if not oldnode: + oldnode = self.node + head = copy.copy(cli_list[0]) + head[0] = backtrans[head[0]] + obj_id = find_value(head[1],"$id") + if not obj_id: + obj_id = find_value(head[1],"uname") + if not obj_id: + return None + type = find_value(head[1],"type") + if not type: + type = vars.node_default_type + head[1].append(["type",type]) + headnode = mkxmlsimple(head,cib_factory.topnode[cib_object_map[self.xml_obj_type][2]],'node') + id_hint = headnode.getAttribute("id") + for e in cli_list[1:]: + n = mkxmlnode(e,oldnode,id_hint) + headnode.appendChild(n) + remove_id_used_attributes(cib_factory.topnode[cib_object_map[self.xml_obj_type][2]]) + return headnode + +class CibPrimitive(CibObject): + ''' + Primitives. 
+ ''' + def repr_cli(self,node = None,format = 1): + if not node: + node = self.node + l = [] + l.append(primitive_head(node)) + cli_add_description(node,l) + for c in node.childNodes: + if not is_element(c): + continue + if c.tagName == "instance_attributes": + l.append("%s %s" % \ + (cli_display.keyword("params"), \ + cli_pairs(nvpairs2list(c)))) + elif c.tagName == "meta_attributes": + l.append("%s %s" % \ + (cli_display.keyword("meta"), \ + cli_pairs(nvpairs2list(c)))) + elif c.tagName == "operations": + l.append(cli_operations(c,format)) + return self.cli_format(l,format) + def cli2node(self,cli,oldnode = None): + ''' + Convert a CLI description to DOM node. + Try to preserve as many ids as possible in case there's + an old XML version. + ''' + cli_list = mk_cli_list(cli) + if not cli_list: + return None + if not oldnode: + oldnode = self.node + head = copy.copy(cli_list[0]) + head[0] = backtrans[head[0]] + headnode = mkxmlsimple(head,oldnode,'rsc') + id_hint = headnode.getAttribute("id") + operations = None + for e in cli_list[1:]: + n = mkxmlnode(e,oldnode,id_hint) + if keyword_cmp(e[0], "operations"): + operations = n + if not keyword_cmp(e[0], "op"): + headnode.appendChild(n) + else: + if not operations: + operations = mkxmlnode(["operations",{}],oldnode,id_hint) + headnode.appendChild(operations) + operations.appendChild(n) + remove_id_used_attributes(oldnode) + return headnode + def check_sanity(self): + ''' + Check operation timeouts and if all required parameters + are defined. + ''' + if not self.node: # eh? + common_err("%s: no xml (strange)" % self.obj_id) + return user_prefs.get_check_rc() + from ra import RAInfo + ra_type = self.node.getAttribute("type") + ra_class = self.node.getAttribute("class") + ra_provider = self.node.getAttribute("provider") + ra = RAInfo(ra_class,ra_type,ra_provider) + if not ra.mk_ra_node(): # no RA found? 
+ ra.error("no such resource agent") + return user_prefs.get_check_rc() + params = [] + for c in self.node.childNodes: + if not is_element(c): + continue + if c.tagName == "instance_attributes": + params += nvpairs2list(c) + rc1 = ra.sanity_check_params(self.obj_id, params) + actions = {} + for c in self.node.childNodes: + if not is_element(c): + continue + if c.tagName == "operations": + for c2 in c.childNodes: + if is_element(c2) and c2.tagName == "op": + op,pl = op2list(c2) + if op: + actions[op] = pl + rc2 = ra.sanity_check_ops(self.obj_id, actions) + return rc1 | rc2 + +class CibContainer(CibObject): + ''' + Groups and clones and ms. + ''' + def repr_cli(self,node = None,format = 1): + if not node: + node = self.node + l = [] + l.append(cont_head(node)) + cli_add_description(node,l) + for c in node.childNodes: + if not is_element(c): + continue + if c.tagName == "instance_attributes": + l.append("%s %s" % \ + (cli_display.keyword("params"), \ + cli_pairs(nvpairs2list(c)))) + elif c.tagName == "meta_attributes": + l.append("%s %s" % \ + (cli_display.keyword("meta"), \ + cli_pairs(nvpairs2list(c)))) + return self.cli_format(l,format) + def cli2node(self,cli,oldnode = None): + cli_list = mk_cli_list(cli) + if not cli_list: + return None + if not oldnode: + oldnode = self.node + head = copy.copy(cli_list[0]) + head[0] = backtrans[head[0]] + headnode = mkxmlsimple(head,oldnode,'grp') + id_hint = headnode.getAttribute("id") + for e in cli_list[1:]: + n = mkxmlnode(e,oldnode,id_hint) + headnode.appendChild(n) + v = find_value(head[1],"$children") + if v: + for child_id in v: + obj = cib_factory.find_object(child_id) + if obj: + n = obj.node.cloneNode(1) + headnode.appendChild(n) + else: + no_object_err(child_id) + remove_id_used_attributes(oldnode) + return headnode + +class CibLocation(CibObject): + ''' + Location constraint. 
+ ''' + def repr_cli(self,node = None,format = 1): + if not node: + node = self.node + l = [] + l.append(location_head(node)) + for c in node.childNodes: + if not is_element(c): + continue + if c.tagName == "rule": + l.append("%s %s" % \ + (cli_display.keyword("rule"), cli_rule(c))) + return self.cli_format(l,format) + def cli2node(self,cli,oldnode = None): + cli_list = mk_cli_list(cli) + if not cli_list: + return None + if not oldnode: + oldnode = self.node + head = copy.copy(cli_list[0]) + head[0] = backtrans[head[0]] + headnode = mkxmlsimple(head,oldnode,'location') + id_hint = headnode.getAttribute("id") + oldrule = None + for e in cli_list[1:]: + if e[0] in ("expression","date_expression"): + n = mkxmlnode(e,oldrule,id_hint) + else: + n = mkxmlnode(e,oldnode,id_hint) + if keyword_cmp(e[0], "rule"): + add_missing_attr(n) + rule = n + headnode.appendChild(n) + oldrule = lookup_node(rule,oldnode,location_only=True) + else: + rule.appendChild(n) + remove_id_used_attributes(oldnode) + return headnode + +class CibSimpleConstraint(CibObject): + ''' + Colocation and order constraints. + ''' + def repr_cli(self,node = None,format = 1): + if not node: + node = self.node + l = [] + l.append(simple_constraint_head(node)) + return self.cli_format(l,format) + def cli2node(self,cli,oldnode = None): + cli_list = mk_cli_list(cli) + if not cli_list: + return None + if not oldnode: + oldnode = self.node + head = copy.copy(cli_list[0]) + head[0] = backtrans[head[0]] + headnode = mkxmlsimple(head,oldnode,'') + id_hint = headnode.getAttribute("id") + for e in cli_list[1:]: + # if more than one element, it's a resource set + n = mkxmlnode(e,oldnode,id_hint) + headnode.appendChild(n) + remove_id_used_attributes(oldnode) + return headnode + +class CibProperty(CibObject): + ''' + Cluster properties. 
+ ''' + def repr_cli(self,node = None,format = 1): + if not node: + node = self.node + l = [] + l.append(cli_display.keyword(self.obj_type)) + properties = nvpairs2list(node, add_id = True) + for n,v in properties: + if n == "$id": + l[0] = '%s %s="%s"' % (l[0],n,v) + else: + l.append(nvpair_format(n,v)) + return self.cli_format(l,format) + def cli2node(self,cli,oldnode = None): + cli_list = mk_cli_list(cli) + if not cli_list: + return None + if not oldnode: + oldnode = cib_factory.topnode[cib_object_map[self.xml_obj_type][2]] + head = copy.copy(cli_list[0]) + head[0] = backtrans[head[0]] + obj_id = find_value(head[1],"$id") + if not obj_id: + obj_id = cib_object_map[self.xml_obj_type][3] + headnode = mkxmlnode(head,oldnode,obj_id) + remove_id_used_attributes(oldnode) + return headnode + def matchcli(self,cli_list): + head = cli_list[0] + return self.obj_type == head[0] \ + and self.obj_id == find_value(head[1],"$id") +# +################################################################ + +# +# cib update interface (cibadmin) +# +cib_piped = "cibadmin -p" +def cib_delete_element(obj): + 'Remove one element from the CIB.' + if obj.xml_obj_type in vars.defaults_tags: + node = cib_factory.createElement("meta_attributes") + else: + node = cib_factory.createElement(obj.xml_obj_type) + node.setAttribute("id",obj.obj_id) + rc = pipe_string("%s -D" % cib_piped, node.toxml()) + if rc != 0: + update_err(obj.obj_id,'-D',node.toprettyxml()) + node.unlink() + return rc +def cib_update_elements(upd_list): + 'Update a set of objects in the CIB.' 
+ l = [x.obj_id for x in upd_list] + o = CibObjectSetRaw(*l) + xml = o.repr_configure() + rc = pipe_string("%s -U" % cib_piped, xml) + if rc != 0: + update_err(' '.join(l),'-U',xml) + return rc +def cib_replace_element(obj): + if obj.comment: + comm_node = cib_factory.createComment(s) + if obj.node.hasChildNodes(): + obj.node.insertBefore(comm_node, obj.node.firstChild) + else: + obj.node.appendChild(comm_node) + rc = pipe_string("%s -R -o %s" % \ + (cib_piped, obj.parent_type), obj.node.toxml()) + if rc != 0: + update_err(obj.obj_id,'-R',obj.node.toprettyxml()) + if comm_node: + rmnode(comm_node) + return rc +def cib_delete_moved_children(obj): + for c in obj.children: + if c.origin == "cib" and c.moved: + cib_delete_element(c) + +def get_cib_default(property): + if cib_factory.is_cib_sane(): + return cib_factory.get_property(property) + +# xml -> cli translations (and classes) +cib_object_map = { + "node": ( "node", CibNode, "nodes" ), + "primitive": ( "primitive", CibPrimitive, "resources" ), + "group": ( "group", CibContainer, "resources" ), + "clone": ( "clone", CibContainer, "resources" ), + "master": ( "ms", CibContainer, "resources" ), + "rsc_location": ( "location", CibLocation, "constraints" ), + "rsc_colocation": ( "colocation", CibSimpleConstraint, "constraints" ), + "rsc_order": ( "order", CibSimpleConstraint, "constraints" ), + "cluster_property_set": ( "property", CibProperty, "crm_config", "cib-bootstrap-options" ), + "rsc_defaults": ( "rsc_defaults", CibProperty, "rsc_defaults", "rsc-options" ), + "op_defaults": ( "op_defaults", CibProperty, "op_defaults", "op-options" ), +} +backtrans = odict() # generate a translation cli -> tag +for key in cib_object_map: + backtrans[cib_object_map[key][0]] = key +cib_topnodes = [] # get a list of parents +for key in cib_object_map: + if not cib_object_map[key][2] in cib_topnodes: + cib_topnodes.append(cib_object_map[key][2]) + +cib_upgrade = "cibadmin --upgrade --force" +class CibFactory(Singleton): + ''' + 
Juggle with CIB objects. + See check_structure below for details on the internal cib + representation. + ''' + def __init__(self): + self.init_vars() + self.regtest = options.regression_tests + self.all_committed = True # has commit produced error + self._no_constraint_rm_msg = False # internal (just not to produce silly messages) + self.supported_cib_re = "^pacemaker-1[.]0$" + def is_cib_sane(self): + if not self.doc: + empty_cib_err() + return False + return True + # + # check internal structures + # + def check_topnode(self,obj): + if not obj.node.parentNode.isSameNode(self.topnode[obj.parent_type]): + common_err("object %s is not linked to %s"%(obj.obj_id,obj.parent_type)) + def check_parent(self,obj,parent): + if not obj in parent.children: + common_err("object %s does not reference its child %s"%(parent.obj_id,obj.obj_id)) + return False + if not parent.node.isSameNode(obj.node.parentNode): + common_err("object %s node is not a child of its parent %s, but %s:%s"%(obj.obj_id,parent.obj_id,obj.node.tagName,obj.node.getAttribute("id"))) + return False + def check_structure(self): + #print "Checking structure..." + if not self.doc: + empty_cib_err() + return False + rc = True + for obj in self.cib_objects: + #print "Checking %s... 
(%s)" % (obj.obj_id,obj.nocli) + if obj.parent: + if self.check_parent(obj,obj.parent) == False: + rc = False + else: + if self.check_topnode(obj) == False: + rc = False + for child in obj.children: + if self.check_parent(child,child.parent) == False: + rc = False + return rc + def regression_testing(self,param): + # provide some help for regression testing + # in particular by trying to provide output which is + # easier to predict + if param == "off": + self.regtest = False + elif param == "on": + self.regtest = True + else: + common_warn("bad parameter for regtest: %s" % param) + def createElement(self,tag): + if self.doc: + return self.doc.createElement(tag) + else: + empty_cib_err() + def createComment(self,s): + if self.doc: + return self.doc.createComment(s) + else: + empty_cib_err() + def is_cib_supported(self,cib): + 'Do we support this CIB?' + req = cib.getAttribute("crm_feature_set") + validator = cib.getAttribute("validate-with") + if validator and re.match(self.supported_cib_re,validator): + return True + cib_ver_unsupported_err(validator,req) + return False + def upgrade_cib_06to10(self,force = False): + 'Upgrade the CIB from 0.6 to 1.0.' + if not self.doc: + empty_cib_err() + return False + cib = self.doc.getElementsByTagName("cib") + if not cib: + common_err("CIB has no cib element") + return False + req = cib[0].getAttribute("crm_feature_set") + validator = cib[0].getAttribute("validate-with") + if force or not validator or re.match("0[.]6",validator): + return ext_cmd(cib_upgrade) == 0 + def import_cib(self): + 'Parse the current CIB (from cibadmin -Q).' 
+ self.doc,cib = read_cib(cibdump2doc) + if not self.doc: + return False + if not cib: + common_err("CIB has no cib element") + self.reset() + return False + if not self.is_cib_supported(cib): + self.reset() + return False + for attr in cib.attributes.keys(): + self.cib_attrs[attr] = cib.getAttribute(attr) + for t in cib_topnodes: + self.topnode[t] = get_conf_elem(self.doc, t) + if not self.topnode[t]: + self.topnode[t] = mk_topnode(self.doc, t) + self.missing_topnodes.append(t) + if not self.topnode[t]: + common_err("could not create %s node; out of memory?" % t) + self.reset() + return False + return True + # + # create a doc from the list of objects + # (used by CibObjectSetRaw) + # + def regtest_filter(self,cib): + for attr in ("epoch","admin_epoch"): + if cib.getAttribute(attr): + cib.setAttribute(attr,"0") + for attr in ("cib-last-written",): + if cib.getAttribute(attr): + cib.removeAttribute(attr) + def set_cib_attributes(self,cib): + for attr in self.cib_attrs: + cib.setAttribute(attr,self.cib_attrs[attr]) + if self.regtest: + self.regtest_filter(cib) + def objlist2doc(self,obj_list,obj_filter = None): + ''' + Return document containing objects in obj_list. + Must remove all children from the object list, because + printing xml of parents will include them. + Optional filter to sieve objects. + ''' + doc,cib,crm_config,rsc_defaults,op_defaults,nodes,resources,constraints = new_cib() + # get only top parents for the objects in the list + # e.g. 
if we get a primitive which is part of a clone, + # then the clone gets in, not the primitive + # dict will weed out duplicates + d = {} + for obj in obj_list: + if obj_filter and not obj_filter(obj): + continue + d[obj.top_parent()] = 1 + for obj in d: + i_node = doc.importNode(obj.node,1) + add_comment(doc,i_node,obj.comment) + if obj.parent_type == "nodes": + nodes.appendChild(i_node) + elif obj.parent_type == "resources": + resources.appendChild(i_node) + elif obj.parent_type == "constraints": + constraints.appendChild(i_node) + elif obj.parent_type == "crm_config": + crm_config.appendChild(i_node) + elif obj.parent_type == "rsc_defaults": + rsc_defaults.appendChild(i_node) + elif obj.parent_type == "op_defaults": + op_defaults.appendChild(i_node) + self.set_cib_attributes(cib) + return doc + # + # commit changed objects to the CIB + # + def attr_match(self,c,a): + 'Does attribute match?' + try: cib_attr = self.cib_attrs[a] + except: cib_attr = None + return c.getAttribute(a) == cib_attr + def is_current_cib_equal(self, silent = False): + if self.overwrite: + return True + doc,cib = read_cib(cibdump2doc) + if not doc: + return False + if not cib: + doc.unlink() + return False + rc = self.attr_match(cib,'epoch') and \ + self.attr_match(cib,'admin_epoch') + if not silent and not rc: + common_warn("CIB changed in the meantime: won't touch it!") + doc.unlink() + return rc + def add_missing_topnodes(self): + cib_create_topnode = "cibadmin -C -o configuration -X" + for tag in self.missing_topnodes: + if not self.topnode[tag].hasChildNodes(): + continue + if ext_cmd("%s '<%s/>'" % (cib_create_topnode, tag)) != 0: + common_err("could not create %s in the cib" % tag) + return False + return True + def state_header(self): + 'Print object status header' + print CibObject.state_fmt % \ + ("","origin","updated","moved","invalid","parent","children") + def showobjects(self): + self.state_header() + for obj in self.cib_objects: + obj.dump_state() + if self.remove_queue: + 
print "Remove queue:" + for obj in self.remove_queue: + obj.dump_state() + def showqueue(self, title, obj_filter): + upd_list = self.cib_objs4cibadmin(obj_filter) + if title == "delete": + upd_list += self.remove_queue + if upd_list: + s = '' + upd_list = processing_sort_cli(upd_list) + if title == "delete": + upd_list = reversed(upd_list) + for obj in upd_list: + s = s + " " + obj.obj_string() + print "%s:%s" % (title,s) + def showqueues(self): + 'Show what is going to happen on commit.' + # 1. remove objects (incl. modified constraints) + self.showqueue("delete", lambda o: + o.origin == "cib" and (o.updated or o.recreate) and is_constraint(o.node)) + # 2. update existing objects + self.showqueue("replace", lambda o: \ + o.origin != 'user' and o.updated and not is_constraint(o.node)) + # 3. create new objects + self.showqueue("create", lambda o: \ + o.origin == 'user' and not is_constraint(o.node)) + # 4. create objects moved from a container + self.showqueue("create", lambda o: \ + not o.parent and o.moved and o.origin == "cib") + # 5. create constraints + self.showqueue("create", lambda o: is_constraint(o.node) and \ + (((o.updated or o.recreate) and o.origin == "cib") or o.origin == "user")) + def commit(self): + 'Commit the configuration to the CIB.' + if not self.doc: + empty_cib_err() + return False + if not self.add_missing_topnodes(): + return False + # all_committed is updated in the invoked object methods + self.all_committed = True + cnt = 0 + # 1. remove objects (incl. modified constraints) + cnt += self.delete_objects(lambda o: + o.origin == "cib" and (o.updated or o.recreate) and is_constraint(o.node)) + # 2. update existing objects + cnt += self.replace_objects(lambda o: \ + o.origin != 'user' and o.updated and not is_constraint(o.node)) + # 3. create new objects + cnt += self.create_objects(lambda o: \ + o.origin == 'user' and not is_constraint(o.node)) + # 4. 
create objects moved from a container + cnt += self.create_objects(lambda o: \ + not o.parent and o.moved and o.origin == "cib") + # 5. create constraints + cnt += self.create_objects(lambda o: is_constraint(o.node) and \ + (((o.updated or o.recreate) and o.origin == "cib") or o.origin == "user")) + if cnt: + # reload the cib! + self.reset() + self.initialize() + return self.all_committed + def cib_objs4cibadmin(self,obj_filter): + ''' + Filter objects from our cib_objects list. But add only + top parents. + For this to work, the filter must not filter out parents. + That's guaranteed by the updated flag propagation. + ''' + upd_list = [] + for obj in self.cib_objects: + if not obj_filter or obj_filter(obj): + if not obj.parent and not obj in upd_list: + upd_list.append(obj) + return upd_list + def delete_objects(self,obj_filter): + cnt = 0 + upd_list = self.cib_objs4cibadmin(obj_filter) + if not (self.remove_queue + upd_list): + return 0 + obj_list = processing_sort_cli(self.remove_queue + upd_list) + for obj in reversed(obj_list): + if cib_delete_element(obj) == 0: + if obj in self.remove_queue: + self.remove_queue.remove(obj) + cnt += 1 + else: + self.all_committed = False + return cnt + def create_objects(self,obj_filter): + upd_list = self.cib_objs4cibadmin(obj_filter) + if not upd_list: + return 0 + for obj in upd_list: + cib_delete_moved_children(obj) + if cib_update_elements(upd_list) == 0: + for obj in upd_list: + obj.reset_updated() + return len(upd_list) + else: + self.all_committed = False + return 0 + def replace_objects(self,obj_filter): + cnt = 0 + upd_list = self.cib_objs4cibadmin(obj_filter) + if not upd_list: + return 0 + for obj in processing_sort_cli(upd_list): + #print obj.node.toprettyxml() + cib_delete_moved_children(obj) + if cib_replace_element(obj) == 0: + cnt += 1 + obj.reset_updated() + else: + self.all_committed = False + return cnt + # + # initialize cib_objects from CIB + # + def save_node(self,node,pnode = None): + ''' + Need pnode 
(parent node) acrobacy because cluster + properties and rsc/op_defaults hold stuff in a + meta_attributes child. + ''' + if not pnode: + pnode = node + obj = cib_object_map[pnode.tagName][1](pnode.tagName) + obj.origin = "cib" + self.cib_objects.append(obj) + obj.save_xml(node) + def populate(self): + "Walk the cib and collect cib objects." + all_nodes = get_interesting_nodes(self.doc,[]) + if not all_nodes: + return + for node in processing_sort(all_nodes): + if is_defaults(node): + for c in node.childNodes: + if not is_element(c) or c.tagName != "meta_attributes": + continue + self.save_node(c,node) + else: + self.save_node(node) + for obj in self.cib_objects: + obj.pull_comments() + for obj in self.cib_objects: + if not obj.cli_use_validate(): + obj.nocli = True + for obj in self.cib_objects: + obj.update_links() + def initialize(self): + if self.doc: + return True + if not self.import_cib(): + return False + sanitize_cib(self.doc) + show_unrecognized_elems(self.doc) + self.populate() + return self.check_structure() + def init_vars(self): + self.doc = None # the cib + self.topnode = {} + for t in cib_topnodes: + self.topnode[t] = None + self.missing_topnodes = [] + self.cib_attrs = {} # cib version dictionary + self.cib_objects = [] # a list of cib objects + self.remove_queue = [] # a list of cib objects to be removed + self.overwrite = False # update cib unconditionally + def reset(self): + if not self.doc: + return + self.doc.unlink() + self.init_vars() + id_store.clear() + def find_object(self,obj_id): + "Find an object for id." + for obj in self.cib_objects: + if obj.obj_id == obj_id: + return obj + return None + # + # tab completion functions + # + def id_list(self): + "List of ids (for completion)." + return [x.obj_id for x in self.cib_objects] + def prim_id_list(self): + "List of primitives ids (for group completion)." 
+ return [x.obj_id for x in self.cib_objects if x.obj_type == "primitive"] + def children_id_list(self): + "List of child ids (for clone/master completion)." + return [x.obj_id for x in self.cib_objects if x.obj_type in vars.children_tags] + def rsc_id_list(self): + "List of resource ids (for constraint completion)." + return [x.obj_id for x in self.cib_objects \ + if x.obj_type in vars.resource_tags and not x.parent] + def f_prim_id_list(self): + "List of possible primitives ids (for group completion)." + return [x.obj_id for x in self.cib_objects \ + if x.obj_type == "primitive" and not x.parent] + def f_children_id_list(self): + "List of possible child ids (for clone/master completion)." + return [x.obj_id for x in self.cib_objects \ + if x.obj_type in vars.children_tags and not x.parent] + # + # a few helper functions + # + def find_object_for_node(self,node): + "Find an object which matches a dom node." + for obj in self.cib_objects: + if node.getAttribute("id") == obj.obj_id: + return obj + return None + def resolve_id_ref(self,attr_list_type,id_ref): + ''' + User is allowed to specify id_ref either as a an object + id or as attributes id. Here we try to figure out which + one, i.e. if the former is the case to find the right + id to reference. 
+ ''' + obj= self.find_object(id_ref) + if obj: + node_l = obj.node.getElementsByTagName(attr_list_type) + if node_l: + if len(node_l) > 1: + common_warn("%s contains more than one %s, using first" % \ + (obj.obj_id,attr_list_type)) + id = node_l[0].getAttribute("id") + if not id: + common_err("%s reference not found" % id_ref) + return id_ref # hope that user will fix that + return id + # verify if id_ref exists + node_l = self.doc.getElementsByTagName(attr_list_type) + for node in node_l: + if node.getAttribute("id") == id_ref: + return id_ref + common_err("%s reference not found" % id_ref) + return id_ref # hope that user will fix that + def get_property(self,property): + ''' + Get the value of the given cluster property. + ''' + for obj in self.cib_objects: + if obj.obj_type == "property" and obj.node: + pl = nvpairs2list(obj.node) + v = find_value(pl, property) + if v: + return v + return None + def new_object(self,obj_type,obj_id): + "Create a new object of type obj_type." + if id_in_use(obj_id): + return None + for xml_obj_type,v in cib_object_map.items(): + if v[0] == obj_type: + obj = v[1](xml_obj_type,obj_id) + if obj.obj_id: + return obj + else: + return None + return None + def mkobj_list(self,mode,*args): + obj_list = [] + for obj in self.cib_objects: + f = lambda: obj.filter(*args) + if not f(): + continue + if mode == "cli" and obj.nocli: + obj_cli_err(obj.obj_id) + continue + obj_list.append(obj) + return obj_list + def has_cib_changed(self): + return self.mkobj_list("xml","changed") or self.remove_queue + def verify_constraints(self,cli_list): + ''' + Check if all resources referenced in a constraint exist + ''' + rc = True + head = cli_list[0] + constraint_id = find_value(head[1],"id") + for obj_id in referenced_resources_cli(cli_list): + if not self.find_object(obj_id): + constraint_norefobj_err(constraint_id,obj_id) + rc = False + return rc + def verify_children(self,cli_list): + ''' + Check prerequisites: + a) all children must exist + b) no 
child may have other parent than me + (or should we steal children?) + c) there may not be duplicate children + ''' + head = cli_list[0] + obj_type = head[0] + obj_id = find_value(head[1],"id") + c_ids = find_value(head[1],"$children") + if not c_ids: + return True + rc = True + c_dict = {} + for child_id in c_ids: + if not self.verify_child(child_id,obj_type,obj_id): + rc = False + if child_id in c_dict: + common_err("in group %s child %s listed more than once"%(obj_id,child_id)) + rc = False + c_dict[child_id] = 1 + return rc + def verify_child(self,child_id,obj_type,obj_id): + 'Check if child exists and obj_id is (or may become) its parent.' + child = self.find_object(child_id) + if not child: + no_object_err(child_id) + return False + if child.parent and child.parent.obj_id != obj_id: + common_err("%s already in use at %s"%(child_id,child.parent.obj_id)) + return False + if obj_type == "group" and child.obj_type != "primitive": + common_err("a group may contain only primitives; %s is %s"%(child_id,child.obj_type)) + return False + if not child.obj_type in vars.children_tags: + common_err("%s may contain a primitive or a group; %s is %s"%(obj_type,child_id,child.obj_type)) + return False + return True + def verify_cli(self,cli_list): + ''' + Can we create this object given its CLI representation. + This is not about syntax, we're past that, but about + semantics. + Right now we check if the children, if any, are fit for + the parent. And if this is a constraint, if all + referenced resources are present. 
+ ''' + rc = True + if not self.verify_children(cli_list): + rc = False + if not self.verify_constraints(cli_list): + rc = False + return rc + def create_object(self,*args): + s = [] + s += args + return self.create_from_cli(CliParser().parse(args)) != None + def set_property_cli(self,cli_list): + head_pl = cli_list[0] + obj_type = head_pl[0].lower() + pset_id = find_value(head_pl[1],"$id") + if pset_id: + head_pl[1].remove(["$id",pset_id]) + else: + pset_id = cib_object_map[backtrans[obj_type]][3] + obj = self.find_object(pset_id) + if not obj: + if not is_id_valid(pset_id): + invalid_id_err(pset_id) + return None + obj = self.new_object(obj_type,pset_id) + if not obj: + return None + self.topnode[obj.parent_type].appendChild(obj.node) + obj.origin = "user" + self.cib_objects.append(obj) + for n,v in head_pl[1]: + set_nvpair(obj.node,n,v) + obj.updated = True + return obj + def add_op(self,cli_list): + '''Add an op to a primitive.''' + head = cli_list[0] + # does the referenced primitive exist + rsc_id = find_value(head[1],"rsc") + rsc_obj = cib_factory.find_object(rsc_id) + if not rsc_obj: + no_object_err(rsc_id) + return None + if rsc_obj.obj_type != "primitive": + common_err("%s is not a primitive" % rsc_id) + return None + # check if there is already an op with the same interval + name = find_value(head[1], "name") + interval = find_value(head[1], "interval") + if find_operation(rsc_obj.node,name,interval): + common_err("%s already has a %s op with interval %s" % \ + (rsc_id, name, interval)) + return None + # drop the rsc attribute + head[1].remove(["rsc",rsc_id]) + # create an xml node + mon_node = mkxmlsimple(head, None, rsc_id) + # get the place to append it to + try: + op_node = rsc_obj.node.getElementsByTagName("operations")[0] + except: + op_node = self.createElement("operations") + rsc_obj.node.appendChild(op_node) + op_node.appendChild(mon_node) + # the resource is updated + rsc_obj.updated = True + rsc_obj.propagate_updated() + return rsc_obj + def 
create_from_cli(self,cli): + 'Create a new cib object from the cli representation.' + cli_list = mk_cli_list(cli) + if not cli_list: + return None + if not self.verify_cli(cli_list): + return None + head = cli_list[0] + obj_type = head[0].lower() + if obj_type in vars.nvset_cli_names: + return self.set_property_cli(cli_list) + if obj_type == "op": + return self.add_op(cli_list) + obj_id = find_value(head[1],"id") + if not is_id_valid(obj_id): + invalid_id_err(obj_id) + return None + obj = self.new_object(obj_type,obj_id) + if not obj: + return None + obj.node = obj.cli2node(cli_list) + if user_prefs.is_check_always() \ + and obj.check_sanity() > 1: + id_store.remove_xml(obj.node) + obj.node.unlink() + return None + self.topnode[obj.parent_type].appendChild(obj.node) + self.adjust_children(obj,cli_list) + obj.origin = "user" + for child in obj.children: + # redirect constraints to the new parent + for c_obj in self.related_constraints(child): + self.remove_queue.append(c_obj.mkcopy()) + rename_rscref(c_obj,child.obj_id,obj.obj_id) + # drop useless constraints which may have been created above + for c_obj in self.related_constraints(obj): + if silly_constraint(c_obj.node,obj.obj_id): + self._no_constraint_rm_msg = True + self._remove_obj(c_obj) + self._no_constraint_rm_msg = False + self.cib_objects.append(obj) + return obj + def update_moved(self,obj): + 'Updated the moved flag. Mark affected constraints.' + obj.moved = not obj.moved + if obj.moved: + for c_obj in self.related_constraints(obj): + c_obj.recreate = True + def adjust_children(self,obj,cli_list): + ''' + All stuff children related: manage the nodes of children, + update the list of children for the parent, update + parents in the children. 
+ ''' + head = cli_list[0] + children_ids = find_value(head[1],"$children") + if not children_ids: + return + new_children = [] + for child_id in children_ids: + new_children.append(self.find_object(child_id)) + self._relink_orphans(obj,new_children) + obj.children = new_children + self._update_children(obj) + def _relink_child(self,obj): + 'Relink a child to the top node.' + obj.node.parentNode.removeChild(obj.node) + self.topnode[obj.parent_type].appendChild(obj.node) + self.update_moved(obj) + obj.parent = None + def _update_children(self,obj): + '''For composite objects: update all children nodes. + ''' + # unlink all and find them in the new node + for child in obj.children: + oldnode = child.node + child.node = obj.find_child_in_node(child) + if child.children: # and children of children + self._update_children(child) + rmnode(oldnode) + if not child.parent: + self.update_moved(child) + if child.parent and child.parent != obj: + child.parent.updated = True # the other parent updated + child.parent = obj + def _relink_orphans(self,obj,new_children): + "New orphans move to the top level for the object type." + for child in obj.children: + if child not in new_children: + self._relink_child(child) + def add_obj(self,obj_type,node): + obj = self.new_object(obj_type, node.getAttribute("id")) + if not obj: + return None + obj.save_xml(node) + if not obj.cli_use_validate(): + obj.nocli = True + obj.update_links() + obj.origin = "user" + self.cib_objects.append(obj) + return obj + def create_from_node(self,node): + 'Create a new cib object from a document node.' 
+ if not node: + return None + obj_type = cib_object_map[node.tagName][0] + node = self.doc.importNode(node,1) + obj = None + if is_defaults(node): + for c in node.childNodes: + if not is_element(c) or c.tagName != "meta_attributes": + continue + obj = self.add_obj(obj_type,c) + else: + obj = self.add_obj(obj_type,node) + if obj: + self.topnode[obj.parent_type].appendChild(node) + return obj + def cib_objects_string(self, obj_list = None): + l = [] + if not obj_list: + obj_list = self.cib_objects + for obj in obj_list: + l.append(obj.obj_string()) + return ' '.join(l) + def _remove_obj(self,obj): + "Remove a cib object and its children." + # remove children first + # can't remove them here from obj.children! + common_debug("remove object %s" % obj.obj_string()) + for child in obj.children: + #self._remove_obj(child) + # just relink, don't remove children + self._relink_child(child) + if obj.parent: # remove obj from its parent, if any + obj.parent.children.remove(obj) + id_store.remove_xml(obj.node) + rmnode(obj.node) + obj.invalid = True + self.add_to_remove_queue(obj) + self.cib_objects.remove(obj) + for c_obj in self.related_constraints(obj): + if is_simpleconstraint(c_obj.node) and obj.children: + # the first child inherits constraints + rename_rscref(c_obj,obj.obj_id,obj.children[0].obj_id) + delete_rscref(c_obj,obj.obj_id) + if silly_constraint(c_obj.node,obj.obj_id): + # remove invalid constraints + self._remove_obj(c_obj) + if not self._no_constraint_rm_msg: + err_buf.info("hanging %s deleted" % c_obj.obj_string()) + def related_constraints(self,obj): + if not is_resource(obj.node): + return [] + c_list = [] + for obj2 in self.cib_objects: + if not is_constraint(obj2.node): + continue + if rsc_constraint(obj.obj_id,obj2.node): + c_list.append(obj2) + return c_list + def add_to_remove_queue(self,obj): + if obj.origin == "cib": + self.remove_queue.append(obj) + #print self.cib_objects_string(self.remove_queue) + def delete_1(self,obj): + ''' + Remove an 
object and its parent in case the object is the + only child. + ''' + if obj.parent and len(obj.parent.children) == 1: + self.delete_1(obj.parent) + if obj in self.cib_objects: # don't remove parents twice + self._remove_obj(obj) + def delete(self,*args): + 'Delete a cib object.' + if not self.doc: + empty_cib_err() + return False + rc = True + l = [] + for obj_id in args: + obj = self.find_object(obj_id) + if not obj: + no_object_err(obj_id) + rc = False + continue + if is_rsc_running(obj_id): + common_warn("resource %s is running, can't delete it" % obj_id) + else: + l.append(obj) + if l: + l = processing_sort_cli(l) + for obj in reversed(l): + self.delete_1(obj) + return rc + def remove_on_rename(self,obj): + ''' + If the renamed object is coming from the cib, then it + must be removed and a new one created. + ''' + if obj.origin == "cib": + self.remove_queue.append(obj.mkcopy()) + obj.origin = "user" + def rename(self,old_id,new_id): + ''' + Rename a cib object. + - check if the resource (if it's a resource) is stopped + - check if the new id is not taken + - find the object with old id + - rename old id to new id in all related objects + (constraints) + - if the object came from the CIB, then it must be + deleted and the one with the new name created + - rename old id to new id in the object + ''' + if not self.doc: + empty_cib_err() + return False + if id_in_use(new_id): + return False + obj = self.find_object(old_id) + if not obj: + no_object_err(old_id) + return False + if not obj.can_be_renamed(): + return False + for c_obj in self.related_constraints(obj): + rename_rscref(c_obj,old_id,new_id) + self.remove_on_rename(obj) + rename_id(obj.node,old_id,new_id) + obj.obj_id = new_id + id_store.rename(old_id,new_id) + obj.updated = True + obj.propagate_updated() + def erase(self): + "Remove all cib objects." 
+ # remove only bottom objects and no constraints + # the rest will automatically follow + if not self.doc: + empty_cib_err() + return False + erase_ok = True + l = [] + for obj in [obj for obj in self.cib_objects \ + if not obj.children and not is_constraint(obj.node) \ + and obj.obj_type != "node" ]: + if is_rsc_running(obj.obj_id): + common_warn("resource %s is running, can't delete it" % obj.obj_id) + erase_ok = False + else: + l.append(obj) + if not erase_ok: + common_err("CIB erase aborted (nothing was deleted)") + return False + self._no_constraint_rm_msg = True + for obj in l: + self.delete(obj.obj_id) + self._no_constraint_rm_msg = False + remaining = 0 + for obj in self.cib_objects: + if obj.obj_type != "node": + remaining += 1 + if remaining > 0: + common_err("strange, but these objects remained:") + for obj in self.cib_objects: + if obj.obj_type != "node": + print >> sys.stderr, obj.obj_string() + self.cib_objects = [] + return True + def erase_nodes(self): + "Remove nodes only." + if not self.doc: + empty_cib_err() + return False + l = [obj for obj in self.cib_objects if obj.obj_type == "node"] + for obj in l: + self.delete(obj.obj_id) + def refresh(self): + "Refresh from the CIB." 
+ self.reset() + self.initialize() + +user_prefs = UserPrefs.getInstance() +options = Options.getInstance() +err_buf = ErrorBuffer.getInstance() +vars = Vars.getInstance() +cib_factory = CibFactory.getInstance() +cli_display = CliDisplay.getInstance() +cib_status = CibStatus.getInstance() +id_store = IdMgmt.getInstance() + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/cibstatus.py b/shell/modules/cibstatus.py new file mode 100644 index 0000000000..bc922839c2 --- /dev/null +++ b/shell/modules/cibstatus.py @@ -0,0 +1,248 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import sys +import re +from singletonmixin import Singleton +from xmlutil import * +from msg import * + +def get_tag_by_id(node,tag,id): + "Find a doc node which matches tag and id." + for n in node.getElementsByTagName(tag): + if n.getAttribute("id") == id: + return n + return None +def get_status_node(n): + try: n = n.parentNode + except: return None + if n.tagName != "node_state": + return get_status_node(n) + return n.getAttribute("id") +def get_status_ops(status_node,rsc,op,interval,node = ''): + ''' + Find a doc node which matches the operation. interval set to + "-1" means to lookup an operation with non-zero interval (for + monitors). Empty interval means any interval is fine. 
+ ''' + l = [] + for n in status_node.childNodes: + if not is_element(n) or n.tagName != "node_state": + continue + if node and n.getAttribute("id") != node: + continue + for r in n.getElementsByTagName("lrm_resource"): + if r.getAttribute("id") != rsc: + continue + for o in r.getElementsByTagName("lrm_rsc_op"): + if o.getAttribute("operation") != op: + continue + if (interval == "") or \ + (interval == "-1" and o.getAttribute("interval") != "0") or \ + (interval != "" and o.getAttribute("interval") == interval): + l.append(o) + return l + +class CibStatus(Singleton): + ''' + CIB status management + ''' + def __init__(self): + self.origin = "live" + self.status_node = None + self.doc = None + self.cib = None + self.modified = False + self.node_changes = {} + self.op_changes = {} + def _cib_path(self,source): + if source[0:7] == "shadow:": + return shadowfile(source[7:]) + else: + return source + def _load_cib(self,source): + if source == "live": + doc,cib = read_cib(cibdump2doc) + else: + doc,cib = read_cib(file2doc,self._cib_path(source)) + return doc,cib + def _load(self,source): + doc,cib = self._load_cib(source) + if not doc: + return False + status = get_conf_elem(doc, "status") + if not status: + return False + self.doc,self.cib = doc,cib + self.status_node = status + self.modified = False + self.node_changes = {} + self.op_changes = {} + return True + def status_node_list(self): + if not self.status_node and not self._load(self.origin): + return + return [x.getAttribute("id") for x in self.doc.getElementsByTagName("node_state")] + def status_rsc_list(self): + if not self.status_node and not self._load(self.origin): + return + rsc_list = [x.getAttribute("id") for x in self.doc.getElementsByTagName("lrm_resource")] + # how to uniq? + d = {} + for e in rsc_list: + d[e] = 0 + return d.keys() + def load(self,source): + ''' + Load the status section from the given source. The source + may be cluster ("live"), shadow CIB, or CIB in a file. 
+ ''' + if not self._load(source): + common_err("the cib contains no status") + return False + self.origin = source + return True + def save(self,dest = None): + ''' + Save the modified status section to a file/shadow. If the + file exists, then it must be a cib file and the status + section is replaced with our status section. If the file + doesn't exist, then our section and some (?) configuration + is saved. + ''' + if not self.modified: + common_info("apparently you didn't modify status") + return False + if (not dest and self.origin == "live") or dest == "live": + common_warn("cannot save status to the cluster") + return False + doc,cib = self.doc,self.cib + if dest: + dest_path = self._cib_path(dest) + if os.path.isfile(dest_path): + doc,cib = self._load_cib(dest) + if not doc or not cib: + common_err("%s exists, but no cib inside" % dest) + return False + else: + dest_path = self._cib_path(self.origin) + if doc != self.doc: + status = get_conf_elem(doc, "status") + rmnode(status) + cib.appendChild(doc.importNode(self.status_node,1)) + xml = doc.toprettyxml(user_prefs.xmlindent) + try: f = open(dest_path,"w") + except IOError, msg: + common_err(msg) + return False + f.write(xml) + f.close() + return True + def get_status(self): + ''' + Return the status section node. + ''' + if not self.status_node and not self._load(self.origin): + return None + return self.status_node + def list_changes(self): + ''' + Dump a set of changes done. + ''' + if not self.modified: + return True + for node in self.node_changes: + print node,self.node_changes[node] + for op in self.op_changes: + print op,self.op_changes[op] + return True + def show(self): + ''' + Page the "pretty" XML of the status section. 
+ ''' + if not self.status_node and not self._load(self.origin): + return + page_string(self.status_node.toprettyxml(user_prefs.xmlindent)) + return True + def edit_node(self,node,state): + ''' + Modify crmd, expected, and join attributes of node_state + to set the node's state to online, offline, or unclean. + ''' + if not self.status_node and not self._load(self.origin): + return + node_node = get_tag_by_id(self.status_node,"node_state",node) + if not node_node: + common_err("node %s not found" % node) + return False + if state == "online": + node_node.setAttribute("crmd","online") + node_node.setAttribute("expected","member") + node_node.setAttribute("join","member") + elif state == "offline": + node_node.setAttribute("crmd","offline") + node_node.setAttribute("expected","") + elif state == "unclean": + node_node.setAttribute("crmd","offline") + node_node.setAttribute("expected","member") + else: + common_err("unknown state %s" % state) + return False + self.node_changes[node] = state + self.modified = True + return True + def edit_op(self,op,rsc,rc,op_status,node = ''): + ''' + Set rc-code and op-status in the lrm_rsc_op status + section element. 
+ ''' + if not self.status_node and not self._load(self.origin): + return + l_op = op + l_int = "" + if op == "probe": + l_op = "monitor" + l_int = "0" + elif op == "monitor": + l_int = "-1" + elif op[0:8] == "monitor:": + l_op = "monitor" + l_int = op[8:] + op_nodes = get_status_ops(self.status_node,rsc,l_op,l_int,node) + if len(op_nodes) == 0: + common_err("operation %s not found" % op) + return False + elif len(op_nodes) > 1: + nodelist = [get_status_node(x) for x in op_nodes] + common_err("operation %s found at %s" % (op,' '.join(nodelist))) + return False + op_node = op_nodes[0] + if not node: + node = get_status_node(op_node) + prev_rc = op_node.getAttribute("rc-code") + op_node.setAttribute("rc-code",rc) + self.op_changes[node+":"+rsc+":"+op] = "rc="+rc + if op_status: + op_node.setAttribute("op-status",op_status) + self.op_changes[node+":"+rsc+":"+op] += "," "op-status="+op_status + op_node.setAttribute("last-run",str(int(time.time()))) + if rc != prev_rc: + op_node.setAttribute("last-rc-change",str(int(time.time()))) + self.modified = True + return True + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/clidisplay.py b/shell/modules/clidisplay.py new file mode 100644 index 0000000000..ff6de51dd8 --- /dev/null +++ b/shell/modules/clidisplay.py @@ -0,0 +1,69 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +from singletonmixin import Singleton +from userprefs import Options, UserPrefs +from vars import Vars +from cache import WCache +from utils import * +from msg import * +from term import TerminalController + +class CliDisplay(Singleton): + """ + Display output for various syntax elements. + """ + def __init__(self): + self.no_pretty = False + def set_no_pretty(self): + self.no_pretty = True + def reset_no_pretty(self): + self.no_pretty = False + def colorstring(self, clrnum, s): + if self.no_pretty: + return s + else: + return termctrl.render("${%s}%s${NORMAL}" % \ + (user_prefs.colorscheme[clrnum].upper(), s)) + def keyword(self, kw): + s = kw + if "uppercase" in user_prefs.output: + s = s.upper() + if "color" in user_prefs.output: + s = self.colorstring(0, s) + return s + def otherword(self, n, s): + if "color" in user_prefs.output: + return self.colorstring(n, s) + else: + return s + def id(self, s): + return self.otherword(1, s) + def attr_name(self, s): + return self.otherword(2, s) + def attr_value(self, s): + return self.otherword(3, s) + def rscref(self, s): + return self.otherword(4, s) + def score(self, s): + return self.otherword(5, s) + +user_prefs = UserPrefs.getInstance() +vars = Vars.getInstance() +termctrl = TerminalController.getInstance() + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/help.py.in b/shell/modules/help.py.in new file mode 100644 index 0000000000..9659315a26 --- /dev/null +++ b/shell/modules/help.py.in @@ -0,0 +1,282 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import os +import re +from cache import WCache +from utils import odict, page_string +from msg import * + +# +# help or make users feel less lonely +# +def add_shorthelp(topic,shorthelp,topic_help): + ''' + Join topics ("%s,%s") if they share the same short + description. + ''' + for i in range(len(topic_help)): + if topic_help[i][1] == shorthelp: + topic_help[i][0] = "%s,%s" % (topic_help[i][0], topic) + return + topic_help.append([topic, shorthelp]) +def dump_short_help(help_tab): + topic_help = [] + for topic in help_tab: + if topic == '.': + continue + # with odict, for whatever reason, python parses differently: + # help_tab["..."] = ("...","...") and + # help_tab["..."] = ("...",""" + # ...""") + # a parser bug? + if type(help_tab[topic][0]) == type(()): + shorthelp = help_tab[topic][0][0] + else: + shorthelp = help_tab[topic][0] + add_shorthelp(topic,shorthelp,topic_help) + for t,d in topic_help: + print "\t%-16s %s" % (t,d) +def overview(help_tab): + print "" + print help_tab['.'][1] + print "" + print "Available commands:" + print "" + dump_short_help(help_tab) + print "" +def topic_help(help_tab,topic): + if topic not in help_tab: + print "There is no help for topic %s" % topic + return + if type(help_tab[topic][0]) == type(()): + shorthelp = help_tab[topic][0][0] + longhelp = help_tab[topic][0][1] + else: + shorthelp = help_tab[topic][0] + longhelp = help_tab[topic][1] + if longhelp: + page_string(longhelp) + else: + print shorthelp +def cmd_help(help_tab,topic = ''): + "help!" 
+ # help_tab is an odict (ordered dictionary): + # help_tab[topic] = (short_help,long_help) + # topic '.' is a special entry for the top level + if not help_tab: + common_info("sorry, help not available") + return + if not topic: + overview(help_tab) + else: + topic_help(help_tab,topic) + +def help_short(s): + r = re.search("help_[^,]+,(.*)\]\]", s) + return r and r.group(1) or '' + +class HelpSystem(object): + ''' + The help system. All help is in the following form in the + manual: + [[cmdhelp__,]] + === ... + Long help text. + ... + [[cmdhelp__,]] + + Help for the level itself is like this: + + [[cmdhelp_,]] + ''' + help_text_file = "@datadir@/@PACKAGE@/crm_cli.txt" + index_file = "%s/%s" % (os.getenv("HOME"),".crm_help_index") + def __init__(self): + self.key_pos = {} + self.key_list = [] + self.no_help_file = False # don't print repeatedly messages + self.bad_index = False # don't print repeatedly warnings for bad index + def open_file(self,name,mode): + try: + f = open(name,mode) + return f + except IOError,msg: + common_err("%s open: %s"%(name,msg)) + common_err("extensive help system is not available") + self.no_help_file = True + return None + def drop_index(self): + common_info("removing index") + os.unlink(self.index_file) + self.key_pos = {} + self.key_list = [] + self.bad_index = True + def mk_index(self): + ''' + Prepare an index file, sorted by topic, with seek positions + Do we need a hash on content? 
+ ''' + if self.no_help_file: + return False + crm_help_v = os.getenv("CRM_HELP_FILE") + if crm_help_v: + self.help_text_file = crm_help_v + help_f = self.open_file(self.help_text_file,"r") + if not help_f: + return False + idx_f = self.open_file(self.index_file,"w") + if not idx_f: + return False + common_info("building help index") + key_pos = {} + while 1: + pos = help_f.tell() + s = help_f.readline() + if not s: + break + if s.startswith("[["): + r = re.search(r'..([^,]+),', s) + if r: + key_pos[r.group(1)] = pos + help_f.close() + l = key_pos.keys() + l.sort() + for key in l: + print >>idx_f, '%s %d' % (key,key_pos[key]) + idx_f.close() + return True + def is_index_old(self): + try: + t_idx = os.path.getmtime(self.index_file) + except: + return True + try: + t_help = os.path.getmtime(self.help_text_file) + except: + return True + return t_help > t_idx + def load_index(self): + if self.is_index_old(): + self.mk_index() + self.key_pos = {} + idx_f = self.open_file(self.index_file,"r") + if not idx_f: + return False + for s in idx_f: + a = s.split() + if len(a) != 2: + if not self.bad_index: + common_err("index file corrupt") + idx_f.close() + self.drop_index() + return self.load_index() # this runs only once + return False + self.key_pos[a[0]] = long(a[1]) + idx_f.close() + self.key_list = self.key_pos.keys() + self.key_list.sort() + return True + def __filter(self,s): + if '<<' in s: + return re.sub(r'<<[^,]+,(.+)>>', r'\1', s) + else: + return s + def __find_key(self,key): + low = 0 + high = len(self.key_list)-1 + while low <= high: + mid = (low + high)/2 + if self.key_list[mid] > key: + high = mid - 1 + elif self.key_list[mid] < key: + low = mid + 1 + else: + return mid + return -1 + def __load_help_one(self,key,skip = 2): + longhelp = '' + self.help_f.seek(self.key_pos[key]) + shorthelp = help_short(self.help_f.readline()) + for i in range(skip-1): + self.help_f.readline() + l = [] + for s in self.help_f: + if s.startswith("[[") or s.startswith("="): + break 
+ l.append(self.__filter(s)) + if l and l[-1] == '\n': # drop the last line of empty + l.pop() + if l: + longhelp = ''.join(l) + if not shorthelp or not longhelp: + if not self.bad_index: + common_warn("help topic %s not found" % key) + self.drop_index() + return shorthelp,longhelp + def cmdhelp(self,s): + if not self.key_pos and not self.load_index(): + return None,None + if not s in self.key_pos: + if not self.bad_index: + common_warn("help topic %s not found" % s) + self.drop_index() + return None,None + return self.__load_help_one(s) + def __load_level(self,lvl): + ''' + For the given level, create a help table. + ''' + if wcache.is_cached("lvl_help_tab_%s" % lvl): + return wcache.retrieve("lvl_help_tab_%s" % lvl) + if not self.key_pos and not self.load_index(): + return None + self.help_f = self.open_file(self.help_text_file,"r") + if not self.help_f: + return None + lvl_s = "cmdhelp_%s" % lvl + if not lvl_s in self.key_pos: + if not self.bad_index: + common_warn("help table for level %s not found" % lvl) + self.drop_index() + return None + common_debug("loading help table for level %s" % lvl) + help_tab = odict() + help_tab["."] = self.__load_help_one(lvl_s) + lvl_idx = self.__find_key(lvl_s) + lvl_idx += 1 + while lvl_idx < len(self.key_list): + key = self.key_list[lvl_idx] + if not key.startswith(lvl_s): + break + cmd = key[len(lvl_s)+1:] + help_tab[cmd] = self.__load_help_one(key) + lvl_idx += 1 + self.help_f.close() + help_tab["quit"] = ("exit the program", "") + help_tab["help"] = ("show help", "") + help_tab["end"] = ("go back one level", "") + return help_tab + def load_level(self,lvl): + help_tab = self.__load_level(lvl) + if self.bad_index: # try again + help_tab = self.__load_level(lvl) + return wcache.store("lvl_help_tab_%s" % lvl, help_tab) + +wcache = WCache.getInstance() + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/levels.py b/shell/modules/levels.py new file mode 100644 index 0000000000..fcb5db2bdc --- /dev/null +++ 
b/shell/modules/levels.py @@ -0,0 +1,93 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import sys +import re +from singletonmixin import Singleton + +def topics_dict(help_tab): + if not help_tab: + return {} + topics = {} + for topic in help_tab: + if topic != '.': + topics[topic] = None + return topics + +def mk_completion_tab(obj,ctab): + cmd_table = obj.cmd_table + for key,value in cmd_table.items(): + if key.startswith("_"): + continue + if type(value) == type(object): + ctab[key] = {} + elif key == "help": + ctab[key] = topics_dict(obj.help_table) + else: + try: + ctab[key] = value[3] + except: + ctab[key] = None + pass + +class Levels(Singleton): + ''' + Keep track of levels and prompts. 
+ ''' + def __init__(self,start_level): + self._marker = 0 + self._in_transit = False + self.level_stack = [] + self.comp_stack = [] + self.current_level = start_level() + self.parse_root = self.current_level.cmd_table + self.prompts = [] + self.completion_tab = {} + mk_completion_tab(self.current_level,self.completion_tab) + def getprompt(self): + return ' '.join(self.prompts) + def mark(self): + self._marker = len(self.level_stack) + self._in_transit = False + def release(self): + while len(self.level_stack) > self._marker: + self.droplevel() + def new_level(self,level_obj,token): + self.level_stack.append(self.current_level) + self.comp_stack.append(self.completion_tab) + self.prompts.append(token) + self.current_level = level_obj() + self.parse_root = self.current_level.cmd_table + try: + if not self.completion_tab[token]: + mk_completion_tab(self.current_level,self.completion_tab[token]) + self.completion_tab = self.completion_tab[token] + except: + pass + self._in_transit = True + def previous(self): + if self.level_stack: + return self.level_stack[-1] + def droplevel(self): + if self.level_stack: + self.current_level.end_game(self._in_transit) + self.current_level = self.level_stack.pop() + self.completion_tab = self.comp_stack.pop() + self.parse_root = self.current_level.cmd_table + self.prompts.pop() + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/main.py b/shell/modules/main.py new file mode 100644 index 0000000000..ac66d705d5 --- /dev/null +++ b/shell/modules/main.py @@ -0,0 +1,279 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import sys +import shlex +import readline +import getopt + +from utils import * +from userprefs import Options, UserPrefs +from vars import Vars +from msg import * +from ui import cmd_exit, TopLevel, completer +from levels import Levels + +def load_rc(rcfile): + try: f = open(rcfile) + except: return + save_stdin = sys.stdin + sys.stdin = f + while True: + inp = multi_input() + if inp == None: + break + try: parse_line(levels,shlex.split(inp)) + except ValueError, msg: + common_err(msg) + f.close() + sys.stdin = save_stdin + +def multi_input(prompt = ''): + """ + Get input from user + Allow multiple lines using a continuation character + """ + line = [] + while True: + try: + text = raw_input(prompt) + except EOFError: + return None + err_buf.incr_lineno() + if options.regression_tests: + print ".INP:",text + sys.stdout.flush() + sys.stderr.flush() + stripped = text.strip() + if stripped.endswith('\\'): + stripped = stripped.rstrip('\\') + line.append(stripped) + if prompt: + prompt = '> ' + else: + line.append(stripped) + break + return ''.join(line) + +def check_args(args,argsdim): + if not argsdim: return True + if len(argsdim) == 1: + minargs = argsdim[0] + return len(args) >= minargs + else: + minargs,maxargs = argsdim + return len(args) >= minargs and len(args) <= maxargs + +# +# Note on parsing +# +# Parsing tables are python dictionaries. +# +# Keywords are used as keys and the corresponding values are +# lists (actually tuples, since they should be read-only) or +# classes. 
In the former case, the keyword is a terminal and +# in the latter, a new object for the class is created. The class +# must have the cmd_table variable. +# +# The list has the following content: +# +# function: a function to handle this command +# numargs_list: number of minimum/maximum arguments; for example, +# (0,1) means one optional argument, (1,1) one required; if the +# list is empty then the function will parse arguments itself +# required minimum skill level: operator, administrator, expert +# (encoded as a small integer from 0 to 2) +# list of completer functions (optional) +# + +def show_usage(cmd): + p = None + try: p = cmd.__doc__ + except: pass + if p: + print >> sys.stderr, p + else: + syntax_err(cmd.__name__) + +def parse_line(lvl,s): + if not s: return True + if s[0].startswith('#'): return True + lvl.mark() + pt = lvl.parse_root + cmd = None + i = 0 + for i in range(len(s)): + token = s[i] + if token in pt: + if type(pt[token]) == type(object): + lvl.new_level(pt[token],token) + pt = lvl.parse_root # move to the next level + else: + cmd = pt[token] # terminal symbol + break # and stop parsing + else: + syntax_err(s[i:]) + lvl.release() + return False + if cmd: # found a terminal symbol + if not user_prefs.check_skill_level(cmd[2]): + lvl.release() + skill_err(s[i]) + return False + args = s[i+1:] + if not check_args(args,cmd[1]): + lvl.release() + show_usage(cmd[0]) + return False + args = s[i:] + d = lambda: cmd[0](*args) + rv = d() # execute the command + lvl.release() + return rv != False + return True + +def prereqs(): + proglist = "which cibadmin crm_resource crm_attribute crm_mon crm_standby crm_failcount" + for prog in proglist.split(): + if not is_program(prog): + print >> sys.stderr, "%s not available, check your installation"%prog + sys.exit(1) + +# three modes: interactive (no args supplied), batch (input from +# a file), half-interactive (args supplied, but not batch) +def cib_prompt(): + return vars.cib_in_use or "live" + +def 
setup_readline(): + readline.set_history_length(100) + readline.parse_and_bind("tab: complete") + readline.set_completer(completer) + readline.set_completer_delims(\ + readline.get_completer_delims().replace('-','').replace('/','').replace('=','')) + try: readline.read_history_file(vars.hist_file) + except: pass + +def usage(): + print >> sys.stderr, """ +usage: + crm [-D display_type] [-f file] [-hF] [args] + + Use crm without arguments for an interactive session. + Supply one or more arguments for a "single-shot" use. + Specify with -f a file which contains a script. Use '-' for + standard input or use pipe/redirection. + + crm displays cli format configurations using a color scheme + and/or in uppercase. Pick one of "color" or "uppercase", or + use "-D color,uppercase" if you want colorful uppercase. + Get plain output by "-D plain". The default may be set in + user preferences (options). + + -F stands for force, if set all operations will behave as if + force was specified on the line (e.g. configure commit). 
+ +Examples: + + # crm -f stopapp2.cli + # crm < stopapp2.cli + # crm resource stop global_www + # crm status + + """ + sys.exit(1) + +user_prefs = UserPrefs.getInstance() +options = Options.getInstance() +err_buf = ErrorBuffer.getInstance() +vars = Vars.getInstance() +levels = Levels.getInstance() + +def run(): + prereqs() + inp_file = '' + + load_rc(vars.rc_file) + + if not sys.stdin.isatty(): + err_buf.reset_lineno() + options.batch = True + else: + options.interactive = True + + try: + opts, args = getopt.getopt(sys.argv[1:], \ + 'hdf:FRD:', ("help","debug","file=",\ + "force","regression-tests","display=")) + for o,p in opts: + if o in ("-h","--help"): + usage() + elif o == "-d": + user_prefs.set_debug() + elif o == "-R": + options.regression_tests = True + elif o in ("-D","--display"): + user_prefs.set_output(p) + elif o in ("-F","--force"): + user_prefs.set_force() + elif o in ("-f","--file"): + options.batch = True + err_buf.reset_lineno() + inp_file = p + except getopt.GetoptError,msg: + print msg + usage() + + if len(args) == 1 and args[0].startswith("conf"): + parse_line(levels,["configure"]) + options.interactive = True + elif len(args) > 0: + err_buf.reset_lineno() + options.interactive = False + if parse_line(levels,shlex.split(' '.join(args))): + # if the user entered a level, then just continue + if levels.previous(): + if not inp_file and sys.stdin.isatty(): + options.interactive = True + else: + sys.exit(0) + else: + sys.exit(1) + + if inp_file == "-": + pass + elif inp_file: + try: + f = open(inp_file) + except IOError, msg: + common_err(msg) + usage() + sys.stdin = f + + if options.interactive: + setup_readline() + + while True: + if options.interactive: + vars.prompt = "crm(%s)%s# " % (cib_prompt(),levels.getprompt()) + inp = multi_input(vars.prompt) + if inp == None: + cmd_exit("eof") + try: parse_line(levels,shlex.split(inp)) + except ValueError, msg: + common_err(msg) + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/msg.py 
b/shell/modules/msg.py new file mode 100644 index 0000000000..cb025bd14c --- /dev/null +++ b/shell/modules/msg.py @@ -0,0 +1,148 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import sys +from singletonmixin import Singleton +from userprefs import Options, UserPrefs + +class ErrorBuffer(Singleton): + ''' + Show error messages either immediately or buffered. + ''' + def __init__(self): + self.msg_list = [] + self.mode = "immediate" + self.lineno = -1 + def buffer(self): + self.mode = "keep" + def release(self): + if self.msg_list: + print >> sys.stderr, '\n'.join(self.msg_list) + if not options.batch: + try: + raw_input("Press enter to continue... 
") + except EOFError: + pass + self.msg_list = [] + self.mode = "immediate" + def writemsg(self,msg): + if self.mode == "immediate": + if options.regression_tests: + print msg + else: + print >> sys.stderr, msg + else: + self.msg_list.append(msg) + def reset_lineno(self): + self.lineno = 0 + def incr_lineno(self): + if self.lineno >= 0: + self.lineno += 1 + def start_tmp_lineno(self): + self._save_lineno = self.lineno + self.reset_lineno() + def stop_tmp_lineno(self): + self.lineno = self._save_lineno + def add_lineno(self,s): + if self.lineno > 0: + return "%d: %s" % (self.lineno,s) + else: return s + def error(self,s): + self.writemsg("ERROR: %s" % self.add_lineno(s)) + def warning(self,s): + self.writemsg("WARNING: %s" % self.add_lineno(s)) + def info(self,s): + self.writemsg("INFO: %s" % self.add_lineno(s)) + def debug(self,s): + if user_prefs.get_debug(): + self.writemsg("DEBUG: %s" % add_lineno(s)) + +def common_err(s): + err_buf.error(s) +def common_warn(s): + err_buf.warning(s) +def common_info(s): + err_buf.info(s) +def common_debug(s): + err_buf.debug(s) +def no_prog_err(name): + err_buf.error("%s not available, check your installation"%name) +def missing_prog_warn(name): + err_buf.warning("could not find any %s on the system"%name) +def no_attribute_err(attr,obj_type): + err_buf.error("required attribute %s not found in %s"%(attr,obj_type)) +def bad_def_err(what,msg): + err_buf.error("bad %s definition: %s"%(what,msg)) +def unsupported_err(name): + err_buf.error("%s is not supported"%name) +def no_such_obj_err(name): + err_buf.error("%s object is not supported"%name) +def obj_cli_err(name): + err_buf.error("object %s cannot be represented in the CLI notation"%name) +def missing_obj_err(node): + err_buf.error("object %s:%s missing (shouldn't have happened)"% \ + (node.tagName,node.getAttribute("id"))) +def constraint_norefobj_err(constraint_id,obj_id): + err_buf.error("constraint %s references a resource %s which doesn't exist"% \ + 
(constraint_id,obj_id)) +def obj_exists_err(name): + err_buf.error("object %s already exists"%name) +def no_object_err(name): + err_buf.error("object %s does not exist"%name) +def invalid_id_err(obj_id): + err_buf.error("%s: invalid object id"%obj_id) +def id_used_err(node_id): + err_buf.error("%s: id is already in use"%node_id) +def skill_err(s): + err_buf.error("%s: this command is not allowed at this skill level"%' '.join(s)) +def syntax_err(s,token = '',context = ''): + pfx = "syntax" + if context: + pfx = "%s in %s" %(pfx,context) + if type(s) == type(''): + err_buf.error("%s near <%s>"%(pfx,s)) + elif token: + err_buf.error("%s near <%s>: %s"%(pfx,token,' '.join(s))) + else: + err_buf.error("%s: %s"%(pfx,' '.join(s))) +def bad_usage(cmd,args): + err_buf.error("bad usage: %s %s"%(cmd,args)) +def empty_cib_err(): + err_buf.error("No CIB!") +def cib_parse_err(msg): + err_buf.error("%s"%msg) +def cib_no_elem_err(el_name): + err_buf.error("CIB contains no '%s' element!"%el_name) +def cib_ver_unsupported_err(validator,rel): + err_buf.error("CIB not supported: validator '%s', release '%s'"% (validator,rel)) + err_buf.error("You may try the upgrade command") +def update_err(obj_id,cibadm_opt,xml): + if cibadm_opt == '-U': + task = "update" + elif cibadm_opt == '-D': + task = "delete" + else: + task = "replace" + err_buf.error("could not %s %s"%(task,obj_id)) + err_buf.info("offending xml: %s" % xml) +def not_impl_info(s): + err_buf.info("%s is not implemented yet" % s) + +user_prefs = UserPrefs.getInstance() +err_buf = ErrorBuffer.getInstance() +options = Options.getInstance() +# vim:ts=4:sw=4:et: diff --git a/shell/modules/parse.py b/shell/modules/parse.py new file mode 100644 index 0000000000..f105da3183 --- /dev/null +++ b/shell/modules/parse.py @@ -0,0 +1,620 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free 
Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import shlex +import re +from utils import * +from vars import Vars +from msg import * +from ra import disambiguate_ra_type, ra_type_validate + +# +# CLI parsing utilities +# WARNING: ugly code ahead (to be replaced some day by a proper +# yacc parser, if there's such a thing) +# +def cli_parse_rsctype(s, pl): + ''' + Parse the resource type. + ''' + ra_class,provider,rsc_type = disambiguate_ra_type(s) + if not ra_type_validate(s,ra_class,provider,rsc_type): + return None + pl.append(["class",ra_class]) + if ra_class == "ocf": + pl.append(["provider",provider]) + pl.append(["type",rsc_type]) +def is_attribute(p,a): + return p.startswith(a + '=') +def cli_parse_attr_strict(s,pl): + ''' + Parse attributes in the 'p=v' form. + ''' + if s and '=' in s[0]: + n,v = s[0].split('=',1) + if not n: + return + pl.append([n,v]) + cli_parse_attr_strict(s[1:],pl) +def cli_parse_attr(s,pl): + ''' + Parse attributes in the 'p=v' form. + Allow also the 'p' form (no value) unless p is one of the + attr_list_keyw words. 
+ ''' + attr_lists_keyw = olist(["params","meta","operations","op","attributes"]) + if s: + if s[0] in attr_lists_keyw: + return + if '=' in s[0]: + n,v = s[0].split('=',1) + else: + n = s[0]; v = None + if not n: + return + pl.append([n,v]) + cli_parse_attr(s[1:],pl) +def is_only_id(pl,keyw): + if len(pl) > 1: + common_err("%s: only single $id or $id-ref attribute is allowed" % keyw) + return False + if len(pl) == 1 and pl[0][0] not in ("$id","$id-ref"): + common_err("%s: only single $id or $id-ref attribute is allowed" % keyw) + return False + return True +def check_operation(pl): + op_name = find_value(pl,"name") + if not op_name in vars.op_cli_names: + common_warn("%s: operation not recognized" % op_name) + if op_name == "monitor" and not find_value(pl,"interval"): + common_err("monitor requires interval") + return False + return True +def parse_resource(s): + el_type = s[0].lower() + if el_type == "master": # ugly kludge :( + el_type = "ms" + attr_lists_keyw = olist(["params","meta"]) + cli_list = [] + # the head + head = [] + head.append(["id",s[1]]) + i = 3 + if el_type == "primitive": + cli_parse_rsctype(s[2],head) + if not find_value(head,"type"): + syntax_err(s[2:], context = "primitive") + return False + else: + cl = [] + cl.append(s[2]) + if el_type == "group": + while i < len(s): + if s[i] in attr_lists_keyw: + break + elif is_attribute(s[i],"description"): + break + else: + cl.append(s[i]) + i += 1 # skip to the next token + head.append(["$children",cl]) + try: # s[i] may be out of range + if is_attribute(s[i],"description"): + cli_parse_attr(s[i:i+1],head) + i += 1 # skip to the next token + except: pass + cli_list.append([el_type,head]) + # the rest + state = 0 # 1: reading operations; 2: operations read + while len(s) > i+1: + pl = [] + keyw = s[i].lower() + if keyw in attr_lists_keyw: + if state == 1: + state = 2 + elif el_type == "primitive" and state == 0 and keyword_cmp(keyw, "operations"): + state = 1 + elif el_type == "primitive" and state <= 
1 and keyword_cmp(keyw, "op"): + if state == 0: + state = 1 + pl.append(["name",s[i+1]]) + else: + syntax_err(s[i:], context = 'primitive') + return False + if keyword_cmp(keyw, "op"): + if len(s) > i+2: + cli_parse_attr(s[i+2:],pl) + if not check_operation(pl): + return False + else: + cli_parse_attr(s[i+1:],pl) + if len(pl) == 0: + syntax_err(s[i:], context = 'primitive') + return False + if keyword_cmp(keyw, "operations") and not is_only_id(pl,keyw): + return False + i += len(pl)+1 + # interval is obligatory for ops, supply 0 if not there + if keyword_cmp(keyw, "op") and not find_value(pl,"interval"): + pl.append(["interval","0"]) + cli_list.append([keyw,pl]) + if len(s) > i: + syntax_err(s[i:], context = 'primitive') + return False + return cli_list +def parse_op(s): + if len(s) != 3: + syntax_err(s, context = s[0]) + return False + cli_list = [] + head_pl = [] + # this is an op + cli_list.append(["op",head_pl]) + if not cli_parse_rsc_role(s[1],head_pl): + return False + if not cli_parse_op_times(s[2],head_pl): + return False + # rename rsc-role to role + for i in range(len(head_pl)): + if head_pl[i][0] == "rsc-role": + head_pl[i][0] = "role" + break + # add the operation name + head_pl.append(["name",s[0]]) + return cli_list + +def cli_parse_score(score,pl,noattr = False): + if score.endswith(':'): + score = score.rstrip(':') + else: + syntax_err(score, context = 'score') + return False + if score in vars.score_types: + pl.append(["score",vars.score_types[score]]) + elif re.match("^[+-]?(inf|infinity|INFINITY|[[0-9]+)$",score): + score = score.replace("infinity","INFINITY") + score = score.replace("inf","INFINITY") + pl.append(["score",score]) + elif score: + if noattr: + common_err("attribute not allowed for score in orders") + return False + else: + pl.append(["score-attribute",score]) + return True +def is_binary_op(s): + l = s.split(':') + if len(l) == 2: + return l[0] in vars.binary_types and l[1] in vars.binary_ops + elif len(l) == 1: + return l[0] in 
vars.binary_ops + else: + return False +def cli_parse_binary_op(s,pl): + l = s.split(':') + if len(l) == 2: + pl.append(["type",l[0]]) + pl.append(["operation",l[1]]) + else: + pl.append(["operation",l[0]]) +def cli_parse_expression(s,pl): + if len(s) > 1 and s[0] in vars.unary_ops: + pl.append(["operation",s[0]]) + pl.append(["attribute",s[1]]) + elif len(s) > 2 and is_binary_op(s[1]): + pl.append(["attribute",s[0]]) + cli_parse_binary_op(s[1],pl) + pl.append(["value",s[2]]) + else: + return False + return True +def cli_parse_dateexpr(s,pl): + if len(s) < 3: + return False + if s[1] not in vars.date_ops: + return False + pl.append(["operation",s[1]]) + if s[1] in vars.simple_date_ops: + pl.append([keyword_cmp(s[1], 'lt') and "end" or "start",s[2]]) + return True + cli_parse_attr_strict(s[2:],pl) + return True +def parse_rule(s): + if not keyword_cmp(s[0], "rule"): + syntax_err(s,context = "rule") + return 0,None + rule_list = [] + head_pl = [] + rule_list.append([s[0].lower(),head_pl]) + i = 1 + cli_parse_attr_strict(s[i:],head_pl) + i += len(head_pl) + if find_value(head_pl,"$id-ref"): + return i,rule_list + if not cli_parse_score(s[i],head_pl): + return i,None + i += 1 + bool_op = '' + while len(s) > i+1: + pl = [] + if keyword_cmp(s[i], "date"): + fun = cli_parse_dateexpr + elem = "date_expression" + else: + fun = cli_parse_expression + elem = "expression" + if not fun(s[i:],pl): + syntax_err(s[i:],context = "rule") + return i,None + rule_list.append([elem,pl]) + i += len(pl) + if find_value(pl, "type"): + i -= 1 # reduce no of tokens by one if there was "type:op" + if elem == "date_expression": + i += 1 # increase no of tokens by one if it was date expression + if len(s) > i and s[i] in vars.boolean_ops: + if bool_op and not keyword_cmp(bool_op, s[i]): + common_err("rule contains different bool operations: %s" % ' '.join(s)) + return i,None + else: + bool_op = s[i].lower() + i += 1 + if len(s) > i and keyword_cmp(s[i], "rule"): + break + if bool_op and not 
keyword_cmp(bool_op, 'and'): + head_pl.append(["boolean-op",bool_op]) + return i,rule_list +def parse_location(s): + cli_list = [] + head_pl = [] + head_pl.append(["id",s[1]]) + head_pl.append(["rsc",s[2]]) + cli_list.append([s[0].lower(),head_pl]) + if len(s) == 5 and not keyword_cmp(s[3], "rule"): # the short node preference form + if not cli_parse_score(s[3],head_pl): + return False + head_pl.append(["node",s[4]]) + return cli_list + i = 3 + while i < len(s): + numtoks,l = parse_rule(s[i:]) + if not l: + return False + cli_list += l + i += numtoks + if len(s) < i: + syntax_err(s[i:],context = "location") + return False + return cli_list + +def cli_opt_symmetrical(p,pl): + if not p: + return True + pl1 = [] + cli_parse_attr([p],pl1) + if len(pl1) != 1 or not find_value(pl1,"symmetrical"): + syntax_err(p,context = "order") + return False + pl += pl1 + return True +def cli_parse_rsc_role(s,pl,attr_pfx = ''): + l = s.split(':') + pl.append([attr_pfx+"rsc",l[0]]) + if len(l) == 2: + if l[1] not in vars.roles_names: + bad_def_err("resource role",s) + return False + pl.append([attr_pfx+"rsc-role",l[1]]) + elif len(l) > 2: + bad_def_err("resource role",s) + return False + return True +def cli_parse_op_times(s,pl): + l = s.split(':') + pl.append(["interval",l[0]]) + if len(l) == 2: + pl.append(["timeout",l[1]]) + elif len(l) > 2: + bad_def_err("op times",s) + return False + return True + +class ResourceSet(object): + ''' + Constraint resource set parser. Parses sth like: + a ( b c:start ) d:Master e ... + Appends one or more lists to cli_list. + Lists are in form: + list :: ["resource_set",set_pl] + set_pl :: [["sequential","false"], ["action"|"role",action|role], + ["resource_ref",["id",rsc]], ...] + (the first two elements of set_pl are optional) + Action/role change makes a new resource set. 
+ ''' + def __init__(self,type,s,cli_list): + self.type = type + self.valid_q = (type == "order") and vars.actions_names or vars.roles_names + self.q_attr = (type == "order") and "action" or "role" + self.tokens = s + self.cli_list = cli_list + self.reset_set() + self.sequential = True + self.fix_parentheses() + def fix_parentheses(self): + newtoks = [] + for p in self.tokens: + if p.startswith('(') and len(p) > 1: + newtoks.append('(') + newtoks.append(p[1:]) + elif p.endswith(')') and len(p) > 1: + newtoks.append(p[0:len(p)-1]) + newtoks.append(')') + else: + newtoks.append(p) + self.tokens = newtoks + def reset_set(self): + self.set_pl = [] + self.prev_q = '' # previous qualifier (action or role) + self.curr_attr = '' # attribute (action or role) + def save_set(self): + if not self.set_pl: + return + if self.curr_attr: + self.set_pl.insert(0,[self.curr_attr,self.prev_q]) + if not self.sequential: + self.set_pl.insert(0,["sequential","false"]) + self.cli_list.append(["resource_set",self.set_pl]) + self.reset_set() + def splitrsc(self,p): + l = p.split(':') + return (len(l) == 1) and [p,''] or l + def parse(self): + tokpos = -1 + for p in self.tokens: + tokpos += 1 + if p == "_rsc_set_": + continue # a degenerate resource set + if p == '(': + if self.set_pl: # save the set before + self.save_set() + self.sequential = False + continue + if p == ')': + if self.sequential: # no '(' + syntax_err(self.tokens[tokpos:],context = self.type) + return False + if not self.set_pl: # empty sets not allowed + syntax_err(self.tokens[tokpos:],context = self.type) + return False + self.save_set() + self.sequential = True + continue + rsc,q = self.splitrsc(p) + if q != self.prev_q: # one set can't have different roles/actions + self.save_set() + self.prev_q = q + if q: + if q not in self.valid_q: + common_err("%s: invalid %s in %s" % (q,self.q_attr,self.type)) + return False + if not self.curr_attr: + self.curr_attr = self.q_attr + else: + self.curr_attr = '' + 
self.set_pl.append(["resource_ref",["id",rsc]]) + if not self.sequential: # no ')' + syntax_err(self.tokens[tokpos:],context = self.type) + return False + if self.set_pl: # save the final set + self.save_set() + return True + +def parse_colocation(s): + cli_list = [] + head_pl = [] + type = s[0] + if type == "collocation": # another ugly :( + type = "colocation" + cli_list.append([type,head_pl]) + if len(s) < 5: + syntax_err(s,context = "colocation") + return False + head_pl.append(["id",s[1]]) + if not cli_parse_score(s[2],head_pl): + return False + if len(s) == 5: + if not cli_parse_rsc_role(s[3],head_pl): + return False + if not cli_parse_rsc_role(s[4],head_pl,'with-'): + return False + else: + resource_set_obj = ResourceSet(type,s[3:],cli_list) + if not resource_set_obj.parse(): + return False + return cli_list +def cli_parse_rsc_action(s,pl,rsc_pos): + l = s.split(':') + pl.append([rsc_pos,l[0]]) + if len(l) == 2: + if l[1] not in vars.actions_names: + bad_def_err("resource action",s) + return False + pl.append([rsc_pos+"-action",l[1]]) + elif len(l) > 1: + bad_def_err("resource action",s) + return False + return True + +def parse_order(s): + cli_list = [] + head_pl = [] + type = "order" + cli_list.append([s[0],head_pl]) + if len(s) < 5: + syntax_err(s,context = "order") + return False + head_pl.append(["id",s[1]]) + if not cli_parse_score(s[2],head_pl,noattr = True): + return False + # save symmetrical for later (if it exists) + symm = "" + if is_attribute(s[len(s)-1],"symmetrical"): + symm = s.pop() + if len(s) == 5: + if not cli_parse_rsc_action(s[3],head_pl,'first'): + return False + if not cli_parse_rsc_action(s[4],head_pl,'then'): + return False + else: + resource_set_obj = ResourceSet(type,s[3:],cli_list) + if not resource_set_obj.parse(): + return False + if not cli_opt_symmetrical(symm,head_pl): + return False + return cli_list + +def parse_constraint(s): + if keyword_cmp(s[0], "location"): + return parse_location(s) + elif s[0] in 
olist(["colocation","collocation"]): + return parse_colocation(s) + elif keyword_cmp(s[0], "order"): + return parse_order(s) +def parse_property(s): + cli_list = [] + head_pl = [] + cli_list.append([s[0],head_pl]) + cli_parse_attr(s[1:],head_pl) + if len(head_pl) < 0 or len(s) > len(head_pl)+1: + syntax_err(s, context = s[0]) + return False + return cli_list +def cli_parse_uname(s, pl): + l = s.split(':') + if not l or len(l) > 2: + return None + pl.append(["uname",l[0]]) + if len(l) == 2: + pl.append(["type",l[1]]) +def parse_node(s): + cli_list = [] + # the head + head = [] + # optional $id + id = '' + opt_id_l = [] + i = 1 + cli_parse_attr_strict(s[i:],opt_id_l) + if opt_id_l: + id = find_value(opt_id_l,"$id") + i += 1 + # uname[:type] + cli_parse_uname(s[i],head) + uname = find_value(head,"uname") + if not uname: + return False + head.append(["id",id and id or uname]) + # drop type if default + type = find_value(head,"type") + if type == vars.node_default_type: + head.remove(["type",type]) + cli_list.append([s[0],head]) + if len(s) == i: + return cli_list + # the rest + i += 1 + try: # s[i] may be out of range + if is_attribute(s[i],"description"): + cli_parse_attr(s[i:i+1],head) + i += 1 # skip to the next token + except: pass + keyw = vars.node_attributes_keyw # some day there may be more than one + while len(s) > i+1: + if not keyword_cmp(s[i], keyw): + syntax_err(s[i:], context = 'node') + return False + pl = [] + cli_parse_attr(s[i+1:],pl) + if len(pl) == 0: + syntax_err(s[i:], context = 'node') + return False + i += len(pl)+1 + cli_list.append([keyw,pl]) + if len(s) > i: + syntax_err(s[i:], context = 'node') + return False + return cli_list + +class CliParser(object): + parsers = { + "primitive": (3,parse_resource), + "group": (3,parse_resource), + "clone": (3,parse_resource), + "ms": (3,parse_resource), + "master": (3,parse_resource), + "location": (3,parse_constraint), + "colocation": (3,parse_constraint), + "collocation": (3,parse_constraint), + 
"order": (3,parse_constraint), + "monitor": (3,parse_op), + "node": (2,parse_node), + "property": (2,parse_property), + "rsc_defaults": (2,parse_property), + "op_defaults": (2,parse_property), + } + def __init__(self): + self.comments = [] + def parse(self,s): + ''' + Input: a list of tokens (or a CLI format string). + Return: a list of items; each item is a tuple + with two members: a string (tag) and a nvpairs or + attributes dict. + ''' + cli_list = '' + if type(s) == type(u''): + s = s.encode('ascii') + if type(s) == type(''): + if s and s.startswith('#'): + self.comments.append(s) + return None + try: s = shlex.split(s) + except ValueError, msg: + common_err(msg) + return False + # but there shouldn't be any newlines (?) + while '\n' in s: + s.remove('\n') + if not s: + return None + if s[0] not in self.parsers.keys(): + syntax_err(s) + return False + mintoks,parser_fn = self.parsers[s[0]] + if len(s) < mintoks: + syntax_err(s) + return False + cli_list = parser_fn(s) + if not cli_list: + return False + if self.comments: + cli_list.append(["comments",self.comments]) + self.comments = [] + return cli_list + +vars = Vars.getInstance() +# vim:ts=4:sw=4:et: diff --git a/shell/modules/ra.py.in b/shell/modules/ra.py.in new file mode 100644 index 0000000000..21d4264c26 --- /dev/null +++ b/shell/modules/ra.py.in @@ -0,0 +1,592 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import os +import sys +import subprocess +import copy +import xml.dom.minidom +import re +import glob +from userprefs import Options, UserPrefs +from cache import WCache +from utils import * +from msg import * +#from cib import get_cib_default + +# +# Resource Agents interface (meta-data, parameters, etc) +# +ocf_root = os.getenv("OCF_ROOT") +if not ocf_root: + ocf_root = "@OCF_ROOT_DIR@" + if not ocf_root: + ocf_root = "/usr/lib/ocf" + os.putenv("OCF_ROOT",ocf_root) +class RaLrmd(object): + ''' + Getting information from the resource agents. + ''' + lrmadmin_prog = "lrmadmin" + def __init__(self): + self.good = self.is_lrmd_accessible() + def lrmadmin(self, opts, xml = False): + ''' + Get information directly from lrmd using lrmadmin. + ''' + l = stdout2list("%s %s" % (self.lrmadmin_prog,opts)) + if l and not xml: + l = l[1:] # skip the first line + return l + def is_lrmd_accessible(self): + if not (is_program(self.lrmadmin_prog) and is_process("lrmd")): + return False + return subprocess.call(\ + add_sudo(">/dev/null 2>&1 %s -C" % self.lrmadmin_prog), \ + shell=True) == 0 + def meta(self, ra_class,ra_type,ra_provider): + return self.lrmadmin("-M %s %s %s"%(ra_class,ra_type,ra_provider),True) + def providers(self, ra_type,ra_class = "ocf"): + 'List of providers for a class:type.' + return self.lrmadmin("-P %s %s" % (ra_class,ra_type),True) + def classes(self): + 'List of providers for a class:type.' + return self.lrmadmin("-C") + def types(self, ra_class = "ocf", ra_provider = ""): + 'List of types for a class.' + return self.lrmadmin("-T %s" % ra_class) + +class RaOS(object): + ''' + Getting information from the resource agents (direct). 
+ ''' + def __init__(self): + self.good = True + def meta(self, ra_class,ra_type,ra_provider): + l = [] + if ra_class == "ocf": + l = stdout2list("%s/resource.d/%s/%s meta-data" % \ + (ocf_root,ra_provider,ra_type)) + elif ra_class == "stonith": + l = stdout2list("stonith -m -t %s" % ra_type) + return l + def providers(self, ra_type,ra_class = "ocf"): + 'List of providers for a class:type.' + l = [] + if ra_class == "ocf": + for s in glob.glob("%s/resource.d/*/%s" % (ocf_root,ra_type)): + a = s.split("/") + if len(a) == 7: + l.append(a[5]) + return l + def classes(self): + 'List of classes.' + return "heartbeat lsb ocf stonith".split() + def types(self, ra_class = "ocf", ra_provider = ""): + 'List of types for a class.' + l = [] + prov = ra_provider and ra_provider or "*" + if ra_class == "ocf": + l = os_types_list("%s/resource.d/%s/*" % (ocf_root,prov)) + elif ra_class == "lsb": + l = os_types_list("/etc/init.d/*") + elif ra_class == "stonith": + l = stdout2list("stonith -L") + l = list(set(l)) + l.sort() + return l + +def ra_classes(): + ''' + List of RA classes. + ''' + if wcache.is_cached("ra_classes"): + return wcache.retrieve("ra_classes") + l = ra_if.classes() + l.sort() + return wcache.store("ra_classes",l) +def ra_providers(ra_type,ra_class = "ocf"): + 'List of providers for a class:type.' + id = "ra_providers-%s-%s" % (ra_class,ra_type) + if wcache.is_cached(id): + return wcache.retrieve(id) + l = ra_if.providers(ra_type,ra_class) + l.sort() + return wcache.store(id,l) +def ra_providers_all(ra_class = "ocf"): + ''' + List of providers for a class. + ''' + id = "ra_providers_all-%s" % ra_class + if wcache.is_cached(id): + return wcache.retrieve(id) + dir = ocf_root + "/resource.d" + l = [] + for s in os.listdir(dir): + if os.path.isdir("%s/%s" % (dir,s)): + l.append(s) + l.sort() + return wcache.store(id,l) +def ra_types(ra_class = "ocf", ra_provider = ""): + ''' + List of RA type for a class. 
+ ''' + if not ra_class: + ra_class = "ocf" + id = "ra_types-%s-%s" % (ra_class,ra_provider) + if wcache.is_cached(id): + return wcache.retrieve(id) + if ra_provider: + list = [] + for ra in ra_if.types(ra_class): + if ra_provider in ra_providers(ra,ra_class): + list.append(ra) + else: + list = ra_if.types(ra_class) + list.sort() + return wcache.store(id,list) + +def prog_meta(s): + ''' + Do external program metadata. + ''' + prog = "@CRM_DAEMON_DIR@/%s" % s + l = [] + if is_program(prog): + l = stdout2list("%s metadata" % prog) + return l +def get_nodes_text(n,tag): + try: + node = n.getElementsByTagName(tag)[0] + for c in node.childNodes: + if c.nodeType == c.TEXT_NODE: + return c.data.strip() + except: return '' + +def mk_monitor_name(role,depth): + depth = depth == "0" and "" or ("_%s" % depth) + return role and role != "Started" and \ + "monitor_%s%s" % (role,depth) or \ + "monitor%s" % depth +def monitor_name_node(node): + depth = node.getAttribute("depth") or '0' + role = node.getAttribute("role") + return mk_monitor_name(role,depth) +def monitor_name_pl(pl): + depth = find_value(pl, "depth") or '0' + role = find_value(pl, "role") + return mk_monitor_name(role,depth) +def crm_msec(t): + ''' + See lib/common/utils.c:crm_get_msec(). + ''' + convtab = { + 'ms': (1,1), + 'msec': (1,1), + 'us': (1,1000), + 'usec': (1,1000), + '': (1000,1), + 's': (1000,1), + 'sec': (1000,1), + 'm': (60*1000,1), + 'min': (60*1000,1), + 'h': (60*60*1000,1), + 'hr': (60*60*1000,1), + } + if not t: + return -1 + r = re.match("\s*(\d+)\s*([a-zA-Z]+)?", t) + if not r: + return -1 + if not r.group(2): + q = '' + else: + q = r.group(2).lower() + try: + mult,div = convtab[q] + except: + return -1 + return (int(r.group(1))*mult)/div +def crm_time_cmp(a, b): + return crm_msec(a) - crm_msec(b) + +#def get_default(property): +# return get_cib_default(property) or pe_metadata.param_default(property) + +class RAInfo(object): + ''' + A resource agent and whatever's useful about it. 
+ ''' + ra_tab = " " # four horses + required_ops = ("start", "stop") + skip_ops = ("meta-data", "validate-all") + skip_op_attr = ("name", "depth", "role") + def __init__(self,ra_class,ra_type,ra_provider = "heartbeat"): + self.ra_class = ra_class + self.ra_type = ra_type + self.ra_provider = ra_provider + if not self.ra_provider: + self.ra_provider = "heartbeat" + self.ra_node = None + def ra_string(self): + return self.ra_class == "ocf" and \ + "%s:%s:%s" % (self.ra_class, self.ra_provider, self.ra_type) or \ + "%s:%s" % (self.ra_class, self.ra_type) + def error(self, s): + common_err("%s: %s" % (self.ra_string(), s)) + def warn(self, s): + common_warn("%s: %s" % (self.ra_string(), s)) + def add_extra_stonith_params(self): + if not stonithd_metadata.mk_ra_node(): + return + try: + params_node = self.doc.getElementsByTagName("parameters")[0] + except: + params_node = self.doc.createElement("parameters") + self.ra_node.appendChild(params_node) + for n in stonithd_metadata.ra_node.getElementsByTagName("parameter"): + params_node.appendChild(self.doc.importNode(n,1)) + def mk_ra_node(self): + ''' + Return the resource_agent node. 
+ ''' + if self.ra_node: + return self.ra_node + meta = self.meta() + try: + self.doc = xml.dom.minidom.parseString('\n'.join(meta)) + except: + #common_err("could not parse meta-data for (%s,%s,%s)" \ + # % (self.ra_class,self.ra_type,self.ra_provider)) + self.ra_node = None + return None + try: + self.ra_node = self.doc.getElementsByTagName("resource-agent")[0] + except: + self.error("meta-data contains no resource-agent element") + self.ra_node = None + return None + if self.ra_class == "stonith": + self.add_extra_stonith_params() + return self.ra_node + def param_type_default(self,n): + try: + content = n.getElementsByTagName("content")[0] + type = content.getAttribute("type") + default = content.getAttribute("default") + return type,default + except: + return None,None + def params(self): + ''' + Construct a dict of dicts: parameters are keys and + dictionary of attributes/values are values. Cached too. + ''' + id = "ra_params-%s" % self.ra_string() + if wcache.is_cached(id): + return wcache.retrieve(id) + if not self.mk_ra_node(): + return None + d = {} + for pset in self.ra_node.getElementsByTagName("parameters"): + for c in pset.getElementsByTagName("parameter"): + name = c.getAttribute("name") + if not name: + continue + required = c.getAttribute("required") + unique = c.getAttribute("unique") + type,default = self.param_type_default(c) + d[name] = { + "required": required, + "unique": unique, + "type": type, + "default": default, + } + return wcache.store(id,d) + def actions(self): + ''' + Construct a dict of dicts: actions are keys and + dictionary of attributes/values are values. Cached too. 
+ ''' + id = "ra_actions-%s" % self.ra_string() + if wcache.is_cached(id): + return wcache.retrieve(id) + if not self.mk_ra_node(): + return None + d = {} + for pset in self.ra_node.getElementsByTagName("actions"): + for c in pset.getElementsByTagName("action"): + name = c.getAttribute("name") + if not name or name in self.skip_ops: + continue + if name == "monitor": + name = monitor_name_node(c) + d[name] = {} + for a in c.attributes.keys(): + if a in self.skip_op_attr: + continue + v = c.getAttribute(a) + if v: + d[name][a] = v + # add monitor ops without role, if they don't already + # exist + d2 = {} + for op in d.keys(): + if re.match("monitor_[^0-9]", op): + norole_op = re.sub(r'monitor_[^0-9_]+_(.*)', r'monitor_\1', op) + if not norole_op in d: + d2[norole_op] = d[op] + d.update(d2) + return wcache.store(id,d) + def reqd_params_list(self): + ''' + List of required parameters. + ''' + d = self.params() + if not d: return [] + return [x for x in d if d[x]["required"] == '1'] + def param_default(self,pname): + ''' + Parameter's default. + ''' + d = self.params() + if not d: return None + return d[pname]["default"] + def sanity_check_params(self, id, pl): + ''' + pl is a list of (attribute,value) pairs. + - are all required parameters defined + - do all parameters exist + ''' + rc = 0 + d = {} + for p,v in pl: + d[p] = v + for p in self.reqd_params_list(): + if p not in d: + common_err("%s: required parameter %s not defined" % (id,p)) + rc |= user_prefs.get_check_rc() + for p in d: + if p not in self.params(): + common_err("%s: parameter %s does not exist" % (id,p)) + rc |= user_prefs.get_check_rc() + return rc + def sanity_check_ops(self, id, ops): + ''' + ops is a dict, operation names are keys and values are + lists of (attribute,value) pairs. 
+ - do all operations exist + - are timeouts sensible + ''' + rc = 0 + n_ops = {} + for op in ops: + n_op = op == "monitor" and monitor_name_pl(ops[op]) or op + n_ops[n_op] = {} + for p,v in ops[op]: + if p in self.skip_op_attr: + continue + n_ops[n_op][p] = v + #default_timeout = get_default("default-action-timeout") + for req_op in self.required_ops: + if req_op not in n_ops: + n_ops[req_op] = {} + for op in n_ops: + if op not in self.actions(): + common_warn("%s: action %s not advertised in meta-data, it may not be supported by the RA" % (id,op)) + rc |= 1 + continue + try: + adv_timeout = self.actions()[op]["timeout"] + except: + continue + for a in n_ops[op]: + v = n_ops[op][a] + if a == "timeout": + if crm_msec(v) < 0: + continue + if crm_time_cmp(adv_timeout,v) > 0: + common_warn("%s: timeout %s for %s is smaller than the advised %s" % \ + (id,v,op,adv_timeout)) + rc |= 1 + return rc + def meta(self): + ''' + RA meta-data as raw xml. + ''' + id = "ra_meta-%s" % self.ra_string() + if wcache.is_cached(id): + return wcache.retrieve(id) + if self.ra_class in ("pengine","stonithd"): + l = prog_meta(self.ra_class) + else: + l = ra_if.meta(self.ra_class,self.ra_type,self.ra_provider) + return wcache.store(id, l) + def meta_pretty(self): + ''' + Print the RA meta-data in a human readable form. 
+ ''' + if not self.mk_ra_node(): + return '' + l = [] + title = self.meta_title() + l.append(title) + longdesc = get_nodes_text(self.ra_node,"longdesc") + if longdesc: + l.append(longdesc) + if self.ra_class != "heartbeat": + params = self.meta_parameters() + if params: + l.append(params.rstrip()) + actions = self.meta_actions() + if actions: + l.append(actions) + return '\n\n'.join(l) + def get_shortdesc(self,n): + name = n.getAttribute("name") + shortdesc = get_nodes_text(n,"shortdesc") + longdesc = get_nodes_text(n,"longdesc") + if shortdesc and shortdesc not in (name,longdesc,self.ra_type): + return shortdesc + return '' + def meta_title(self): + s = self.ra_string() + shortdesc = self.get_shortdesc(self.ra_node) + if shortdesc: + s = "%s (%s)" % (shortdesc,s) + return s + def meta_param_head(self,n): + name = n.getAttribute("name") + if not name: + return None + s = name + if n.getAttribute("required") == "1": + s = s + "*" + type,default = self.param_type_default(n) + if type and default: + s = "%s (%s, [%s])" % (s,type,default) + elif type: + s = "%s (%s)" % (s,type) + shortdesc = self.get_shortdesc(n) + s = "%s: %s" % (s,shortdesc) + return s + def format_parameter(self,n): + l = [] + head = self.meta_param_head(n) + if not head: + self.error("no name attribute for parameter") + return "" + l.append(head) + longdesc = get_nodes_text(n,"longdesc") + if longdesc: + longdesc = self.ra_tab + longdesc.replace("\n","\n"+self.ra_tab) + '\n' + l.append(longdesc) + return '\n'.join(l) + def meta_parameter(self,param): + if not self.mk_ra_node(): + return '' + l = [] + for pset in self.ra_node.getElementsByTagName("parameters"): + for c in pset.getElementsByTagName("parameter"): + if c.getAttribute("name") == param: + return self.format_parameter(c) + def meta_parameters(self): + if not self.mk_ra_node(): + return '' + l = [] + for pset in self.ra_node.getElementsByTagName("parameters"): + for c in pset.getElementsByTagName("parameter"): + s = 
self.format_parameter(c) + if s: + l.append(s) + if l: + return "Parameters (* denotes required, [] the default):\n\n" + '\n'.join(l) + def meta_action_head(self,n): + name = n.getAttribute("name") + if not name: + return '' + if name in self.skip_ops: + return '' + if name == "monitor": + name = monitor_name_node(n) + s = "%-13s" % name + for a in n.attributes.keys(): + if a in self.skip_op_attr: + continue + v = n.getAttribute(a) + if v: + s = "%s %s=%s" % (s,a,v) + return s + def meta_actions(self): + l = [] + for aset in self.ra_node.getElementsByTagName("actions"): + for c in aset.getElementsByTagName("action"): + s = self.meta_action_head(c) + if s: + l.append(self.ra_tab + s) + if l: + return "Operations' defaults (advisory minimum):\n\n" + '\n'.join(l) + +# +# resource type definition +# +def ra_type_validate(s, ra_class, provider, rsc_type): + ''' + Only ocf ra class supports providers. + ''' + if not rsc_type: + common_err("bad resource type specification %s"%s) + return False + if ra_class == "ocf": + if not provider: + common_err("provider could not be determined for %s"%s) + return False + else: + if provider: + common_warn("ra class %s does not support providers"%ra_class) + return True + return True +def disambiguate_ra_type(s): + ''' + Unravel [class:[provider:]]type + ''' + l = s.split(':') + if not l or len(l) > 3: + return None + if len(l) == 3: + return l + elif len(l) == 2: + ra_class,ra_type = l + else: + ra_class = "ocf" + ra_type = l[0] + ra_provider = '' + if ra_class == "ocf": + pl = ra_providers(ra_type,ra_class) + if pl and len(pl) == 1: + ra_provider = pl[0] + elif not pl: + ra_provider = 'heartbeat' + return ra_class,ra_provider,ra_type + +ra_if = RaLrmd() +if not ra_if.good: + ra_if = RaOS() +stonithd_metadata = RAInfo("stonithd","metadata") + +wcache = WCache.getInstance() +# vim:ts=4:sw=4:et: diff --git a/shell/modules/singletonmixin.py b/shell/modules/singletonmixin.py new file mode 100644 index 0000000000..68211ab385 --- /dev/null 
+++ b/shell/modules/singletonmixin.py @@ -0,0 +1,509 @@ +""" +A Python Singleton mixin class that makes use of some of the ideas +found at http://c2.com/cgi/wiki?PythonSingleton. Just inherit +from it and you have a singleton. No code is required in +subclasses to create singleton behavior -- inheritance from +Singleton is all that is needed. + +Singleton creation is threadsafe. + +USAGE: + +Just inherit from Singleton. If you need a constructor, include +an __init__() method in your class as you usually would. However, +if your class is S, you instantiate the singleton using S.getInstance() +instead of S(). Repeated calls to S.getInstance() return the +originally-created instance. + +For example: + +class S(Singleton): + + def __init__(self, a, b=1): + pass + +S1 = S.getInstance(1, b=3) + + +Most of the time, that's all you need to know. However, there are some +other useful behaviors. Read on for a full description: + +1) Getting the singleton: + + S.getInstance() + +returns the instance of S. If none exists, it is created. + +2) The usual idiom to construct an instance by calling the class, i.e. + + S() + +is disabled for the sake of clarity. + +For one thing, the S() syntax means instantiation, but getInstance() +usually does not cause instantiation. So the S() syntax would +be misleading. + +Because of that, if S() were allowed, a programmer who didn't +happen to notice the inheritance from Singleton (or who +wasn't fully aware of what a Singleton pattern +does) might think he was creating a new instance, +which could lead to very unexpected behavior. + +So, overall, it is felt that it is better to make things clearer +by requiring the call of a class method that is defined in +Singleton. An attempt to instantiate via S() will result +in a SingletonException being raised. + +3) Use __S.__init__() for instantiation processing, +since S.getInstance() runs S.__init__(), passing it the args it has received. 
+ +If no data needs to be passed in at instantiation time, you don't need S.__init__(). + +4) If S.__init__(.) requires parameters, include them ONLY in the +first call to S.getInstance(). If subsequent calls have arguments, +a SingletonException is raised by default. + +If you find it more convenient for subsequent calls to be allowed to +have arguments, but for those arguments to be ignored, just include +'ignoreSubsequent = True' in your class definition, i.e.: + + class S(Singleton): + + ignoreSubsequent = True + + def __init__(self, a, b=1): + pass + +5) For testing, it is sometimes convenient for all existing singleton +instances to be forgotten, so that new instantiations can occur. For that +reason, a forgetAllSingletons() function is included. Just call + + forgetAllSingletons() + +and it is as if no earlier instantiations have occurred. + +6) As an implementation detail, classes that inherit +from Singleton may not have their own __new__ +methods. To make sure this requirement is followed, +an exception is raised if a Singleton subclass includes +__new__. This happens at subclass instantiation +time (by means of the MetaSingleton metaclass). + + +By Gary Robinson, grobinson@flyfi.com. No rights reserved -- +placed in the public domain -- which is only reasonable considering +how much it owes to other people's code and ideas which are in the +public domain. The idea of using a metaclass came from +a comment on Gary's blog (see +http://www.garyrobinson.net/2004/03/python_singleto.html#comments). +Other improvements came from comments and email from other +people who saw it online. (See the blog post and comments +for further credits.) + +Not guaranteed to be fit for any particular purpose. Use at your +own risk. +""" + +import threading + +class SingletonException(Exception): + pass + +_stSingletons = set() +_lockForSingletons = threading.RLock() +_lockForSingletonCreation = threading.RLock() # Ensure only one instance of each Singleton + # class is created.
This is not bound to the + # individual Singleton class since we need to + # ensure that there is only one mutex for each + # Singleton class, which would require having + # a lock when setting up the Singleton class, + # which is what this is anyway. So, when any + # Singleton is created, we lock this lock and + # then we don't need to lock it again for that + # class. + +def _createSingletonInstance(cls, lstArgs, dctKwArgs): + _lockForSingletonCreation.acquire() + try: + if cls._isInstantiated(): # some other thread got here first + return + + instance = cls.__new__(cls) + try: + instance.__init__(*lstArgs, **dctKwArgs) + except TypeError, e: + if e.message.find('__init__() takes') != -1: + raise SingletonException, 'If the singleton requires __init__ args, supply them on first call to getInstance().' + else: + raise + cls.cInstance = instance + _addSingleton(cls) + finally: + _lockForSingletonCreation.release() + +def _addSingleton(cls): + _lockForSingletons.acquire() + try: + assert cls not in _stSingletons + _stSingletons.add(cls) + finally: + _lockForSingletons.release() + +def _removeSingleton(cls): + _lockForSingletons.acquire() + try: + if cls in _stSingletons: + _stSingletons.remove(cls) + finally: + _lockForSingletons.release() + +def forgetAllSingletons(): + '''This is useful in tests, since it is hard to know which singletons need to be cleared to make a test work.''' + _lockForSingletons.acquire() + try: + for cls in _stSingletons.copy(): + cls._forgetClassInstanceReferenceForTesting() + + # Might have created some Singletons in the process of tearing down. + # Try one more time - there should be a limit to this. 
+ iNumSingletons = len(_stSingletons) + if len(_stSingletons) > 0: + for cls in _stSingletons.copy(): + cls._forgetClassInstanceReferenceForTesting() + iNumSingletons -= 1 + assert iNumSingletons == len(_stSingletons), 'Added a singleton while destroying ' + str(cls) + assert len(_stSingletons) == 0, _stSingletons + finally: + _lockForSingletons.release() + +class MetaSingleton(type): + def __new__(metaclass, strName, tupBases, dct): + if dct.has_key('__new__'): + raise SingletonException, 'Can not override __new__ in a Singleton' + return super(MetaSingleton, metaclass).__new__(metaclass, strName, tupBases, dct) + + def __call__(cls, *lstArgs, **dictArgs): + raise SingletonException, 'Singletons may only be instantiated through getInstance()' + +class Singleton(object): + __metaclass__ = MetaSingleton + + def getInstance(cls, *lstArgs, **dctKwArgs): + """ + Call this to instantiate an instance or retrieve the existing instance. + If the singleton requires args to be instantiated, include them the first + time you call getInstance. + """ + if cls._isInstantiated(): + if (lstArgs or dctKwArgs) and not hasattr(cls, 'ignoreSubsequent'): + raise SingletonException, 'Singleton already instantiated, but getInstance() called with args.' + else: + _createSingletonInstance(cls, lstArgs, dctKwArgs) + + return cls.cInstance + getInstance = classmethod(getInstance) + + def _isInstantiated(cls): + # Don't use hasattr(cls, 'cInstance'), because that screws things up if there is a singleton that + # extends another singleton. hasattr looks in the base class if it doesn't find in subclass. 
+ return 'cInstance' in cls.__dict__ + _isInstantiated = classmethod(_isInstantiated) + + # This can be handy for public use also + isInstantiated = _isInstantiated + + def _forgetClassInstanceReferenceForTesting(cls): + """ + This is designed for convenience in testing -- sometimes you + want to get rid of a singleton during test code to see what + happens when you call getInstance() under a new situation. + + To really delete the object, all external references to it + also need to be deleted. + """ + try: + if hasattr(cls.cInstance, '_prepareToForgetSingleton'): + # tell instance to release anything it might be holding onto. + cls.cInstance._prepareToForgetSingleton() + del cls.cInstance + _removeSingleton(cls) + except AttributeError: + # run up the chain of base classes until we find the one that has the instance + # and then delete it there + for baseClass in cls.__bases__: + if issubclass(baseClass, Singleton): + baseClass._forgetClassInstanceReferenceForTesting() + _forgetClassInstanceReferenceForTesting = classmethod(_forgetClassInstanceReferenceForTesting) + + +if __name__ == '__main__': + + import unittest + import time + + class singletonmixin_Public_TestCase(unittest.TestCase): + def testReturnsSameObject(self): + """ + Demonstrates normal use -- just call getInstance and it returns a singleton instance + """ + + class A(Singleton): + def __init__(self): + super(A, self).__init__() + + a1 = A.getInstance() + a2 = A.getInstance() + self.assertEquals(id(a1), id(a2)) + + def testInstantiateWithMultiArgConstructor(self): + """ + If the singleton needs args to construct, include them in the first + call to get instances. 
+ """ + + class B(Singleton): + + def __init__(self, arg1, arg2): + super(B, self).__init__() + self.arg1 = arg1 + self.arg2 = arg2 + + b1 = B.getInstance('arg1 value', 'arg2 value') + b2 = B.getInstance() + self.assertEquals(b1.arg1, 'arg1 value') + self.assertEquals(b1.arg2, 'arg2 value') + self.assertEquals(id(b1), id(b2)) + + def testInstantiateWithKeywordArg(self): + + class B(Singleton): + + def __init__(self, arg1=5): + super(B, self).__init__() + self.arg1 = arg1 + + b1 = B.getInstance('arg1 value') + b2 = B.getInstance() + self.assertEquals(b1.arg1, 'arg1 value') + self.assertEquals(id(b1), id(b2)) + + def testTryToInstantiateWithoutNeededArgs(self): + + class B(Singleton): + + def __init__(self, arg1, arg2): + super(B, self).__init__() + self.arg1 = arg1 + self.arg2 = arg2 + + self.assertRaises(SingletonException, B.getInstance) + + def testPassTypeErrorIfAllArgsThere(self): + """ + Make sure the test for capturing missing args doesn't interfere with a normal TypeError. + """ + class B(Singleton): + + def __init__(self, arg1, arg2): + super(B, self).__init__() + self.arg1 = arg1 + self.arg2 = arg2 + raise TypeError, 'some type error' + + self.assertRaises(TypeError, B.getInstance, 1, 2) + + def testTryToInstantiateWithoutGetInstance(self): + """ + Demonstrates that singletons can ONLY be instantiated through + getInstance, as long as they call Singleton.__init__ during construction. + + If this check is not required, you don't need to call Singleton.__init__(). 
+ """ + + class A(Singleton): + def __init__(self): + super(A, self).__init__() + + self.assertRaises(SingletonException, A) + + def testDontAllowNew(self): + + def instantiatedAnIllegalClass(): + class A(Singleton): + def __init__(self): + super(A, self).__init__() + + def __new__(metaclass, strName, tupBases, dct): + return super(MetaSingleton, metaclass).__new__(metaclass, strName, tupBases, dct) + + self.assertRaises(SingletonException, instantiatedAnIllegalClass) + + + def testDontAllowArgsAfterConstruction(self): + class B(Singleton): + + def __init__(self, arg1, arg2): + super(B, self).__init__() + self.arg1 = arg1 + self.arg2 = arg2 + + B.getInstance('arg1 value', 'arg2 value') + self.assertRaises(SingletonException, B, 'arg1 value', 'arg2 value') + + def test_forgetClassInstanceReferenceForTesting(self): + class A(Singleton): + def __init__(self): + super(A, self).__init__() + class B(A): + def __init__(self): + super(B, self).__init__() + + # check that changing the class after forgetting the instance produces + # an instance of the new class + a = A.getInstance() + assert a.__class__.__name__ == 'A' + A._forgetClassInstanceReferenceForTesting() + b = B.getInstance() + assert b.__class__.__name__ == 'B' + + # check that invoking the 'forget' on a subclass still deletes the instance + B._forgetClassInstanceReferenceForTesting() + a = A.getInstance() + B._forgetClassInstanceReferenceForTesting() + b = B.getInstance() + assert b.__class__.__name__ == 'B' + + def test_forgetAllSingletons(self): + # Should work if there are no singletons + forgetAllSingletons() + + class A(Singleton): + ciInitCount = 0 + def __init__(self): + super(A, self).__init__() + A.ciInitCount += 1 + + A.getInstance() + self.assertEqual(A.ciInitCount, 1) + + A.getInstance() + self.assertEqual(A.ciInitCount, 1) + + forgetAllSingletons() + A.getInstance() + self.assertEqual(A.ciInitCount, 2) + + def test_threadedCreation(self): + # Check that only one Singleton is created even if multiple 
+ # threads try at the same time. If fails, would see assert in _addSingleton + class Test_Singleton(Singleton): + def __init__(self): + super(Test_Singleton, self).__init__() + + class Test_SingletonThread(threading.Thread): + def __init__(self, fTargetTime): + super(Test_SingletonThread, self).__init__() + self._fTargetTime = fTargetTime + self._eException = None + + def run(self): + try: + fSleepTime = self._fTargetTime - time.time() + if fSleepTime > 0: + time.sleep(fSleepTime) + Test_Singleton.getInstance() + except Exception, e: + self._eException = e + + fTargetTime = time.time() + 0.1 + lstThreads = [] + for _ in xrange(100): + t = Test_SingletonThread(fTargetTime) + t.start() + lstThreads.append(t) + eException = None + for t in lstThreads: + t.join() + if t._eException and not eException: + eException = t._eException + if eException: + raise eException + + def testNoInit(self): + """ + Demonstrates use with a class not defining __init__ + """ + + class A(Singleton): + pass + + #INTENTIONALLY UNDEFINED: + #def __init__(self): + # super(A, self).__init__() + + A.getInstance() #Make sure no exception is raised + + def testMultipleGetInstancesWithArgs(self): + + class A(Singleton): + + ignoreSubsequent = True + + def __init__(self, a, b=1): + pass + + a1 = A.getInstance(1) + a2 = A.getInstance(2) # ignores the second call because of ignoreSubsequent + + class B(Singleton): + + def __init__(self, a, b=1): + pass + + b1 = B.getInstance(1) + self.assertRaises(SingletonException, B.getInstance, 2) # No ignoreSubsequent included + + class C(Singleton): + + def __init__(self, a=1): + pass + + c1 = C.getInstance(a=1) + self.assertRaises(SingletonException, C.getInstance, a=2) # No ignoreSubsequent included + + def testInheritance(self): + """ + It's sometimes said that you can't subclass a singleton (see, for instance, + http://steve.yegge.googlepages.com/singleton-considered-stupid point e). This + test shows that at least rudimentary subclassing works fine for us. 
+ """ + + class A(Singleton): + + def setX(self, x): + self.x = x + + def setZ(self, z): + raise NotImplementedError + + class B(A): + + def setX(self, x): + self.x = -x + + def setY(self, y): + self.y = y + + a = A.getInstance() + a.setX(5) + b = B.getInstance() + b.setX(5) + b.setY(50) + self.assertEqual((a.x, b.x, b.y), (5, -5, 50)) + self.assertRaises(AttributeError, eval, 'a.setY', {}, locals()) + self.assertRaises(NotImplementedError, b.setZ, 500) + + unittest.main() + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/template.py b/shell/modules/template.py new file mode 100644 index 0000000000..f3448f7dea --- /dev/null +++ b/shell/modules/template.py @@ -0,0 +1,176 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import re +from vars import Vars +from utils import * +from msg import * + +def get_var(l,key): + for s in l: + a = s.split() + if len(a) == 2 and a[0] == key: + return a[1] + return '' +def chk_var(l,key): + for s in l: + a = s.split() + if len(a) == 2 and a[0] == key and a[1]: + return True + return False +def chk_key(l,key): + for s in l: + a = s.split() + if len(a) >= 1 and a[0] == key: + return True + return False +def validate_template(l): + 'Test for required stuff in a template.' 
+ if not chk_var(l,'%name'): + common_err("invalid template: missing '%name'") + return False + if not chk_key(l,'%generate'): + common_err("invalid template: missing '%generate'") + return False + g = l.index('%generate') + if not (chk_key(l[0:g],'%required') or chk_key(l[0:g],'%optional')): + common_err("invalid template: missing '%required' or '%optional'") + return False + return True +def fix_tmpl_refs(l,id,pfx): + for i in range(len(l)): + l[i] = l[i].replace(id,pfx) +def fix_tmpl_refs_re(l,regex,repl): + for i in range(len(l)): + l[i] = re.sub(regex,repl,l[i]) +class LoadTemplate(object): + ''' + Load a template and its dependencies, generate a + configuration file which should be relatively easy and + straightforward to parse. + ''' + edit_instructions = '''# Edit instructions: +# +# Add content only at the end of lines starting with '%%'. +# Only add content, don't remove or replace anything. +# The parameters following '%required' are not optional, +# unlike those following '%optional'. 
+# You may also add comments for future reference.''' + no_more_edit = '''# Don't edit anything below this line.''' + def __init__(self,name): + self.name = name + self.all_pre_gen = [] + self.all_post_gen = [] + self.all_pfx = [] + def new_pfx(self,name): + i = 1 + pfx = name + while pfx in self.all_pfx: + pfx = "%s_%d" % (name,i) + i += 1 + self.all_pfx.append(pfx) + return pfx + def generate(self): + return '\n'.join([ \ + "# Configuration: %s" % self.name, \ + '', \ + self.edit_instructions, \ + '', \ + '\n'.join(self.all_pre_gen), \ + self.no_more_edit, \ + '', \ + '%generate', \ + '\n'.join(self.all_post_gen)]) + def write_config(self,name): + try: + f = open("%s/%s" % (vars.tmpl_conf_dir, name),"w") + except IOError,msg: + common_err("open: %s"%msg) + return False + print >>f, self.generate() + f.close() + return True + def load_template(self,tmpl): + try: + f = open("%s/%s" % (vars.tmpl_dir, tmpl)) + except IOError,msg: + common_err("open: %s"%msg) + return '' + l = (''.join(f)).split('\n') + if not validate_template(l): + return '' + common_info("pulling in template %s" % tmpl) + g = l.index('%generate') + pre_gen = l[0:g] + post_gen = l[g+1:] + name = get_var(pre_gen,'%name') + for s in l[0:g]: + if s.startswith('%depends_on'): + a = s.split() + if len(a) != 2: + common_warn("%s: wrong usage" % s) + continue + tmpl_id = a[1] + tmpl_pfx = self.load_template(a[1]) + if tmpl_pfx: + fix_tmpl_refs(post_gen,'%'+tmpl_id,'%'+tmpl_pfx) + pfx = self.new_pfx(name) + fix_tmpl_refs(post_gen, '%_:', '%'+pfx+':') + # replace remaining %_, it may be useful at times + fix_tmpl_refs(post_gen, '%_', pfx) + v_idx = pre_gen.index('%required') or pre_gen.index('%optional') + pre_gen.insert(v_idx,'%pfx ' + pfx) + self.all_pre_gen += pre_gen + self.all_post_gen += post_gen + return pfx + def post_process(self, params): + pfx_re = '(%s)' % '|'.join(self.all_pfx) + for n in params: + fix_tmpl_refs(self.all_pre_gen, '%% '+n, "%% "+n+" "+params[n]) + 
fix_tmpl_refs_re(self.all_post_gen, \ + '%'+pfx_re+'([^:]|$)', r'\1\2') + # process %if ... [%else] ... %fi + rmidx_l = [] + if_seq = False + for i in range(len(self.all_post_gen)): + s = self.all_post_gen[i] + if if_seq: + a = s.split() + if len(a) >= 1 and a[0] == '%fi': + if_seq = False + rmidx_l.append(i) + elif len(a) >= 1 and a[0] == '%else': + outcome = not outcome + rmidx_l.append(i) + else: + if not outcome: + rmidx_l.append(i) + continue + if not s: + continue + a = s.split() + if len(a) == 2 and a[0] == '%if': + outcome = not a[1].startswith('%') # not replaced -> false + if_seq = True + rmidx_l.append(i) + rmidx_l.reverse() + for i in rmidx_l: + del self.all_post_gen[i] + +vars = Vars.getInstance() + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/term.py b/shell/modules/term.py new file mode 100644 index 0000000000..d88d1fed63 --- /dev/null +++ b/shell/modules/term.py @@ -0,0 +1,153 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import sys +import re +from singletonmixin import Singleton + +# from: http://code.activestate.com/recipes/475116/ + +class TerminalController(Singleton): + """ + A class that can be used to portably generate formatted output to + a terminal. 
+ `TerminalController` defines a set of instance variables whose + values are initialized to the control sequence necessary to + perform a given action. These can be simply included in normal + output to the terminal: + >>> term = TerminalController() + >>> print 'This is '+term.GREEN+'green'+term.NORMAL + Alternatively, the `render()` method can be used, which replaces + '${action}' with the string required to perform 'action': + >>> term = TerminalController() + >>> print term.render('This is ${GREEN}green${NORMAL}') + If the terminal doesn't support a given action, then the value of + the corresponding instance variable will be set to ''. As a + result, the above code will still work on terminals that do not + support color, except that their output will not be colored. + Also, this means that you can test whether the terminal supports a + given action by simply testing the truth value of the + corresponding instance variable: + >>> term = TerminalController() + >>> if term.CLEAR_SCREEN: + ... print 'This terminal supports clearing the screen.' + Finally, if the width and height of the terminal are known, then + they will be stored in the `COLS` and `LINES` attributes. + """ + # Cursor movement: + BOL = '' #: Move the cursor to the beginning of the line + UP = '' #: Move the cursor up one line + DOWN = '' #: Move the cursor down one line + LEFT = '' #: Move the cursor left one char + RIGHT = '' #: Move the cursor right one char + # Deletion: + CLEAR_SCREEN = '' #: Clear the screen and move to home position + CLEAR_EOL = '' #: Clear to the end of the line. + CLEAR_BOL = '' #: Clear to the beginning of the line.
+ CLEAR_EOS = '' #: Clear to the end of the screen + # Output modes: + BOLD = '' #: Turn on bold mode + BLINK = '' #: Turn on blink mode + DIM = '' #: Turn on half-bright mode + REVERSE = '' #: Turn on reverse-video mode + NORMAL = '' #: Turn off all modes + # Cursor display: + HIDE_CURSOR = '' #: Make the cursor invisible + SHOW_CURSOR = '' #: Make the cursor visible + # Terminal size: + COLS = None #: Width of the terminal (None for unknown) + LINES = None #: Height of the terminal (None for unknown) + # Foreground colors: + BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = '' + # Background colors: + BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = '' + BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = '' + _STRING_CAPABILITIES = """ + BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1 + CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold + BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0 + HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split() + _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split() + _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split() + def __init__(self, term_stream=sys.stdout): + """ + Create a `TerminalController` and initialize its attributes + with appropriate values for the current terminal. + `term_stream` is the stream that will be used for terminal + output; if this stream is not a tty, then the terminal is + assumed to be a dumb terminal (i.e., have no capabilities). + """ + # Curses isn't available on all platforms + try: import curses + except: + common_info("no curses support: you won't see colors") + return + # If the stream isn't a tty, then assume it has no capabilities. + if not term_stream.isatty(): return + # Check the terminal type. If we fail, then assume that the + # terminal has no capabilities. + try: curses.setupterm() + except: return + # Look up numeric capabilities. 
+ self.COLS = curses.tigetnum('cols') + self.LINES = curses.tigetnum('lines') + # Look up string capabilities. + for capability in self._STRING_CAPABILITIES: + (attrib, cap_name) = capability.split('=') + setattr(self, attrib, self._tigetstr(cap_name) or '') + # Colors + set_fg = self._tigetstr('setf') + if set_fg: + for i,color in zip(range(len(self._COLORS)), self._COLORS): + setattr(self, color, curses.tparm(set_fg, i) or '') + set_fg_ansi = self._tigetstr('setaf') + if set_fg_ansi: + for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS): + setattr(self, color, curses.tparm(set_fg_ansi, i) or '') + set_bg = self._tigetstr('setb') + if set_bg: + for i,color in zip(range(len(self._COLORS)), self._COLORS): + setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '') + set_bg_ansi = self._tigetstr('setab') + if set_bg_ansi: + for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS): + setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '') + def _tigetstr(self, cap_name): + # String capabilities can include "delays" of the form "$<2>". + # For any modern terminal, we should be able to just ignore + # these, so strip them out. + import curses + cap = curses.tigetstr(cap_name) or '' + return re.sub(r'\$<\d+>[/*]?', '', cap) + def render(self, template): + """ + Replace each $-substitutions in the given template string with + the corresponding terminal control string (if it's defined) or + '' (if it's not). 
+ """ + return re.sub(r'\$\$|\${\w+}', self._render_sub, template) + def _render_sub(self, match): + s = match.group() + if s == '$$': return s + else: return getattr(self, s[2:-1]) + def is_color(self, s): + try: + attr = getattr(self, s.upper()) + return attr != None + except: return False + +# vim:ts=4:sw=4:et: diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in new file mode 100644 index 0000000000..2e9afa1fac --- /dev/null +++ b/shell/modules/ui.py.in @@ -0,0 +1,1744 @@ +# Copyright (C) 2008 Dejan Muhamedagic +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import sys +import re +import os +import readline +import time + +from help import HelpSystem, cmd_help +from vars import Vars +from cache import WCache +from levels import Levels +from cibconfig import mkset_obj, CibFactory +from cibstatus import CibStatus +from template import LoadTemplate +from ra import * +from msg import * +from utils import * +from xmlutil import * + +def cmd_end(cmd,dir = ".."): + "Go up one level." 
+ levels.droplevel() +def cmd_exit(cmd): + "Exit the crm program" + cmd_end(cmd) + if options.interactive: + print "bye" + try: + readline.write_history_file(hist_file) + except: + pass + for f in vars.tmpfiles: + os.unlink(f) + sys.exit() + +class UserInterface(object): + ''' + Stuff common to all user interface classes. + ''' + global_cmd_aliases = { + "quit": ("bye","exit"), + "end": ("cd","up"), + } + def __init__(self): + self.cmd_table = odict() + self.cmd_table["help"] = (self.help,(0,1),0) + self.cmd_table["quit"] = (self.exit,(0,0),0) + self.cmd_table["end"] = (self.end,(0,1),0) + self.cmd_aliases = self.global_cmd_aliases.copy() + def end_game(self, no_questions_asked = False): + pass + def help(self,cmd,topic = ''): + "usage: help []" + cmd_help(self.help_table,topic) + def end(self,cmd,dir = ".."): + "usage: end" + self.end_game() + cmd_end(cmd,dir) + def exit(self,cmd): + "usage: exit" + self.end_game() + cmd_exit(cmd) + +class CliOptions(UserInterface): + ''' + Manage user preferences + ''' + def __init__(self): + UserInterface.__init__(self) + self.help_table = help_sys.load_level("options") + self.cmd_table["skill-level"] = (self.set_skill_level,(1,1),0,(skills_list,)) + self.cmd_table["editor"] = (self.set_editor,(1,1),0) + self.cmd_table["pager"] = (self.set_pager,(1,1),0) + self.cmd_table["user"] = (self.set_crm_user,(0,1),0) + self.cmd_table["output"] = (self.set_output,(1,1),0) + self.cmd_table["colorscheme"] = (self.set_colors,(1,1),0) + self.cmd_table["check-frequency"] = (self.set_check_frequency,(1,1),0) + self.cmd_table["check-mode"] = (self.set_check_mode,(1,1),0) + self.cmd_table["save"] = (self.save_options,(0,0),0) + self.cmd_table["show"] = (self.show_options,(0,0),0) + setup_aliases(self) + def set_skill_level(self,cmd,skill_level): + """usage: skill-level + level: operator | administrator | expert""" + return user_prefs.set_skill_level(skill_level) + def set_editor(self,cmd,prog): + "usage: editor " + return 
user_prefs.set_editor(prog) + def set_pager(self,cmd,prog): + "usage: pager " + return user_prefs.set_pager(prog) + def set_crm_user(self,cmd,user = ''): + "usage: user []" + return user_prefs.set_crm_user(user) + def set_output(self,cmd,otypes): + "usage: output " + return user_prefs.set_output(otypes) + def set_colors(self,cmd,scheme): + "usage: colorscheme " + return user_prefs.set_colors(scheme) + def set_check_frequency(self,cmd,freq): + "usage: check-frequency " + return user_prefs.set_check_freq(freq) + def set_check_mode(self,cmd,mode): + "usage: check-mode " + return user_prefs.set_check_mode(mode) + def show_options(self,cmd): + "usage: show" + return user_prefs.write_rc(sys.stdout) + def save_options(self,cmd): + "usage: save" + return user_prefs.save_options(vars.rc_file) + def end_game(self, no_questions_asked = False): + if no_questions_asked and not options.interactive: + self.save_options("save") + +def listshadows(): + return stdout2list("ls @CRM_CONFIG_DIR@ | fgrep shadow.
| sed 's/^shadow\.//'") +def shadowfile(name): + return "@CRM_CONFIG_DIR@/shadow.%s" % name +def shadow2doc(name): + return file2doc(shadowfile(name)) + +class CibShadow(UserInterface): + ''' + CIB shadow management class + ''' + extcmd = ">/dev/null &1" % self.extcmd) + except os.error: + no_prog_err(self.extcmd) + return False + return True + def new(self,cmd,name,*args): + "usage: new [withstatus] [force]" + if not is_filename_sane(name): + return False + new_cmd = "%s -c '%s'" % (self.extcmd,name) + for par in args: + if not par in ("force","--force","withstatus"): + syntax_err((cmd,name,par), context = 'new') + return False + if user_prefs.get_force() or "force" in args or "--force" in args: + new_cmd = "%s --force" % new_cmd + if ext_cmd(new_cmd) == 0: + common_info("%s shadow CIB created"%name) + self.use("use",name) + if "withstatus" in args: + cib_status.load("shadow:%s" % name) + def delete(self,cmd,name): + "usage: delete " + if not is_filename_sane(name): + return False + if vars.cib_in_use == name: + common_err("%s shadow CIB is in use"%name) + return False + if ext_cmd("%s -D '%s' --force" % (self.extcmd,name)) == 0: + common_info("%s shadow CIB deleted"%name) + else: + common_err("failed to delete %s shadow CIB"%name) + return False + def reset(self,cmd,name): + "usage: reset " + if not is_filename_sane(name): + return False + if ext_cmd("%s -r '%s'" % (self.extcmd,name)) == 0: + common_info("copied live CIB to %s"%name) + else: + common_err("failed to copy live CIB to %s"%name) + return False + def commit(self,cmd,name): + "usage: commit " + if not is_filename_sane(name): + return False + if ext_cmd("%s -C '%s' --force" % (self.extcmd,name)) == 0: + common_info("commited '%s' shadow CIB to the cluster"%name) + wcache.clear() + else: + common_err("failed to commit the %s shadow CIB"%name) + return False + def diff(self,cmd): + "usage: diff" + s = get_stdout(add_sudo("%s -d" % self.extcmd_stdout)) + page_string(s) + def list(self,cmd): + "usage: list" 
+ if options.regression_tests: + for t in listshadows(): + print t + else: + multicolumn(listshadows()) + def _use(self,name,withstatus): + # Choose a shadow cib for further changes. If the name + # provided is empty, then choose the live (cluster) cib. + # Don't allow ' in shadow names + if not name or name == "live": + os.unsetenv(vars.shadow_envvar) + vars.cib_in_use = "" + if withstatus: + cib_status.load("live") + else: + os.putenv(vars.shadow_envvar,name) + vars.cib_in_use = name + if withstatus: + cib_status.load("shadow:%s" % name) + def use(self,cmd,name = '', withstatus = ''): + "usage: use [] [withstatus]" + # check the name argument + if name and not is_filename_sane(name): + return False + if name and name != "live": + if not os.access(shadowfile(name),os.F_OK): + common_err("%s: no such shadow CIB"%name) + return False + if withstatus and withstatus != "withstatus": + syntax_err((cmd,withstatus), context = 'use') + return False + # If invoked from configure + # take special precautions + try: + prev_level = levels.previous().myname() + except: + prev_level = '' + if prev_level != "cibconfig": + self._use(name,withstatus) + return True + if not cib_factory.has_cib_changed(): + self._use(name,withstatus) + # new CIB: refresh the CIB factory + cib_factory.refresh() + return True + saved_cib = vars.cib_in_use + self._use(name,'') # don't load the status yet + if not cib_factory.is_current_cib_equal(silent = True): + # user made changes and now wants to switch to a + # different and unequal CIB; we refuse to cooperate + common_err("the requested CIB is different from the current one") + if user_prefs.get_force(): + common_info("CIB overwrite forced") + elif not ask("All changes will be dropped. 
Do you want to proceed?"): + self._use(saved_cib,'') # revert to the previous CIB + return False + self._use(name,withstatus) # now load the status too + return True + +def listtemplates(): + l = [] + for f in os.listdir(vars.tmpl_dir): + if os.path.isfile("%s/%s" % (vars.tmpl_dir,f)): + l.append(f) + return l +def listconfigs(): + l = [] + for f in os.listdir(vars.tmpl_conf_dir): + if os.path.isfile("%s/%s" % (vars.tmpl_conf_dir,f)): + l.append(f) + return l +def check_transition(inp,state,possible_l): + if not state in possible_l: + common_err("input (%s) in wrong state %s" % (inp,state)) + return False + return True +class Template(UserInterface): + ''' + Configuration templates. + ''' + def __init__(self): + UserInterface.__init__(self) + self.help_table = help_sys.load_level("template") + self.cmd_table["new"] = (self.new,(2,),1,(null_list,templates_list,loop)) + self.cmd_table["load"] = (self.load,(0,1),1,(config_list,)) + self.cmd_table["edit"] = (self.edit,(0,1),1,(config_list,)) + self.cmd_table["delete"] = (self.delete,(1,2),1,(config_list,)) + self.cmd_table["show"] = (self.show,(0,1),0,(config_list,)) + self.cmd_table["apply"] = (self.apply,(0,2),1,(config_list_method,config_list)) + self.cmd_table["list"] = (self.list,(0,1),0) + setup_aliases(self) + self.init_dir() + self.curr_conf = '' + def init_dir(self): + '''Create the conf directory, link to templates''' + if not os.path.isdir(vars.tmpl_conf_dir): + try: + os.makedirs(vars.tmpl_conf_dir) + except os.error,msg: + common_err("makedirs: %s"%msg) + return + def get_depends(self,tmpl): + '''return a list of required templates''' + # Not used. May need it later. 
+ try: + tf = open("%s/%s" % (vars.tmpl_dir, tmpl),"r") + except IOError,msg: + common_err("open: %s"%msg) + return + l = [] + for s in tf: + a = s.split() + if len(a) >= 2 and a[0] == '%depends_on': + l += a[1:] + tf.close() + return l + def replace_params(self,s,user_data): + change = False + for i in range(len(s)): + word = s[i] + for p in user_data: + # is parameter in the word? + pos = word.find('%' + p) + if pos < 0: + continue + endpos = pos + len('%' + p) + # and it isn't part of another word? + if re.match("[A-Za-z0-9]", word[endpos:endpos+1]): + continue + # if the value contains a space or + # it is a value of an attribute + # put quotes around it + if user_data[p].find(' ') >= 0 or word[pos-1:pos] == '=': + v = '"' + user_data[p] + '"' + else: + v = user_data[p] + word = word.replace('%' + p, v) + change = True # we did replace something + if change: + s[i] = word + if 'opt' in s: + if not change: + s = [] + else: + s.remove('opt') + return s + def generate(self,l,user_data): + '''replace parameters (user_data) and generate output + ''' + l2 = [] + for piece in l: + piece2 = [] + for s in piece: + s = self.replace_params(s,user_data) + if s: + piece2.append(' '.join(s)) + if piece2: + l2.append(' \\\n\t'.join(piece2)) + return '\n'.join(l2) + def process(self,config = ''): + '''Create a cli configuration from the current config''' + try: + f = open("%s/%s" % (vars.tmpl_conf_dir, config or self.curr_conf),'r') + except IOError,msg: + common_err("open: %s"%msg) + return '' + l = [] + piece = [] + user_data = {} + # states + START = 0; PFX = 1; DATA = 2; GENERATE = 3 + state = START + err_buf.start_tmp_lineno() + rc = True + for inp in f: + err_buf.incr_lineno() + if inp.startswith('#'): + continue + if type(inp) == type(u''): + inp = inp.encode('ascii') + inp = inp.strip() + try: + s = shlex.split(inp) + except ValueError, msg: + common_err(msg) + continue + while '\n' in s: + s.remove('\n') + if not s: + if state == GENERATE and piece: + l.append(piece) 
+ piece = [] + elif s[0] in ("%name","%depends_on","%suggests"): + continue + elif s[0] == "%pfx": + if check_transition(inp,state,(START,DATA)) and len(s) == 2: + pfx = s[1] + state = PFX + elif s[0] == "%required": + if check_transition(inp,state,(PFX,)): + state = DATA + data_reqd = True + elif s[0] == "%optional": + if check_transition(inp,state,(PFX,DATA)): + state = DATA + data_reqd = False + elif s[0] == "%%": + if state != DATA: + common_warn("user data in wrong state %s" % state) + if len(s) < 2: + common_warn("parameter name missing") + elif len(s) == 2: + if data_reqd: + common_err("required parameter %s not set" % s[1]) + rc = False + elif len(s) == 3: + user_data["%s:%s" % (pfx,s[1])] = s[2] + else: + common_err("%s: syntax error" % inp) + elif s[0] == "%generate": + if check_transition(inp,state,(DATA,)): + state = GENERATE + piece = [] + elif state == GENERATE: + if s: + piece.append(s) + else: + common_err("<%s> unexpected" % inp) + if piece: + l.append(piece) + err_buf.stop_tmp_lineno() + f.close() + if not rc: + return '' + return self.generate(l,user_data) + def new(self,cmd,name,*args): + "usage: new