diff --git a/Makefile.am b/Makefile.am index d35ba4e23c..793d20595d 100644 --- a/Makefile.am +++ b/Makefile.am @@ -1,71 +1,71 @@ # # Pacemaker code # # Copyright (C) 2004 Andrew Beekhof # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # EXTRA_DIST = autogen.sh ConfigureMe README.in libltdl.tar MAINTAINERCLEANFILES = Makefile.in aclocal.m4 configure DRF/config-h.in \ DRF/stamp-h.in libtool.m4 ltdl.m4 libltdl.tar -CORE = $(LIBLTDL_DIR) replace include lib mcp pengine cib crmd fencing tools shell xml +CORE = $(LIBLTDL_DIR) replace include lib mcp pengine cib crmd fencing tools xml SUBDIRS = $(CORE) cts extra doc doc_DATA = AUTHORS COPYING COPYING.LIB AUTOMAKE_OPTIONS = foreign ##ACLOCAL = aclocal -I $(auxdir) core: @echo "Building only core components: $(CORE)" list='$(CORE)'; for subdir in $$list; do make -C $$subdir all; done core-install: @echo "Installing only core components: $(CORE)" list='$(CORE)'; for subdir in $$list; do make -C $$subdir install; done install-exec-local: $(INSTALL) -d $(DESTDIR)/$(LCRSODIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_CONFIG_DIR) $(INSTALL) -d -m 750 $(DESTDIR)/$(CRM_STATE_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_CONFIG_DIR) -chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(CRM_STATE_DIR) if BUILD_CS_PLUGIN rm -f $(DESTDIR)$(LCRSODIR)/pacemaker.lcrso 
$(DESTDIR)$(LCRSODIR)/service_crm.so cp $(DESTDIR)$(libdir)/service_crm.so $(DESTDIR)$(LCRSODIR)/pacemaker.lcrso endif if BUILD_HEARTBEAT_SUPPORT $(INSTALL) -d $(DESTDIR)/$(HB_DAEMON_DIR) ln -sf $(CRM_DAEMON_DIR)/attrd $(DESTDIR)$(HB_DAEMON_DIR)/ ln -sf $(CRM_DAEMON_DIR)/cib $(DESTDIR)$(HB_DAEMON_DIR)/ ln -sf $(CRM_DAEMON_DIR)/crmd $(DESTDIR)$(HB_DAEMON_DIR)/ ln -sf $(CRM_DAEMON_DIR)/pengine $(DESTDIR)$(HB_DAEMON_DIR)/ ln -sf $(CRM_DAEMON_DIR)/stonithd $(DESTDIR)$(HB_DAEMON_DIR)/ endif # Use chown because the user/group may not exist clean-generic: rm -f $(TARFILE) *.tar.bz2 *.sed dist-clean-local: rm -f autoconf automake autoheader maintainer-clean-local: rm -f libltdl.tar .PHONY: rpm pkg handy handy-copy diff --git a/README.markdown b/README.markdown index 65d899411c..4393897a7c 100644 --- a/README.markdown +++ b/README.markdown @@ -1,75 +1,79 @@ # Pacemaker ## What is Pacemaker? Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or Corosync. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. ## For more information look at: * [Website](http://www.clusterlabs.org) * [Issues/Bugs](http://bugs.clusterlabs.org) * [Mailing list](http://oss.clusterlabs.org/mailman/listinfo/pacemaker). * [Documentation](http://www.clusterlabs.org/doc) +## Important information about the _crm shell_ +Since late-April, the _crm shell_ is no longer included in the Pacemaker source tree. 
+This change was made at the author's request as it is now maintained as a separate project at https://savannah.nongnu.org/projects/crmsh/ + ## Build Dependencies * automake * autoconf * libtool-ltdl-devel * pkgconfig * python * glib2-devel * cluster-glue-libs-devel * libxml2-devel * libxslt-devel * python-devel * gcc-c++ * bzip2-devel * gnutls-devel * pam-devel ## Cluster Stack Dependencies (Pick at least one) * clusterlib-devel (CMAN) * corosynclib-devel (Corosync) * heartbeat-devel (Heartbeat) ## Optional Build Dependencies * ncurses-devel * openssl-devel * libselinux-devel * docbook-style-xsl (documentation) * libesmtp-devel (Email alerts) * lm_sensors-devel (SNMP alerts) * net-snmp-devel (SNMP alerts) * asciidoc (documentation) * help2man (documentation) * publican (documentation) * inkscape (documentation) ## Source Control (GIT) git clone git://github.com/ClusterLabs/pacemaker.git [See Github](https://github.com/ClusterLabs/pacemaker) ## Installing from source $ ./autogen.sh $ ./configure $ make $ sudo make install ## How you can help If you find this project useful, you may want to consider supporting its future development. There are a number of ways to support the project. * Test and report issues. * Help others on the [mailing list](http://oss.clusterlabs.org/mailman/listinfo/pacemaker). * Contribute documentation, examples and test cases. * Contribute patches. * Spread the word. 
diff --git a/configure.ac b/configure.ac index fd08c0e82b..eb5cda0dfd 100644 --- a/configure.ac +++ b/configure.ac @@ -1,1731 +1,1722 @@ dnl dnl autoconf for Pacemaker dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.53) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services AC_INIT(pacemaker, 1.1.7, pacemaker@oss.clusterlabs.org) CRM_DTD_VERSION="1.2" PCMK_FEATURES="" HB_PKG=heartbeat AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. `AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except lha_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/crm_config.h) ALL_LINGUAS="en fr" AC_ARG_WITH(version, [ --with-version=version Override package version (if you're a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION) AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. 
'{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from dnl normal compilation. When a failure occurs, it will then display the full dnl command line dnl Wrap in m4_ifdef to avoid breaking on older platforms m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])]) dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". AM_PROG_CC_C_O AC_PROG_CC_STDC AC_LIBTOOL_DLOPEN dnl Enable dlopen support... AC_LIBLTDL_CONVENIENCE dnl make libltdl a convenience lib AC_PROG_LIBTOOL AC_PROG_YACC AM_PROG_LEX AC_C_STRINGIZE AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="$@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)],[RC=1; AC_MSG_RESULT(no)]) return $RC } try_extract_header_define() { AC_MSG_CHECKING(if $2 in $1 exists. 
If not defaulting to $3) Cfile=$srcdir/extract_define.$2.${$} printf "#include \n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) {\n" >> ${Cfile}.c printf "#ifdef %s\n" $2 >> ${Cfile}.c printf "printf(\"%%s\", %s);\n" $2 >> ${Cfile}.c printf "#endif \n return 0; }\n" >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` if test x"${value}" == x""; then value=$3 fi AC_MSG_RESULT($value) printf $value rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM } extract_header_define() { AC_MSG_CHECKING(for $2 in $1) Cfile=$srcdir/extract_define.$2.${$} printf "#include \n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` AC_MSG_RESULT($value) printf $value rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM } dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi force GCC to compile to ANSI/ANSI standard for older compilers. [default=no]]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]]) AC_ARG_ENABLE([quiet], [ --enable-quiet Supress make output unless there is an error [default=no]]) AC_ARG_ENABLE([thread-safe], [ --enable-thread-safe Enable some client libraries to be thread safe. 
[default=no]]) AC_ARG_ENABLE([bundled-ltdl], [ --enable-bundled-ltdl Configure, build and install the standalone ltdl library bundled with ${PACKAGE} [default=no]]) LTDL_LIBS="" AC_ARG_ENABLE([no-stack], [ --enable-no-stack Only build the Policy Engine and pieces needed to support it [default=no]]) AC_ARG_WITH(ais, [ --with-ais Support the Corosync messaging and membership layer ], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH(corosync, [ --with-corosync Support the Corosync messaging and membership layer ], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH(heartbeat, [ --with-heartbeat Support the Heartbeat messaging and membership layer ], [ SUPPORT_HEARTBEAT=$withval ], [ SUPPORT_HEARTBEAT=try ], ) AC_ARG_WITH(cman, [ --with-cman Support the consumption of membership and quorum from cman ], [ SUPPORT_CMAN=$withval ], [ SUPPORT_CMAN=try ], ) AC_ARG_WITH(cpg, [ --with-cs-quorum Support the consumption of membership and quorum from corosync ], [ SUPPORT_CS_QUORUM=$withval ], [ SUPPORT_CS_QUORUM=try ], ) AC_ARG_WITH(snmp, [ --with-snmp Support the SNMP protocol ], [ SUPPORT_SNMP=$withval ], [ SUPPORT_SNMP=try ], ) AC_ARG_WITH(esmtp, [ --with-esmtp Support the sending mail notifications with the esmtp library ], [ SUPPORT_ESMTP=$withval ], [ SUPPORT_ESMTP=try ], ) AC_ARG_WITH(acl, [ --with-acl Support CIB ACL ], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=no ], ) CSPREFIX="" AC_ARG_WITH(ais-prefix, [ --with-ais-prefix=DIR Prefix used when Corosync was installed [$prefix]], [ CSPREFIX=$withval ], [ CSPREFIX=$prefix ]) LCRSODIR="" AC_ARG_WITH(lcrso-dir, [ --with-lcrso-dir=DIR Corosync lcrso files. 
], [ LCRSODIR="$withval" ]) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]], [ INITDIR="$withval" ]) SUPPORT_PROFILING=0 AC_ARG_WITH(profiling, [ --with-profiling Support gprof profiling ], [ SUPPORT_PROFILING=$withval ]) SUPPORT_GCOV=0 AC_ARG_WITH(gcov, [ --with-gcov Support gcov coverage testing ], [ SUPPORT_GCOV=$withval ]) PUBLICAN_BRAND="common" AC_ARG_WITH(brand, [ --with-brand=brand Brand to use for generated documentation [$PUBLICAN_BRAND]], [ PUBLICAN_BRAND="$withval" ]) AC_SUBST(PUBLICAN_BRAND) dnl =============================================== dnl General Processing dnl =============================================== AC_SUBST(HB_PKG) INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Heartbeat, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac AC_MSG_NOTICE(Sanitizing ais_prefix: ${CSPREFIX}) case $CSPREFIX in dnl For consistency with Heartbeat, map NONE->$prefix NONE) CSPREFIX=$prefix;; prefix) CSPREFIX=$prefix;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR);; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in dnl For consistency with Heartbeat, map NONE->$prefix *prefix*|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d 
${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we dont end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} #docdir=${datadir}/doc/packages/${PACKAGE} fi AC_SUBST(docdir) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". 
case "$host_os" in *bsd*) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) CFLAGS="$CFLAGS -I${prefix}/include" ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac dnl Eventually remove this CFLAGS="$CFLAGS -I${prefix}/include/heartbeat" AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) ac_save_CFLAGS=$CFLAGS CFLAGS="-Wall -Werror" AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [ #include #include #include ], [ int max = 512; uint64_t bignum = 42; char *buffer = malloc(max); const char *random = "random"; snprintf(buffer, max-1, "", bignum, random); fprintf(stderr, "Result: %s\n", buffer); ] )], [U64T="%lu"], [U64T="%llu"] ) CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. 
AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) AM_PATH_PYTHON AC_CHECK_PROGS(MAKE, gmake make) AC_PATH_PROGS(HTML2TXT, lynx w3m) AC_PATH_PROGS(HELP2MAN, help2man) AC_PATH_PROGS(POD2MAN, pod2man, pod2man) AC_PATH_PROGS(ASCIIDOC, asciidoc) AC_PATH_PROGS(PUBLICAN, publican) AC_PATH_PROGS(INKSCAPE, inkscape) AC_PATH_PROGS(XSLTPROC, xsltproc) AC_PATH_PROGS(FOP, fop) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PKGCONFIG, pkg-config) AC_PATH_PROGS(XML2CONFIG, xml2-config) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) dnl Disable these until we decide if the stonith config file should be supported dnl AC_PATH_PROGS(BISON, bison) dnl AC_PATH_PROGS(FLEX, flex) dnl AC_PATH_PROGS(HAVE_YACC, $YACC) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) XSLT=`find ${datadir} -name docbook.xsl` for xsl in $XSLT; do dname=`dirname $xsl` bname=`basename $dname` if test "$bname" = "manpages"; then MANPAGE_XSLT="$xsl" break fi done fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"") if test x"${ASCIIDOC}" != x""; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi SUPPORT_STONITH_CONFIG=0 if test x"${HAVE_YACC}" != x"" -a x"${FLEX}" != x"" -a x"${BISON}" != x""; then 
SUPPORT_STONITH_CONFIG=1 PCMK_FEATURES="$PCMK_FEATURES st-conf" fi AM_CONDITIONAL(BUILD_STONITH_CONFIG, test $SUPPORT_STONITH_CONFIG = 1) AC_DEFINE_UNQUOTED(SUPPORT_STONITH_CONFIG, $SUPPORT_STONITH_CONFIG, Support a stand-alone stonith config file in addition to the CIB) AM_CONDITIONAL(BUILD_DOCBOOK, test x"${PUBLICAN}" != x"" -a x"${INKSCAPE}" != x"") if test x"${PUBLICAN}" != x"" -a x"${INKSCAPE}" != x""; then AC_MSG_NOTICE(Enabling publican) PCMK_FEATURES="$PCMK_FEATURES publican-docs" fi dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...) Fortunately this dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl unsetenv: is some bsdish function that should also be avoided (No dnl replacement) dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolearably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. dnl We wrote a tolearably-fast replacement function for it. dnl dnl daemon: is a GNU function. The daemon() function is for programs wishing to dnl detach themselves from the controlling terminal and run in the dnl background as system daemon dnl A replacement function is supplied for it. 
AC_REPLACE_FUNCS(alphasort inet_pton NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup daemon strlcpy strlcat) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_LIB(uuid, uuid_parse) dnl e2fsprogs AC_CHECK_LIB(uuid, uuid_create) dnl ossp if test x"${PKGCONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi if test "x${enable_thread_safe}" = "xyes"; then GPKGNAME="gthread-2.0" else GPKGNAME="glib-2.0" fi if $PKGCONFIG --exists $GPKGNAME then GLIBCONFIG="$PKGCONFIG $GPKGNAME" else set -x echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH $PKGCONFIG --exists $GPKGNAME; echo $? $PKGCONFIG --cflags $GPKGNAME; echo $? $PKGCONFIG $GPKGNAME; echo $? set +x AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE}) fi AC_MSG_RESULT(using $GLIBCONFIG) USE_GHASH_COMPAT=0 AC_CHECK_LIB(glib-2.0, g_hash_table_get_values) if test "x$ac_cv_lib_glib_2_0_g_hash_table_get_values" != x""yes; then AC_MSG_WARN(Your version of Glib is too old, you should have at least 2.14) USE_GHASH_COMPAT=1 fi AC_DEFINE_UNQUOTED(USE_GHASH_COMPAT, $USE_GHASH_COMPAT, Use g_hash_table compatibility functions) AC_SUBST(USE_GHASH_COMPAT) if $PKGCONFIG --exists systemd then systemdunitdir=`$PKGCONFIG --variable=systemdsystemunitdir systemd` AC_SUBST(systemdunitdir) fi AM_CONDITIONAL(HAVE_SYSTEMD, test -n "$systemdunitdir" -a "x$systemdunitdir" != xno) # # Where is dlopen? 
# if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi dnl dnl Check for location of gettext dnl dnl On at least Solaris 2.x, where it is in libc, specifying lintl causes dnl grief. Ensure minimal result, not the sum of all possibilities. dnl And do libc first. dnl Known examples: dnl c: Linux, Solaris 2.6+ dnl intl: BSD, AIX AC_CHECK_LIB(c, gettext) if test x$ac_cv_lib_c_gettext != xyes; then AC_CHECK_LIB(intl, gettext) fi if test x$ac_cv_lib_c_gettext != xyes -a x$ac_cv_lib_intl_gettext != xyes; then AC_MSG_ERROR(You need gettext installed in order to build ${PACKAGE}) fi if test "X$GLIBCONFIG" != X; then AC_MSG_CHECKING(for special glib includes: ) GLIBHEAD=`$GLIBCONFIG --cflags` AC_MSG_RESULT($GLIBHEAD) CPPFLAGS="$CPPFLAGS $GLIBHEAD" AC_MSG_CHECKING(for glib library flags) GLIBLIB=`$GLIBCONFIG --libs` AC_MSG_RESULT($GLIBLIB) LIBS="$LIBS $GLIBLIB" fi dnl ======================================================================== dnl Headers dnl ======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(asm/types.h) AC_CHECK_HEADERS(assert.h) AC_CHECK_HEADERS(auth-client.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(fcntl.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/errqueue.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pam/pam_appl.h) AC_CHECK_HEADERS(pthread.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(security/pam_appl.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) 
AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/poll.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/select.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/timeb.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/uio.h) AC_CHECK_HEADERS(sys/un.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) AC_CHECK_HEADERS(winsock.h) dnl These headers need prerequisits before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) dnl AC_CHECK_HEADERS(netinet/icmp6.h) dnl AC_CHECK_HEADERS(netinet/ip6.h) dnl AC_CHECK_HEADERS(netinet/ip_icmp.h) AC_MSG_CHECKING(for special libxml2 includes) if test "x$XML2CONFIG" = "x"; then AC_MSG_ERROR(libxml2 config not found) else XML2HEAD="`$XML2CONFIG --cflags`" AC_MSG_RESULT($XML2HEAD) AC_CHECK_LIB(xml2, xmlReadMemory) AC_CHECK_LIB(xslt, xsltApplyStylesheet) fi CPPFLAGS="$CPPFLAGS $XML2HEAD" AC_CHECK_HEADERS(libxml/xpath.h) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(The libxml developement headers were not found) fi if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(The libxslt developement headers were not found) fi dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include ]]) AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include ]]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(g_log_set_default_handler) AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) dnl 
======================================================================== dnl ltdl dnl ======================================================================== AC_CHECK_LIB(ltdl, lt_dlopen, [LTDL_foo=1]) if test "x${enable_bundled_ltdl}" = "xyes"; then if test $ac_cv_lib_ltdl_lt_dlopen = yes; then AC_MSG_NOTICE([Disabling usage of installed ltdl]) fi ac_cv_lib_ltdl_lt_dlopen=no fi LIBLTDL_DIR="" if test $ac_cv_lib_ltdl_lt_dlopen != yes ; then AC_MSG_NOTICE([Installing local ltdl]) LIBLTDL_DIR=libltdl ( cd $srcdir ; $TAR -xvf libltdl.tar ) if test "$?" -ne 0; then AC_MSG_ERROR([$TAR of libltdl.tar in $srcdir failed]) fi AC_CONFIG_SUBDIRS(libltdl) else LIBS="$LIBS -lltdl" AC_MSG_NOTICE([Using installed ltdl]) INCLTDL="" LIBLTDL="" fi AC_SUBST(INCLTDL) AC_SUBST(LIBLTDL) AC_SUBST(LIBLTDL_DIR) dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurse takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. 
CURSESLIBS='' if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)] ) fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)] ) fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)] ) fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)] ) fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then AC_MSG_CHECKING(whether printw() requires argument of "const char *") ac_save_LIBS=$LIBS LIBS="$CURSESLIBS $LIBS" ac_save_CFLAGS=$CFLAGS CFLAGS="-Wcast-qual -Werror" AC_LINK_IFELSE( [AC_LANG_PROGRAM( [ #if defined(HAVE_CURSES_H) # include #elif defined(HAVE_NCURSES_H) # include #endif ], [printw((const char *)"Test");] )], [ac_cv_compatible_printw=yes], [ac_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT([$ac_cv_compatible_printw]) if test "$ac_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. 
You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Enable gprof #LIBS="$LIBS -pg" #CFLAGS="$CFLAGS -pg" dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer" #CFLAGS="$CFLAGS -fno-inline-functions -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl CFLAGS="$CFLAGS -fno-default-inline -fno-inline" dnl Update features PCMK_FEATURES="$PCMK_FEATURES gprof" ;; *) SUPPORT_PROFILING=0;; esac AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for gprof profiling) case $SUPPORT_GCOV in 1|yes|true) SUPPORT_GCOV=1 dnl Enable gprof #LIBS="$LIBS -pg" #CFLAGS="$CFLAGS -pg" dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage -fno-inline -fno-default-inline" dnl Turn off optimization so code coverage tool dnl can get accurate line numbers CFLAGS=`echo "$CFLAGS" | sed -e 's/-O[0-9]*//g'` CFLAGS="$CFLAGS -O0" dnl Update features PCMK_FEATURES="$PCMK_FEATURES gcov" ;; *) SUPPORT_PROFILING=0;; esac AC_DEFINE_UNQUOTED(SUPPORT_GCOV, $SUPPORT_GCOV, Support for gcov coverage testing) dnl ======================================================================== dnl Cluster infrastructure - Heartbeat / LibQB dnl ======================================================================== dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the depenancies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) if test x"$ac_cv_lib_plumb_G_main_add_IPC_Channel" != x"yes"; then 
AC_MSG_FAILURE(Core Heartbeat utility libraries not found: $ac_cv_lib_plumb_G_main_add_IPC_Channel) fi dnl Compatability checks AC_CHECK_FUNCS(msgfromIPC_timeout) AC_CHECK_MEMBERS([struct lrm_ops.fail_rsc],,,[[#include ]]) if test x${enable_no_stack} = xyes; then SUPPORT_HEARTBEAT=no SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb, HAVE_libqb=1, HAVE_libqb=0) AC_CHECK_HEADERS(qb/qbipc_common.h) AC_CHECK_LIB(qb, qb_log_callsite_get) AC_CHECK_FUNCS(qb_log_callsite_get) LIBQB_LOG=0 if test $ac_cv_lib_qb_qb_log_callsite_get = yes; then LIBQB_LOG=1 PCMK_FEATURES="$PCMK_FEATURES libqb-logging" fi AC_DEFINE_UNQUOTED(LIBQB_LOGGING, $LIBQB_LOG, Use libqb for logging) AC_DEFINE_UNQUOTED(LIBQB_IPC, 0, Use libqb for IPC) LIBS="$LIBS $libqb_LIBS" AC_CHECK_HEADERS(hb_config.h) AC_CHECK_HEADERS(glue_config.h) AC_CHECK_HEADERS(agent_config.h) GLUE_HEADER=none if test "$ac_cv_header_glue_config_h" = "yes"; then GLUE_HEADER=glue_config.h elif test "$ac_cv_header_hb_config_h" = "yes"; then GLUE_HEADER=hb_config.h else AC_MSG_FAILURE(Core development headers were not found) fi dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_DTD_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DTD_DIRECTORY,"$CRM_DTD_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_DTD_DIRECTORY) AC_DEFINE_UNQUOTED(CRM_DTD_VERSION,"$CRM_DTD_VERSION", Current version of the Pacemaker Relax-NG Schema) AC_SUBST(CRM_DTD_VERSION) CRM_DAEMON_USER=`extract_header_define $GLUE_HEADER HA_CCMUSER` AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) CRM_DAEMON_GROUP=`extract_header_define $GLUE_HEADER HA_APIGROUP` AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_STATE_DIR=${localstatedir}/run/crm AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files 
and sockets) AC_SUBST(CRM_STATE_DIR) PE_STATE_DIR="${localstatedir}/lib/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs) AC_SUBST(PE_STATE_DIR) dnl Eventually move out of the heartbeat dir tree and create compatability code CRM_CONFIG_DIR="${localstatedir}/lib/heartbeat/crm" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep CIB configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) HB_DAEMON_DIR=`try_extract_header_define $GLUE_HEADER HA_LIBHBDIR $libdir/heartbeat` AC_DEFINE_UNQUOTED(HB_DAEMON_DIR,"$HB_DAEMON_DIR", Location for Heartbeat expects Pacemaker daemons to be in) AC_SUBST(HB_DAEMON_DIR) dnl Needed so that the Corosync plugin can clear out the directory as Heartbeat does HA_STATE_DIR=`extract_header_define $GLUE_HEADER HA_VARRUNDIR` AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where Heartbeat keeps state files and sockets) AC_SUBST(HA_STATE_DIR) CRM_RSCTMP_DIR= if test "$ac_cv_header_agent_config_h" = "yes"; then CRM_RSCTMP_DIR=`extract_header_define agent_config.h HA_RSCTMPDIR` else AC_MSG_WARN(Agents development headers were not found.) 
fi if test x$CRM_RSCTMP_DIR = x; then CRM_RSCTMP_DIR="$HA_STATE_DIR/heartbeat/rsctmp" fi AC_MSG_CHECKING(Scratch dir for resource agents) AC_MSG_RESULT($CRM_RSCTMP_DIR) AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) dnl Needed for the location of hostcache in CTS.py HA_VARLIBHBDIR=`extract_header_define $GLUE_HEADER HA_VARLIBHBDIR` AC_SUBST(HA_VARLIBHBDIR) AC_DEFINE_UNQUOTED(UUID_FILE,"$localstatedir/lib/heartbeat/hb_uuid", Location of Heartbeat's UUID file) OCF_ROOT_DIR=`extract_header_define $GLUE_HEADER OCF_ROOT_DIR` if test "X$OCF_ROOT_DIR" = X; then AC_MSG_ERROR(Could not locate OCF directory) fi AC_SUBST(OCF_ROOT_DIR) OCF_RA_DIR=`extract_header_define $GLUE_HEADER OCF_RA_DIR` AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) dnl Extract this value from glue_config.h once we no longer support anything else STONITH_PLUGIN_DIR="$libdir/stonith/plugins/stonith/" AC_DEFINE_UNQUOTED(STONITH_PLUGIN_DIR,"$STONITH_PLUGIN_DIR", Location for Stonith plugins) AC_SUBST(STONITH_PLUGIN_DIR) RH_STONITH_DIR="$sbindir" AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents) RH_STONITH_PREFIX="fence_" AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING(build version) BUILD_VERSION=$Format:%H$ if test $BUILD_VERSION != ":%H$"; then AC_MSG_RESULT(archive hash: $BUILD_VERSION) elif test -x $GIT -a -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT(git hash: $BUILD_VERSION) else # The current directory name make a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT(directory based hash: $BUILD_VERSION) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) STACKS="" 
CLUSTERLIBS=""

dnl ========================================================================
dnl Cluster stack - Heartbeat
dnl ========================================================================

dnl SUPPORT_HEARTBEAT is a tri-state knob (1/yes/true, try, or off);
dnl "try" degrades gracefully when the client library is missing.
case $SUPPORT_HEARTBEAT in
    1|yes|true)
	AC_CHECK_LIB(hbclient, ll_cluster_new, [SUPPORT_HEARTBEAT=1], [AC_MSG_FAILURE(Unable to support Heartbeat: client libraries not found)]);;
    try)
	AC_CHECK_LIB(hbclient, ll_cluster_new, [SUPPORT_HEARTBEAT=1], [SUPPORT_HEARTBEAT=0]);;
    *) SUPPORT_HEARTBEAT=0;;
esac

if test $SUPPORT_HEARTBEAT = 1; then
    STACKS="$STACKS heartbeat"
    AC_DEFINE_UNQUOTED(CCM_LIBRARY, "libccmclient.so.1", Library to load for ccm support)
    AC_DEFINE_UNQUOTED(HEARTBEAT_LIBRARY, "libhbclient.so.1", Library to load for heartbeat support)
fi

AM_CONDITIONAL(BUILD_HEARTBEAT_SUPPORT, test $SUPPORT_HEARTBEAT = 1)
AC_DEFINE_UNQUOTED(SUPPORT_HEARTBEAT, $SUPPORT_HEARTBEAT, Support the Heartbeat messaging and membership layer)
AC_SUBST(SUPPORT_HEARTBEAT)

dnl ========================================================================
dnl Cluster stack - Corosync
dnl ========================================================================

dnl Normalize the values
case $SUPPORT_CS in
    1|yes|true) missingisfatal=1;;
    try) missingisfatal=0;;
    *) SUPPORT_CS=no;;
esac

AC_MSG_CHECKING(for native corosync)
COROSYNC_LIBS=""
CS_USES_LIBQB=0
PCMK_SERVICE_ID=9
LCRSODIR="$libdir"
if test $SUPPORT_CS = no; then
    AC_MSG_RESULT(no (disabled))
else
    AC_MSG_RESULT($SUPPORT_CS, with '$CSPREFIX')
    PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal
    PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal
    PKG_CHECK_MODULES(cmap, libcmap, HAVE_cmap=1, HAVE_cmap=0)
    PKG_CHECK_MODULES(cman, libcman, HAVE_cman=1, HAVE_cman=0)
    PKG_CHECK_MODULES(confdb, libconfdb, HAVE_confdb=1, HAVE_confdb=0)
    PKG_CHECK_MODULES(fenced, libfenced, HAVE_fenced=1, HAVE_fenced=0)
    PKG_CHECK_MODULES(quorum, libquorum, HAVE_quorum=1, HAVE_quorum=0)
    PKG_CHECK_MODULES(oldipc, libcoroipcc, HAVE_oldipc=1, HAVE_oldipc=0)

    dnl Prefer the legacy coroipcc IPC; fall back to libqb-based IPC.
    if test $HAVE_oldipc = 1; then
        SUPPORT_CS=1
        CFLAGS="$CFLAGS $oldipc_FLAGS $cpg_FLAGS $cfg_FLAGS"
        COROSYNC_LIBS="$COROSYNC_LIBS $oldipc_LIBS $cpg_LIBS $cfg_LIBS"
    elif test $HAVE_libqb = 1; then
        SUPPORT_CS=1
        CS_USES_LIBQB=1
        CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS"
        COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS"
        AC_CHECK_LIB(corosync_common, cs_strerror)
    else
        aisreason="corosync/libqb IPC libraries not found by pkg_config"
    fi

    AC_DEFINE_UNQUOTED(HAVE_CONFDB, $HAVE_confdb, Have the old hierarchical Corosync config API)
    AC_DEFINE_UNQUOTED(HAVE_CMAP, $HAVE_cmap, Have the new non-hierarchical Corosync config API)
fi

if test $SUPPORT_CS = 1 -a x$HAVE_oldipc = x0 ; then
    dnl Support for plugins was removed about the time the IPC was
    dnl moved to libqb.
    dnl The only option now is the built-in quorum API
    CFLAGS="$CFLAGS $cmap_CFLAGS $quorum_CFLAGS"
    COROSYNC_LIBS="$COROSYNC_LIBS $cmap_LIBS $quorum_LIBS"
    STACKS="$STACKS corosync-native"
    AC_DEFINE_UNQUOTED(SUPPORT_CS_QUORUM, 1, Support the consumption of membership and quorum from corosync)
fi

if test $SUPPORT_CS = 1 -a x$HAVE_confdb = x1; then
    dnl Need confdb to support cman and the plugins
    LCRSODIR=`$PKGCONFIG corosync --variable=lcrsodir`
    STACKS="$STACKS corosync-plugin"
    COROSYNC_LIBS="$COROSYNC_LIBS $confdb_LIBS"

    if test $SUPPORT_CMAN != no; then
        if test $HAVE_cman = 1 -a $HAVE_fenced = 1; then
            SUPPORT_CMAN=1
            STACKS="$STACKS cman"
            CFLAGS="$CFLAGS $cman_FLAGS $fenced_FLAGS"
            COROSYNC_LIBS="$COROSYNC_LIBS $cman_LIBS $fenced_LIBS"
        fi
    fi
fi

dnl Normalize SUPPORT_CS and SUPPORT_CMAN for use with #if directives
if test $SUPPORT_CMAN != 1; then
    SUPPORT_CMAN=0
fi

if test $SUPPORT_CS = 1; then
    CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS"
elif test $SUPPORT_CS != no; then
    SUPPORT_CS=0
    if test $missingisfatal = 0; then
        AC_MSG_WARN(Unable to support Corosync: $aisreason)
    else
        AC_MSG_FAILURE(Unable to support Corosync: $aisreason)
    fi
fi

AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer)
AC_DEFINE_UNQUOTED(SUPPORT_CMAN, $SUPPORT_CMAN, Support the consumption of membership and quorum from cman)
AC_DEFINE_UNQUOTED(CS_USES_LIBQB, $CS_USES_LIBQB, Does corosync use libqb for its ipc)
AC_DEFINE_UNQUOTED(PCMK_SERVICE_ID, $PCMK_SERVICE_ID, Corosync service number)
AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1)
AM_CONDITIONAL(BUILD_CS_PLUGIN, test $HAVE_confdb = 1) dnl confdb went away at about the same time as plugins
AC_SUBST(SUPPORT_CMAN)
AC_SUBST(SUPPORT_CS)

dnl
dnl Cluster stack - Sanity
dnl
if test x${enable_no_stack} = xyes; then
    AC_MSG_NOTICE(No cluster stack supported. Just building the Policy Engine)
    PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack"
else
    AC_MSG_CHECKING(for supported stacks)
    if test x"$STACKS" = x; then
        AC_MSG_FAILURE(You must support at least one cluster stack (heartbeat or corosync) )
    fi
    AC_MSG_RESULT($STACKS)
    PCMK_FEATURES="$PCMK_FEATURES $STACKS"
fi

AC_SUBST(CLUSTERLIBS)
AC_SUBST(LCRSODIR)

dnl ========================================================================
dnl SNMP
dnl ========================================================================

dnl Tri-state: explicit enable makes a missing dependency fatal; "try" warns.
case $SUPPORT_SNMP in
    1|yes|true) missingisfatal=1;;
    try) missingisfatal=0;;
    *) SUPPORT_SNMP=no;;
esac

SNMPLIBS=""
AC_MSG_CHECKING(for snmp support)
if test $SUPPORT_SNMP = no; then
    AC_MSG_RESULT(no (disabled))
    SUPPORT_SNMP=0
else
    SNMPCONFIG=""
    AC_MSG_RESULT($SUPPORT_SNMP)

    AC_CHECK_HEADERS(net-snmp/net-snmp-config.h)
    if test "x${ac_cv_header_net_snmp_net_snmp_config_h}" != "xyes"; then
        SUPPORT_SNMP="no"
    fi

    if test $SUPPORT_SNMP != no; then
        AC_PATH_PROGS(SNMPCONFIG, net-snmp-config)
        if test "X${SNMPCONFIG}" = "X"; then
            AC_MSG_RESULT(You need the net_snmp development package to continue.)
            SUPPORT_SNMP=no
        fi
    fi

    if test $SUPPORT_SNMP != no; then
        AC_MSG_CHECKING(for special snmp libraries)
        SNMPLIBS=`$SNMPCONFIG --agent-libs`
        AC_MSG_RESULT($SNMPLIBS)
    fi

    if test $SUPPORT_SNMP != no; then
        savedLibs=$LIBS
        LIBS="$LIBS $SNMPLIBS"
        dnl On many systems libcrypto is needed when linking against libsnmp.
        dnl Check to see if it exists, and if so use it.
        dnl AC_CHECK_LIB(crypto, CRYPTO_free, CRYPTOLIB="-lcrypto",)
        dnl AC_SUBST(CRYPTOLIB)
        AC_CHECK_FUNCS(netsnmp_transport_open_client)
        if test $ac_cv_func_netsnmp_transport_open_client != yes; then
            AC_CHECK_FUNCS(netsnmp_tdomain_transport)
            if test $ac_cv_func_netsnmp_tdomain_transport != yes; then
                SUPPORT_SNMP=no
            else
                AC_DEFINE_UNQUOTED(NETSNMPV53, 1, [Use the older 5.3 version of the net-snmp API])
            fi
        fi
        LIBS=$savedLibs
    fi

    if test $SUPPORT_SNMP = no; then
        SNMPLIBS=""
        SUPPORT_SNMP=0
        if test $missingisfatal = 0; then
            AC_MSG_WARN(Unable to support SNMP)
        else
            AC_MSG_FAILURE(Unable to support SNMP)
        fi
    else
        SUPPORT_SNMP=1
    fi
fi

if test $SUPPORT_SNMP = 1; then
    PCMK_FEATURES="$PCMK_FEATURES snmp"
fi

AC_SUBST(SNMPLIBS)
AM_CONDITIONAL(ENABLE_SNMP, test "$SUPPORT_SNMP" = "1")
AC_DEFINE_UNQUOTED(ENABLE_SNMP, $SUPPORT_SNMP, Build in support for sending SNMP traps)

dnl ========================================================================
dnl ESMTP
dnl ========================================================================

case $SUPPORT_ESMTP in
    1|yes|true) missingisfatal=1;;
    try) missingisfatal=0;;
    *) SUPPORT_ESMTP=no;;
esac

dnl Fixed: was ESMTPLIB="" — every later use and the AC_SUBST below refer
dnl to ESMTPLIBS, so the substituted variable was never initialized.
ESMTPLIBS=""
AC_MSG_CHECKING(for esmtp support)
if test $SUPPORT_ESMTP = no; then
    AC_MSG_RESULT(no (disabled))
    SUPPORT_ESMTP=0
else
    ESMTPCONFIG=""
    AC_MSG_RESULT($SUPPORT_ESMTP)

    AC_CHECK_HEADERS(libesmtp.h)
    if test "x${ac_cv_header_libesmtp_h}" != "xyes"; then
        dnl Fixed: was ENABLE_ESMTP="no", which nothing reads — a missing
        dnl header must disable SUPPORT_ESMTP or libesmtp-config is still run.
        SUPPORT_ESMTP="no"
    fi

    if test $SUPPORT_ESMTP != no; then
        AC_PATH_PROGS(ESMTPCONFIG, libesmtp-config)
        if test "X${ESMTPCONFIG}" = "X"; then
            AC_MSG_RESULT(You need the libesmtp development package to continue.)
            SUPPORT_ESMTP=no
        fi
    fi

    if test $SUPPORT_ESMTP != no; then
        AC_MSG_CHECKING(for special esmtp libraries)
        ESMTPLIBS=`$ESMTPCONFIG --libs | tr '\n' ' '`
        AC_MSG_RESULT($ESMTPLIBS)
    fi

    if test $SUPPORT_ESMTP = no; then
        SUPPORT_ESMTP=0
        if test $missingisfatal = 0; then
            AC_MSG_WARN(Unable to support ESMTP)
        else
            AC_MSG_FAILURE(Unable to support ESMTP)
        fi
    else
        SUPPORT_ESMTP=1
        PCMK_FEATURES="$PCMK_FEATURES libesmtp"
    fi
fi

AC_SUBST(ESMTPLIBS)
AM_CONDITIONAL(ENABLE_ESMTP, test "$SUPPORT_ESMTP" = "1")
AC_DEFINE_UNQUOTED(ENABLE_ESMTP, $SUPPORT_ESMTP, Build in support for sending mail notifications with ESMTP)

dnl ========================================================================
dnl ACL
dnl ========================================================================

case $SUPPORT_ACL in
    1|yes|true) missingisfatal=1;;
    try) missingisfatal=0;;
    *) SUPPORT_ACL=no;;
esac

AC_MSG_CHECKING(for acl support)
if test $SUPPORT_ACL = no; then
    AC_MSG_RESULT(no (disabled))
    SUPPORT_ACL=0
else
    AC_MSG_RESULT($SUPPORT_ACL)
    AC_CHECK_MEMBERS([struct IPC_CHANNEL.farside_uid], [SUPPORT_ACL=1], [SUPPORT_ACL=0], [[#include ]])
    if test $SUPPORT_ACL = 0; then
        if test $missingisfatal = 0; then
            AC_MSG_WARN(Unable to support ACL. You need to use cluster-glue >= 1.0.6)
        else
            AC_MSG_FAILURE(Unable to support ACL. You need to use cluster-glue >= 1.0.6)
        fi
    fi
fi

if test $SUPPORT_ACL = 1; then
    PCMK_FEATURES="$PCMK_FEATURES acls"
fi

AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1")
AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL)

dnl ========================================================================
dnl GnuTLS
dnl ========================================================================

AC_CHECK_HEADERS(gnutls/gnutls.h)
AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h)

dnl GNUTLS library: Attempt to determine by 'libgnutls-config' program.
dnl If no 'libgnutls-config', try traditional autoconf means.
AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config)
if test -n "$LIBGNUTLS_CONFIG"; then
    AC_MSG_CHECKING(for gnutls header flags)
    GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`";
    AC_MSG_RESULT($GNUTLSHEAD)
    AC_MSG_CHECKING(for gnutls library flags)
    GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`";
    AC_MSG_RESULT($GNUTLSLIBS)
fi
AC_CHECK_LIB(gnutls, gnutls_init)
AC_CHECK_FUNCS(gnutls_priority_set_direct)
AC_SUBST(GNUTLSHEAD)
AC_SUBST(GNUTLSLIBS)

dnl ========================================================================
dnl System Health
dnl ========================================================================

dnl Check if servicelog development package is installed
SERVICELOG=servicelog-1
SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG packages)
if $PKGCONFIG --exists $SERVICELOG
then
    PKG_CHECK_MODULES([SERVICELOG], [servicelog-1])
    SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes")

dnl Check if OpenIPMI packages and servicelog are installed
OPENIPMI="OpenIPMI OpenIPMIposix"
OPENIPMI_SERVICELOG_EXISTS="no"
AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages)
if $PKGCONFIG --exists $OPENIPMI $SERVICELOG
then
    PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix])
    OPENIPMI_SERVICELOG_EXISTS="yes"
fi
AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS)
AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes")

dnl ========================================================================
dnl Compiler flags
dnl ========================================================================

dnl Make sure that CFLAGS is not exported. If the user did
dnl not have CFLAGS in their environment then this should have
dnl no effect. However if CFLAGS was exported from the user's
dnl environment, then the new CFLAGS will also be exported
dnl to sub processes.
CC_ERRORS=""
CC_EXTRAS=""

dnl Un-export CFLAGS (re-assigning after unset drops the export attribute)
dnl so child processes see only what we pass explicitly.
if export | fgrep " CFLAGS=" > /dev/null; then
    SAVED_CFLAGS="$CFLAGS"
    unset CFLAGS
    CFLAGS="$SAVED_CFLAGS"
    unset SAVED_CFLAGS
fi

if test "$GCC" != yes; then
    CFLAGS="$CFLAGS -g"
    enable_fatal_warnings=no
else
    CFLAGS="$CFLAGS -ggdb"

    # We had to eliminate -Wnested-externs because of libtool changes
    # NOTE(review): -Wnested-externs still appears in the list below,
    # contradicting the comment above — confirm which is intended.
    EXTRA_FLAGS="-fgnu89-inline -fstack-protector-all -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat=2 -Wformat-security -Wformat-nonliteral -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wno-unused-but-set-variable -Wpointer-arith -Wstrict-prototypes -Wunsigned-char -Wwrite-strings"

    # Additional warnings it might be nice to enable one day
    # -Wshadow
    # -Wunreachable-code

    # Keep only the flags this compiler actually understands.
    for j in $EXTRA_FLAGS
    do
        if cc_supports_flag $j
        then
            CC_EXTRAS="$CC_EXTRAS $j"
        fi
    done

    dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x
    dnl NOTE(review): probes the literal `gcc` binary, not $CC — verify this
    dnl is intentional when cross-compiling or using a non-default compiler.
    GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'`
    AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4)

    dnl System specific options
    case "$host_os" in
        *linux*|*bsd*)
            if test "${enable_fatal_warnings}" = "unknown"; then
                enable_fatal_warnings=yes
            fi
            ;;
    esac

    if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then
        enable_fatal_warnings=yes
    else
        enable_fatal_warnings=no
    fi

    if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then
        AC_MSG_NOTICE(Enabling ANSI Compatibility)
        CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY"
    fi

    AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS})
fi

CFLAGS="$CFLAGS $CC_EXTRAS"

dnl Saved before -Werror is (possibly) appended, for targets that must not
dnl treat warnings as fatal.
NON_FATAL_CFLAGS="$CFLAGS"
AC_SUBST(NON_FATAL_CFLAGS)

dnl
dnl We reset CFLAGS to include our warnings *after* all function
dnl checking goes on, so that our warning flags don't keep the
dnl AC_*FUNCS() calls above from working. In particular, -Werror will
dnl *always* cause us troubles if we set it before here.
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff... AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--quiet" QUIET_MAKE_OPTS="--quiet" fi AC_MSG_RESULT(Supress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKE="${MAKE} \$(QUIET_MAKE_OPTS)" AC_SUBST(CC) AC_SUBST(MAKE) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_MAKE_OPTS) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl The Makefiles and shell scripts we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTSvars.py \ cts/LSBDummy \ cts/benchmark/Makefile \ cts/benchmark/clubench \ cib/Makefile \ crmd/Makefile \ pengine/Makefile \ pengine/regression.core.sh \ doc/Makefile \ doc/Pacemaker_Explained/publican.cfg \ doc/Clusters_from_Scratch/publican.cfg \ include/Makefile \ include/crm/Makefile \ include/crm/common/Makefile \ include/crm/pengine/Makefile \ replace/Makefile \ lib/Makefile \ lib/pcmk.pc \ lib/pcmk-pe.pc \ lib/pcmk-cib.pc \ lib/ais/Makefile \ lib/common/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/pengine/Makefile \ lib/transition/Makefile \ lib/fencing/Makefile \ lib/plugins/Makefile \ lib/plugins/lrm/Makefile \ mcp/Makefile \ mcp/pacemaker \ mcp/pacemaker.service \ fencing/Makefile \ extra/Makefile \ extra/resources/Makefile \ extra/rgmanager/Makefile \ tools/Makefile \ tools/crm_report \ tools/coverage.sh \ tools/hb2openais.sh \ tools/crm_primitive.py \ -shell/Makefile \ - 
shell/templates/Makefile \ - shell/regression/Makefile \ - shell/regression/testcases/Makefile \ - shell/modules/Makefile \ - shell/modules/ui.py \ - shell/modules/ra.py \ - shell/modules/vars.py \ - shell/modules/help.py \ xml/Makefile \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([ Corosync Plugins = ${LCRSODIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Use system LTDL = ${ac_cv_lib_ltdl_lt_dlopen}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff --git a/cts/CIB.py b/cts/CIB.py index ad7b2ba6a3..76d73f3e87 100644 --- a/cts/CIB.py +++ b/cts/CIB.py @@ -1,741 +1,565 @@ '''CTS: Cluster Testing System: CIB generator ''' __copyright__=''' Author: Andrew Beekhof Copyright (C) 2008 Andrew Beekhof ''' from UserDict import UserDict import sys, time, types, syslog, os, struct, string, signal, traceback, warnings, socket from cts.CTSvars import * from cts.CTS import ClusterManager, RemoteExec class CibBase: cts_cib = None cib_tmpfile = None version = "unknown" feature_set = "unknown" Factory = None def __init__(self, CM, factory, tmpfile=None): self.CM = CM self.Factory = 
factory if not tmpfile: warnings.filterwarnings("ignore") self.cib_tmpfile=os.tmpnam() warnings.resetwarnings() else: self.cib_tmpfile = tmpfile self.Factory.tmpfile = self.cib_tmpfile def version(self): return self.version def NextIP(self): fields = string.split(self.CM.Env["IPBase"], '.') fields[3] = str(int(fields[3])+1) ip = string.join(fields, '.') self.CM.Env["IPBase"] = ip return ip -class CIB10(CibBase): - feature_set = "3.0" - version = "pacemaker-1.0" - cib_template = ''' - - - - - - - - -''' - - def _create(self, command): - fixed = "HOME=/root CIB_file="+self.Factory.tmpfile+" crm --force configure " + command - rc = self.CM.rsh(self.Factory.target, fixed) - if rc != 0: - self.CM.log("Configure call failed: "+fixed) - sys.exit(1) - - def _show(self, command=""): - output = "" - (rc, result) = self.CM.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" crm configure show "+command, None, ) - for line in result: - output += line - self.CM.debug("Generated Config: "+line) - return output - - def NewIP(self, name=None, standard="ocf:heartbeat"): - ip = self.NextIP() - if not name: - name = "r"+ip - - if not standard: - standard = "" - else: - standard += ":" - - self._create('''primitive %s %sIPaddr params ip=%s cidr_netmask=32 op monitor interval=5s''' - % (name, standard, ip)) - return name - - def install(self, target): - old = self.Factory.tmpfile - - # Force a rebuild - self.cts_cib = None - - self.Factory.tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml" - self.contents(target) - self.CM.rsh(self.Factory.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.Factory.tmpfile) - - self.Factory.tmpfile = old - - def contents(self, target=None): - # fencing resource - if self.cts_cib: - return self.cts_cib - - if target: - self.Factory.target = target - - cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''') - self.CM.rsh(self.Factory.target, '''echo "%s" > %s''' % (cib_base, 
self.Factory.tmpfile)) - #self.CM.rsh.cp(self.Factory.tmpfile, "root@%s:%s" % (self.Factory.target, self.Factory.tmpfile)) - - nodelist = "" - self.num_nodes = 0 - for node in self.CM.Env["nodes"]: - nodelist += node + " " - self.num_nodes = self.num_nodes + 1 - - no_quorum = "stop" - if self.num_nodes < 3: - no_quorum = "ignore" - self.CM.log("Cluster only has %d nodes, configuring: no-quroum-policy=ignore" % self.num_nodes) - - - # The shell no longer functions when the lrmd isn't running, how wonderful - # Start one here and let the cluster clean it up when the full stack starts - # Just hope target has the same location for lrmd - self.CM.rsh(self.Factory.target, CTSvars.CRM_DAEMON_DIR+"/lrmd", synchronous=0) - - # Tell the shell to mind its own business, we know what we're doing - self.CM.rsh(self.Factory.target, "crm options check-mode relaxed") - - # Fencing resource - # Define first so that the shell doesn't reject every update - if self.CM.Env["DoFencing"]: - params = None - entries = string.split(self.CM.Env["stonith-params"], ',') - for entry in entries: - (name, value) = string.split(entry, '=') - if name == "hostlist" and value == "all": - value = string.join(self.CM.Env["nodes"], " ") - - if params: - params = ("""%s '%s="%s"' """ % (params, name, value)) - else: - params = ("""'%s="%s"' """ % (name, value)) - - if params: - params = "params %s" % params - else: - params = "" - - # Set a threshold for unreliable stonith devices such as the vmware one - self._create('''primitive Fencing stonith::%s %s meta migration-threshold=5 op monitor interval=120s timeout=300 op start interval=0 timeout=180s op stop interval=0 timeout=180s''' % (self.CM.Env["stonith-type"], params)) - - self._create('''property stonith-enabled=%s''' % (self.CM.Env["DoFencing"])) - self._create('''property start-failure-is-fatal=false pe-input-series-max=5000 default-action-timeout=60s''') - self._create('''property shutdown-escalation=5min batch-limit=10 dc-deadtime=5s''') - 
self._create('''property no-quorum-policy=%s expected-quorum-votes=%d''' % (no_quorum, self.num_nodes)) - - if self.CM.Env["DoBSC"] == 1: - self._create('''property ident-string="Linux-HA TEST configuration file - REMOVEME!!"''') - - # Add resources? - if self.CM.Env["CIBResource"] == 1: - self.add_resources() - - if self.CM.cluster_monitor == 1: - self._create('''primitive cluster_mon ocf:pacemaker:ClusterMon params update=10 extra_options="-r -n" user=abeekhof htmlfile=/suse/abeekhof/Export/cluster.html op start interval=0 requires=nothing op monitor interval=5s requires=nothing''') - self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''') - - # generate cib - self.cts_cib = self._show("xml") - - if self.Factory.tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml": - self.CM.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile) - - return self.cts_cib - - def add_resources(self): - # Group Resource - r1 = self.NewIP() - #ip = self.NextIP() - #r2 = "r"+ip - #self._create('''primitive %s heartbeat::IPaddr params 1=%s/32 op monitor interval=5s''' % (r2, ip)) - r2 = self.NewIP() - r3 = self.NewIP() - self._create('''group group-1 %s %s %s''' % (r1, r2, r3)) - - # Per-node resources - for node in self.CM.Env["nodes"]: - r = self.NewIP("rsc_"+node) - self._create('''location prefer-%s %s rule 100: \#uname eq %s''' % (node, r, node)) - - # LSB resource - lsb_agent = self.CM.install_helper("LSBDummy") - - self._create('''primitive lsb-dummy lsb::''' +lsb_agent+ ''' op monitor interval=5s''') - self._create('''colocation lsb-with-group INFINITY: lsb-dummy group-1''') - self._create('''order lsb-after-group mandatory: group-1 lsb-dummy symmetrical=true''') - - # Migrator - # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach - self._create('''primitive migrator ocf:pacemaker:Dummy meta resource-stickiness=1 allow-migrate=1 op monitor interval=P10S''') - - # Ping the test master - 
self._create('''primitive ping-1 ocf:pacemaker:ping params host_list=%s name=connected debug=true op monitor interval=60s''' % self.CM.Env["cts-master"]) - self._create('''clone Connectivity ping-1 meta globally-unique=false''') - - #master slave resource - self._create('''primitive stateful-1 ocf:pacemaker:Stateful op monitor interval=15s timeout=60s op monitor interval=16s role=Master timeout=60s ''') - self._create('''ms master-1 stateful-1 meta clone-max=%d clone-node-max=%d master-max=%d master-node-max=%d''' - % (self.num_nodes, 1, 1, 1)) - - # Require conectivity to run the master - self._create('''location %s-is-connected %s rule -INFINITY: connected lt %d or not_defined connected''' % ("m1", "master-1", 1)) - - # Group with the master - self._create('''colocation group-with-master INFINITY: group-1 master-1:Master''') - self._create('''order group-after-master mandatory: master-1:promote group-1:start symmetrical=true''') - class Option: def __init__(self, Factory, name, value, section="cib-bootstrap-options"): self.Factory = Factory self.id = "%s-%s" % (section, name) self.section = section self.name = name self.value = value self.target = "pcmk-1" self.cib_tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml" def show(self): text = '''''' text += ''' ''' % self.section text += ''' ''' % (self.id, self.name, self.value) text += ''' ''' text += '''''' return text def commit(self): self.Factory.debug("Writing out %s" % self.id) fixed = "HOME=/root CIB_file="+self.cib_tmpfile+" cibadmin --modify --xml-text '%s'" % self.show() rc = self.Factory.rsh(self.target, fixed) if rc != 0: self.Factory.log("Configure call failed: "+fixed) sys.exit(1) class CibXml: def __init__(self, tag, name, **kwargs): self.tag = tag self.name = name self.kwargs = kwargs def __setitem__(self, key, value): self.kwargs[key] = value def show(self): text = '''<%s id="%s"''' % (self.tag, self.name) for k in self.kwargs.keys(): text += ''' %s="%s"''' % (k, self.kwargs[k]) text += '''/>''' return 
text class Expression(CibXml): def __init__(self, name, attr, op, value=None): CibXml.__init__(self, "expression", name, attribute=attr, operation=op) if value: self["value"] = value class ResourceOp(CibXml): def __init__(self, resource, name, interval, **kwargs): CibXml.__init__(self, "op", "%s-%s-%s" % (resource, name, interval), **kwargs) self["name"] = name self["interval"] = interval class Rule: def __init__(self, name, score, op="and", expr=None): self.id = name self.op = op self.score = score self.expr = [] if expr: self.add_exp(expr) def add_exp(self, e): self.expr.append(e) def show(self): text = '''''' % (self.id, self.score) for e in self.expr: text += e.show() text += '''''' return text class Resource: def __init__(self, Factory, name, rtype, standard, provider=None): self.Factory = Factory self.name = name self.rtype = rtype self.standard = standard self.provider = provider self.op=[] self.meta={} self.param={} self.scores={} self.needs={} self.coloc={} if self.standard == "ocf" and not provider: self.provider = "heartbeat" elif self.standard == "lsb": self.provider = None def __setitem__(self, key, value): self.add_param(key, value) def add_op(self, name, interval, **kwargs): self.op.append(ResourceOp(self.name, name, interval, **kwargs)) def add_param(self, name, value): self.param[name] = value def add_meta(self, name, value): self.meta[name] = value def prefer(self, node, score="INFINITY", rule=None): if not rule: rule = Rule("prefer-%s-r" % node, score, expr=Expression("prefer-%s-e" % node, "#uname", "eq", node)) self.scores[node] = rule def _needs(self, resource, kind="Mandatory", first="start", then="start", **kwargs): kargs = kwargs.copy() kargs["kind"] = kind if then: kargs["first-action"] = "start" kargs["then-action"] = then if first: kargs["first-action"] = first self.needs[resource] = kargs def _coloc(self, resource, score="INFINITY", role=None, withrole=None, **kwargs): kargs = kwargs.copy() kargs["score"] = score if role: 
kargs["rsc-role"] = role if withrole: kargs["with-rsc-role"] = withrole self.coloc[resource] = kargs def constraints(self): text = "" for k in self.scores.keys(): text += '''''' % (k, self.name) text += self.scores[k].show() text += '''''' for k in self.needs.keys(): text += '''''' for k in self.coloc.keys(): text += '''''' text += "" return text def show(self): text = '''''' if len(self.meta) > 0: text += '''''' % self.name for p in self.meta.keys(): text += '''''' % (self.name, p, p, self.meta[p]) text += '''''' if len(self.param) > 0: text += '''''' % self.name for p in self.param.keys(): text += '''''' % (self.name, p, p, self.param[p]) text += '''''' if len(self.op) > 0: text += '''''' for o in self.op: text += o.show() text += '''''' text += '''''' return text def commit(self): self.Factory.debug("Writing out %s" % self.name) fixed = "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin --create --scope resources --xml-text '%s'" % self.show() rc = self.Factory.rsh(self.Factory.target, fixed) if rc != 0: self.Factory.log("Configure call failed: "+fixed) sys.exit(1) fixed = "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin --modify --xml-text '%s'" % self.constraints() rc = self.Factory.rsh(self.Factory.target, fixed) if rc != 0: self.Factory.log("Configure call failed: "+fixed) sys.exit(1) class Group(Resource): def __init__(self, Factory, name): self.name = name self.children = [] self.object = "group" Resource.__init__(self, Factory, name, None, None) def add_child(self, resource): self.children.append(resource) def __setitem__(self, key, value): self.add_meta(key, value) def show(self): text = '''<%s id="%s">''' % (self.object, self.name) if len(self.meta) > 0: text += '''''' % self.name for p in self.meta.keys(): text += '''''' % (self.name, p, p, self.meta[p]) text += '''''' for c in self.children: text += c.show() text += '''''' % self.object return text class Clone(Group): def __init__(self, Factory, name, child=None): Group.__init__(self, 
Factory, name) self.object = "clone" if child: self.add_child(child) def add_child(self, resource): if not self.children: self.children.append(resource) else: self.Factory.log("Clones can only have a single child. Ignoring %s" % resource.name) class Master(Clone): def __init__(self, Factory, name, child=None): Clone.__init__(self, Factory, name, child) self.object = "master" class CIB12(CibBase): feature_set = "3.0" version = "pacemaker-1.2" def _show(self, command=""): output = "" (rc, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, None, ) for line in result: output += line self.Factory.debug("Generated Config: "+line) return output def NewIP(self, name=None, standard="ocf"): ip = self.NextIP() if not name: name = "r"+ip r = Resource(self.Factory, name, "IPaddr2", standard) r["ip"] = ip r["cidr_netmask"] = "32" r.add_op("monitor", "5s") return r def install(self, target): old = self.Factory.tmpfile # Force a rebuild self.cts_cib = None self.Factory.tmpfile = CTSvars.CRM_CONFIG_DIR+"/cib.xml" self.contents(target) self.Factory.rsh(self.Factory.target, "chown "+CTSvars.CRM_DAEMON_USER+" "+self.Factory.tmpfile) self.Factory.tmpfile = old def contents(self, target=None): # fencing resource if self.cts_cib: return self.cts_cib if target: self.Factory.target = target self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty > %s" % self.Factory.tmpfile) #cib_base = self.cib_template % (self.feature_set, self.version, ''' remote-tls-port='9898' remote-clear-port='9999' ''') nodelist = "" self.num_nodes = 0 for node in self.CM.Env["nodes"]: nodelist += node + " " self.num_nodes = self.num_nodes + 1 no_quorum = "stop" if self.num_nodes < 3: no_quorum = "ignore" self.Factory.log("Cluster only has %d nodes, configuring: no-quroum-policy=ignore" % self.num_nodes) # Fencing resource # Define first so that the shell doesn't reject every update if self.CM.Env["DoFencing"]: st = Resource(self.Factory, 
"Fencing", self.CM.Env["stonith-type"], "stonith") # Set a threshold for unreliable stonith devices such as the vmware one st.add_meta("migration-threshold", "5") st.add_op("monitor", "120s", timeout="300s") st.add_op("stop", "0", timeout="180s") st.add_op("start", "0", timeout="180s") entries = string.split(self.CM.Env["stonith-params"], ',') for entry in entries: (name, value) = string.split(entry, '=') if name == "hostlist" and value == "all": value = string.join(self.CM.Env["nodes"], " ") st[name] = value st.commit() Option(self.Factory, "stonith-enabled", self.CM.Env["DoFencing"]).commit() Option(self.Factory, "start-failure-is-fatal", "false").commit() Option(self.Factory, "pe-input-series-max", "5000").commit() Option(self.Factory, "default-action-timeout", "60s").commit() Option(self.Factory, "shutdown-escalation", "5min").commit() Option(self.Factory, "batch-limit", "10").commit() Option(self.Factory, "dc-deadtime", "5s").commit() Option(self.Factory, "no-quorum-policy", no_quorum).commit() Option(self.Factory, "expected-quorum-votes", self.num_nodes).commit() if self.CM.Env["DoBSC"] == 1: Option(self.Factory, "ident-string", "Linux-HA TEST configuration file - REMOVEME!!").commit() # Add resources? 
if self.CM.Env["CIBResource"] == 1: self.add_resources() if self.CM.cluster_monitor == 1: mon = Resource(self.Factory, "cluster_mon", "ocf", "ClusterMon", "pacemaker") mon.add_op("start", "0", requires="nothing") mon.add_op("monitor", "5s", requires="nothing") mon["update"] = "10" mon["extra_options"] = "-r -n" mon["user"] = "abeekhof" mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html" mon.commit() #self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''') # generate cib self.cts_cib = self._show() if self.Factory.tmpfile != CTSvars.CRM_CONFIG_DIR+"/cib.xml": self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile) return self.cts_cib def add_resources(self): # Per-node resources for node in self.CM.Env["nodes"]: name = "rsc_"+node r = self.NewIP(name) r.prefer(node, "100") r.commit() # Migrator # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach m = Resource(self.Factory, "migrator","Dummy", "ocf", "pacemaker") m.add_meta("resource-stickiness","1") m.add_meta("allow-migrate", "1") m.add_op("monitor", "P10S") m.commit() # Ping the test master p = Resource(self.Factory, "ping-1","ping", "ocf", "pacemaker") p.add_op("monitor", "60s") p["host-list"] = self.CM.Env["cts-master"] p["name"] = "connected" p["debug"] = "true" c = Clone(self.Factory, "Connectivity", p) c["globally-unique"] = "false" c.commit() #master slave resource s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker") s.add_op("monitor", "15s", timeout="60s") s.add_op("monitor", "16s", timeout="60s", role="Master") ms = Master(self.Factory, "master-1", s) ms["clone-max"] = self.num_nodes ms["master-max"] = 1 ms["clone-node-max"] = 1 ms["master-node-max"] = 1 # Require conectivity to run the master r = Rule("connected", "-INFINITY", op="or") r.add_exp(Expression("m1-connected-1", "connected", "lt", "1")) r.add_exp(Expression("m1-connected-2", "connected", "not_defined", None)) 
ms.prefer("connected", rule=r) ms.commit() # Group Resource g = Group(self.Factory, "group-1") g.add_child(self.NewIP()) g.add_child(self.NewIP()) g.add_child(self.NewIP()) # Group with the master g._coloc("master-1", "INFINITY", withrole="Master") g._needs("master-1", first="promote", then="start") g.commit() # LSB resource lsb_agent = self.CM.install_helper("LSBDummy") lsb = Resource(self.Factory, "lsb-dummy",lsb_agent, "lsb") lsb.add_op("monitor", "5s") # LSB with group lsb._needs("group-1") lsb._coloc("group-1") lsb.commit() -class HASI(CIB10): - def add_resources(self): - # DLM resource - self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''') - self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''') +#class HASI(CIB10): +# def add_resources(self): +# # DLM resource +# self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''') +# self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''') # O2CB resource - self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''') - self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''') - self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''') - self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''') +# self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''') +# self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''') +# self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''') +# self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''') class ConfigFactory: def __init__(self, CM): self.CM = CM self.rsh = self.CM.rsh - self.register("pacemaker10", CIB10, CM, self) self.register("pacemaker11", CIB12, CM, self) - self.register("hae", HASI, CM, self) + self.register("pacemaker12", CIB12, CM, self) +# self.register("hae", HASI, CM, self) self.target = 
self.CM.Env["nodes"][0] self.tmpfile = None def log(self, args): self.CM.log("cib: %s" % args) def debug(self, args): self.CM.debug("cib: %s" % args) def register(self, methodName, constructor, *args, **kargs): """register a constructor""" _args = [constructor] _args.extend(args) setattr(self, methodName, apply(ConfigFactoryItem,_args, kargs)) def unregister(self, methodName): """unregister a constructor""" delattr(self, methodName) def createConfig(self, name="pacemaker-1.0"): if name == "pacemaker-1.0": name = "pacemaker10"; elif name == "pacemaker-1.1": name = "pacemaker11"; elif name == "pacemaker-1.2": name = "pacemaker12"; elif name == "hasi": name = "hae"; if hasattr(self, name): return getattr(self, name)() else: self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name) - return self.pacemaker10() + return self.pacemaker12() class ConfigFactoryItem: def __init__(self, function, *args, **kargs): assert callable(function), "function should be a callable obj" self._function = function self._args = args self._kargs = kargs def __call__(self, *args, **kargs): """call function""" _args = list(self._args) _args.extend(args) _kargs = self._kargs.copy() _kargs.update(kargs) return apply(self._function,_args,_kargs) # Basic Sanity Testing if __name__ == '__main__': import CTSlab env = CTSlab.LabEnvironment() env["nodes"] = [] env["nodes"].append("pcmk-1") env["nodes"].append("pcmk-2") env["nodes"].append("pcmk-3") env["nodes"].append("pcmk-4") env["CIBResource"] = 1 env["IPBase"] = "10.0.0.10" env["DoStonith"]=1 env["stonith-type"] = "fence_xvm" env["stonith-params"] = "pcmk_arg_map=domain:uname" manager = ClusterManager(env) manager.cluster_monitor = False CibFactory = ConfigFactory(manager) - cib = CibFactory.createConfig("pacemaker-1.1") + cib = CibFactory.createConfig("pacemaker-1.0") print cib.contents() diff --git a/pacemaker.spec.in b/pacemaker.spec.in index f98c0f642c..f394df77af 100644 --- a/pacemaker.spec.in +++ 
b/pacemaker.spec.in @@ -1,770 +1,768 @@ %global gname haclient %global uname hacluster %global pcmk_docdir %{_docdir}/%{name} %global specversion 1 %global upstream_version HEAD %global upstream_prefix pacemaker # Compatibility macros for distros (fedora) that don't provide Python macros by default # Do this instead of trying to conditionally include {_rpmconfigdir}/macros.python %{!?py_ver: %{expand: %%global py_ver %%(echo `python -c "import sys; print sys.version[:3]"`)}} %{!?py_prefix: %{expand: %%global py_prefix %%(echo `python -c "import sys; print sys.prefix"`)}} %{!?py_libdir: %{expand: %%global py_libdir %%{expand:%%%%{py_prefix}/%%%%{_lib}/python%%%%{py_ver}}}} %{!?py_sitedir: %{expand: %%global py_sitedir %%{expand:%%%%{py_libdir}/site-packages}}} # Compatibility macro wrappers for legacy RPM versions that do not # support conditional builds %{!?bcond_without: %{expand: %%global bcond_without() %%{expand:%%%%{!?_without_%%{1}:%%%%global with_%%{1} 1}}}} %{!?bcond_with: %{expand: %%global bcond_with() %%{expand:%%%%{?_with_%%{1}:%%%%global with_%%{1} 1}}}} %{!?with: %{expand: %%global with() %%{expand:%%%%{?with_%%{1}:1}%%%%{!?with_%%{1}:0}}}} %{!?without: %{expand: %%global without() %%{expand:%%%%{?with_%%{1}:0}%%%%{!?with_%%{1}:1}}}} %global cs_major %(pkg-config corosync --modversion | awk -F . '{print $1}') %global cs_minor %(pkg-config corosync --modversion | awk -F . '{print $2}') %global rawhide %(test ! -e /etc/yum.repos.d/fedora-rawhide.repo; echo $?) # Conditionals # Invoke "rpmbuild --without " or "rpmbuild --with " # to disable or enable specific features # Supported cluster stacks, must support at least one %bcond_without cman %bcond_without corosync %bcond_with heartbeat # ESMTP is not available in RHEL, only in EPEL. 
Allow people to build # the RPM without ESMTP in case they choose not to use EPEL packages %bcond_with esmtp %bcond_with snmp # Build with/without support for profiling tools %bcond_with profiling %bcond_with gcov # We generate docs using Publican, Asciidoc and Inkscape, but they're not available everywhere %bcond_without doc # Use a different versioning scheme %bcond_with pre_release %if %{with profiling} # This disables -debuginfo package creation and also the stripping binaries/libraries # Useful if you want sane profiling data %global debug_package %{nil} %endif %if %{with pre_release} %global pcmk_release 0.%{specversion}.%{upstream_version}.git %else %global pcmk_release %{specversion} %endif Name: pacemaker Summary: Scalable High-Availability cluster resource manager Version: 1.1.7 Release: %{pcmk_release}%{?dist} License: GPLv2+ and LGPLv2+ Url: http://www.clusterlabs.org Group: System Environment/Daemons # export VER={upstream_version} # wget --no-check-certificate -O ClusterLabs-pacemaker-${VER}.tar.gz https://github.com/ClusterLabs/pacemaker/tarball/${VER} Source0: %{upstream_prefix}-%{upstream_version}.tar.gz BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) AutoReqProv: on Requires(pre): cluster-glue Requires: resource-agents Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cluster-libs = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} Requires: python >= 2.4 Conflicts: heartbeat < 2.99 %if %{with snmp} Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version)) %endif %if 0%{?suse_version} # net-snmp-devel on SLES10 does not suck in tcpd-devel automatically BuildRequires: tcpd-devel # Suse splits this off into a separate package Requires: python-curses python-xml BuildRequires: python-curses python-xml %endif # Required for core functionality BuildRequires: automake autoconf libtool pkgconfig python libtool-ltdl-devel BuildRequires: glib2-devel cluster-glue-libs-devel 
libxml2-devel libxslt-devel BuildRequires: pkgconfig python-devel gcc-c++ bzip2-devel pam-devel %if 0%{?suse_version} >= 1100 # Renamed since opensuse-11.0 BuildRequires: libgnutls-devel %else BuildRequires: gnutls-devel %endif # Enables optional functionality BuildRequires: ncurses-devel openssl-devel libselinux-devel docbook-style-xsl libqb-devel BuildRequires: bison byacc flex help2man %if %{with cman} %if 0%{?fedora} > 0 %if 0%{?fedora} < 17 BuildRequires: clusterlib-devel %endif %endif %if 0%{?rhel} > 0 %if 0%{?rhel} < 7 BuildRequires: clusterlib-devel %endif %endif %endif %if %{with esmtp} BuildRequires: libesmtp-devel %endif %if %{with snmp} %ifarch alpha %{ix86} x86_64 BuildRequires: lm_sensors-devel %endif BuildRequires: net-snmp-devel %endif %if %{with corosync} Requires: corosync BuildRequires: corosynclib-devel %endif %if %{with heartbeat} # Do not require heartbeat, the admin should select which stack to use and install it BuildRequires: heartbeat-devel heartbeat-libs >= 3.0.0 %endif %if !%{rawhide} # More often than not, inkscape is busted on rawhide, don't even bother %if %{with doc} %ifarch %{ix86} x86_64 BuildRequires: publican inkscape asciidoc %endif %endif %endif %description Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or Corosync. It supports "n-node" clusters with significant capabilities for managing resources and dependencies. It will run scripts at initialization, when machines go up or down, when related resources fail and can be configured to periodically check resource health. 
Available rpmbuild rebuild options: --with(out) : heartbeat cman corosync doc publican snmp esmtp pre_release %package cli License: GPLv2+ and LGPLv2+ Summary: Command line tools for controlling Pacemaker clusters Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} %description cli Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or Corosync. The %{name}-cli package contains command line tools that can be used to query and control the cluster from machines that may, or may not, be part of the cluster. %package -n %{name}-libs License: GPLv2+ and LGPLv2+ Summary: Core Pacemaker libraries Group: System Environment/Daemons %description -n %{name}-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or Corosync. The %{name}-libs package contains shared libraries needed for cluster nodes and those just running the CLI tools. %package -n %{name}-cluster-libs License: GPLv2+ and LGPLv2+ Summary: Cluster Libraries used by Pacemaker Group: System Environment/Daemons Requires: %{name}-libs = %{version}-%{release} %description -n %{name}-cluster-libs Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or Corosync. The %{name}-cluster-libs package contains cluster-aware shared libraries needed for nodes that will form part of the cluster nodes. 
%package -n %{name}-libs-devel License: GPLv2+ and LGPLv2+ Summary: Pacemaker development package Group: Development/Libraries Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cluster-libs = %{version}-%{release} Requires: cluster-glue-libs-devel libtool-ltdl-devel Requires: libxml2-devel libxslt-devel bzip2-devel glib2-devel %if %{with corosync} Requires: corosynclib-devel %endif %if %{with heartbeat} Requires: heartbeat-devel %endif %description -n %{name}-libs-devel Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or Corosync. The %{name}-libs-devel package contains headers and shared libraries for developing tools for Pacemaker. %package cts License: GPLv2+ and LGPLv2+ Summary: Test framework for cluster-related technologies like Pacemaker Group: System Environment/Daemons Requires: python %description cts Test framework for cluster-related technologies like Pacemaker %package doc License: GPLv2+ and LGPLv2+ Summary: Documentation for Pacemaker Group: Documentation %description doc Documentation for Pacemaker. Pacemaker is an advanced, scalable High-Availability cluster resource manager for Linux-HA (Heartbeat) and/or Corosync. %prep %setup -q -n %{upstream_prefix}-%{upstream_version} # Force the local time # # 'hg archive' sets the file date to the date of the last commit. # This can result in files having been created in the future # when building on machines in timezones 'behind' the one the # commit occurred in - which seriously confuses 'make' find . -exec touch \{\} \; %build ./autogen.sh %if %{with snmp} eval `objdump --headers --private-headers /usr/bin/perl | grep RPATH | awk '{print "export LD_LIBRARY_PATH="$2}'` %endif echo "CSV: %{cs_major} . %{cs_minor}." 
# RHEL <= 5 does not support --docdir docdir=%{pcmk_docdir} %{configure} \ %{!?with_heartbeat: --without-heartbeat} \ %{!?with_corosync: --without-ais} \ %{!?with_esmtp: --without-esmtp} \ %{!?with_snmp: --without-snmp} \ %{?with_cman: --with-cman} \ %{?with_profiling: --with-profiling} \ %{?with_gcov: --with-gcov} \ --with-initdir=%{_initrddir} \ --localstatedir=%{_var} \ --with-version=%{version}-%{release} \ --enable-fatal-warnings=no make %{_smp_mflags} V=1 docdir=%{pcmk_docdir} all %install rm -rf %{buildroot} make DESTDIR=%{buildroot} docdir=%{pcmk_docdir} V=1 install mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig install -m 644 mcp/pacemaker.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/pacemaker # Scripts that should be executable chmod a+x %{buildroot}/%{_libexecdir}/pacemaker/hb2openais-helper.py chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/CTSlab.py chmod a+x %{buildroot}/%{_datadir}/pacemaker/tests/cts/extracttests.py # These are not actually scripts find %{buildroot} -name '*.xml' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.xsl' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.rng' -type f -print0 | xargs -0 chmod a-x find %{buildroot} -name '*.dtd' -type f -print0 | xargs -0 chmod a-x # Dont package static libs find %{buildroot} -name '*.a' -type f -print0 | xargs -0 rm -f find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f # Do not package these either rm -f %{buildroot}/%{_libexecdir}/pacemaker/hb2openais-helper.* rm -f %{buildroot}/%{_libexecdir}/pacemaker/crm_primitive.* rm -f %{buildroot}/%{_libdir}/service_crm.so %if %{with gcov} GCOV_BASE=%{buildroot}/%{_var}/lib/pacemaker/gcov mkdir -p $GCOV_BASE find . 
-name '*.gcno' -type f | while read F ; do D=`dirname $F` mkdir -p ${GCOV_BASE}/$D cp $F ${GCOV_BASE}/$D done %endif %clean rm -rf %{buildroot} %post /sbin/chkconfig --add pacemaker || : %preun if [ $1 -eq 0 ]; then /sbin/service pacemaker stop &>/dev/null || : /sbin/chkconfig --del pacemaker || : fi %post -n %{name}-libs -p /sbin/ldconfig %postun -n %{name}-libs -p /sbin/ldconfig %post -n %{name}-cluster-libs -p /sbin/ldconfig %postun -n %{name}-cluster-libs -p /sbin/ldconfig %files ########################################################### %defattr(-,root,root) %exclude %{_datadir}/pacemaker/tests %config(noreplace) %{_sysconfdir}/sysconfig/pacemaker %{_sbindir}/pacemakerd %{_initrddir}/pacemaker %if %{defined _unitdir} %{_unitdir}/pacemaker.service %endif %{_datadir}/pacemaker %{_datadir}/snmp/mibs/PCMK-MIB.txt %{_libdir}/heartbeat/* %{_libexecdir}/pacemaker/* %{_sbindir}/crm_attribute %{_sbindir}/crm_master %{_sbindir}/crm_node %{_sbindir}/attrd_updater %{_sbindir}/fence_legacy %{_sbindir}/fence_pcmk %{_bindir}/ccs2cib %{_bindir}/ccs_flatten %{_bindir}/disable_rgmanager %{_sbindir}/stonith_admin %if %{with heartbeat} %{_sbindir}/crm_uuid %else %exclude %{_sbindir}/crm_uuid %endif %doc %{_mandir}/man7/* %doc %{_mandir}/man8/attrd_updater.* %doc %{_mandir}/man8/crm_attribute.* %doc %{_mandir}/man8/crm_node.* %doc %{_mandir}/man8/crm_master.* %doc %{_mandir}/man8/fence_pcmk.* %doc %{_mandir}/man8/pacemakerd.* %doc %{_mandir}/man8/stonith_admin.* %doc COPYING %doc AUTHORS %doc ChangeLog %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/heartbeat/crm %dir %attr (750, %{uname}, %{gname}) %{_var}/lib/pengine %ghost %dir %attr (750, %{uname}, %{gname}) %{_var}/run/crm %dir /usr/lib/ocf %dir /usr/lib/ocf/resource.d /usr/lib/ocf/resource.d/pacemaker %if %{with corosync} %if 0%{?cs_major} < 2 %if 0%{?cs_minor} < 8 %{_libexecdir}/lcrso/pacemaker.lcrso %endif %endif %endif %files cli %defattr(-,root,root) %{_sbindir}/cibadmin %{_sbindir}/crm_diff %{_sbindir}/crm_failcount 
%{_sbindir}/crm_mon -%{_sbindir}/crm %{_sbindir}/crm_resource %{_sbindir}/crm_standby %{_sbindir}/crm_verify %{_sbindir}/crmadmin %{_sbindir}/iso8601 %{_sbindir}/ptest %{_sbindir}/crm_shadow %{_sbindir}/cibpipe %{_sbindir}/crm_simulate %{_sbindir}/crm_report %{_sbindir}/crm_ticket -%{py_sitedir}/crm %doc %{_mandir}/man8/* %exclude %{_mandir}/man8/attrd_updater.* %exclude %{_mandir}/man8/crm_attribute.* %exclude %{_mandir}/man8/crm_node.* %exclude %{_mandir}/man8/crm_master.* %exclude %{_mandir}/man8/fence_pcmk.* %exclude %{_mandir}/man8/pacemakerd.* %exclude %{_mandir}/man8/stonith_admin.* %doc COPYING %doc AUTHORS %doc ChangeLog %files -n %{name}-libs %defattr(-,root,root) %{_libdir}/libcib.so.* %{_libdir}/libcrmcommon.so.* %{_libdir}/libpe_status.so.* %{_libdir}/libpe_rules.so.* %{_libdir}/libpengine.so.* %{_libdir}/libtransitioner.so.* %doc COPYING.LIB %doc AUTHORS %files -n %{name}-cluster-libs %defattr(-,root,root) %{_libdir}/libcrmcluster.so.* %{_libdir}/libstonithd.so.* %doc COPYING.LIB %doc AUTHORS %files doc %defattr(-,root,root) %doc %{pcmk_docdir} %files cts %defattr(-,root,root) %{py_sitedir}/cts %{_datadir}/pacemaker/tests/cts %doc COPYING.LIB %doc AUTHORS %files -n %{name}-libs-devel %defattr(-,root,root) %exclude %{_datadir}/pacemaker/tests/cts %{_datadir}/pacemaker/tests %{_includedir}/pacemaker %{_libdir}/*.so %if %{with gcov} %{_var}/lib/pacemaker %endif %{_libdir}/pkgconfig/*.pc %doc COPYING.LIB %doc AUTHORS %changelog * Wed Aug 31 2011 Andrew Beekhof 1.1.6-1 - Update source tarball to revision: 676e5f25aa46 tip - Statistics: Changesets: 376 Diff: 1761 files changed, 36259 insertions(+), 140578 deletions(-) - See included ChangeLog file or http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details * Fri Feb 11 2011 Andrew Beekhof 1.1.5-1 - Update source tarball to revision: baad6636a053 - Statistics: Changesets: 184 Diff: 605 files changed, 46103 insertions(+), 26417 deletions(-) - See included ChangeLog file or 
http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details * Wed Oct 20 2010 Andrew Beekhof 1.1.4-1 - Moved all the interesting parts of the changelog into a separate file as per the Fedora policy :-/ - Update source tarball to revision: 75406c3eb2c1 tip - Significant performance enhancements to the Policy Engine and CIB - Statistics: Changesets: 169 Diff: 772 files changed, 56172 insertions(+), 39309 deletions(-) - See included ChangeLog file or http://hg.clusterlabs.org/pacemaker/1.1/file/tip/ChangeLog for details * Tue Sep 21 2010 Andrew Beekhof 1.1.3-1 - Update source tarball to revision: e3bb31c56244 tip - Statistics: Changesets: 352 Diff: 481 files changed, 14130 insertions(+), 11156 deletions(-) * Wed May 12 2010 Andrew Beekhof 1.1.2-1 - Update source tarball to revision: c25c972a25cc tip - Statistics: Changesets: 339 Diff: 708 files changed, 37918 insertions(+), 10584 deletions(-) * Tue Feb 16 2010 Andrew Beekhof - 1.1.1-1 - First public release of Pacemaker 1.1 - Package reference documentation in a doc subpackage - Move cts into a subpackage so that it can be easily consumed by others - Update source tarball to revision: 17d9cd4ee29f + New stonith daemon that supports global notifications + Service placement influenced by the physical resources + A new tool for simulating failures and the cluster’s reaction to them + Ability to serialize an otherwise unrelated a set of resource actions (eg. 
Xen migrations) * Wed Feb 10 2010 Andrew Beekhof - 1.0.7-4 - Rebuild for heartbeat 3.0.2-2 * Wed Feb 10 2010 Andrew Beekhof - 1.0.7-3 - Rebuild for cluster-glue 1.0.3 * Tue Jan 19 2010 Andrew Beekhof - 1.0.7-2 - Rebuild for corosync 1.2.0 * Mon Jan 18 2010 Andrew Beekhof - 1.0.7-1 - Update source tarball to revision: 2eed906f43e9 (stable-1.0) tip - Statistics: Changesets: 193 Diff: 220 files changed, 15933 insertions(+), 8782 deletions(-) * Thu Oct 29 2009 Andrew Beekhof - 1.0.5-4 - Include the fixes from CoroSync integration testing - Move the resource templates - they are not documentation - Ensure documentation is placed in a standard location - Exclude documentation that is included elsewhere in the package - Update the tarball from upstream to version ee19d8e83c2a + High: cib: Correctly clean up when both plaintext and tls remote ports are requested + High: PE: Bug bnc#515172 - Provide better defaults for lt(e) and gt(e) comparisions + High: PE: Bug lf#2197 - Allow master instances placemaker to be influenced by colocation constraints + High: PE: Make sure promote/demote pseudo actions are created correctly + High: PE: Prevent target-role from promoting more than master-max instances + High: ais: Bug lf#2199 - Prevent expected-quorum-votes from being populated with garbage + High: ais: Prevent deadlock - dont try to release IPC message if the connection failed + High: cib: For validation errors, send back the full CIB so the client can display the errors + High: cib: Prevent use-after-free for remote plaintext connections + High: crmd: Bug lf#2201 - Prevent use-of-NULL when running heartbeat * Wed Oct 13 2009 Andrew Beekhof - 1.0.5-3 - Update the tarball from upstream to version 38cd629e5c3c + High: Core: Bug lf#2169 - Allow dtd/schema validation to be disabled + High: PE: Bug lf#2106 - Not all anonymous clone children are restarted after configuration change + High: PE: Bug lf#2170 - stop-all-resources option had no effect + High: PE: Bug lf#2171 - Prevent 
groups from starting if they depend on a complex resource which can not + High: PE: Disable resource management if stonith-enabled=true and no stonith resources are defined + High: PE: do not include master score if it would prevent allocation + High: ais: Avoid excessive load by checking for dead children every 1s (instead of 100ms) + High: ais: Bug rh#525589 - Prevent shutdown deadlocks when running on CoroSync + High: ais: Gracefully handle changes to the AIS nodeid + High: crmd: Bug bnc#527530 - Wait for the transition to complete before leaving S_TRANSITION_ENGINE + High: crmd: Prevent use-after-free with LOG_DEBUG_3 + Medium: xml: Mask the "symmetrical" attribute on rsc_colocation constraints (bnc#540672) + Medium (bnc#520707): Tools: crm: new templates ocfs2 and clvm + Medium: Build: Invert the disable ais/heartbeat logic so that --without (ais|heartbeat) is available to rpmbuild + Medium: PE: Bug lf#2178 - Indicate unmanaged clones + Medium: PE: Bug lf#2180 - Include node information for all failed ops + Medium: PE: Bug lf#2189 - Incorrect error message when unpacking simple ordering constraint + Medium: PE: Correctly log resources that would like to start but can not + Medium: PE: Stop ptest from logging to syslog + Medium: ais: Include version details in plugin name + Medium: crmd: Requery the resource metadata after every start operation * Fri Aug 21 2009 Tomas Mraz - 1.0.5-2.1 - rebuilt with new openssl * Wed Aug 19 2009 Andrew Beekhof - 1.0.5-2 - Add versioned perl dependency as specified by https://fedoraproject.org/wiki/Packaging/Perl#Packages_that_link_to_libperl - No longer remove RPATH data, it prevents us finding libperl.so and no other libraries were being hardcoded - Compile in support for heartbeat - Conditionally add heartbeat-devel and corosynclib-devel to the -devel requirements depending on which stacks are supported * Mon Aug 17 2009 Andrew Beekhof - 1.0.5-1 - Add dependency on resource-agents - Use the version of the configure macro that 
supplies --prefix, --libdir, etc - Update the tarball from upstream to version 462f1569a437 (Pacemaker 1.0.5 final) + High: Tools: crm_resource - Advertise --move instead of --migrate + Medium: Extra: New node connectivity RA that uses system ping and attrd_updater + Medium: crmd: Note that dc-deadtime can be used to mask the brokeness of some switches * Tue Aug 11 2009 Ville Skyttä - 1.0.5-0.7.c9120a53a6ae.hg - Use bzipped upstream tarball. * Wed Jul 29 2009 Andrew Beekhof - 1.0.5-0.6.c9120a53a6ae.hg - Add back missing build auto* dependancies - Minor cleanups to the install directive * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.5.c9120a53a6ae.hg - Add a leading zero to the revision when alphatag is used * Tue Jul 28 2009 Andrew Beekhof - 1.0.5-0.4.c9120a53a6ae.hg - Incorporate the feedback from the cluster-glue review - Realistically, the version is a 1.0.5 pre-release - Use the global directive instead of define for variables - Use the haclient/hacluster group/user instead of daemon - Use the _configure macro - Fix install dependancies * Fri Jul 24 2009 Andrew Beekhof - 1.0.4-3 - Initial Fedora checkin - Include an AUTHORS and license file in each package - Change the library package name to pacemaker-libs to be more Fedora compliant - Remove execute permissions from xml related files - Reference the new cluster-glue devel package name - Update the tarball from upstream to version c9120a53a6ae + High: PE: Only prevent migration if the clone dependency is stopping/starting on the target node + High: PE: Bug 2160 - Dont shuffle clones due to colocation + High: PE: New implementation of the resource migration (not stop/start) logic + Medium: Tools: crm_resource - Prevent use-of-NULL by requiring a resource name for the -A and -a options + Medium: PE: Prevent use-of-NULL in find_first_action() * Tue Jul 14 2009 Andrew Beekhof - 1.0.4-2 - Reference authors from the project AUTHORS file instead of listing in description - Change Source0 to reference the Mercurial repo - 
Cleaned up the summaries and descriptions - Incorporate the results of Fedora package self-review * Thu Jun 04 2009 Andrew Beekhof - 1.0.4-1 - Update source tarball to revision: 1d87d3e0fc7f (stable-1.0) - Statistics: Changesets: 209 Diff: 266 files changed, 12010 insertions(+), 8276 deletions(-) * Wed Apr 08 2009 Andrew Beekhof - 1.0.3-1 - Update source tarball to revision: b133b3f19797 (stable-1.0) tip - Statistics: Changesets: 383 Diff: 329 files changed, 15471 insertions(+), 15119 deletions(-) * Mon Feb 16 2009 Andrew Beekhof - 1.0.2-1 - Update source tarball to revision: d232d19daeb9 (stable-1.0) tip - Statistics: Changesets: 441 Diff: 639 files changed, 20871 insertions(+), 21594 deletions(-) * Tue Nov 18 2008 Andrew Beekhof - 1.0.1-1 - Update source tarball to revision: 6fc5ce8302ab (stable-1.0) tip - Statistics: Changesets: 170 Diff: 816 files changed, 7633 insertions(+), 6286 deletions(-) * Thu Oct 16 2008 Andrew Beekhof - 1.0.0-1 - Update source tarball to revision: 388654dfef8f tip - Statistics: Changesets: 261 Diff: 3021 files changed, 244985 insertions(+), 111596 deletions(-) * Mon Sep 22 2008 Andrew Beekhof - 0.7.3-1 - Update source tarball to revision: 33e677ab7764+ tip - Statistics: Changesets: 133 Diff: 89 files changed, 7492 insertions(+), 1125 deletions(-) * Wed Aug 20 2008 Andrew Beekhof - 0.7.1-1 - Update source tarball to revision: f805e1b30103+ tip - Statistics: Changesets: 184 Diff: 513 files changed, 43408 insertions(+), 43783 deletions(-) * Fri Jul 18 2008 Andrew Beekhof - 0.7.0-19 - Update source tarball to revision: 007c3a1c50f5 (unstable) tip - Statistics: Changesets: 108 Diff: 216 files changed, 4632 insertions(+), 4173 deletions(-) * Wed Jun 25 2008 Andrew Beekhof - 0.7.0-1 - Update source tarball to revision: bde0c7db74fb tip - Statistics: Changesets: 439 Diff: 676 files changed, 41310 insertions(+), 52071 deletions(-) * Thu Jun 19 2008 Andrew Beekhof - 0.6.5-1 - Update source tarball to revision: b9fe723d1ac5 tip - Statistics: 
Changesets: 48 Diff: 37 files changed, 1204 insertions(+), 234 deletions(-) * Thu May 22 2008 Andrew Beekhof - 0.6.4-1 - Update source tarball to revision: 226d8e356924 tip - Statistics: Changesets: 55 Diff: 199 files changed, 7103 insertions(+), 12378 deletions(-) * Wed Apr 23 2008 Andrew Beekhof - 0.6.3-1 - Update source tarball to revision: fd8904c9bc67 tip - Statistics: Changesets: 117 Diff: 354 files changed, 19094 insertions(+), 11338 deletions(-) * Thu Feb 14 2008 Andrew Beekhof - 0.6.2-1 - Update source tarball to revision: 28b1a8c1868b tip - Statistics: Changesets: 11 Diff: 7 files changed, 58 insertions(+), 18 deletions(-) * Tue Feb 12 2008 Andrew Beekhof - 0.6.1-1 - Update source tarball to revision: e7152d1be933 tip - Statistics: Changesets: 25 Diff: 37 files changed, 1323 insertions(+), 227 deletions(-) * Mon Jan 14 2008 Andrew Beekhof - 0.6.0-2 - This is the first release of the Pacemaker Cluster Resource Manager formerly part of Heartbeat. - For those looking for the GUI, mgmtd, CIM or TSA components, they are now found in the new pacemaker-pygui project. Build dependancies prevent them from being included in Heartbeat (since the built-in CRM is no longer supported) and, being non-core components, are not included with Pacemaker. - Update source tarball to revision: c94b92d550cf - Statistics: Changesets: 347 Diff: 2272 files changed, 132508 insertions(+), 305991 deletions(-) - Test hardware: + 6-node vmware cluster (sles10-sp1/256Mb/vmware stonith) on a single host (opensuse10.3/2Gb/2.66Ghz Quad Core2) + 7-node EMC Centera cluster (sles10/512Mb/2Ghz Xeon/ssh stonith) - Notes: Heartbeat Stack + All testing was performed with STONITH enabled + The CRM was enabled using the "crm respawn" directive - Notes: OpenAIS Stack + This release contains a preview of support for the OpenAIS cluster stack + The current release of the OpenAIS project is missing two important patches that we require. 
OpenAIS packages containing these patches are available for most major distributions at: http://download.opensuse.org/repositories/server:/ha-clustering + The OpenAIS stack is not currently recommended for use in clusters that have shared data as STONITH support is not yet implimented + pingd is not yet available for use with the OpenAIS stack + 3 significant OpenAIS issues were found during testing of 4 and 6 node clusters. We are activly working together with the OpenAIS project to get these resolved. - Pending bugs encountered during testing: + OpenAIS #1736 - Openais membership took 20s to stabilize + Heartbeat #1750 - ipc_bufpool_update: magic number in head does not match + OpenAIS #1793 - Assertion failure in memb_state_gather_enter() + OpenAIS #1796 - Cluster message corruption * Mon Dec 10 2007 Andrew Beekhof - 0.6.0-1 - Initial opensuse package check-in diff --git a/shell/Makefile.am b/shell/Makefile.am deleted file mode 100644 index 2b85852168..0000000000 --- a/shell/Makefile.am +++ /dev/null @@ -1,26 +0,0 @@ -# -# doc: Pacemaker code -# -# Copyright (C) 2008 Andrew Beekhof -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -MAINTAINERCLEANFILES = Makefile.in - -sbin_SCRIPTS = crm - -EXTRA_DIST = crm - -SUBDIRS = templates regression modules diff --git a/shell/crm b/shell/crm deleted file mode 100755 index 34268c9f05..0000000000 --- a/shell/crm +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/python -# - -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# - -minimum_version = '2.4' -import sys - -from distutils import version -v_min = version.StrictVersion(minimum_version) -v_this = version.StrictVersion(sys.version[:3]) -if v_min > v_this: - sys.stderr.write("abort: minimum python version support is %s\n", \ - minimum_version) - sys.exit(-1) - -try: - from crm import main -except ImportError: - try: - # Perhaps we're running from the source directory - from modules import main - except ImportError: - sys.stderr.write("abort: couldn't find crm libraries in [%s]\n" % - ' '.join(sys.path)) - sys.stderr.write("(check your install and PYTHONPATH)\n") - sys.exit(-1) - -try: - main.run() -except KeyboardInterrupt: - print "Ctrl-C, leaving" - sys.exit(1) -# vim:ts=4:sw=4:et: diff --git a/shell/modules/Makefile.am b/shell/modules/Makefile.am deleted file mode 100644 index 94be8e64e8..0000000000 --- a/shell/modules/Makefile.am +++ /dev/null @@ -1,47 +0,0 @@ -# -# doc: Pacemaker code -# -# Copyright (C) 
2008 Andrew Beekhof -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -MAINTAINERCLEANFILES = Makefile.in - -modules = __init__.py \ - cache.py \ - cibconfig.py \ - cibstatus.py \ - clidisplay.py \ - cliformat.py \ - completion.py \ - help.py \ - idmgmt.py \ - levels.py \ - main.py \ - msg.py \ - parse.py \ - ra.py \ - singletonmixin.py \ - template.py \ - term.py \ - ui.py \ - userprefs.py \ - utils.py \ - vars.py \ - xmlutil.py - -shelllibdir = $(pyexecdir)/crm - -shelllib_PYTHON = $(modules) diff --git a/shell/modules/__init__.py b/shell/modules/__init__.py deleted file mode 100644 index feff2bbd39..0000000000 --- a/shell/modules/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file is required for python packages. -# It is intentionally empty. diff --git a/shell/modules/cache.py b/shell/modules/cache.py deleted file mode 100644 index 450e9f01b6..0000000000 --- a/shell/modules/cache.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. 
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import time -from singletonmixin import Singleton - -class WCache(Singleton): - "Cache stuff. A naive implementation." - def __init__(self): - self.lists = {} - self.stamp = time.time() - self.max_cache_age = 600 # seconds - def is_cached(self,name): - if time.time() - self.stamp > self.max_cache_age: - self.stamp = time.time() - self.clear() - return name in self.lists - def store(self,name,lst): - self.lists[name] = lst - return lst - def retrieve(self,name): - if self.is_cached(name): - return self.lists[name] - else: - return None - def clear(self): - self.lists = {} - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/cibconfig.py b/shell/modules/cibconfig.py deleted file mode 100644 index 9cc9751658..0000000000 --- a/shell/modules/cibconfig.py +++ /dev/null @@ -1,2423 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import sys -import subprocess -import copy -import xml.dom.minidom -import re - -from singletonmixin import Singleton -from userprefs import Options, UserPrefs -from vars import Vars -from cliformat import * -from utils import * -from xmlutil import * -from msg import * -from parse import CliParser -from clidisplay import CliDisplay -from cibstatus import CibStatus -from idmgmt import IdMgmt -from ra import RAInfo, get_properties_list, get_pe_meta - -def show_unrecognized_elems(doc): - try: - conf = doc.getElementsByTagName("configuration")[0] - except: - common_warn("CIB has no configuration element") - return - for topnode in conf.childNodes: - if not is_element(topnode): - continue - if is_defaults(topnode): - continue - for c in topnode.childNodes: - if not is_element(c): - continue - if not c.tagName in cib_object_map: - common_warn("unrecognized CIB element %s" % c.tagName) - -# -# object sets (enables operations on sets of elements) -# -def mkset_obj(*args): - if args and args[0] == "xml": - obj = lambda: CibObjectSetRaw(*args[1:]) - else: - obj = lambda: CibObjectSetCli(*args) - return obj() - -class CibObjectSet(object): - ''' - Edit or display a set of cib objects. - repr() for objects representation and - save() used to store objects into internal structures - are defined in subclasses. - ''' - def __init__(self, *args): - self.obj_list = [] - def _open_url(self,src): - import urllib - try: - return urllib.urlopen(src) - except: - pass - if src == "-": - return sys.stdin - try: - return open(src) - except: - pass - common_err("could not open %s" % src) - return False - def init_aux_lists(self): - ''' - Before edit, initialize two auxiliary lists which will - hold a list of objects to be removed and a list of - objects which were created. 
Then, we can create a new - object list which will match the current state of - affairs, i.e. the object set after the last edit. - ''' - self.remove_objs = copy.copy(self.obj_list) - self.add_objs = [] - def recreate_obj_list(self): - ''' - Recreate obj_list: remove deleted objects and add - created objects - ''' - for obj in self.remove_objs: - self.obj_list.remove(obj) - self.obj_list += self.add_objs - rmlist = [] - for obj in self.obj_list: - if obj.invalid: - rmlist.append(obj) - for obj in rmlist: - self.obj_list.remove(obj) - def edit_save(self,s,erase = False): - ''' - Save string s to a tmp file. Invoke editor to edit it. - Parse/save the resulting file. In case of syntax error, - allow user to reedit. If erase is True, erase the CIB - first. - If no changes are done, return silently. - ''' - tmp = str2tmp(s) - if not tmp: - return False - filehash = hash(s) - rc = False - while True: - if edit_file(tmp) != 0: - break - try: f = open(tmp,'r') - except IOError, msg: - common_err(msg) - break - s = ''.join(f) - f.close() - if hash(s) == filehash: # file unchanged - rc = True - break - if erase: - cib_factory.erase() - if not self.save(s): - if ask("Do you want to edit again?"): - continue - rc = True - break - try: os.unlink(tmp) - except: pass - return rc - def edit(self): - if options.batch: - common_info("edit not allowed in batch mode") - return False - cli_display.set_no_pretty() - s = self.repr() - cli_display.reset_no_pretty() - return self.edit_save(s) - def filter_save(self,filter,s): - ''' - Pipe string s through a filter. Parse/save the output. - If no changes are done, return silently. 
- ''' - rc,outp = filter_string(filter,s) - if rc != 0: - return False - if hash(outp) == hash(s): - return True - return self.save(outp) - def filter(self,filter): - cli_display.set_no_pretty() - s = self.repr(format = -1) - cli_display.reset_no_pretty() - return self.filter_save(filter,s) - def save_to_file(self,fname): - if fname == "-": - f = sys.stdout - else: - if not options.batch and os.access(fname,os.F_OK): - if not ask("File %s exists. Do you want to overwrite it?"%fname): - return False - try: f = open(fname,"w") - except IOError, msg: - common_err(msg) - return False - rc = True - cli_display.set_no_pretty() - s = self.repr() - cli_display.reset_no_pretty() - if s: - f.write(s) - f.write('\n') - elif self.obj_list: - rc = False - if f != sys.stdout: - f.close() - return rc - def show(self): - s = self.repr() - if not s: - if self.obj_list: # objects could not be displayed - return False - else: - return True - page_string(s) - def import_file(self,method,fname): - if not cib_factory.is_cib_sane(): - return False - if method not in ("replace", "update"): - common_err("unknown method %s" % method) - return False - if method == "replace": - if options.interactive and cib_factory.has_cib_changed(): - if not ask("This operation will erase all changes. Do you want to proceed?"): - return False - cib_factory.erase() - f = self._open_url(fname) - if not f: - return False - s = ''.join(f) - if f != sys.stdin: - f.close() - return self.save(s, method == "update") - def repr(self): - ''' - Return a string with objects's representations (either - CLI or XML). - ''' - return '' - def save(self, s, update = False): - ''' - For each object: - - try to find a corresponding object in obj_list - - if (update and not found) or found: - replace the object in the obj_list with - the new object - - if not found: create new - See below for specific implementations. 
- ''' - pass - def __check_unique_clash(self, set_obj_all): - 'Check whether resource parameters with attribute "unique" clash' - def process_primitive(prim, clash_dict): - ''' - Update dict clash_dict with - (ra_class, ra_provider, ra_type, name, value) -> [ resourcename ] - if parameter "name" should be unique - ''' - ra_class = prim.getAttribute("class") - ra_provider = prim.getAttribute("provider") - ra_type = prim.getAttribute("type") - ra_id = prim.getAttribute("id") - ra = RAInfo(ra_class, ra_type, ra_provider) - if ra == None: - return - ra_params = ra.params() - for a in prim.getElementsByTagName("instance_attributes"): - # params are in instance_attributes just below the parent - # operations may have some as well, e.g. OCF_CHECK_LEVEL - if a.parentNode != prim: - continue - for p in a.getElementsByTagName("nvpair"): - name = p.getAttribute("name") - # don't fail if the meta-data doesn't contain the - # expected attributes - try: - if ra_params[ name ].get("unique") == "1": - value = p.getAttribute("value") - k = (ra_class, ra_provider, ra_type, name, value) - try: - clash_dict[k].append(ra_id) - except: - clash_dict[k] = [ra_id] - except: pass - return - # we check the whole CIB for clashes as a clash may originate between - # an object already committed and a new one - clash_dict = {} - for obj in set_obj_all.obj_list: - node = obj.node - if is_primitive(node): - process_primitive(node, clash_dict) - # but we only warn if a 'new' object is involved - check_set = set([o.node.getAttribute("id") for o in self.obj_list if is_primitive(o.node)]) - rc = 0 - for param, resources in clash_dict.items(): - # at least one new object must be involved - if len(resources) > 1 and len(set(resources) & check_set) > 0: - rc = 2 - msg = 'Resources %s violate uniqueness for parameter "%s": "%s"' %\ - (",".join(sorted(resources)), param[3], param[4]) - common_warning(msg) - return rc - def semantic_check(self, set_obj_all): - ''' - Test objects for sanity. 
This is about semantics. - ''' - rc = self.__check_unique_clash(set_obj_all) - for obj in self.obj_list: - rc |= obj.check_sanity() - return rc - def lookup_cli(self,cli_list): - for obj in self.obj_list: - if obj.matchcli(cli_list): - return obj - def lookup(self,xml_obj_type,obj_id): - for obj in self.obj_list: - if obj.match(xml_obj_type,obj_id): - return obj - def drop_remaining(self): - 'Any remaining objects in obj_list are deleted.' - l = [x.obj_id for x in self.remove_objs] - return cib_factory.delete(*l) - -def get_comments(cli_list): - if not cli_list: - return [] - last = cli_list[len(cli_list)-1] - try: - if last[0] == "comments": - cli_list.pop() - return last[1] - except: pass - return [] - -class CibObjectSetCli(CibObjectSet): - ''' - Edit or display a set of cib objects (using cli notation). - ''' - def __init__(self, *args): - CibObjectSet.__init__(self, *args) - self.obj_list = cib_factory.mkobj_list("cli",*args) - def repr(self, format = 1): - "Return a string containing cli format of all objects." - if not self.obj_list: - return '' - return '\n'.join(obj.repr_cli(format = format) \ - for obj in processing_sort_cli(self.obj_list)) - def process(self, cli_list, update = False): - ''' - Create new objects or update existing ones. - ''' - myobj = obj = self.lookup_cli(cli_list) - if update and not obj: - obj = cib_factory.find_object_for_cli(cli_list) - if obj: - rc = cib_factory.update_from_cli(obj,cli_list,update) != False - if myobj: - self.remove_objs.remove(myobj) - else: - obj = cib_factory.create_from_cli(cli_list) - rc = obj != None - if rc: - self.add_objs.append(obj) - return rc - def save(self, s, update = False): - ''' - Save a user supplied cli format configuration. - On errors user is typically asked to review the - configuration (for instance on editting). - - On syntax error (return code 1), no changes are done, but - on semantic errors (return code 2), some changes did take - place so object list must be updated properly. 
- - Finally, once syntax check passed, there's no way back, - all changes are applied to the current configuration. - - TODO: Implement undo configuration changes. - ''' - l = [] - id_list = [] - rc = True - err_buf.start_tmp_lineno() - cp = CliParser() - for cli_text in lines2cli(s): - err_buf.incr_lineno() - cli_list = cp.parse(cli_text) - if cli_list: - id = find_value(cli_list[0][1],"id") - if id: - if id in id_list: - common_err("duplicate element %s" % id) - rc = False - id_list.append(id) - l.append(cli_list) - elif cli_list == False: - rc = False - err_buf.stop_tmp_lineno() - # we can't proceed if there was a syntax error, but we - # can ask the user to fix problems - if not rc: - return rc - self.init_aux_lists() - if l: - for cli_list in processing_sort_cli(l): - if self.process(cli_list,update) == False: - rc = False - if not self.drop_remaining(): - # this is tricky, we don't know what was removed! - # it could happen that the user dropped a resource - # which was running and therefore couldn't be removed - rc = False - self.recreate_obj_list() - return rc - -cib_verify = "crm_verify -V -p" -class CibObjectSetRaw(CibObjectSet): - ''' - Edit or display one or more CIB objects (XML). - ''' - actions_filter = "grep LogActions: | grep -vw Leave" - def __init__(self, *args): - CibObjectSet.__init__(self, *args) - self.obj_list = cib_factory.mkobj_list("xml",*args) - def repr(self, format = "ignored"): - "Return a string containing xml of all objects." - doc = cib_factory.objlist2doc(self.obj_list) - s = doc.toprettyxml(user_prefs.xmlindent) - doc.unlink() - return s - def repr_configure(self): - ''' - Return a string containing xml of configure and its - children. 
- ''' - doc = cib_factory.objlist2doc(self.obj_list) - conf_node = doc.getElementsByTagName("configuration")[0] - s = conf_node.toprettyxml(user_prefs.xmlindent) - doc.unlink() - return s - def process(self, node, update = False): - if not cib_factory.is_cib_sane(): - return False - myobj = obj = self.lookup(node.tagName,node.getAttribute("id")) - if update and not obj: - obj = cib_factory.find_object_for_node(node) - if obj: - rc = cib_factory.update_from_node(obj,node) - if myobj: - self.remove_objs.remove(obj) - else: - new_obj = cib_factory.create_from_node(node) - rc = new_obj != None - if rc: - self.add_objs.append(new_obj) - return rc - def save(self, s, update = False): - try: - doc = xml.dom.minidom.parseString(s) - except xml.parsers.expat.ExpatError,msg: - cib_parse_err(msg,s) - return False - rc = True - sanitize_cib(doc) - show_unrecognized_elems(doc) - newnodes = get_interesting_nodes(doc,[]) - self.init_aux_lists() - if newnodes: - for node in processing_sort(newnodes): - if not self.process(node,update): - rc = False - if not self.drop_remaining(): - rc = False - doc.unlink() - self.recreate_obj_list() - return rc - def verify(self): - if not self.obj_list: - return True - cli_display.set_no_pretty() - rc = pipe_string(cib_verify,self.repr(format = -1)) - cli_display.reset_no_pretty() - return rc in (0,1) - def ptest(self, nograph, scores, utilization, actions, verbosity): - if not cib_factory.is_cib_sane(): - return False - if verbosity: - if actions: - verbosity = 'v' * max(3,len(verbosity)) - ptest = "ptest -X -%s" % verbosity.upper() - if scores: - ptest = "%s -s" % ptest - if utilization: - ptest = "%s -U" % ptest - if user_prefs.dotty and not nograph: - fd,dotfile = mkstemp() - ptest = "%s -D %s" % (ptest,dotfile) - else: - dotfile = None - doc = cib_factory.objlist2doc(self.obj_list) - cib = doc.childNodes[0] - status = cib_status.get_status() - if not status: - common_err("no status section found") - return False - 
cib.appendChild(doc.importNode(status,1)) - # ptest prints to stderr - if actions: - ptest = "%s 2>&1 | %s | %s" % \ - (ptest, self.actions_filter, user_prefs.pager) - else: - ptest = "%s 2>&1 | %s" % (ptest, user_prefs.pager) - pipe_string(ptest,doc.toprettyxml()) - doc.unlink() - if dotfile: - show_dot_graph(dotfile) - vars.tmpfiles.append(dotfile) - else: - if not nograph: - common_info("install graphviz to see a transition graph") - return True - -# -# XML generate utilities -# -def set_id(node,oldnode,id_hint,id_required = True): - ''' - Set the id attribute for the node. - Procedure: - - if the node already contains "id", keep it - - if the old node contains "id", copy that - - if neither is true, then create a new one using id_hint - (exception: if not id_required, then no new id is generated) - Finally, save the new id in id_store. - ''' - old_id = None - new_id = node.getAttribute("id") - if oldnode and oldnode.getAttribute("id"): - old_id = oldnode.getAttribute("id") - if not new_id: - new_id = old_id - if not new_id: - if id_required: - new_id = id_store.new(node,id_hint) - else: - id_store.save(new_id) - if new_id: - node.setAttribute("id",new_id) - if oldnode and old_id == new_id: - set_id_used_attr(oldnode) - -def mkxmlsimple(e,oldnode,id_hint): - ''' - Create an xml node from the (name,dict) pair. The name is the - name of the element. The dict contains a set of attributes. 
- ''' - node = cib_factory.createElement(e[0]) - for n,v in e[1]: - if n == "$children": # this one's skipped - continue - if n == "operation": - v = v.lower() - if n.startswith('$'): - n = n.lstrip('$') - if (type(v) != type('') and type(v) != type(u'')) \ - or v: # skip empty strings - node.setAttribute(n,v) - id_ref = node.getAttribute("id-ref") - if id_ref: - id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref) - node.setAttribute("id-ref",id_ref_2) - else: - set_id(node,lookup_node(node,oldnode),id_hint) - return node - -def mkxmlnvpairs(e,oldnode,id_hint): - ''' - Create xml from the (name,dict) pair. The name is the name of - the element. The dict contains a set of nvpairs. Stuff such - as instance_attributes. - NB: Other tags not containing nvpairs are fine if the dict is empty. - ''' - xml_node_type = e[0] in vars.defaults_tags and "meta_attributes" or e[0] - node = cib_factory.createElement(xml_node_type) - # another exception: - # cluster_property_set has nvpairs as direct children - # in that case the id_hint is equal id - # and this is important in case there are multiple sets - if e[0] == "cluster_property_set" and id_hint: - node.setAttribute("id",id_hint) - match_node = lookup_node(node,oldnode) - #if match_node: - #print "found nvpairs set:",match_node.tagName,match_node.getAttribute("id") - id_ref = find_value(e[1],"$id-ref") - if id_ref: - id_ref_2 = cib_factory.resolve_id_ref(e[0],id_ref) - node.setAttribute("id-ref",id_ref_2) - if e[0] != "operations": - return node # id_ref is the only attribute (if not operations) - e[1].remove(["$id-ref",id_ref]) - v = find_value(e[1],"$id") - if v: - node.setAttribute("id",v) - e[1].remove(["$id",v]) - elif e[0] in vars.nvset_cli_names: - node.setAttribute("id",id_hint) - else: - if e[0] == "operations": # operations don't need no id - set_id(node,match_node,id_hint,id_required = False) - else: - set_id(node,match_node,id_hint) - try: - subpfx = vars.subpfx_list[e[0]] - except: subpfx = '' - subpfx = subpfx 
and "%s_%s" % (id_hint,subpfx) or id_hint - nvpair_pfx = node.getAttribute("id") or subpfx - for n,v in e[1]: - nvpair = cib_factory.createElement("nvpair") - node.appendChild(nvpair) - nvpair.setAttribute("name",n) - if v != None: - nvpair.setAttribute("value",v) - set_id(nvpair,lookup_node(nvpair,match_node),nvpair_pfx) - return node - -def mkxmlop(e,oldnode,id_hint): - ''' - Create an operation xml from the (name,dict) pair. - ''' - node = cib_factory.createElement(e[0]) - inst_attr = [] - for n,v in e[1]: - if n in olist(vars.req_op_attributes + vars.op_attributes): - node.setAttribute(n,v) - else: - inst_attr.append([n,v]) - tmp = cib_factory.createElement("operations") - oldops = lookup_node(tmp,oldnode) # first find old operations - oldop = lookup_node(node,oldops) - set_id(node,oldop,id_hint) - if inst_attr: - e = ["instance_attributes",inst_attr] - nia = mkxmlnvpairs(e,oldop,node.getAttribute("id")) - node.appendChild(nia) - return node - -def mkxmldate(e,oldnode,id_hint): - ''' - Create a date_expression xml from the (name,dict) pair. - ''' - node = cib_factory.createElement(e[0]) - operation = find_value(e[1],"operation").lower() - node.setAttribute("operation", operation) - old_date = lookup_node(node,oldnode) # first find old date element - set_id(node,old_date,id_hint) - date_spec_attr = [] - for n,v in e[1]: - if n in olist(vars.date_ops) or n == "operation": - continue - elif n in vars.in_range_attrs: - node.setAttribute(n,v) - else: - date_spec_attr.append([n,v]) - if not date_spec_attr: - return node - elem = operation == "date_spec" and "date_spec" or "duration" - tmp = cib_factory.createElement(elem) - old_date_spec = lookup_node(tmp,old_date) # first find old date element - set_id(tmp,old_date_spec,id_hint) - for n,v in date_spec_attr: - tmp.setAttribute(n,v) - node.appendChild(tmp) - return node - -def mkxmlrsc_set(e,oldnode,id_hint): - ''' - Create a resource_set xml from the (name,dict) pair. 
- ''' - node = cib_factory.createElement(e[0]) - old_rsc_set = lookup_node(node,oldnode) # first find old date element - set_id(node,old_rsc_set,id_hint) - for ref in e[1]: - if ref[0] == "resource_ref": - ref_node = cib_factory.createElement(ref[0]) - ref_node.setAttribute(ref[1][0],ref[1][1]) - node.appendChild(ref_node) - elif ref[0] in ("sequential", "action", "role"): - node.setAttribute(ref[0], ref[1]) - return node - -def mkxmlaclrole_ref(e): - ''' - Create a role reference xml. Very simple, but different from - everything else. - ''' - node = cib_factory.createElement(e[0]) - node.setAttribute(e[1][0],e[1][1]) - return node - -conv_list = { - "params": "instance_attributes", - "meta": "meta_attributes", - "property": "cluster_property_set", - "rsc_defaults": "rsc_defaults", - "op_defaults": "op_defaults", - "attributes": "instance_attributes", - "utilization": "utilization", - "operations": "operations", - "op": "op", -} -def mkxmlnode(e,oldnode,id_hint): - ''' - Create xml from the (name,dict) pair. The name is the name of - the element. The dict contains either a set of nvpairs or a - set of attributes. The id is either generated or copied if - found in the provided xml. Stuff such as instance_attributes. 
- ''' - if e[0] in conv_list: - e[0] = conv_list[e[0]] - if e[0] in ("instance_attributes","meta_attributes","operations","rsc_defaults","op_defaults","cluster_property_set","utilization"): - return mkxmlnvpairs(e,oldnode,id_hint) - elif e[0] == "op": - return mkxmlop(e,oldnode,id_hint) - elif e[0] == "date_expression": - return mkxmldate(e,oldnode,id_hint) - elif e[0] == "resource_set": - return mkxmlrsc_set(e,oldnode,id_hint) - elif e[0] == "role_ref": - return mkxmlaclrole_ref(e) - else: - return mkxmlsimple(e,oldnode,id_hint) - -def set_nvpair(set_node,name,value): - n_id = set_node.getAttribute("id") - for c in set_node.childNodes: - if is_element(c) and c.getAttribute("name") == name: - c.setAttribute("value",value) - return - np = cib_factory.createElement("nvpair") - np.setAttribute("name",name) - np.setAttribute("value",value) - new_id = id_store.new(np,n_id) - np.setAttribute("id",new_id) - set_node.appendChild(np) - -# -# cib element classes (CibObject the parent class) -# -class CibObject(object): - ''' - The top level object of the CIB. Resources and constraints. - ''' - state_fmt = "%16s %-8s%-8s%-8s%-8s%-8s%-4s" - set_names = {} - def __init__(self,xml_obj_type,obj_id = None): - if not xml_obj_type in cib_object_map: - unsupported_err(xml_obj_type) - return - self.obj_type = cib_object_map[xml_obj_type][0] - self.parent_type = cib_object_map[xml_obj_type][2] - self.xml_obj_type = xml_obj_type - self.origin = "" # where did it originally come from? 
- self.nocli = False # we don't support this one - self.nocli_warn = True # don't issue warnings all the time - self.updated = False # was the object updated - self.invalid = False # the object has been invalidated (removed) - self.moved = False # the object has been moved (from/to a container) - self.recreate = False # constraints to be recreated - self.parent = None # object superior (group/clone/ms) - self.children = [] # objects inferior - if obj_id: - if not self.mknode(obj_id): - self = None # won't do :( - else: - self.obj_id = None - self.node = None - def dump_state(self): - 'Print object status' - print self.state_fmt % \ - (self.obj_id,self.origin,self.updated,self.moved,self.invalid, \ - self.parent and self.parent.obj_id or "", \ - len(self.children)) - def repr_cli_xml(self,node,format): - h = cli_display.keyword("xml") - l = node.toprettyxml('\t').split('\n') - l = [x for x in l if x] # drop empty lines - if format > 0: - return "%s %s" % (h,' \\\n'.join(l)) - else: - return "%s %s" % (h,''.join(l)) - def repr_cli(self,node = None,format = 1): - ''' - CLI representation for the node. - repr_cli_head and repr_cli_child in subclasess. - ''' - if not node: - node = self.node - if self.nocli: - return self.repr_cli_xml(node,format) - l = [] - head_s = self.repr_cli_head(node) - if not head_s: # everybody must have a head - return None - comments = [] - l.append(head_s) - cli_add_description(node,l) - for c in node.childNodes: - if is_comment(c): - comments.append(c.data) - continue - if not is_element(c): - continue - s = self.repr_cli_child(c,format) - if s: - l.append(s) - return self.cli_format(l,comments,format) - def repr_cli_child(self,c,format): - if c.tagName in self.set_names: - return "%s %s" % \ - (cli_display.keyword(self.set_names[c.tagName]), \ - cli_pairs(nvpairs2list(c))) - def cli2node(self,cli,oldnode = None): - ''' - Convert CLI representation to a DOM node. - Defined in subclasses. 
- ''' - cli_list = mk_cli_list(cli) - if not cli_list: - return None - if not oldnode: - if self.obj_type == "property": - oldnode = get_topnode(cib_factory.doc,self.parent_type) - elif self.xml_obj_type in vars.defaults_tags: - oldnode = self.node.parentNode - else: - oldnode = self.node - comments = get_comments(cli_list) - node = self.cli_list2node(cli_list,oldnode) - if comments and node: - stuff_comments(node,comments) - return node - def cli_format(self,l,comments,format): - ''' - Format and add comment (if any). - ''' - s = cli_format(l,format) - cs = '\n'.join(comments) - return (comments and format >=0) and '\n'.join([cs,s]) or s - def move_comments(self): - ''' - Move comments to the top of the node. - ''' - l = [] - firstelem = None - for n in self.node.childNodes: - if is_comment(n): - if firstelem: - l.append(n) - else: - if not firstelem and is_element(n): - firstelem = n - for comm_node in l: - common_debug("move comm %s" % comm_node.toprettyxml()) - self.node.insertBefore(comm_node, firstelem) - common_debug("obj %s node: %s" % (self.obj_id,self.node.toprettyxml())) - def mknode(self,obj_id): - if not cib_factory.is_cib_sane(): - return False - if id_store.id_in_use(obj_id): - return False - if self.xml_obj_type in vars.defaults_tags: - tag = "meta_attributes" - else: - tag = self.xml_obj_type - self.node = cib_factory.createElement(tag) - self.obj_id = obj_id - self.node.setAttribute("id",self.obj_id) - self.origin = "user" - return True - def can_be_renamed(self): - ''' - Return False if this object can't be renamed. 
- ''' - if is_rsc_running(self.obj_id): - common_err("cannot rename a running resource (%s)" % self.obj_id) - return False - if not is_live_cib() and self.node.tagName == "node": - common_err("cannot rename nodes") - return False - return True - def attr_exists(self,attr): - if not attr in self.node.attributes.keys(): - no_attribute_err(attr,self.obj_id) - return False - return True - def cli_use_validate(self): - ''' - Check validity of the object, as we know it. It may - happen that we don't recognize a construct, but that the - object is still valid for the CRM. In that case, the - object is marked as "CLI read only", i.e. we will neither - convert it to CLI nor try to edit it in that format. - - The validation procedure: - we convert xml to cli and then back to xml. If the two - xml representations match then we can understand the xml. - ''' - if not self.node: - return True - if not self.attr_exists("id"): - return False - cli_display.set_no_pretty() - cli_text = self.repr_cli(format = 0) - cli_display.reset_no_pretty() - if not cli_text: - return False - common_debug("clitext: %s" % cli_text) - xml2 = self.cli2node(cli_text) - if not xml2: - return False - rc = xml_cmp(self.node, xml2, show = True) - xml2.unlink() - return rc - def check_sanity(self): - ''' - Right now, this is only for primitives. - And groups/clones/ms and cluster properties. 
- ''' - return 0 - def matchcli(self,cli_list): - head = cli_list[0] - return self.obj_type == head[0] \ - and self.obj_id == find_value(head[1],"id") - def match(self,xml_obj_type,obj_id): - return self.xml_obj_type == xml_obj_type and self.obj_id == obj_id - def obj_string(self): - return "%s:%s" % (self.obj_type,self.obj_id) - def reset_updated(self): - self.updated = False - self.moved = False - self.recreate = False - for child in self.children: - child.reset_updated() - def propagate_updated(self): - if self.parent: - self.parent.updated = self.updated - self.parent.propagate_updated() - def top_parent(self): - '''Return the top parent or self''' - if self.parent: - return self.parent.top_parent() - else: - return self - def find_child_in_node(self,child): - for c in self.node.childNodes: - if not is_element(c): - continue - if c.tagName == child.obj_type and \ - c.getAttribute("id") == child.obj_id: - return c - return None - def filter(self,*args): - "Filter objects." - if not args: - return True - if args[0] == "NOOBJ": - return False - if args[0] == "changed": - return self.updated or self.origin == "user" - if args[0].startswith("type:"): - return self.obj_type == args[0][5:] - return self.obj_id in args - -def mk_cli_list(cli): - 'Sometimes we get a string and sometimes a list.' - if type(cli) == type('') or type(cli) == type(u''): - cp = CliParser() - # what follows looks strange, but the last string actually matters - # the previous ones may be comments and are collected by the parser - for s in lines2cli(cli): - cli_list = cp.parse(s) - return cli_list - else: - return cli - -class CibNode(CibObject): - ''' - Node and node's attributes. 
- ''' - set_names = { - "instance_attributes": "attributes", - "utilization": "utilization", - } - def repr_cli_head(self,node): - obj_type = vars.cib_cli_map[node.tagName] - node_id = node.getAttribute("id") - uname = node.getAttribute("uname") - s = cli_display.keyword(obj_type) - if node_id != uname: - s = '%s $id="%s"' % (s, node_id) - s = '%s %s' % (s, cli_display.id(uname)) - type = node.getAttribute("type") - if type != vars.node_default_type: - s = '%s:%s' % (s, type) - return s - def cli_list2node(self,cli_list,oldnode): - head = copy.copy(cli_list[0]) - head[0] = backtrans[head[0]] - obj_id = find_value(head[1],"$id") - if not obj_id: - obj_id = find_value(head[1],"uname") - if not obj_id: - return None - type = find_value(head[1],"type") - if not type: - type = vars.node_default_type - head[1].append(["type",type]) - headnode = mkxmlsimple(head,get_topnode(cib_factory.doc,self.parent_type),'node') - id_hint = headnode.getAttribute("uname") - for e in cli_list[1:]: - n = mkxmlnode(e,oldnode,id_hint) - headnode.appendChild(n) - remove_id_used_attributes(get_topnode(cib_factory.doc,self.parent_type)) - return headnode - -def get_ra(node): - ra_type = node.getAttribute("type") - ra_class = node.getAttribute("class") - ra_provider = node.getAttribute("provider") - return RAInfo(ra_class,ra_type,ra_provider) - -class CibPrimitive(CibObject): - ''' - Primitives. 
- ''' - set_names = { - "instance_attributes": "params", - "meta_attributes": "meta", - "utilization": "utilization", - } - def repr_cli_head(self,node): - obj_type = vars.cib_cli_map[node.tagName] - node_id = node.getAttribute("id") - ra_type = node.getAttribute("type") - ra_class = node.getAttribute("class") - ra_provider = node.getAttribute("provider") - s1 = s2 = '' - if ra_class: - s1 = "%s:"%ra_class - if ra_provider: - s2 = "%s:"%ra_provider - s = cli_display.keyword(obj_type) - id = cli_display.id(node_id) - return "%s %s %s" % (s, id, ''.join((s1,s2,ra_type))) - def repr_cli_child(self,c,format): - if c.tagName in self.set_names: - return "%s %s" % \ - (cli_display.keyword(self.set_names[c.tagName]), \ - cli_pairs(nvpairs2list(c))) - elif c.tagName == "operations": - return cli_operations(c,format) - def cli_list2node(self,cli_list,oldnode): - ''' - Convert a CLI description to DOM node. - Try to preserve as many ids as possible in case there's - an old XML version. - ''' - head = copy.copy(cli_list[0]) - head[0] = backtrans[head[0]] - headnode = mkxmlsimple(head,oldnode,'rsc') - id_hint = headnode.getAttribute("id") - operations = None - for e in cli_list[1:]: - n = mkxmlnode(e,oldnode,id_hint) - if keyword_cmp(e[0], "operations"): - operations = n - if not keyword_cmp(e[0], "op"): - headnode.appendChild(n) - else: - if not operations: - operations = mkxmlnode(["operations",{}],oldnode,id_hint) - headnode.appendChild(operations) - operations.appendChild(n) - remove_id_used_attributes(oldnode) - return headnode - def add_operation(self,cli_list): - # check if there is already an op with the same interval - comments = get_comments(cli_list) - head = copy.copy(cli_list[0]) - name = find_value(head[1], "name") - interval = find_value(head[1], "interval") - if find_operation(self.node,name,interval): - common_err("%s already has a %s op with interval %s" % \ - (self.obj_id, name, interval)) - return None - # drop the rsc attribute - 
head[1].remove(["rsc",self.obj_id]) - # create an xml node - mon_node = mkxmlsimple(head, None, self.obj_id) - # get the place to append it to - try: - op_node = self.node.getElementsByTagName("operations")[0] - except: - op_node = cib_factory.createElement("operations") - self.node.appendChild(op_node) - op_node.appendChild(mon_node) - if comments and self.node: - stuff_comments(self.node,comments) - # the resource is updated - self.updated = True - self.propagate_updated() - return self - def check_sanity(self): - ''' - Check operation timeouts and if all required parameters - are defined. - ''' - if not self.node: # eh? - common_err("%s: no xml (strange)" % self.obj_id) - return user_prefs.get_check_rc() - rc3 = sanity_check_meta(self.obj_id,self.node,vars.rsc_meta_attributes) - ra = get_ra(self.node) - if not ra.mk_ra_node(): # no RA found? - if cib_factory.is_asymm_cluster(): - return rc3 - ra.error("no such resource agent") - return user_prefs.get_check_rc() - params = [] - for c in self.node.childNodes: - if not is_element(c): - continue - if c.tagName == "instance_attributes": - params += nvpairs2list(c) - rc1 = ra.sanity_check_params(self.obj_id, params) - actions = {} - for c in self.node.childNodes: - if not is_element(c): - continue - if c.tagName == "operations": - for c2 in c.childNodes: - if is_element(c2) and c2.tagName == "op": - op,pl = op2list(c2) - if op: - actions[op] = pl - default_timeout = get_default_timeout() - rc2 = ra.sanity_check_ops(self.obj_id, actions, default_timeout) - return rc1 | rc2 | rc3 - -class CibContainer(CibObject): - ''' - Groups and clones and ms. 
- ''' - set_names = { - "instance_attributes": "params", - "meta_attributes": "meta", - } - def repr_cli_head(self,node): - try: - obj_type = vars.cib_cli_map[node.tagName] - except: - unsupported_err(node.tagName) - return None - node_id = node.getAttribute("id") - children = [] - for c in node.childNodes: - if not is_element(c): - continue - if (obj_type == "group" and is_primitive(c)) or \ - is_child_rsc(c): - children.append(cli_display.rscref(c.getAttribute("id"))) - elif obj_type in vars.clonems_tags and is_child_rsc(c): - children.append(cli_display.rscref(c.getAttribute("id"))) - s = cli_display.keyword(obj_type) - id = cli_display.id(node_id) - return "%s %s %s" % (s, id, ' '.join(children)) - def cli_list2node(self,cli_list,oldnode): - head = copy.copy(cli_list[0]) - head[0] = backtrans[head[0]] - headnode = mkxmlsimple(head,oldnode,'grp') - id_hint = headnode.getAttribute("id") - for e in cli_list[1:]: - n = mkxmlnode(e,oldnode,id_hint) - headnode.appendChild(n) - v = find_value(head[1],"$children") - if v: - for child_id in v: - obj = cib_factory.find_object(child_id) - if obj: - n = obj.node.cloneNode(1) - headnode.appendChild(n) - else: - no_object_err(child_id) - remove_id_used_attributes(oldnode) - return headnode - def check_sanity(self): - ''' - Check meta attributes. - ''' - if not self.node: # eh? - common_err("%s: no xml (strange)" % self.obj_id) - return user_prefs.get_check_rc() - if self.obj_type == "group": - l = vars.rsc_meta_attributes - elif self.obj_type == "clone": - l = vars.clone_meta_attributes - elif self.obj_type == "ms": - l = vars.clone_meta_attributes + vars.ms_meta_attributes - rc = sanity_check_nvpairs(self.obj_id,self.node,l) - return rc - -class CibLocation(CibObject): - ''' - Location constraint. 
- ''' - def repr_cli_head(self,node): - obj_type = vars.cib_cli_map[node.tagName] - node_id = node.getAttribute("id") - rsc = cli_display.rscref(node.getAttribute("rsc")) - s = cli_display.keyword(obj_type) - id = cli_display.id(node_id) - s = "%s %s %s"%(s,id,rsc) - pref_node = node.getAttribute("node") - score = cli_display.score(get_score(node)) - if pref_node: - return "%s %s %s" % (s,score,pref_node) - else: - return s - def repr_cli_child(self,c,format): - if c.tagName == "rule": - return "%s %s" % \ - (cli_display.keyword("rule"), cli_rule(c)) - def cli_list2node(self,cli_list,oldnode): - head = copy.copy(cli_list[0]) - head[0] = backtrans[head[0]] - headnode = mkxmlsimple(head,oldnode,'location') - id_hint = headnode.getAttribute("id") - oldrule = None - for e in cli_list[1:]: - if e[0] in ("expression","date_expression"): - n = mkxmlnode(e,oldrule,id_hint) - else: - n = mkxmlnode(e,oldnode,id_hint) - if keyword_cmp(e[0], "rule"): - add_missing_attr(n) - rule = n - headnode.appendChild(n) - oldrule = lookup_node(rule,oldnode,location_only=True) - else: - rule.appendChild(n) - remove_id_used_attributes(oldnode) - return headnode - def check_sanity(self): - ''' - Check if node references match existing nodes. - ''' - if not self.node: # eh? - common_err("%s: no xml (strange)" % self.obj_id) - return user_prefs.get_check_rc() - uname = self.node.getAttribute("node") - if uname and uname not in cib_factory.node_id_list(): - common_warn("%s: referenced node %s does not exist" % (self.obj_id,uname)) - return 1 - rc = 0 - for enode in self.node.getElementsByTagName("expression"): - if enode.getAttribute("attribute") == "#uname": - uname = enode.getAttribute("value") - if uname and uname not in cib_factory.node_id_list(): - common_warn("%s: referenced node %s does not exist" % (self.obj_id,uname)) - rc = 1 - return rc - -class CibSimpleConstraint(CibObject): - ''' - Colocation and order constraints. 
- ''' - def repr_cli_head(self,node): - obj_type = vars.cib_cli_map[node.tagName] - node_id = node.getAttribute("id") - s = cli_display.keyword(obj_type) - id = cli_display.id(node_id) - score = cli_display.score(get_score(node)) - if node.getElementsByTagName("resource_set"): - col = rsc_set_constraint(node,obj_type) - else: - col = two_rsc_constraint(node,obj_type) - if not col: - return None - symm = node.getAttribute("symmetrical") - if symm: - col.append("symmetrical=%s"%symm) - return "%s %s %s %s" % (s,id,score,' '.join(col)) - def repr_cli_child(self,c,format): - pass # no children here - def cli_list2node(self,cli_list,oldnode): - head = copy.copy(cli_list[0]) - head[0] = backtrans[head[0]] - headnode = mkxmlsimple(head,oldnode,'') - id_hint = headnode.getAttribute("id") - for e in cli_list[1:]: - # if more than one element, it's a resource set - n = mkxmlnode(e,oldnode,id_hint) - headnode.appendChild(n) - remove_id_used_attributes(oldnode) - return headnode - -class CibProperty(CibObject): - ''' - Cluster properties. 
- ''' - def repr_cli_head(self,node): - return '%s $id="%s"' % \ - (cli_display.keyword(self.obj_type), node.getAttribute("id")) - def repr_cli_child(self,c,format): - name = c.getAttribute("name") - if "value" in c.attributes.keys(): - value = c.getAttribute("value") - else: - value = None - return nvpair_format(name,value) - def cli_list2node(self,cli_list,oldnode): - head = copy.copy(cli_list[0]) - head[0] = backtrans[head[0]] - obj_id = find_value(head[1],"$id") - if not obj_id: - obj_id = cib_object_map[self.xml_obj_type][3] - headnode = mkxmlnode(head,oldnode,obj_id) - remove_id_used_attributes(oldnode) - return headnode - def matchcli(self,cli_list): - head = cli_list[0] - if self.obj_type != head[0]: - return False - # if no id specified return True - # (match the first of a kind) - if not find_value(head[1],"$id"): - return True - return self.obj_id == find_value(head[1],"$id") - def check_sanity(self): - ''' - Match properties with PE metadata. - ''' - if not self.node: # eh? - common_err("%s: no xml (strange)" % self.obj_id) - return user_prefs.get_check_rc() - l = [] - if self.obj_type == "property": - l = get_properties_list() - l += ("dc-version","cluster-infrastructure","last-lrm-refresh") - elif self.obj_type == "op_defaults": - l = vars.op_attributes - elif self.obj_type == "rsc_defaults": - l = vars.rsc_meta_attributes - rc = sanity_check_nvpairs(self.obj_id,self.node,l) - return rc - -class CibAcl(CibObject): - ''' - User and role ACL. 
- ''' - def repr_cli_head(self,node): - obj_type = vars.cib_cli_map[node.tagName] - id = node.getAttribute("id") - s = cli_display.keyword(obj_type) - id = cli_display.id(id) - return "%s %s" % (s,id) - def repr_cli_child(self,c,format): - if c.tagName in vars.acl_rule_names: - return cli_acl_rule(c,format) - else: - return cli_acl_roleref(c,format) - def cli_list2node(self,cli_list,oldnode): - head = copy.copy(cli_list[0]) - head[0] = backtrans[head[0]] - headnode = mkxmlsimple(head,oldnode,'') - if len(cli_list) == 1: - return headnode - id_hint = headnode.getAttribute("id") - for e in cli_list[1:]: - n = mkxmlnode(e,oldnode,id_hint) - headnode.appendChild(n) - remove_id_used_attributes(oldnode) - return headnode -# -################################################################ - -# -# cib factory -# -cib_piped = "cibadmin -p" - -def get_default_timeout(): - t = cib_factory.get_op_default("timeout") - if t: - return t - t = cib_factory.get_property("default-action-timeout") - if t: - return t - try: - return get_pe_meta().param_default("default-action-timeout") - except: - return 0 - -# xml -> cli translations (and classes) -cib_object_map = { - "node": ( "node", CibNode, "nodes" ), - "primitive": ( "primitive", CibPrimitive, "resources" ), - "group": ( "group", CibContainer, "resources" ), - "clone": ( "clone", CibContainer, "resources" ), - "master": ( "ms", CibContainer, "resources" ), - "rsc_location": ( "location", CibLocation, "constraints" ), - "rsc_colocation": ( "colocation", CibSimpleConstraint, "constraints" ), - "rsc_order": ( "order", CibSimpleConstraint, "constraints" ), - "cluster_property_set": ( "property", CibProperty, "crm_config", "cib-bootstrap-options" ), - "rsc_defaults": ( "rsc_defaults", CibProperty, "rsc_defaults", "rsc-options" ), - "op_defaults": ( "op_defaults", CibProperty, "op_defaults", "op-options" ), - "acl_role": ( "role", CibAcl, "acls" ), - "acl_user": ( "user", CibAcl, "acls" ), -} -backtrans = odict() # generate a 
translation cli -> tag -for key in cib_object_map: - backtrans[cib_object_map[key][0]] = key - -def can_migrate(node): - for c in node.childNodes: - if not is_element(c) or c.tagName != "meta_attributes": - continue - pl = nvpairs2list(c) - if find_value(pl,"allow-migrate") == "true": - return True - return False - -cib_upgrade = "cibadmin --upgrade --force" -class CibFactory(Singleton): - ''' - Juggle with CIB objects. - See check_structure below for details on the internal cib - representation. - ''' - shadowcmd = ">/dev/null 1: - common_warn("%s contains more than one %s, using first" % \ - (obj.obj_id,attr_list_type)) - id = node_l[0].getAttribute("id") - if not id: - common_err("%s reference not found" % id_ref) - return id_ref # hope that user will fix that - return id - # verify if id_ref exists - node_l = self.doc.getElementsByTagName(attr_list_type) - for node in node_l: - if node.getAttribute("id") == id_ref: - return id_ref - common_err("%s reference not found" % id_ref) - return id_ref # hope that user will fix that - def _get_attr_value(self,obj_type,attr): - if not self.is_cib_sane(): - return None - for obj in self.cib_objects: - if obj.obj_type == obj_type and obj.node: - pl = nvpairs2list(obj.node) - v = find_value(pl, attr) - if v: - return v - return None - def get_property(self,property): - ''' - Get the value of the given cluster property. - ''' - return self._get_attr_value("property",property) - def get_op_default(self,attr): - ''' - Get the value of the attribute from op_defaults. - ''' - return self._get_attr_value("op_defaults",attr) - def is_asymm_cluster(self): - symm = self.get_property("symmetric-cluster") - return symm and symm != "true" - def new_object(self,obj_type,obj_id): - "Create a new object of type obj_type." 
- if id_store.id_in_use(obj_id): - return None - for xml_obj_type,v in cib_object_map.items(): - if v[0] == obj_type: - obj = v[1](xml_obj_type,obj_id) - if obj.obj_id: - return obj - else: - return None - return None - def mkobj_list(self,mode,*args): - obj_list = [] - for obj in self.cib_objects: - f = lambda: obj.filter(*args) - if not f(): - continue - if mode == "cli" and obj.nocli and obj.nocli_warn: - obj.nocli_warn = False - obj_cli_warn(obj.obj_id) - obj_list.append(obj) - return obj_list - def is_cib_empty(self): - return not self.mkobj_list("cli","type:primitive") - def has_cib_changed(self): - return self.mkobj_list("xml","changed") or self.remove_queue - def verify_constraints(self,node): - ''' - Check if all resources referenced in a constraint exist - ''' - rc = True - constraint_id = node.getAttribute("id") - for obj_id in referenced_resources(node): - if not self.find_object(obj_id): - constraint_norefobj_err(constraint_id,obj_id) - rc = False - return rc - def verify_rsc_children(self,node): - ''' - Check prerequisites: - a) all children must exist - b) no child may have other parent than me - (or should we steal children?) - c) there may not be duplicate children - ''' - obj_id = node.getAttribute("id") - if not obj_id: - common_err("element %s has no id" % node.tagName) - return False - try: - obj_type = cib_object_map[node.tagName][0] - except: - common_err("element %s (%s) not recognized"%(node.tagName,obj_id)) - return False - c_ids = get_rsc_children_ids(node) - if not c_ids: - return True - rc = True - c_dict = {} - for child_id in c_ids: - if not self.verify_child(child_id,obj_type,obj_id): - rc = False - if child_id in c_dict: - common_err("in group %s child %s listed more than once"%(obj_id,child_id)) - rc = False - c_dict[child_id] = 1 - return rc - def verify_child(self,child_id,obj_type,obj_id): - 'Check if child exists and obj_id is (or may become) its parent.' 
- child = self.find_object(child_id) - if not child: - no_object_err(child_id) - return False - if child.parent and child.parent.obj_id != obj_id: - common_err("%s already in use at %s"%(child_id,child.parent.obj_id)) - return False - if obj_type == "group" and child.obj_type != "primitive": - common_err("a group may contain only primitives; %s is %s"%(child_id,child.obj_type)) - return False - if not child.obj_type in vars.children_tags: - common_err("%s may contain a primitive or a group; %s is %s"%(obj_type,child_id,child.obj_type)) - return False - return True - def verify_element(self,node): - ''' - Can we create this object given its CLI representation. - This is not about syntax, we're past that, but about - semantics. - Right now we check if the children, if any, are fit for - the parent. And if this is a constraint, if all - referenced resources are present. - ''' - rc = True - if not self.verify_rsc_children(node): - rc = False - if not self.verify_constraints(node): - rc = False - return rc - def create_object(self,*args): - return self.create_from_cli(CliParser().parse(list(args))) != None - def set_property_cli(self,cli_list): - comments = get_comments(cli_list) - head_pl = cli_list[0] - obj_type = head_pl[0].lower() - pset_id = find_value(head_pl[1],"$id") - if pset_id: - head_pl[1].remove(["$id",pset_id]) - else: - pset_id = cib_object_map[backtrans[obj_type]][3] - obj = self.find_object(pset_id) - if not obj: - if not is_id_valid(pset_id): - invalid_id_err(pset_id) - return None - obj = self.new_object(obj_type,pset_id) - if not obj: - return None - get_topnode(self.doc,obj.parent_type).appendChild(obj.node) - obj.origin = "user" - self.cib_objects.append(obj) - for n,v in head_pl[1]: - set_nvpair(obj.node,n,v) - if comments and obj.node: - stuff_comments(obj.node,comments) - obj.updated = True - return obj - def add_op(self,cli_list): - '''Add an op to a primitive.''' - head = cli_list[0] - # does the referenced primitive exist - rsc_id = 
find_value(head[1],"rsc") - rsc_obj = self.find_object(rsc_id) - if not rsc_obj: - no_object_err(rsc_id) - return None - if rsc_obj.obj_type != "primitive": - common_err("%s is not a primitive" % rsc_id) - return None - return rsc_obj.add_operation(cli_list) - def create_from_cli(self,cli): - 'Create a new cib object from the cli representation.' - cli_list = mk_cli_list(cli) - if not cli_list: - return None - head = cli_list[0] - obj_type = head[0].lower() - obj_id = find_value(head[1],"id") - if obj_id and not is_id_valid(obj_id): - invalid_id_err(obj_id) - return None - if len(cli_list) >= 2 and cli_list[1][0] == "raw": - doc = xml.dom.minidom.parseString(cli_list[1][1]) - return self.create_from_node(doc.childNodes[0]) - if obj_type in olist(vars.nvset_cli_names): - return self.set_property_cli(cli_list) - if obj_type == "op": - return self.add_op(cli_list) - if obj_type == "node": - obj = self.find_object(obj_id) - # make an exception and allow updating nodes - if obj: - self.merge_from_cli(obj,cli_list) - return obj - obj = self.new_object(obj_type,obj_id) - if not obj: - return None - node = obj.cli2node(cli_list) - return self.add_element(obj, node) - def update_from_cli(self,obj,cli_list,update = False): - ''' - Replace element from the cli intermediate. - If this is an update and the element is properties, then - the new properties should be merged with the old. - Otherwise, users may be surprised. - ''' - id_store.remove_xml(obj.node) - if len(cli_list) >= 2 and cli_list[1][0] == "raw": - doc = xml.dom.minidom.parseString(cli_list[1][1]) - id_store.store_xml(doc.childNodes[0]) - return self.update_element(obj,doc.childNodes[0]) - elif update and obj.obj_type in vars.nvset_cli_names: - self.merge_from_cli(obj,cli_list) - return True - else: - return self.update_element(obj,obj.cli2node(cli_list)) - def update_from_node(self,obj,node): - 'Update element from a doc node.' 
- id_store.replace_xml(obj.node,node) - return self.update_element(obj,node) - def update_element(self,obj,newnode): - 'Update element from a doc node.' - if not newnode: - return False - if not self.is_cib_sane(): - id_store.replace_xml(newnode,obj.node) - return False - oldnode = obj.node - if xml_cmp(oldnode,newnode): - newnode.unlink() - return True # the new and the old versions are equal - obj.node = newnode - if not self.test_element(obj,newnode): - id_store.replace_xml(newnode,oldnode) - obj.node = oldnode - newnode.unlink() - return False - obj.node = self.replaceNode(newnode,oldnode) - obj.nocli = False # try again after update - self.adjust_children(obj) - if not obj.cli_use_validate(): - obj.nocli_warn = True - obj.nocli = True - oldnode.unlink() - obj.updated = True - obj.propagate_updated() - return True - def merge_from_cli(self,obj,cli_list): - node = obj.cli2node(cli_list) - if not node: - return - if obj.obj_type in vars.nvset_cli_names: - rc = merge_nvpairs(obj.node, node) - else: - rc = merge_nodes(obj.node, node) - if rc: - obj.updated = True - obj.propagate_updated() - def update_moved(self,obj): - 'Updated the moved flag. Mark affected constraints.' - obj.moved = not obj.moved - if obj.moved: - for c_obj in self.related_constraints(obj): - c_obj.recreate = True - def adjust_children(self,obj): - ''' - All stuff children related: manage the nodes of children, - update the list of children for the parent, update - parents in the children. - ''' - new_children_ids = get_rsc_children_ids(obj.node) - if not new_children_ids: - return - old_children = obj.children - obj.children = [self.find_object(x) for x in new_children_ids] - self._relink_orphans_to_top(old_children,obj.children) - self._update_children(obj) - def _relink_child_to_top(self,obj): - 'Relink a child to the top node.' 
- obj.node.parentNode.removeChild(obj.node) - get_topnode(self.doc,obj.parent_type).appendChild(obj.node) - if obj.origin == "cib": - self.update_moved(obj) - obj.parent = None - def _update_children(self,obj): - '''For composite objects: update all children nodes. - ''' - # unlink all and find them in the new node - for child in obj.children: - oldnode = child.node - child.node = obj.find_child_in_node(child) - if child.children: # and children of children - self._update_children(child) - rmnode(oldnode) - if not child.parent and child.origin == "cib": - self.update_moved(child) - if child.parent and child.parent != obj: - child.parent.updated = True # the other parent updated - child.parent = obj - def _relink_orphans_to_top(self,old_children,new_children): - "New orphans move to the top level for the object type." - for child in old_children: - if child not in new_children: - self._relink_child_to_top(child) - def test_element(self,obj,node): - if not node.getAttribute("id"): - return False - if not obj.xml_obj_type in vars.defaults_tags: - if not self.verify_element(node): - return False - if user_prefs.is_check_always() \ - and obj.check_sanity() > 1: - return False - return True - def update_links(self,obj): - ''' - Update the structure links for the object (obj.children, - obj.parent). Update also the dom nodes, if necessary. 
- ''' - obj.children = [] - if obj.obj_type not in vars.container_tags: - return - for c in obj.node.childNodes: - if is_child_rsc(c): - child = self.find_object_for_node(c) - if not child: - missing_obj_err(c) - continue - child.parent = obj - obj.children.append(child) - if not c.isSameNode(child.node): - rmnode(child.node) - child.node = c - def add_element(self,obj,node): - obj.node = node - obj.obj_id = node.getAttribute("id") - if not self.test_element(obj, node): - id_store.remove_xml(node) - node.unlink() - return None - pnode = get_topnode(self.doc,obj.parent_type) - common_debug("append child %s to %s" % (obj.obj_id, pnode.tagName)) - pnode.appendChild(node) - self.adjust_children(obj) - self.redirect_children_constraints(obj) - if not obj.cli_use_validate(): - self.nocli_warn = True - obj.nocli = True - self.update_links(obj) - obj.origin = "user" - self.cib_objects.append(obj) - return obj - def create_from_node(self,node): - 'Create a new cib object from a document node.' - if not node: - return None - try: - obj_type = cib_object_map[node.tagName][0] - except: - return None - if is_defaults(node): - node = get_rscop_defaults_meta_node(node) - if not node: - return None - if node.ownerDocument != self.doc: - node = self.doc.importNode(node,1) - obj = self.new_object(obj_type, node.getAttribute("id")) - if not obj: - return None - if not id_store.store_xml(node): - return None - return self.add_element(obj, node) - def cib_objects_string(self, obj_list = None): - l = [] - if not obj_list: - obj_list = self.cib_objects - for obj in obj_list: - l.append(obj.obj_string()) - return ' '.join(l) - def _remove_obj(self,obj): - "Remove a cib object and its children." - # remove children first - # can't remove them here from obj.children! 
- common_debug("remove object %s" % obj.obj_string()) - for child in obj.children: - #self._remove_obj(child) - # just relink, don't remove children - self._relink_child_to_top(child) - if obj.parent: # remove obj from its parent, if any - obj.parent.children.remove(obj) - id_store.remove_xml(obj.node) - rmnode(obj.node) - obj.invalid = True - self.add_to_remove_queue(obj) - self.cib_objects.remove(obj) - for c_obj in self.related_constraints(obj): - if is_simpleconstraint(c_obj.node) and obj.children: - # the first child inherits constraints - rename_rscref(c_obj,obj.obj_id,obj.children[0].obj_id) - delete_rscref(c_obj,obj.obj_id) - if silly_constraint(c_obj.node,obj.obj_id): - # remove invalid constraints - self._remove_obj(c_obj) - if not self._no_constraint_rm_msg: - err_buf.info("hanging %s deleted" % c_obj.obj_string()) - def related_constraints(self,obj): - if not is_resource(obj.node): - return [] - c_list = [] - for obj2 in self.cib_objects: - if not is_constraint(obj2.node): - continue - if rsc_constraint(obj.obj_id,obj2.node): - c_list.append(obj2) - return c_list - def redirect_children_constraints(self,obj): - ''' - Redirect constraints to the new parent - ''' - for child in obj.children: - for c_obj in self.related_constraints(child): - rename_rscref(c_obj,child.obj_id,obj.obj_id) - # drop useless constraints which may have been created above - for c_obj in self.related_constraints(obj): - if silly_constraint(c_obj.node,obj.obj_id): - self._no_constraint_rm_msg = True - self._remove_obj(c_obj) - self._no_constraint_rm_msg = False - def add_to_remove_queue(self,obj): - if obj.origin == "cib": - self.remove_queue.append(obj) - #print self.cib_objects_string(self.remove_queue) - def delete_1(self,obj): - ''' - Remove an object and its parent in case the object is the - only child. 
- ''' - if obj.parent and len(obj.parent.children) == 1: - self.delete_1(obj.parent) - if obj in self.cib_objects: # don't remove parents twice - self._remove_obj(obj) - def delete(self,*args): - 'Delete a cib object.' - if not self.doc: - empty_cib_err() - return False - rc = True - l = [] - for obj_id in args: - obj = self.find_object(obj_id) - if not obj: - no_object_err(obj_id) - rc = False - continue - if is_rsc_running(obj_id): - common_err("resource %s is running, can't delete it" % obj_id) - rc = False - else: - l.append(obj) - if l: - l = processing_sort_cli(l) - for obj in reversed(l): - self.delete_1(obj) - return rc - def rename(self,old_id,new_id): - ''' - Rename a cib object. - - check if the resource (if it's a resource) is stopped - - check if the new id is not taken - - find the object with old id - - rename old id to new id in all related objects - (constraints) - - if the object came from the CIB, then it must be - deleted and the one with the new name created - - rename old id to new id in the object - ''' - if not self.doc: - empty_cib_err() - return False - if id_store.id_in_use(new_id): - return False - obj = self.find_object(old_id) - if not obj: - no_object_err(old_id) - return False - if not obj.can_be_renamed(): - return False - for c_obj in self.related_constraints(obj): - rename_rscref(c_obj,old_id,new_id) - rename_id(obj.node,old_id,new_id) - obj.obj_id = new_id - id_store.rename(old_id,new_id) - obj.updated = True - obj.propagate_updated() - def erase(self): - "Remove all cib objects." 
- # remove only bottom objects and no constraints - # the rest will automatically follow - if not self.doc: - empty_cib_err() - return False - erase_ok = True - l = [] - for obj in [obj for obj in self.cib_objects \ - if not obj.children and not is_constraint(obj.node) \ - and obj.obj_type != "node" ]: - if is_rsc_running(obj.obj_id): - common_warn("resource %s is running, can't delete it" % obj.obj_id) - erase_ok = False - else: - l.append(obj) - if not erase_ok: - common_err("CIB erase aborted (nothing was deleted)") - return False - self._no_constraint_rm_msg = True - for obj in l: - self.delete(obj.obj_id) - self._no_constraint_rm_msg = False - remaining = 0 - for obj in self.cib_objects: - if obj.obj_type != "node": - remaining += 1 - if remaining > 0: - common_err("strange, but these objects remained:") - for obj in self.cib_objects: - if obj.obj_type != "node": - print >> sys.stderr, obj.obj_string() - self.cib_objects = [] - return True - def erase_nodes(self): - "Remove nodes only." - if not self.doc: - empty_cib_err() - return False - l = [obj for obj in self.cib_objects if obj.obj_type == "node"] - for obj in l: - self.delete(obj.obj_id) - def refresh(self): - "Refresh from the CIB." 
- self.reset() - self.initialize() - -user_prefs = UserPrefs.getInstance() -options = Options.getInstance() -err_buf = ErrorBuffer.getInstance() -vars = Vars.getInstance() -cib_factory = CibFactory.getInstance() -cli_display = CliDisplay.getInstance() -cib_status = CibStatus.getInstance() -id_store = IdMgmt.getInstance() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/cibstatus.py b/shell/modules/cibstatus.py deleted file mode 100644 index b7e0e79742..0000000000 --- a/shell/modules/cibstatus.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import sys -import os -import re -import time -from singletonmixin import Singleton -from vars import Vars -from xmlutil import * -from msg import * - -def get_tag_by_id(node,tag,id): - "Find a doc node which matches tag and id." 
- for n in node.getElementsByTagName(tag): - if n.getAttribute("id") == id: - return n - return None -def get_status_node_id(n): - try: n = n.parentNode - except: return None - if n.tagName != "node_state": - return get_status_node_id(n) - return n.getAttribute("id") -def get_status_node(status_node,node): - for n in status_node.childNodes: - if not is_element(n) or n.tagName != "node_state": - continue - if n.getAttribute("id") == node: - return n - return None -def get_status_ops(status_node,rsc,op,interval,node = ''): - ''' - Find a doc node which matches the operation. interval set to - "-1" means to lookup an operation with non-zero interval (for - monitors). Empty interval means any interval is fine. - ''' - l = [] - for n in status_node.childNodes: - if not is_element(n) or n.tagName != "node_state": - continue - if node and n.getAttribute("id") != node: - continue - for r in n.getElementsByTagName("lrm_resource"): - if r.getAttribute("id") != rsc: - continue - for o in r.getElementsByTagName("lrm_rsc_op"): - if o.getAttribute("operation") != op: - continue - if o.getAttribute("interval") == interval or \ - (interval == "-1" and o.getAttribute("interval") != "0"): - l.append(o) - return l - -def split_op(op): - if op == "probe": - return "monitor","0" - elif op == "monitor": - return "monitor","-1" - elif op[0:8] == "monitor:": - return "monitor",op[8:] - return op,"0" - -def cib_path(source): - return source[0:7] == "shadow:" and shadowfile(source[7:]) or source - -class CibStatus(Singleton): - ''' - CIB status management - ''' - cmd_inject = "/dev/null 2>&1 crm_simulate -x %s -I %s" - cmd_run = "2>&1 crm_simulate -R -x %s" - cmd_simulate = "2>&1 crm_simulate -S -x %s" - node_ops = { - "online": "-u", - "offline": "-d", - "unclean": "-f", - } - def __init__(self): - self.origin = "live" - self.backing_file = "" # file to keep the live cib - self.status_node = None - self.doc = None - self.cib = None - self.reset_state() - def _cib_path(self,source): - if 
source[0:7] == "shadow:": - return shadowfile(source[7:]) - else: - return source - def _load_cib(self,source): - if source == "live": - if not self.backing_file: - self.backing_file = cib2tmp() - if not self.backing_file: - return None,None - else: - cibdump2file(self.backing_file) - f = self.backing_file - else: - f = cib_path(source) - return read_cib(file2doc,f) - def _load(self,source): - doc,cib = self._load_cib(source) - if not doc: - return False - status = get_conf_elem(doc, "status") - if not status: - return False - self.doc,self.cib = doc,cib - self.status_node = status - self.reset_state() - return True - def reset_state(self): - self.modified = False - self.quorum = '' - self.node_changes = {} - self.op_changes = {} - return True - def source_file(self): - if self.origin == "live": - return self.backing_file - else: - return cib_path(self.origin) - def status_node_list(self): - if not self.get_status(): - return - return [x.getAttribute("id") for x in self.doc.getElementsByTagName("node_state")] - def status_rsc_list(self): - if not self.get_status(): - return - rsc_list = [x.getAttribute("id") for x in self.doc.getElementsByTagName("lrm_resource")] - # how to uniq? - d = {} - for e in rsc_list: - d[e] = 0 - return d.keys() - def load(self,source): - ''' - Load the status section from the given source. The source - may be cluster ("live"), shadow CIB, or CIB in a file. - ''' - if self.backing_file: - os.unlink(self.backing_file) - self.backing_file = "" - if not self._load(source): - common_err("the cib contains no status") - return False - self.origin = source - return True - def save(self,dest = None): - ''' - Save the modified status section to a file/shadow. If the - file exists, then it must be a cib file and the status - section is replaced with our status section. If the file - doesn't exist, then our section and some (?) configuration - is saved. 
- ''' - if not self.modified: - common_info("apparently you didn't modify status") - return False - if (not dest and self.origin == "live") or dest == "live": - common_warn("cannot save status to the cluster") - return False - doc,cib = self.doc,self.cib - if dest: - dest_path = cib_path(dest) - if os.path.isfile(dest_path): - doc,cib = self._load_cib(dest) - if not doc or not cib: - common_err("%s exists, but no cib inside" % dest) - return False - else: - dest_path = cib_path(self.origin) - if doc != self.doc: - status = get_conf_elem(doc, "status") - rmnode(status) - cib.appendChild(doc.importNode(self.status_node,1)) - xml = doc.toprettyxml(user_prefs.xmlindent) - try: f = open(dest_path,"w") - except IOError, msg: - common_err(msg) - return False - f.write(xml) - f.close() - return True - def _crm_simulate(self, cmd, nograph, scores, utilization, verbosity): - if verbosity: - cmd = "%s -%s" % (cmd,verbosity.upper()) - if scores: - cmd = "%s -s" % cmd - if utilization: - cmd = "%s -U" % cmd - if user_prefs.dotty and not nograph: - fd,dotfile = mkstemp() - cmd = "%s -D %s" % (cmd,dotfile) - else: - dotfile = None - rc = ext_cmd(cmd % self.source_file()) - if dotfile: - show_dot_graph(dotfile) - vars.tmpfiles.append(dotfile) - return rc == 0 - # actions is ignored - def run(self, nograph, scores, utilization, actions, verbosity): - return self._crm_simulate(self.cmd_run, \ - nograph, scores, utilization, verbosity) - # actions is ignored - def simulate(self, nograph, scores, utilization, actions, verbosity): - return self._crm_simulate(self.cmd_simulate, \ - nograph, scores, utilization, verbosity) - def get_status(self): - ''' - Return the status section node. - ''' - if (not self.status_node or \ - (self.origin == "live" and not self.modified)) \ - and not self._load(self.origin): - return None - return self.status_node - def list_changes(self): - ''' - Dump a set of changes done. 
- ''' - if not self.modified: - return True - for node in self.node_changes: - print node,self.node_changes[node] - for op in self.op_changes: - print op,self.op_changes[op] - if self.quorum: - print "quorum:",self.quorum - return True - def show(self): - ''' - Page the "pretty" XML of the status section. - ''' - if not self.get_status(): - return False - page_string(self.status_node.toprettyxml(user_prefs.xmlindent)) - return True - def inject(self,opts): - return ext_cmd("%s %s" % \ - (self.cmd_inject % (self.source_file(), self.source_file()), opts)) - def set_quorum(self, v): - rc = self.inject("--quorum=%s" % (v and "true" or "false")) - if rc != 0: - return False - self._load(self.origin) - self.quorum = v and "true" or "false" - self.modified = True - return True - def edit_node(self,node,state): - ''' - Modify crmd, expected, and join attributes of node_state - to set the node's state to online, offline, or unclean. - ''' - if not self.get_status(): - return False - if not state in self.node_ops: - common_err("unknown state %s" % state) - return False - node_node = get_tag_by_id(self.status_node,"node_state",node) - if not node_node: - common_info("node %s created" % node) - return False - rc = self.inject("%s %s" % (self.node_ops[state], node)) - if rc != 0: - return False - self._load(self.origin) - self.node_changes[node] = state - self.modified = True - return True - def edit_op(self,op,rsc,rc_code,op_status,node = ''): - ''' - Set rc-code and op-status in the lrm_rsc_op status - section element. 
- ''' - if not self.get_status(): - return False - l_op,l_int = split_op(op) - op_nodes = get_status_ops(self.status_node,rsc,l_op,l_int,node) - if l_int == "-1" and len(op_nodes) != 1: - common_err("need interval for the monitor op") - return False - if node == '' and len(op_nodes) != 1: - if op_nodes: - nodelist = [get_status_node_id(x) for x in op_nodes] - common_err("operation %s found at %s" % (op,' '.join(nodelist))) - else: - common_err("operation %s not found" % op) - return False - # either the op is fully specified (maybe not found) - # or we found exactly one op_node - if len(op_nodes) == 1: - op_node = op_nodes[0] - if not node: - node = get_status_node_id(op_node) - if not node: - common_err("node not found for the operation %s" % op) - return False - if l_int == "-1": - l_int = op_node.getAttribute("interval") - op_op = op_status == "0" and "-i" or "-F" - rc = self.inject("%s %s_%s_%s@%s=%s" % \ - (op_op, rsc, l_op, l_int, node, rc_code)) - if rc != 0: - return False - self.op_changes[node+":"+rsc+":"+op] = "rc="+rc_code - if op_status: - self.op_changes[node+":"+rsc+":"+op] += "," "op-status="+op_status - self._load(self.origin) - self.modified = True - return True - -vars = Vars.getInstance() -# vim:ts=4:sw=4:et: diff --git a/shell/modules/clidisplay.py b/shell/modules/clidisplay.py deleted file mode 100644 index 86b0bfb58a..0000000000 --- a/shell/modules/clidisplay.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -from singletonmixin import Singleton -from userprefs import Options, UserPrefs -from vars import Vars -from utils import * -from msg import * -from term import TerminalController - -class CliDisplay(Singleton): - """ - Display output for various syntax elements. - """ - def __init__(self): - self.no_pretty = False - def set_no_pretty(self): - self.no_pretty = True - def reset_no_pretty(self): - self.no_pretty = False - def colorstring(self, clrnum, s): - if self.no_pretty: - return s - else: - return termctrl.render("${%s}%s${NORMAL}" % \ - (user_prefs.colorscheme[clrnum].upper(), s)) - def keyword(self, kw): - s = kw - if "uppercase" in user_prefs.output: - s = s.upper() - if "color" in user_prefs.output: - s = self.colorstring(0, s) - return s - def otherword(self, n, s): - if "color" in user_prefs.output: - return self.colorstring(n, s) - else: - return s - def id(self, s): - return self.otherword(1, s) - def attr_name(self, s): - return self.otherword(2, s) - def attr_value(self, s): - return self.otherword(3, s) - def rscref(self, s): - return self.otherword(4, s) - def idref(self, s): - return self.otherword(4, s) - def score(self, s): - return self.otherword(5, s) - -user_prefs = UserPrefs.getInstance() -vars = Vars.getInstance() -termctrl = TerminalController.getInstance() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/cliformat.py b/shell/modules/cliformat.py deleted file mode 100644 index 66f926c03b..0000000000 --- a/shell/modules/cliformat.py +++ /dev/null @@ -1,304 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your 
option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -from vars import Vars -from clidisplay import CliDisplay -from xmlutil import * -from utils import * -from msg import * - -# -# CLI format generation utilities (from XML) -# -def cli_format(pl,format): - if format > 0: - return ' \\\n\t'.join(pl) - else: - return ' '.join(pl) -def cli_operations(node,format = 1): - l = [] - node_id = node.getAttribute("id") - s = '' - if node_id: - s = '$id="%s"' % node_id - idref = node.getAttribute("id-ref") - if idref: - s = '%s $id-ref="%s"' % (s,idref) - if s: - l.append("%s %s" % (cli_display.keyword("operations"),s)) - for c in node.childNodes: - if is_element(c) and c.tagName == "op": - l.append(cli_op(c)) - return cli_format(l,format) -def nvpair_format(n,v): - return v == None and cli_display.attr_name(n) \ - or '%s="%s"'%(cli_display.attr_name(n),cli_display.attr_value(v)) -def cli_pairs(pl): - 'Return a string of name="value" pairs (passed in a list of pairs).' - l = [] - for n,v in pl: - l.append(nvpair_format(n,v)) - return ' '.join(l) - -def nvpairs2list(node, add_id = False): - ''' - Convert nvpairs to a list of pairs. - The id attribute is normally skipped, since they tend to be - long and therefore obscure the relevant content. For some - elements, however, they are included (e.g. properties). 
- ''' - pl = [] - # if there's id-ref, there can be then _only_ id-ref - value = node.getAttribute("id-ref") - if value: - pl.append(["$id-ref",value]) - return pl - if add_id or \ - (not node.childNodes and len(node.attributes) == 1): - value = node.getAttribute("id") - if value: - pl.append(["$id",value]) - for c in node.childNodes: - if not is_element(c): - continue - if c.tagName == "attributes": - pl = nvpairs2list(c) - elif c.tagName != "nvpair": - node_debug("expected nvpair got", c) - continue - name = c.getAttribute("name") - if "value" in c.attributes.keys(): - value = c.getAttribute("value") - else: - value = None - pl.append([name,value]) - return pl - -def op2list(node): - pl = [] - action = "" - for name in node.attributes.keys(): - if name == "name": - action = node.getAttribute(name) - elif name != "id": # skip the id - pl.append([name,node.getAttribute(name)]) - if not action: - common_err("op is invalid (no name)") - return action,pl -def op_instattr(node): - pl = [] - for c in node.childNodes: - if not is_element(c): - continue - if c.tagName != "instance_attributes": - common_err("only instance_attributes are supported in operations") - else: - pl += nvpairs2list(c) - return pl -def cli_op(node): - action,pl = op2list(node) - if not action: - return "" - pl += op_instattr(node) - return "%s %s %s" % (cli_display.keyword("op"),action,cli_pairs(pl)) -def date_exp2cli(node): - l = [] - operation = node.getAttribute("operation") - l.append(cli_display.keyword("date")) - l.append(cli_display.keyword(operation)) - if operation in olist(vars.simple_date_ops): - value = node.getAttribute(keyword_cmp(operation,'lt') and "end" or "start") - l.append('"%s"' % cli_display.attr_value(value)) - else: - if operation == 'in_range': - for name in vars.in_range_attrs: - v = node.getAttribute(name) - if v: - l.append(nvpair_format(name,v)) - for c in node.childNodes: - if is_element(c) and c.tagName in ("duration","date_spec"): - pl = [] - for name in 
c.attributes.keys(): - if name != "id": - pl.append([name,c.getAttribute(name)]) - l.append(cli_pairs(pl)) - return ' '.join(l) -def binary_op_format(op): - l = op.split(':') - if len(l) == 2: - return "%s:%s" % (l[0], cli_display.keyword(l[1])) - else: - return cli_display.keyword(op) -def exp2cli(node): - operation = node.getAttribute("operation") - type = node.getAttribute("type") - if type: - operation = "%s:%s" % (type, operation) - attribute = node.getAttribute("attribute") - value = node.getAttribute("value") - if not value: - return "%s %s" % (binary_op_format(operation),attribute) - else: - return "%s %s %s" % (attribute,binary_op_format(operation),value) -def get_score(node): - score = node.getAttribute("score") - if not score: - score = node.getAttribute("score-attribute") - else: - if score.find("INFINITY") >= 0: - score = score.replace("INFINITY","inf") - return score + ":" -def cli_rule(node): - s = [] - node_id = node.getAttribute("id") - if node_id: - s.append('$id="%s"' % node_id) - else: - idref = node.getAttribute("id-ref") - if idref: - return '$id-ref="%s"' % idref - rsc_role = node.getAttribute("role") - if rsc_role: - s.append('$role="%s"' % rsc_role) - s.append(cli_display.score(get_score(node))) - bool_op = node.getAttribute("boolean-op") - if not bool_op: - bool_op = "and" - exp = [] - for c in node.childNodes: - if not is_element(c): - continue - if c.tagName == "date_expression": - exp.append(date_exp2cli(c)) - elif c.tagName == "expression": - exp.append(exp2cli(c)) - expression = (" %s "%cli_display.keyword(bool_op)).join(exp) - return "%s %s" % (' '.join(s),expression) -def cli_add_description(node,l): - desc = node.getAttribute("description") - if desc: - l.append(nvpair_format("description",desc)) - -def mkrscrole(node,n): - rsc = cli_display.rscref(node.getAttribute(n)) - rsc_role = node.getAttribute(n + "-role") - rsc_instance = node.getAttribute(n + "-instance") - if rsc_role: - return "%s:%s"%(rsc,rsc_role) - elif rsc_instance: 
- return "%s:%s"%(rsc,rsc_instance) - else: - return rsc -def mkrscaction(node,n): - rsc = cli_display.rscref(node.getAttribute(n)) - rsc_action = node.getAttribute(n + "-action") - rsc_instance = node.getAttribute(n + "-instance") - if rsc_action: - return "%s:%s"%(rsc,rsc_action) - elif rsc_instance: - return "%s:%s"%(rsc,rsc_instance) - else: - return rsc -def rsc_set_constraint(node,obj_type): - col = [] - cnt = 0 - for n in node.getElementsByTagName("resource_set"): - sequential = True - if n.getAttribute("sequential") == "false": - sequential = False - if not sequential: - col.append("(") - role = n.getAttribute("role") - action = n.getAttribute("action") - for r in n.getElementsByTagName("resource_ref"): - rsc = cli_display.rscref(r.getAttribute("id")) - q = (obj_type == "colocation") and role or action - col.append(q and "%s:%s"%(rsc,q) or rsc) - cnt += 1 - if not sequential: - col.append(")") - if cnt <= 2: # a degenerate thingie - col.insert(0,"_rsc_set_") - return col -def two_rsc_constraint(node,obj_type): - col = [] - if obj_type == "colocation": - col.append(mkrscrole(node,"rsc")) - col.append(mkrscrole(node,"with-rsc")) - else: - col.append(mkrscaction(node,"first")) - col.append(mkrscaction(node,"then")) - return col - -# this pre (or post)-processing is oversimplified -# but it will do for now -# (a shortcut with more than one placeholder in a single expansion -# cannot have more than one expansion) -# ("...'@@'...'@@'...","...") <- that won't work -def build_exp_re(exp_l): - return [x.replace(r'@@',r'([a-zA-Z_][a-zA-Z0-9_.-]*)') for x in exp_l] -def match_acl_shortcut(xpath,re_l): - import re - for i in range(len(re_l)): - s = ''.join(re_l[0:i+1]) - r = re.match(s + r"$",xpath) - if r: - return (True,r.groups()[0:i+1]) - return (False,None) -def find_acl_shortcut(xpath): - for shortcut in vars.acl_shortcuts: - l = build_exp_re(vars.acl_shortcuts[shortcut]) - (ec,spec_l) = match_acl_shortcut(xpath,l) - if ec: - return (shortcut,spec_l) - return 
(None,None) -def acl_spec_format(xml_spec,v): - key_f = cli_display.keyword(vars.acl_spec_map[xml_spec]) - if xml_spec == "xpath": - (shortcut,spec_l) = find_acl_shortcut(v) - if shortcut: - key_f = cli_display.keyword(shortcut) - v_f = ':'.join([cli_display.attr_value(x) for x in spec_l]) - else: - v_f = '"%s"' % cli_display.attr_value(v) - elif xml_spec == "ref": - v_f = '%s' % cli_display.attr_value(v) - else: # tag and attribute - v_f = '%s' % cli_display.attr_value(v) - return v_f and '%s:%s' % (key_f,v_f) or key_f -def cli_acl_rule(node,format = 1): - l = [] - acl_rule_name = node.tagName - l.append(cli_display.keyword(acl_rule_name)) - for xml_spec in vars.acl_spec_map: - v = node.getAttribute(xml_spec) - if v: - l.append(acl_spec_format(xml_spec,v)) - return ' '.join(l) -def cli_acl_roleref(node,format = 1): - l = [] - l.append(cli_display.keyword("role")) - l.append(":") - l.append(cli_display.attr_value(node.getAttribute("id"))) - return ''.join(l) -# -################################################################ - -vars = Vars.getInstance() -cli_display = CliDisplay.getInstance() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/completion.py b/shell/modules/completion.py deleted file mode 100644 index c8b13371ba..0000000000 --- a/shell/modules/completion.py +++ /dev/null @@ -1,478 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import os -import time -import copy -import readline - -from cibconfig import CibFactory -from cibstatus import CibStatus -from levels import Levels -from ra import * -from vars import Vars -from utils import * -from xmlutil import * - -class CompletionHelp(object): - ''' - Print some help on whatever last word in the line. - ''' - timeout = 60 # don't print again and again - def __init__(self): - self.laststamp = 0 - self.lastitem = '' - def help(self,f,*args): - words = readline.get_line_buffer().split() - if not words: - return - key = words[-1] - if key.endswith('='): - key = key[0:-1] - if self.lastitem == key and \ - time.time() - self.laststamp < self.timeout: - return - help_s = f(key,*args) - if help_s: - print "\n%s" % help_s - print "%s%s" % (vars.prompt,readline.get_line_buffer()), - self.laststamp = time.time() - self.lastitem = key - -def attr_cmds(idx,delimiter = False): - if delimiter: - return ' ' - return ["delete","set","show"] -def nodes_list(idx,delimiter = False): - if delimiter: - return ' ' - return listnodes() -def shadows_list(idx,delimiter = False): - if delimiter: - return ' ' - return listshadows() -def templates_list(idx,delimiter = False): - if delimiter: - return ' ' - return listtemplates() -def config_list(idx,delimiter = False): - if delimiter: - return ' ' - return listconfigs() -def config_list_method(idx,delimiter = False): - if delimiter: - return ' ' - return listconfigs() + ["replace","update"] -def shadows_live_list(idx,delimiter = False): - if delimiter: - return ' ' - return listshadows() + ['live'] -def rsc_list(idx,delimiter = False): - if delimiter: - return ' ' - doc = resources_xml() - if not doc: - return [] - nodes = get_interesting_nodes(doc,[]) - return [x.getAttribute("id") for x in nodes if 
is_resource(x)] -def null_list(idx,delimiter = False): - if delimiter: - return ' ' - return [] -def loop(idx,delimiter = False): - "just a marker in a list" - pass -def id_xml_list(idx,delimiter = False): - if delimiter: - return ' ' - return cib_factory.id_list() + ['xml','changed'] -def id_list(idx,delimiter = False): - if delimiter: - return ' ' - return cib_factory.id_list() -def f_prim_id_list(idx,delimiter = False): - if delimiter: - return ' ' - return cib_factory.f_prim_id_list() -def f_children_id_list(idx,delimiter = False): - if delimiter: - return ' ' - return cib_factory.f_children_id_list() -def rsc_id_list(idx,delimiter = False): - if delimiter: - return ' ' - return cib_factory.rsc_id_list() -def node_id_list(idx,delimiter = False): - if delimiter: - return ' ' - return cib_factory.node_id_list() -def node_attr_keyw_list(idx,delimiter = False): - if delimiter: - return ' ' - return vars.node_attributes_keyw -def status_node_list(idx,delimiter = False): - if delimiter: - return ' ' - return cib_status.status_node_list() -def status_rsc_list(idx,delimiter = False): - if delimiter: - return ' ' - return cib_status.status_rsc_list() -def node_states_list(idx,delimiter = False): - if delimiter: - return ' ' - return vars.node_states -def ra_operations_list(idx,delimiter = False): - if delimiter: - return ' ' - return vars.ra_operations -def lrm_exit_codes_list(idx,delimiter = False): - if delimiter: - return ' ' - return vars.lrm_exit_codes.keys() -def lrm_status_codes_list(idx,delimiter = False): - if delimiter: - return ' ' - return vars.lrm_status_codes.keys() -def skills_list(idx,delimiter = False): - if delimiter: - return ' ' - return user_prefs.skill_levels.keys() -def ra_classes_list(idx,delimiter = False): - if delimiter: - return ':' - return ra_classes() - -# -# completion for primitives including help for parameters -# (help also available for properties) -# -def get_primitive_type(words): - try: - idx = words.index("primitive") + 2 - 
type_word = words[idx] - except: type_word = '' - return type_word -def ra_type_list(toks,idx,delimiter): - if idx == 2: - if toks[0] == "ocf": - dchar = ':' - l = ra_providers_all() - else: - dchar = ' ' - l = ra_types(toks[0]) - elif idx == 3: - dchar = ' ' - if toks[0] == "ocf": - l = ra_types(toks[0],toks[1]) - else: - l = ra_types(toks[0]) - if delimiter: - return dchar - return l -def prim_meta_attr_list(idx,delimiter = False): - if delimiter: - return '=' - return vars.rsc_meta_attributes -def op_attr_list(idx,delimiter = False): - if delimiter: - return '=' - return vars.op_attributes -def operations_list(): - return vars.op_cli_names -def prim_complete_meta(ra,delimiter = False): - if delimiter: - return '=' - return prim_meta_attr_list(0,delimiter) -def prim_complete_op(ra,delimiter): - words = split_buffer() - if (readline.get_line_buffer()[-1] == ' ' and words[-1] == "op") \ - or (readline.get_line_buffer()[-1] != ' ' and words[-2] == "op"): - dchar = ' ' - l = operations_list() - else: - if readline.get_line_buffer()[-1] == '=': - dchar = ' ' - l = [] - else: - dchar = '=' - l = op_attr_list() - if delimiter: - return dchar - return l -def prim_complete_params(ra,delimiter): - if readline.get_line_buffer()[-1] == '=': - dchar = ' ' - l = [] - else: - dchar = '=' - l = ra.completion_params() - if delimiter: - return dchar - return l -def prim_params_info(key,ra): - return ra.meta_parameter(key) -def meta_attr_info(key,ra): - pass -def op_attr_info(key,ra): - pass -def get_lastkeyw(words,keyw): - revwords = copy.copy(words) - revwords.reverse() - for w in revwords: - if w in keyw: - return w -def primitive_complete_complex(idx,delimiter = False): - ''' - This completer depends on the content of the line, i.e. on - previous tokens, in particular on the type of the RA. 
- ''' - completers_set = { - "params": (prim_complete_params, prim_params_info), - "meta": (prim_complete_meta, meta_attr_info), - "op": (prim_complete_op, op_attr_info), - } - # manage the resource type - words = readline.get_line_buffer().split() - type_word = get_primitive_type(words) - toks = type_word.split(':') - if toks[0] != "ocf": - idx += 1 - if idx in (2,3): - return ra_type_list(toks,idx,delimiter) - # create an ra object - ra = None - ra_class,provider,rsc_type = disambiguate_ra_type(type_word) - if ra_type_validate(type_word,ra_class,provider,rsc_type): - ra = RAInfo(ra_class,rsc_type,provider) - keywords = completers_set.keys() - if idx == 4: - if delimiter: - return ' ' - return keywords - lastkeyw = get_lastkeyw(words,keywords) - if '=' in words[-1] and readline.get_line_buffer()[-1] != ' ': - if not delimiter and lastkeyw and \ - readline.get_line_buffer()[-1] == '=' and len(words[-1]) > 1: - compl_help.help(completers_set[lastkeyw][1],ra) - if delimiter: - return ' ' - return ['*'] - else: - if lastkeyw: - return completers_set[lastkeyw][0](ra,delimiter) -def property_complete(idx,delimiter = False): - ''' - This completer depends on the content of the line, i.e. on - previous tokens. 
- ''' - ra = get_properties_meta() - words = readline.get_line_buffer().split() - if '=' in words[-1] and readline.get_line_buffer()[-1] != ' ': - if not delimiter and \ - readline.get_line_buffer()[-1] == '=' and len(words[-1]) > 1: - compl_help.help(prim_params_info,ra) - if delimiter: - return ' ' - return ['*'] - else: - return prim_complete_params(ra,delimiter) - -# -# core completer stuff -# -def lookup_dynamic(fun_list,idx,f_idx,words): - if not fun_list: - return [] - if fun_list[f_idx] == loop: - f_idx -= 1 - f = fun_list[f_idx] - w = words[0] - wordlist = f(idx) - delimiter = f(idx,1) - if len(wordlist) == 1 and wordlist[0] == '*': - return lookup_dynamic(fun_list,idx+1,f_idx+1,words[1:]) - elif len(words) == 1: - return [x+delimiter for x in wordlist if x.startswith(w)] - return lookup_dynamic(fun_list,idx+1,f_idx+1,words[1:]) -def lookup_words(ctab,words): - if not ctab: - return [] - if type(ctab) == type(()): - return lookup_dynamic(ctab,0,0,words) - if len(words) == 1: - return [x+' ' for x in ctab if x.startswith(words[0])] - elif words[0] in ctab.keys(): - return lookup_words(ctab[words[0]],words[1:]) - return [] -def split_buffer(): - p = readline.get_line_buffer() - p = p.replace(':',' ').replace('=',' ') - return p.split() - -def completer(txt,state): - levels = Levels.getInstance() - words = split_buffer() - if readline.get_begidx() == readline.get_endidx(): - words.append('') - matched = lookup_words(levels.completion_tab,words) - matched.append(None) - return matched[state] -def setup_readline(): - readline.set_history_length(100) - readline.parse_and_bind("tab: complete") - readline.set_completer(completer) - readline.set_completer_delims(\ - readline.get_completer_delims().replace('-','').replace('/','').replace('=','')) - try: readline.read_history_file(vars.hist_file) - except: pass - -# -# a dict of completer functions -# (feel free to add more completers) -# -completer_lists = { - "options" : { - "skill-level" : (skills_list,), - 
"editor" : None, - "pager" : None, - "user" : None, - "output" : None, - "colorscheme" : None, - "check-frequency" : None, - "check-mode" : None, - "sort-elements" : None, - "save" : None, - "show" : None, - }, - "cib" : { - "new" : None, - "delete" : (shadows_list,), - "reset" : (shadows_list,), - "commit" : (shadows_list,), - "use" : (shadows_live_list,), - "diff" : None, - "list" : None, - "import" : None, - "cibstatus" : None, - }, - "template" : { - "new" : (null_list,templates_list,loop), - "load" : (config_list,), - "edit" : (config_list,), - "delete" : (config_list,), - "show" : (config_list,), - "apply" : (config_list_method,config_list), - "list" : None, - }, - "resource" : { - "status" : (rsc_list,), - "start" : (rsc_list,), - "stop" : (rsc_list,), - "restart" : (rsc_list,), - "promote" : (rsc_list,), - "demote" : (rsc_list,), - "manage" : (rsc_list,), - "unmanage" : (rsc_list,), - "migrate" : (rsc_list,nodes_list), - "unmigrate" : (rsc_list,), - "param" : (rsc_list,attr_cmds), - "meta" : (rsc_list,attr_cmds), - "utilization" : (rsc_list,attr_cmds), - "failcount" : (rsc_list,attr_cmds,nodes_list), - "cleanup" : (rsc_list,nodes_list), - "refresh" : (nodes_list,), - "reprobe" : (nodes_list,), - }, - "node" : { - "status" : (nodes_list,), - "show" : (nodes_list,), - "standby" : (nodes_list,), - "online" : (nodes_list,), - "fence" : (nodes_list,), - "delete" : (nodes_list,), - "clearstate" : (nodes_list,), - "attribute" : (nodes_list,attr_cmds), - "utilization" : (nodes_list,attr_cmds), - "status-attr" : (nodes_list,attr_cmds), - }, - "ra" : { - "classes" : None, - "list" : None, - "providers" : None, - "meta" : None, - }, - "cibstatus" : { - "show" : None, - "save" : None, - "load" : None, - "origin" : None, - "node" : (status_node_list,node_states_list), - "op" : (ra_operations_list,status_rsc_list,lrm_exit_codes_list,lrm_status_codes_list,status_node_list), - "run" : None, - "simulate" : None, - "quorum" : None, - }, - "configure" : { - "erase" : None, - 
"verify" : None, - "refresh" : None, - "ptest" : None, - "commit" : None, - "upgrade" : None, - "show" : (id_xml_list,id_list,loop), - "edit" : (id_xml_list,id_list,loop), - "filter" : (null_list,id_xml_list,id_list,loop), - "delete" : (id_list,loop), - "default-timeouts" : (id_list,loop), - "rename" : (id_list,id_list), - "save" : None, - "load" : None, - "node" : (node_id_list,node_attr_keyw_list), - "primitive" : (null_list,ra_classes_list,primitive_complete_complex,loop), - "group" : (null_list,f_prim_id_list,loop), - "clone" : (null_list,f_children_id_list), - "ms" : (null_list,f_children_id_list), - "location" : (null_list,rsc_id_list), - "colocation" : (null_list,null_list,rsc_id_list,loop), - "order" : (null_list,null_list,rsc_id_list,loop), - "property" : (property_complete,loop), - "rsc_defaults" : (prim_complete_meta,loop), - "op_defaults" : (op_attr_list,loop), - "xml" : None, - "monitor" : None, - "ra" : None, - "cib" : None, - "cibstatus" : None, - "template" : None, - "_test" : None, - "_regtest" : None, - "_objects" : None, - }, -} -def get_completer_list(level,cmd): - 'Return a list of completer functions.' - try: return completer_lists[level][cmd] - except: return None - -compl_help = CompletionHelp() -user_prefs = UserPrefs.getInstance() -vars = Vars.getInstance() -cib_status = CibStatus.getInstance() -cib_factory = CibFactory.getInstance() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/help.py.in b/shell/modules/help.py.in deleted file mode 100644 index 685c0acf53..0000000000 --- a/shell/modules/help.py.in +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. 
-# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import os -import re -from cache import WCache -from utils import odict, page_string -from vars import gethomedir -from msg import * - -# -# help or make users feel less lonely -# -def add_shorthelp(topic,shorthelp,topic_help): - ''' - Join topics ("%s,%s") if they share the same short - description. - ''' - for i in range(len(topic_help)): - if topic_help[i][1] == shorthelp: - topic_help[i][0] = "%s,%s" % (topic_help[i][0], topic) - return - topic_help.append([topic, shorthelp]) -def dump_short_help(help_tab): - topic_help = [] - for topic in help_tab: - if topic == '.': - continue - # with odict, for whatever reason, python parses differently: - # help_tab["..."] = ("...","...") and - # help_tab["..."] = ("...",""" - # ...""") - # a parser bug? 
- if type(help_tab[topic][0]) == type(()): - shorthelp = help_tab[topic][0][0] - else: - shorthelp = help_tab[topic][0] - add_shorthelp(topic,shorthelp,topic_help) - for t,d in topic_help: - print "\t%-16s %s" % (t,d) -def overview(help_tab): - print "" - print help_tab['.'][1] - print "" - print "Available commands:" - print "" - dump_short_help(help_tab) - print "" -def topic_help(help_tab,topic): - if topic not in help_tab: - print "There is no help for topic %s" % topic - return - if type(help_tab[topic][0]) == type(()): - shorthelp = help_tab[topic][0][0] - longhelp = help_tab[topic][0][1] - else: - shorthelp = help_tab[topic][0] - longhelp = help_tab[topic][1] - if longhelp: - page_string(longhelp) - else: - print shorthelp -def cmd_help(help_tab,topic = ''): - "help!" - # help_tab is an odict (ordered dictionary): - # help_tab[topic] = (short_help,long_help) - # topic '.' is a special entry for the top level - if not help_tab: - common_info("sorry, help not available") - return - if not topic: - overview(help_tab) - else: - topic_help(help_tab,topic) - -def is_level(s): - return len(s.split("_")) == 2 - -def help_short(s): - r = re.search("help_[^,]+,(.*)\]\]", s) - return r and r.group(1) or '' - -class HelpSystem(object): - ''' - The help system. All help is in the following form in the - manual: - [[cmdhelp__,]] - === ... - Long help text. - ... 
- [[cmdhelp__,]] - - Help for the level itself is like this: - - [[cmdhelp_,]] - ''' - help_text_file = "@datadir@/@PACKAGE@/crm.8.txt" - index_file = os.path.join(gethomedir(),".crm_help_index") - def __init__(self): - self.key_pos = {} - self.leveld = {} - self.no_help_file = False # don't print repeatedly messages - self.bad_index = False # don't print repeatedly warnings for bad index - def open_file(self,name,mode): - try: - f = open(name,mode) - return f - except IOError,msg: - common_err("%s open: %s"%(name,msg)) - common_err("extensive help system is not available") - self.no_help_file = True - return None - def drop_index(self): - common_info("removing index") - os.unlink(self.index_file) - self.key_pos = {} - self.leveld = {} - self.bad_index = True - def mk_index(self): - ''' - Prepare an index file, sorted by topic, with seek positions - Do we need a hash on content? - ''' - if self.no_help_file: - return False - crm_help_v = os.getenv("CRM_HELP_FILE") - if crm_help_v: - self.help_text_file = crm_help_v - help_f = self.open_file(self.help_text_file,"r") - if not help_f: - return False - idx_f = self.open_file(self.index_file,"w") - if not idx_f: - return False - common_debug("building help index") - key_pos = odict() - while 1: - pos = help_f.tell() - s = help_f.readline() - if not s: - break - if s.startswith("[["): - r = re.search(r'..([^,]+),', s) - if r: - key_pos[r.group(1)] = pos - help_f.close() - for key in key_pos: - print >>idx_f, '%s %d' % (key,key_pos[key]) - idx_f.close() - return True - def is_index_old(self): - try: - t_idx = os.path.getmtime(self.index_file) - except: - return True - try: - t_help = os.path.getmtime(self.help_text_file) - except: - return True - return t_help > t_idx - def load_index(self): - if self.is_index_old(): - self.mk_index() - self.key_pos = {} - self.leveld = {} - idx_f = self.open_file(self.index_file,"r") - if not idx_f: - return False - cur_lvl = '' - for s in idx_f: - a = s.split() - if len(a) != 2: - if 
not self.bad_index: - common_err("index file corrupt") - idx_f.close() - self.drop_index() - return self.load_index() # this runs only once - return False - key = a[0] - fpos = long(a[1]) - if key.startswith("cmdhelp_"): - if is_level(key): - if key != cur_lvl: - cur_lvl = key - self.leveld[cur_lvl] = [] - else: - self.leveld[cur_lvl].append(key) - self.key_pos[key] = fpos - idx_f.close() - return True - def __filter(self,s): - if '<<' in s: - return re.sub(r'<<[^,]+,(.+)>>', r'\1', s) - else: - return s - def __load_help_one(self,key,skip = 2): - longhelp = '' - self.help_f.seek(self.key_pos[key]) - shorthelp = help_short(self.help_f.readline()) - for i in range(skip-1): - self.help_f.readline() - l = [] - for s in self.help_f: - if s.startswith("[[") or s.startswith("="): - break - l.append(self.__filter(s)) - if l and l[-1] == '\n': # drop the last line of empty - l.pop() - if l: - longhelp = ''.join(l) - if not shorthelp or not longhelp: - if not self.bad_index: - common_warn("help topic %s not found" % key) - self.drop_index() - return shorthelp,longhelp - def cmdhelp(self,s): - if not self.key_pos and not self.load_index(): - return None,None - if not s in self.key_pos: - if not self.bad_index: - common_warn("help topic %s not found" % s) - self.drop_index() - return None,None - return self.__load_help_one(s) - def __load_level(self,lvl): - ''' - For the given level, create a help table. 
- ''' - if wcache.is_cached("lvl_help_tab_%s" % lvl): - return wcache.retrieve("lvl_help_tab_%s" % lvl) - if not self.key_pos and not self.load_index(): - return None - self.help_f = self.open_file(self.help_text_file,"r") - if not self.help_f: - return None - lvl_s = "cmdhelp_%s" % lvl - if not lvl_s in self.leveld: - if not self.bad_index: - common_warn("help table for level %s not found" % lvl) - self.drop_index() - return None - common_debug("loading help table for level %s" % lvl) - help_tab = odict() - help_tab["."] = self.__load_help_one(lvl_s) - try: - for key in self.leveld[lvl_s]: - cmd = key[len(lvl_s)+1:] - help_tab[cmd] = self.__load_help_one(key) - except: pass - self.help_f.close() - help_tab["quit"] = ("exit the program", "") - help_tab["help"] = ("show help", "") - help_tab["end"] = ("go back one level", "") - return help_tab - def load_level(self,lvl): - help_tab = self.__load_level(lvl) - if self.bad_index: # try again - help_tab = self.__load_level(lvl) - return wcache.store("lvl_help_tab_%s" % lvl, help_tab) - -wcache = WCache.getInstance() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/idmgmt.py b/shell/modules/idmgmt.py deleted file mode 100644 index e8afe2f126..0000000000 --- a/shell/modules/idmgmt.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -from vars import Vars -from xmlutil import * -from msg import * - -class IdMgmt(Singleton): - ''' - Make sure that ids are unique. - ''' - def __init__(self): - self._id_store = {} - self.ok = True # error var - def new(self,node,pfx): - ''' - Create a unique id for the xml node. - ''' - name = node.getAttribute("name") - if node.tagName == "nvpair": - node_id = "%s-%s" % (pfx,name) - elif node.tagName == "op": - interval = node.getAttribute("interval") - if interval: - node_id = "%s-%s-%s" % (pfx,name,interval) - else: - node_id = "%s-%s" % (pfx,name) - else: - try: - subpfx = vars.subpfx_list[node.tagName] - except: subpfx = '' - if subpfx: - node_id = "%s-%s" % (pfx,subpfx) - else: - node_id = "%s" % pfx - if self.is_used(node_id): - for cnt in range(99): # shouldn't really get here - try_id = "%s-%d" % (node_id,cnt) - if not self.is_used(try_id): - node_id = try_id - break - self.save(node_id) - return node_id - def check_node(self,node,lvl): - node_id = node.getAttribute("id") - if not node_id: - return - if self.id_in_use(node_id): - common_error("id_store: id %s is in use" % node_id) - self.ok = False - return - def _store_node(self,node,lvl): - self.save(node.getAttribute("id")) - def _drop_node(self,node,lvl): - self.remove(node.getAttribute("id")) - def check_xml(self,node): - self.ok = True - xmltraverse_thin(node,self.check_node) - return self.ok - def store_xml(self,node): - if not self.check_xml(node): - return False - xmltraverse_thin(node,self._store_node) - return True - def remove_xml(self,node): - xmltraverse_thin(node,self._drop_node) - def replace_xml(self,oldnode,newnode): - self.remove_xml(oldnode) - if not self.store_xml(newnode): - self.store_xml(oldnode) - return False - return True - def is_used(self,node_id): - return 
node_id in self._id_store - def id_in_use(self,obj_id): - if self.is_used(obj_id): - id_used_err(obj_id) - return True - return False - def save(self,node_id): - if not node_id: return - common_debug("id_store: saved %s" % node_id) - self._id_store[node_id] = 1 - def rename(self,old_id,new_id): - if not old_id or not new_id: return - if not self.is_used(old_id): return - if self.is_used(new_id): return - self.remove(old_id) - self.save(new_id) - def remove(self,node_id): - if not node_id: return - try: - del self._id_store[node_id] - common_debug("id_store: removed %s" % node_id) - except KeyError: - pass - def clear(self): - self._id_store = {} - -vars = Vars.getInstance() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/levels.py b/shell/modules/levels.py deleted file mode 100644 index 2aac00d7d9..0000000000 --- a/shell/modules/levels.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import sys -import re -from singletonmixin import Singleton - -def topics_dict(help_tab): - if not help_tab: - return {} - topics = {} - for topic in help_tab: - if topic != '.': - topics[topic] = None - return topics - -def mk_completion_tab(obj,ctab): - from completion import get_completer_list - cmd_table = obj.cmd_table - for key,value in cmd_table.items(): - if key.startswith("_"): - continue - if type(value) == type(object): - ctab[key] = {} - elif key == "help": - ctab[key] = topics_dict(obj.help_table) - else: - ctab[key] = get_completer_list(obj.lvl_name,key) - -class Levels(Singleton): - ''' - Keep track of levels and prompts. - ''' - def __init__(self,start_level): - self._marker = 0 - self._in_transit = False - self.level_stack = [] - self.comp_stack = [] - self.current_level = start_level() - self.parse_root = self.current_level.cmd_table - self.prompts = [] - self.completion_tab = {} - mk_completion_tab(self.current_level,self.completion_tab) - def getprompt(self): - return ' '.join(self.prompts) - def mark(self): - self._marker = len(self.level_stack) - self._in_transit = False - def release(self): - while len(self.level_stack) > self._marker: - self.droplevel() - def new_level(self,level_obj,token): - self.level_stack.append(self.current_level) - self.comp_stack.append(self.completion_tab) - self.prompts.append(token) - self.current_level = level_obj() - self.parse_root = self.current_level.cmd_table - try: - if not self.completion_tab[token]: - mk_completion_tab(self.current_level,self.completion_tab[token]) - self.completion_tab = self.completion_tab[token] - except: - pass - self._in_transit = True - def previous(self): - if self.level_stack: - return self.level_stack[-1] - def droplevel(self): - if self.level_stack: - 
self.current_level.end_game(self._in_transit) - self.current_level = self.level_stack.pop() - self.completion_tab = self.comp_stack.pop() - self.parse_root = self.current_level.cmd_table - self.prompts.pop() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/main.py b/shell/modules/main.py deleted file mode 100644 index 7c25c336e0..0000000000 --- a/shell/modules/main.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import sys -import shlex -import getopt - -from utils import * -from userprefs import Options, UserPrefs -from vars import Vars -from ui import cmd_exit -from msg import * -from levels import Levels - -def load_rc(rcfile): - try: f = open(rcfile) - except: return - save_stdin = sys.stdin - sys.stdin = f - while True: - inp = multi_input() - if inp == None: - break - try: parse_line(levels,shlex.split(inp)) - except ValueError, msg: - common_err(msg) - f.close() - sys.stdin = save_stdin - -def multi_input(prompt = ''): - """ - Get input from user - Allow multiple lines using a continuation character - """ - line = [] - while True: - try: - text = raw_input(prompt) - except EOFError: - return None - err_buf.incr_lineno() - if options.regression_tests: - print ".INP:",text - sys.stdout.flush() - sys.stderr.flush() - stripped 
= text.strip() - if stripped.endswith('\\'): - stripped = stripped.rstrip('\\') - line.append(stripped) - if prompt: - prompt = '> ' - else: - line.append(stripped) - break - return ''.join(line) - -def check_args(args,argsdim): - if not argsdim: return True - if len(argsdim) == 1: - minargs = argsdim[0] - return len(args) >= minargs - else: - minargs,maxargs = argsdim - return len(args) >= minargs and len(args) <= maxargs - -# -# Note on parsing -# -# Parsing tables are python dictionaries. -# -# Keywords are used as keys and the corresponding values are -# lists (actually tuples, since they should be read-only) or -# classes. In the former case, the keyword is a terminal and -# in the latter, a new object for the class is created. The class -# must have the cmd_table variable. -# -# The list has the following content: -# -# function: a function to handle this command -# numargs_list: number of minimum/maximum arguments; for example, -# (0,1) means one optional argument, (1,1) one required; if the -# list is empty then the function will parse arguments itself -# required minimum skill level: operator, administrator, expert -# (encoded as a small integer from 0 to 2) -# can the command cause transition to start (0 or 1) -# used to check whether to wait4dc to end the transition -# - -def show_usage(cmd): - p = None - try: p = cmd.__doc__ - except: pass - if p: - print >> sys.stderr, p - else: - syntax_err(cmd.__name__) - -def parse_line(lvl,s): - if not s: return True - if s[0].startswith('#'): return True - lvl.mark() - pt = lvl.parse_root - cmd = None - i = 0 - for i in range(len(s)): - token = s[i] - if token in pt: - if type(pt[token]) == type(object): - # on entering new level we need to set the - # interactive option _before_ creating the level - if not options.interactive and i == len(s)-1: - set_interactive() - lvl.new_level(pt[token],token) - pt = lvl.parse_root # move to the next level - else: - cmd = pt[token] # terminal symbol - break # and stop parsing 
- else: - syntax_err(s[i:]) - lvl.release() - return False - if cmd: # found a terminal symbol - if not user_prefs.check_skill_level(cmd[2]): - lvl.release() - skill_err(s[i]) - return False - args = s[i+1:] - if not check_args(args,cmd[1]): - lvl.release() - show_usage(cmd[0]) - return False - args = s[i:] - d = lambda: cmd[0](*args) - rv = d() # execute the command - # should we wait till the command takes effect? - if user_prefs.get_wait() and rv != False and cmd[3] == 1: - if not wait4dc(token, not options.batch): - rv = False - lvl.release() - return rv != False - return True - -def prereqs(): - proglist = "which cibadmin crm_resource crm_attribute crm_mon" - for prog in proglist.split(): - if not is_program(prog): - print >> sys.stderr, "%s not available, check your installation"%prog - sys.exit(1) - -# three modes: interactive (no args supplied), batch (input from -# a file), half-interactive (args supplied, but not batch) -def cib_prompt(): - return vars.cib_in_use or "live" - -def usage(rc): - f = sys.stderr - if rc == 0: - f = sys.stdout - print >> f, """ -usage: - crm [-D display_type] [-f file] [-hF] [args] - - Use crm without arguments for an interactive session. - Supply one or more arguments for a "single-shot" use. - Specify with -f a file which contains a script. Use '-' for - standard input or use pipe/redirection. - - crm displays cli format configurations using a color scheme - and/or in uppercase. Pick one of "color" or "uppercase", or - use "-D color,uppercase" if you want colorful uppercase. - Get plain output by "-D plain". The default may be set in - user preferences (options). - - -F stands for force, if set all operations will behave as if - force was specified on the line (e.g. configure commit). 
- -Examples: - - # crm -f stopapp2.cli - # crm < stopapp2.cli - # crm resource stop global_www - # crm status - - """ - sys.exit(rc) - -user_prefs = UserPrefs.getInstance() -options = Options.getInstance() -err_buf = ErrorBuffer.getInstance() -vars = Vars.getInstance() -levels = Levels.getInstance() - -# prefer the user set PATH -os.putenv("PATH", "%s:%s" % (os.getenv("PATH"),vars.crm_daemon_dir)) - -def set_interactive(): - '''Set the interactive option only if we're on a tty.''' - if sys.stdin.isatty(): - options.interactive = True - -def run(): - prereqs() - inp_file = '' - - load_rc(vars.rc_file) - - if not sys.stdin.isatty(): - err_buf.reset_lineno() - options.batch = True - else: - options.interactive = True - - try: - opts, args = getopt.getopt(sys.argv[1:], \ - 'whdf:FRD:', ("wait","version","help","debug","file=",\ - "force","regression-tests","display=")) - for o,p in opts: - if o in ("-h","--help"): - usage(0) - elif o in ("--version"): - print >> sys.stdout,("""%s -Written by Dejan Muhamedagic -""" % vars.crm_version) - sys.exit(0) - - elif o == "-d": - user_prefs.set_debug() - elif o == "-R": - options.regression_tests = True - elif o in ("-D","--display"): - user_prefs.set_output(p) - elif o in ("-F","--force"): - user_prefs.set_force() - elif o in ("-f","--file"): - options.batch = True - options.interactive = False - err_buf.reset_lineno() - inp_file = p - elif o in ("-w","--wait"): - user_prefs.wait = "yes" - except getopt.GetoptError,msg: - print msg - usage(1) - - # this special case is silly, but we have to keep it to - # preserve the backward compatibility - if len(args) == 1 and args[0].startswith("conf"): - parse_line(levels,["configure"]) - elif len(args) > 0: - err_buf.reset_lineno() - # we're not sure yet whether it's an interactive session or not - # (single-shot commands aren't) - options.interactive = False - if parse_line(levels,shlex.split(' '.join(args))): - # if the user entered a level, then just continue - if not 
levels.previous(): - sys.exit(0) - else: - sys.exit(1) - - if inp_file == "-": - pass - elif inp_file: - try: - f = open(inp_file) - except IOError, msg: - common_err(msg) - usage(2) - sys.stdin = f - - if options.interactive and not options.batch: - from completion import setup_readline - setup_readline() - - rc = 0 - while True: - if options.interactive and not options.batch: - vars.prompt = "crm(%s)%s# " % (cib_prompt(),levels.getprompt()) - inp = multi_input(vars.prompt) - if inp == None: - if options.interactive: - cmd_exit("eof") - else: - cmd_exit("eof", rc) - try: - if not parse_line(levels,shlex.split(inp)): - rc = 1 - except ValueError, msg: - rc = 1 - common_err(msg) - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/msg.py b/shell/modules/msg.py deleted file mode 100644 index c36769c72e..0000000000 --- a/shell/modules/msg.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import sys -from singletonmixin import Singleton -from userprefs import Options, UserPrefs - -class ErrorBuffer(Singleton): - ''' - Show error messages either immediately or buffered. 
- ''' - def __init__(self): - self.msg_list = [] - self.mode = "immediate" - self.lineno = -1 - def buffer(self): - self.mode = "keep" - def release(self): - if self.msg_list: - print >> sys.stderr, '\n'.join(self.msg_list) - if not options.batch: - try: - raw_input("Press enter to continue... ") - except EOFError: - pass - self.msg_list = [] - self.mode = "immediate" - def writemsg(self,msg): - if self.mode == "immediate": - if options.regression_tests: - print msg - else: - print >> sys.stderr, msg - else: - self.msg_list.append(msg) - def reset_lineno(self): - self.lineno = 0 - def incr_lineno(self): - if self.lineno >= 0: - self.lineno += 1 - def start_tmp_lineno(self): - self._save_lineno = self.lineno - self.reset_lineno() - def stop_tmp_lineno(self): - self.lineno = self._save_lineno - def add_lineno(self,s): - if self.lineno > 0: - return "%d: %s" % (self.lineno,s) - else: return s - def error(self,s): - self.writemsg("ERROR: %s" % self.add_lineno(s)) - def warning(self,s): - self.writemsg("WARNING: %s" % self.add_lineno(s)) - def info(self,s): - self.writemsg("INFO: %s" % self.add_lineno(s)) - def debug(self,s): - if user_prefs.get_debug(): - self.writemsg("DEBUG: %s" % self.add_lineno(s)) - -def common_error(s): - err_buf.error(s) -def common_err(s): - err_buf.error(s) -def common_warning(s): - err_buf.warning(s) -def common_warn(s): - err_buf.warning(s) -def common_info(s): - err_buf.info(s) -def common_debug(s): - err_buf.debug(s) -def no_prog_err(name): - err_buf.error("%s not available, check your installation"%name) -def missing_prog_warn(name): - err_buf.warning("could not find any %s on the system"%name) -def node_err(msg, node): - err_buf.error("%s: %s" % (msg,node.toprettyxml())) -def node_debug(msg, node): - err_buf.debug("%s: %s" % (msg,node.toprettyxml())) -def no_attribute_err(attr,obj_type): - err_buf.error("required attribute %s not found in %s"%(attr,obj_type)) -def bad_def_err(what,msg): - err_buf.error("bad %s definition: %s"%(what,msg)) 
-def unsupported_err(name): - err_buf.error("%s is not supported"%name) -def no_such_obj_err(name): - err_buf.error("%s object is not supported"%name) -def obj_cli_warn(name): - err_buf.info("object %s cannot be represented in the CLI notation"%name) -def missing_obj_err(node): - err_buf.error("object %s:%s missing (shouldn't have happened)"% \ - (node.tagName,node.getAttribute("id"))) -def constraint_norefobj_err(constraint_id,obj_id): - err_buf.error("constraint %s references a resource %s which doesn't exist"% \ - (constraint_id,obj_id)) -def obj_exists_err(name): - err_buf.error("object %s already exists"%name) -def no_object_err(name): - err_buf.error("object %s does not exist"%name) -def invalid_id_err(obj_id): - err_buf.error("%s: invalid object id"%obj_id) -def id_used_err(node_id): - err_buf.error("%s: id is already in use"%node_id) -def skill_err(s): - err_buf.error("%s: this command is not allowed at this skill level"%' '.join(s)) -def syntax_err(s,token = '',context = ''): - pfx = "syntax" - if context: - pfx = "%s in %s" %(pfx,context) - if type(s) == type(''): - err_buf.error("%s near <%s>"%(pfx,s)) - elif token: - err_buf.error("%s near <%s>: %s"%(pfx,token,' '.join(s))) - else: - err_buf.error("%s: %s"%(pfx,' '.join(s))) -def bad_usage(cmd,args): - err_buf.error("bad usage: %s %s"%(cmd,args)) -def empty_cib_err(): - err_buf.error("No CIB!") -def cib_parse_err(msg,s): - err_buf.error("%s"%msg) - err_buf.info("offending string: %s" % s) -def cib_no_elem_err(el_name): - err_buf.error("CIB contains no '%s' element!"%el_name) -def cib_ver_unsupported_err(validator,rel): - err_buf.error("CIB not supported: validator '%s', release '%s'"% (validator,rel)) - err_buf.error("You may try the upgrade command") -def update_err(obj_id,cibadm_opt,xml,rc): - if cibadm_opt == '-U': - task = "update" - elif cibadm_opt == '-D': - task = "delete" - else: - task = "replace" - err_buf.error("could not %s %s"%(task,obj_id)) - if rc == 54: - err_buf.info("Permission 
denied.") - else: - err_buf.info("offending xml: %s" % xml) - -def not_impl_info(s): - err_buf.info("%s is not implemented yet" % s) - -user_prefs = UserPrefs.getInstance() -err_buf = ErrorBuffer.getInstance() -options = Options.getInstance() -# vim:ts=4:sw=4:et: diff --git a/shell/modules/parse.py b/shell/modules/parse.py deleted file mode 100644 index a3b246363e..0000000000 --- a/shell/modules/parse.py +++ /dev/null @@ -1,765 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import shlex -import re -import xml.dom.minidom -from utils import * -from vars import Vars -from msg import * -from ra import disambiguate_ra_type, ra_type_validate - -# -# CLI parsing utilities -# WARNING: ugly code ahead (to be replaced some day by a proper -# yacc parser, if there's such a thing) -# -def cli_parse_rsctype(s, pl): - ''' - Parse the resource type. - ''' - ra_class,provider,rsc_type = disambiguate_ra_type(s) - if not ra_type_validate(s,ra_class,provider,rsc_type): - return None - pl.append(["class",ra_class]) - if ra_class == "ocf": - pl.append(["provider",provider]) - pl.append(["type",rsc_type]) -def is_attribute(p,a): - return p.startswith(a + '=') -def cli_parse_attr_strict(s,pl): - ''' - Parse attributes in the 'p=v' form. 
- ''' - if s and '=' in s[0]: - n,v = s[0].split('=',1) - if not n: - return - pl.append([n,v]) - cli_parse_attr_strict(s[1:],pl) -def cli_parse_attr(s,pl): - ''' - Parse attributes in the 'p=v' form. - Allow also the 'p' form (no value) unless p is one of the - attr_list_keyw words. - ''' - attr_lists_keyw = olist(["params","meta","utilization","operations","op","attributes"]) - if s: - if s[0] in attr_lists_keyw: - return - if '=' in s[0]: - n,v = s[0].split('=',1) - else: - n = s[0]; v = None - if not n: - return - pl.append([n,v]) - cli_parse_attr(s[1:],pl) -def is_only_id(pl,keyw): - if len(pl) > 1: - common_err("%s: only single $id or $id-ref attribute is allowed" % keyw) - return False - if len(pl) == 1 and pl[0][0] not in ("$id","$id-ref"): - common_err("%s: only single $id or $id-ref attribute is allowed" % keyw) - return False - return True -def check_operation(pl): - op_name = find_value(pl,"name") - if not op_name in olist(vars.op_cli_names): - common_warn("%s: operation not recognized" % op_name) - if op_name == "monitor" and not find_value(pl,"interval"): - common_err("monitor requires interval") - return False - return True -def parse_resource(s): - el_type = s[0].lower() - if el_type == "master": # ugly kludge :( - el_type = "ms" - attr_lists_keyw = olist(["params","meta","utilization"]) - cli_list = [] - # the head - head = [] - head.append(["id",s[1]]) - i = 3 - if el_type == "primitive": - cli_parse_rsctype(s[2],head) - if not find_value(head,"type"): - syntax_err(s[2:], context = "primitive") - return False - else: - cl = [] - cl.append(s[2]) - if el_type == "group": - while i < len(s): - if s[i] in attr_lists_keyw: - break - elif is_attribute(s[i],"description"): - break - else: - cl.append(s[i]) - i += 1 # skip to the next token - head.append(["$children",cl]) - try: # s[i] may be out of range - if is_attribute(s[i],"description"): - cli_parse_attr(s[i:i+1],head) - i += 1 # skip to the next token - except: pass - 
cli_list.append([el_type,head]) - # the rest - state = 0 # 1: reading operations; 2: operations read - while len(s) > i+1: - pl = [] - keyw = s[i].lower() - if keyw in attr_lists_keyw: - if state == 1: - state = 2 - elif el_type == "primitive" and state == 0 and keyword_cmp(keyw, "operations"): - state = 1 - elif el_type == "primitive" and state <= 1 and keyword_cmp(keyw, "op"): - if state == 0: - state = 1 - pl.append(["name",s[i+1]]) - else: - syntax_err(s[i:], context = 'primitive') - return False - if keyword_cmp(keyw, "op"): - if len(s) > i+2: - cli_parse_attr(s[i+2:],pl) - if not check_operation(pl): - return False - else: - cli_parse_attr(s[i+1:],pl) - if len(pl) == 0: - syntax_err(s[i:], context = 'primitive') - return False - if keyword_cmp(keyw, "operations") and not is_only_id(pl,keyw): - return False - i += len(pl)+1 - # interval is obligatory for ops, supply 0 if not there - if keyword_cmp(keyw, "op") and not find_value(pl,"interval"): - pl.append(["interval","0"]) - cli_list.append([keyw,pl]) - if len(s) > i: - syntax_err(s[i:], context = 'primitive') - return False - return cli_list -def parse_op(s): - if len(s) != 3: - syntax_err(s, context = s[0]) - return False - cli_list = [] - head_pl = [] - # this is an op - cli_list.append(["op",head_pl]) - if not cli_parse_rsc_role(s[1],head_pl): - return False - if not cli_parse_op_times(s[2],head_pl): - return False - # rename rsc-role to role - for i in range(len(head_pl)): - if head_pl[i][0] == "rsc-role": - head_pl[i][0] = "role" - break - # add the operation name - head_pl.append(["name",s[0]]) - return cli_list - -def cli_parse_score(score,pl,noattr = False): - if score.endswith(':'): - score = score.rstrip(':') - else: - syntax_err(score, context = 'score') - return False - if score in vars.score_types: - pl.append(["score",vars.score_types[score]]) - elif re.match("^[+-]?(inf|infinity|INFINITY|[[0-9]+)$",score): - score = score.replace("infinity","INFINITY") - score = score.replace("inf","INFINITY") 
- pl.append(["score",score]) - elif score: - if noattr: - common_err("attribute not allowed for score in orders") - return False - else: - pl.append(["score-attribute",score]) - return True -def is_binary_op(s): - l = s.split(':') - if len(l) == 2: - return l[0] in vars.binary_types and l[1] in olist(vars.binary_ops) - elif len(l) == 1: - return l[0] in olist(vars.binary_ops) - else: - return False -def cli_parse_binary_op(s,pl): - l = s.split(':') - if len(l) == 2: - pl.append(["type",l[0]]) - pl.append(["operation",l[1]]) - else: - pl.append(["operation",l[0]]) -def cli_parse_expression(s,pl): - if len(s) > 1 and s[0] in olist(vars.unary_ops): - pl.append(["operation",s[0]]) - pl.append(["attribute",s[1]]) - elif len(s) > 2 and is_binary_op(s[1]): - pl.append(["attribute",s[0]]) - cli_parse_binary_op(s[1],pl) - pl.append(["value",s[2]]) - else: - return False - return True -def cli_parse_dateexpr(s,pl): - if len(s) < 3: - return False - if s[1] not in olist(vars.date_ops): - return False - pl.append(["operation",s[1]]) - if s[1] in olist(vars.simple_date_ops): - pl.append([keyword_cmp(s[1], 'lt') and "end" or "start",s[2]]) - return True - cli_parse_attr_strict(s[2:],pl) - return True -def parse_rule(s): - if not keyword_cmp(s[0], "rule"): - syntax_err(s,context = "rule") - return 0,None - rule_list = [] - head_pl = [] - rule_list.append([s[0].lower(),head_pl]) - i = 1 - cli_parse_attr_strict(s[i:],head_pl) - i += len(head_pl) - if find_value(head_pl,"$id-ref"): - return i,rule_list - if not cli_parse_score(s[i],head_pl): - return i,None - i += 1 - bool_op = '' - while len(s) > i+1: - pl = [] - if keyword_cmp(s[i], "date"): - fun = cli_parse_dateexpr - elem = "date_expression" - else: - fun = cli_parse_expression - elem = "expression" - if not fun(s[i:],pl): - syntax_err(s[i:],context = "rule") - return i,None - rule_list.append([elem,pl]) - i += len(pl) - if find_value(pl, "type"): - i -= 1 # reduce no of tokens by one if there was "type:op" - if elem == 
"date_expression": - i += 1 # increase no of tokens by one if it was date expression - if len(s) > i and s[i] in olist(vars.boolean_ops): - if bool_op and not keyword_cmp(bool_op, s[i]): - common_err("rule contains different bool operations: %s" % ' '.join(s)) - return i,None - else: - bool_op = s[i].lower() - i += 1 - if len(s) > i and keyword_cmp(s[i], "rule"): - break - if bool_op and not keyword_cmp(bool_op, 'and'): - head_pl.append(["boolean-op",bool_op]) - return i,rule_list -def parse_location(s): - cli_list = [] - head_pl = [] - head_pl.append(["id",s[1]]) - head_pl.append(["rsc",s[2]]) - cli_list.append([s[0].lower(),head_pl]) - if len(s) == 5 and not keyword_cmp(s[3], "rule"): # the short node preference form - if not cli_parse_score(s[3],head_pl): - return False - head_pl.append(["node",s[4]]) - return cli_list - i = 3 - while i < len(s): - numtoks,l = parse_rule(s[i:]) - if not l: - return False - cli_list += l - i += numtoks - if len(s) < i: - syntax_err(s[i:],context = "location") - return False - return cli_list - -def cli_opt_symmetrical(p,pl): - if not p: - return True - pl1 = [] - cli_parse_attr([p],pl1) - if len(pl1) != 1 or not find_value(pl1,"symmetrical"): - syntax_err(p,context = "order") - return False - pl += pl1 - return True -def cli_parse_rsc_role(s,pl,attr_pfx = ''): - l = s.split(':') - pl.append([attr_pfx+"rsc",l[0]]) - if len(l) == 2: - if l[1] in vars.roles_names: - pl.append([attr_pfx+"rsc-role",l[1]]) - elif l[1].isdigit(): - pl.append([attr_pfx+"rsc-instance",l[1]]) - else: - bad_def_err("resource role/instance",s) - return False - elif len(l) > 2: - bad_def_err("resource role/instance",s) - return False - return True -def cli_parse_op_times(s,pl): - l = s.split(':') - pl.append(["interval",l[0]]) - if len(l) == 2: - pl.append(["timeout",l[1]]) - elif len(l) > 2: - bad_def_err("op times",s) - return False - return True - -class ResourceSet(object): - ''' - Constraint resource set parser. 
Parses sth like: - a ( b c:start ) d:Master e ... - Appends one or more lists to cli_list. - Lists are in form: - list :: ["resource_set",set_pl] - set_pl :: [["sequential","false"], ["action"|"role",action|role], - ["resource_ref",["id",rsc]], ...] - (the first two elements of set_pl are optional) - Action/role change makes a new resource set. - ''' - def __init__(self,type,s,cli_list): - self.type = type - self.valid_q = (type == "order") and vars.actions_names or vars.roles_names - self.q_attr = (type == "order") and "action" or "role" - self.tokens = s - self.cli_list = cli_list - self.reset_set() - self.sequential = True - self.fix_parentheses() - def fix_parentheses(self): - newtoks = [] - for p in self.tokens: - if p.startswith('(') and len(p) > 1: - newtoks.append('(') - newtoks.append(p[1:]) - elif p.endswith(')') and len(p) > 1: - newtoks.append(p[0:len(p)-1]) - newtoks.append(')') - else: - newtoks.append(p) - self.tokens = newtoks - def reset_set(self): - self.set_pl = [] - self.prev_q = '' # previous qualifier (action or role) - self.curr_attr = '' # attribute (action or role) - def save_set(self): - if not self.set_pl: - return - if self.curr_attr: - self.set_pl.insert(0,[self.curr_attr,self.prev_q]) - if not self.sequential: - self.set_pl.insert(0,["sequential","false"]) - self.cli_list.append(["resource_set",self.set_pl]) - self.reset_set() - def splitrsc(self,p): - l = p.split(':') - return (len(l) == 1) and [p,''] or l - def parse(self): - tokpos = -1 - for p in self.tokens: - tokpos += 1 - if p == "_rsc_set_": - continue # a degenerate resource set - if p == '(': - if self.set_pl: # save the set before - self.save_set() - self.sequential = False - continue - if p == ')': - if self.sequential: # no '(' - syntax_err(self.tokens[tokpos:],context = self.type) - return False - if not self.set_pl: # empty sets not allowed - syntax_err(self.tokens[tokpos:],context = self.type) - return False - self.save_set() - self.sequential = True - continue - rsc,q 
= self.splitrsc(p) - if q != self.prev_q: # one set can't have different roles/actions - self.save_set() - self.prev_q = q - if q: - if q not in self.valid_q: - common_err("%s: invalid %s in %s" % (q,self.q_attr,self.type)) - return False - if not self.curr_attr: - self.curr_attr = self.q_attr - else: - self.curr_attr = '' - self.set_pl.append(["resource_ref",["id",rsc]]) - if not self.sequential: # no ')' - syntax_err(self.tokens[tokpos:],context = self.type) - return False - if self.set_pl: # save the final set - self.save_set() - return True - -def parse_colocation(s): - cli_list = [] - head_pl = [] - type = s[0] - if type == "collocation": # another ugly :( - type = "colocation" - cli_list.append([type,head_pl]) - if len(s) < 5: - syntax_err(s,context = "colocation") - return False - head_pl.append(["id",s[1]]) - if not cli_parse_score(s[2],head_pl): - return False - if len(s) == 5: - if not cli_parse_rsc_role(s[3],head_pl): - return False - if not cli_parse_rsc_role(s[4],head_pl,'with-'): - return False - else: - resource_set_obj = ResourceSet(type,s[3:],cli_list) - if not resource_set_obj.parse(): - return False - return cli_list -def cli_parse_rsc_action(s,pl,rsc_pos): - l = s.split(':') - pl.append([rsc_pos,l[0]]) - if len(l) == 2: - if l[1] in vars.actions_names: - pl.append([rsc_pos+"-action",l[1]]) - elif l[1].isdigit(): - pl.append([rsc_pos+"-instance",l[1]]) - else: - bad_def_err("resource action/instance",s) - return False - elif len(l) > 1: - bad_def_err("resource action/instance",s) - return False - return True - -def parse_order(s): - cli_list = [] - head_pl = [] - type = "order" - cli_list.append([s[0],head_pl]) - if len(s) < 5: - syntax_err(s,context = "order") - return False - head_pl.append(["id",s[1]]) - if not cli_parse_score(s[2],head_pl,noattr = True): - return False - # save symmetrical for later (if it exists) - symm = "" - if is_attribute(s[len(s)-1],"symmetrical"): - symm = s.pop() - if len(s) == 5: - if not 
cli_parse_rsc_action(s[3],head_pl,'first'): - return False - if not cli_parse_rsc_action(s[4],head_pl,'then'): - return False - else: - resource_set_obj = ResourceSet(type,s[3:],cli_list) - if not resource_set_obj.parse(): - return False - if not cli_opt_symmetrical(symm,head_pl): - return False - return cli_list - -def parse_constraint(s): - if keyword_cmp(s[0], "location"): - return parse_location(s) - elif s[0] in olist(["colocation","collocation"]): - return parse_colocation(s) - elif keyword_cmp(s[0], "order"): - return parse_order(s) -def parse_property(s): - cli_list = [] - head_pl = [] - cli_list.append([s[0],head_pl]) - cli_parse_attr_strict(s[1:],head_pl) - if len(head_pl) < 0 or len(s) > len(head_pl)+1: - syntax_err(s, context = s[0]) - return False - return cli_list -def cli_parse_uname(s, pl): - l = s.split(':') - if not l or len(l) > 2: - return None - pl.append(["uname",l[0]]) - if len(l) == 2: - pl.append(["type",l[1]]) -def parse_node(s): - cli_list = [] - # the head - head = [] - # optional $id - id = '' - opt_id_l = [] - i = 1 - cli_parse_attr_strict(s[i:],opt_id_l) - if opt_id_l: - id = find_value(opt_id_l,"$id") - i += 1 - # uname[:type] - cli_parse_uname(s[i],head) - uname = find_value(head,"uname") - if not uname: - return False - head.append(["id",id and id or uname]) - # drop type if default - type = find_value(head,"type") - if type == vars.node_default_type: - head.remove(["type",type]) - cli_list.append([s[0],head]) - if len(s) == i: - return cli_list - # the rest - i += 1 - try: # s[i] may be out of range - if is_attribute(s[i],"description"): - cli_parse_attr(s[i:i+1],head) - i += 1 # skip to the next token - except: pass - while len(s) > i+1: - if not s[i] in olist(vars.node_attributes_keyw): - syntax_err(s[i:], context = 'node') - return False - pl = [] - cli_parse_attr(s[i+1:],pl) - if len(pl) == 0: - syntax_err(s[i:], context = 'node') - return False - cli_list.append([s[i],pl]) - i += len(pl)+1 - if len(s) > i: - syntax_err(s[i:], 
context = 'node') - return False - return cli_list -def parse_xml(s): - cli_list = [] - head = [] - try: - xml_s = ' '.join(s[1:]) - except: - syntax_err(s, context = 'xml') - return False - # strip spaces between elements - # they produce text elements - xml_s = re.sub(r">\s+<", "><", xml_s) - try: - doc = xml.dom.minidom.parseString(xml_s) - except xml.parsers.expat.ExpatError, msg: - common_err("cannot parse xml chunk: %s" % xml_s) - common_err(msg) - return False - try: - elnode = doc.childNodes[0] - except: - common_err("no elements in %s" % xml_s) - return False - try: - el_type = vars.cib_cli_map[elnode.tagName] - except: - common_err("element %s not recognized" % elnode.tagName) - return False - id = elnode.getAttribute("id") - head.append(["id",id]) - cli_list.append([el_type,head]) - cli_list.append(["raw",xml_s]) - return cli_list - -def expand_acl_shortcuts(l): - ''' - Expand xpath shortcuts. The input list l contains the user - input. If no shortcut was found, just return l. - In case of syntax error, return empty list. Otherwise, l[0] - contains 'xpath' and l[1] the expansion as found in - vars.acl_shortcuts. The id placeholders '@@' are replaced - with the given attribute names or resource references. 
- ''' - shortcut=l[0] - try: expansion = vars.acl_shortcuts[shortcut] - except: return l - l[0] = "xpath" - if len(l) == 1: - if '@@' in expansion[0]: - return [] - l.append(expansion[0]) - return l - a = l[1].split(':') - xpath = "" - exp_i = 0 - for tok in a: - try: - # some expansions may contain no id placeholders - # of course, they don't consume input tokens - if '@@' not in expansion[exp_i]: - xpath += expansion[exp_i] - exp_i += 1 - xpath += expansion[exp_i].replace('@@',tok) - exp_i += 1 - except: - return [] - # need to remove backslash chars which were there to escape - # special characters in expansions when used as regular - # expressions (mainly '[]') - l[1] = xpath.replace("\\","") - return l -def is_acl_rule_name(a): - return a in olist(vars.acl_rule_names) -def get_acl_specs(s): - l = [] - eligible_specs = vars.acl_spec_map.values() - for spec in s: - a = spec.split(':',1) - a = expand_acl_shortcuts(a) - if len(a) != 2 or a[0] not in eligible_specs: - return l - l.append([a[0],a[1]]) - eligible_specs.remove(a[0]) - if a[0] == "xpath": - eligible_specs.remove("ref") - eligible_specs.remove("tag") - elif a[0] in ("ref","tag"): - # this can happen twice - try: eligible_specs.remove("xpath") - except: pass - else: - break # nothing after "attribute" - return l -def cli_parse_acl_rules(s,obj_type,cli_list): - i = 0 - while i < len(s): - if not is_acl_rule_name(s[i]): - syntax_err(s, context = obj_type) - return False - rule_name = s[i] - i += 1 - if i >= len(s): - syntax_err(s, context = obj_type) - return False - l = get_acl_specs(s[i:]) - if len(l) < 1: - syntax_err(s, context = obj_type) - return False - i += len(l) - cli_list.append([rule_name,l]) - return cli_list -def parse_acl(s): - cli_list = [] - head_pl = [] - obj_type = s[0] - cli_list.append([obj_type,head_pl]) - head_pl.append(["id",s[1]]) - if keyword_cmp(obj_type, "user") and s[2].startswith("role:"): - for i in range(2,len(s)): - a = s[i].split(':',1) - if len(a) != 2 or a[0] != "role": 
- syntax_err(s, context = obj_type) - return False - cli_list.append(["role_ref",["id",a[1]]]) - return cli_list - return cli_parse_acl_rules(s[2:],obj_type,cli_list) - -def xml_lex(s): - l = lines2cli(s) - a = [] - for p in l: - a += p.split() - return a - -class CliParser(object): - parsers = { - "primitive": (3,parse_resource), - "group": (3,parse_resource), - "clone": (3,parse_resource), - "ms": (3,parse_resource), - "master": (3,parse_resource), - "location": (3,parse_constraint), - "colocation": (3,parse_constraint), - "collocation": (3,parse_constraint), - "order": (3,parse_constraint), - "monitor": (3,parse_op), - "node": (2,parse_node), - "property": (2,parse_property), - "rsc_defaults": (2,parse_property), - "op_defaults": (2,parse_property), - "role": (3,parse_acl), - "user": (3,parse_acl), - "xml": (3,parse_xml), - } - def __init__(self): - self.comments = [] - def parse(self,s): - ''' - Input: a list of tokens (or a CLI format string). - Return: a list of items; each item is a tuple - with two members: a string (tag) and a nvpairs or - attributes dict. - ''' - cli_list = '' - if type(s) == type(u''): - s = s.encode('ascii') - if type(s) == type(''): - if s and s.startswith('#'): - self.comments.append(s) - return None - if s.startswith('xml'): - s = xml_lex(s) - else: - try: - s = shlex.split(s) - except ValueError, msg: - common_err(msg) - return False - # but there shouldn't be any newlines (?) 
- while '\n' in s: - s.remove('\n') - if not s: - return None - if s[0] not in self.parsers.keys(): - syntax_err(s) - return False - mintoks,parser_fn = self.parsers[s[0]] - if len(s) < mintoks: - syntax_err(s) - return False - cli_list = parser_fn(s) - if not cli_list: - return False - if self.comments: - cli_list.append(["comments",self.comments]) - self.comments = [] - return cli_list - -vars = Vars.getInstance() -# vim:ts=4:sw=4:et: diff --git a/shell/modules/ra.py.in b/shell/modules/ra.py.in deleted file mode 100644 index 8b5e8a3684..0000000000 --- a/shell/modules/ra.py.in +++ /dev/null @@ -1,665 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import os -import sys -import subprocess -import copy -import xml.dom.minidom -import re -import glob -from userprefs import Options, UserPrefs -from cache import WCache -from vars import Vars, getuser -from utils import * -from msg import * - -# -# Resource Agents interface (meta-data, parameters, etc) -# -ocf_root = os.getenv("OCF_ROOT") -if not ocf_root: - ocf_root = "@OCF_ROOT_DIR@" - if not ocf_root: - ocf_root = "/usr/lib/ocf" - os.putenv("OCF_ROOT",ocf_root) -class RaLrmd(object): - ''' - Getting information from the resource agents. 
- ''' - lrmadmin_prog = "lrmadmin" - def __init__(self): - self.good = self.is_lrmd_accessible() - def lrmadmin(self, opts, xml = False): - ''' - Get information directly from lrmd using lrmadmin. - ''' - l = stdout2list("%s %s" % (self.lrmadmin_prog,opts)) - if l and not xml: - l = l[1:] # skip the first line - return l - def is_lrmd_accessible(self): - if not (is_program(self.lrmadmin_prog) and is_process("lrmd")): - return False - return subprocess.call(\ - add_sudo(">/dev/null 2>&1 %s -C" % self.lrmadmin_prog), \ - shell=True) == 0 - def meta(self, ra_class,ra_type,ra_provider): - return self.lrmadmin("-M %s %s %s"%(ra_class,ra_type,ra_provider),True) - def providers(self, ra_type,ra_class = "ocf"): - 'List of providers for a class:type.' - return self.lrmadmin("-P %s %s" % (ra_class,ra_type),True) - def classes(self): - 'List of classes.' - return self.lrmadmin("-C") - def types(self, ra_class = "ocf", ra_provider = ""): - 'List of types for a class.' - return self.lrmadmin("-T %s" % ra_class) - -class RaOS(object): - ''' - Getting information from the resource agents (direct). - ''' - def __init__(self): - self.good = True - def meta(self, ra_class,ra_type,ra_provider): - l = [] - if ra_class == "ocf": - l = stdout2list("%s/resource.d/%s/%s meta-data" % \ - (ocf_root,ra_provider,ra_type)) - elif ra_class == "stonith": - l = stdout2list("stonith -m -t %s" % ra_type) - return l - def providers(self, ra_type,ra_class = "ocf"): - 'List of providers for a class:type.' - l = [] - if ra_class == "ocf": - for s in glob.glob("%s/resource.d/*/%s" % (ocf_root,ra_type)): - a = s.split("/") - if len(a) == 7: - l.append(a[5]) - return l - def classes(self): - 'List of classes.' - return "heartbeat lsb ocf stonith".split() - def types(self, ra_class = "ocf", ra_provider = ""): - 'List of types for a class.' 
- l = [] - prov = ra_provider and ra_provider or "*" - if ra_class == "ocf": - l = os_types_list("%s/resource.d/%s/*" % (ocf_root,prov)) - elif ra_class == "lsb": - l = os_types_list("/etc/init.d/*") - elif ra_class == "stonith": - l = stdout2list("stonith -L") - l = list(set(l)) - l.sort() - return l - -def ra_if(): - if vars.ra_if: - return vars.ra_if - if getuser() in ("root",vars.crm_daemon_user): - vars.ra_if = RaLrmd() - if not vars.ra_if or not vars.ra_if.good: - vars.ra_if = RaOS() - return vars.ra_if - -def ra_classes(): - ''' - List of RA classes. - ''' - if wcache.is_cached("ra_classes"): - return wcache.retrieve("ra_classes") - l = ra_if().classes() - l.sort() - return wcache.store("ra_classes",l) -def ra_providers(ra_type,ra_class = "ocf"): - 'List of providers for a class:type.' - id = "ra_providers-%s-%s" % (ra_class,ra_type) - if wcache.is_cached(id): - return wcache.retrieve(id) - l = ra_if().providers(ra_type,ra_class) - l.sort() - return wcache.store(id,l) -def ra_providers_all(ra_class = "ocf"): - ''' - List of providers for a class. - ''' - id = "ra_providers_all-%s" % ra_class - if wcache.is_cached(id): - return wcache.retrieve(id) - dir = ocf_root + "/resource.d" - l = [] - for s in os.listdir(dir): - if os.path.isdir("%s/%s" % (dir,s)): - l.append(s) - l.sort() - return wcache.store(id,l) -def ra_types(ra_class = "ocf", ra_provider = ""): - ''' - List of RA type for a class. 
- ''' - if not ra_class: - ra_class = "ocf" - id = "ra_types-%s-%s" % (ra_class,ra_provider) - if wcache.is_cached(id): - return wcache.retrieve(id) - if ra_provider: - list = [] - for ra in ra_if().types(ra_class): - if ra_provider in ra_providers(ra,ra_class): - list.append(ra) - else: - list = ra_if().types(ra_class) - list.sort() - return wcache.store(id,list) - -def get_pe_meta(): - if not vars.pe_metadata: - vars.pe_metadata = RAInfo("pengine","metadata") - return vars.pe_metadata -def get_crmd_meta(): - if not vars.crmd_metadata: - vars.crmd_metadata = RAInfo("crmd","metadata") - vars.crmd_metadata.set_advanced_params(vars.crmd_advanced) - return vars.crmd_metadata -def get_stonithd_meta(): - if not vars.stonithd_metadata: - vars.stonithd_metadata = RAInfo("stonithd","metadata") - return vars.stonithd_metadata -def get_cib_meta(): - if not vars.cib_metadata: - vars.cib_metadata = RAInfo("cib","metadata") - return vars.cib_metadata -def get_properties_meta(): - if not vars.crm_properties_metadata: - get_pe_meta() - get_crmd_meta() - get_cib_meta() - vars.crm_properties_metadata = copy.deepcopy(vars.crmd_metadata) - vars.crm_properties_metadata.add_ra_params(vars.pe_metadata) - vars.crm_properties_metadata.add_ra_params(vars.cib_metadata) - return vars.crm_properties_metadata -def get_properties_list(): - try: - return get_properties_meta().params().keys() - except: - return [] - -def prog_meta(prog): - ''' - Do external program metadata. 
- ''' - l = [] - if is_program(prog): - l = stdout2list("%s metadata" % prog) - return l -def get_nodes_text(n,tag): - try: - node = n.getElementsByTagName(tag)[0] - for c in node.childNodes: - if c.nodeType == c.TEXT_NODE: - return c.data.strip() - except: return '' - -def mk_monitor_name(role,depth): - depth = depth != "0" and ("_%s" % depth) or "" - return role and role != "Started" and \ - "monitor_%s%s" % (role,depth) or \ - "monitor%s" % depth -def monitor_name_node(node): - depth = node.getAttribute("depth") or '0' - role = node.getAttribute("role") - return mk_monitor_name(role,depth) -def monitor_name_pl(pl): - depth = find_value(pl, "depth") or '0' - role = find_value(pl, "role") - return mk_monitor_name(role,depth) -def crm_msec(t): - ''' - See lib/common/utils.c:crm_get_msec(). - ''' - convtab = { - 'ms': (1,1), - 'msec': (1,1), - 'us': (1,1000), - 'usec': (1,1000), - '': (1000,1), - 's': (1000,1), - 'sec': (1000,1), - 'm': (60*1000,1), - 'min': (60*1000,1), - 'h': (60*60*1000,1), - 'hr': (60*60*1000,1), - } - if not t: - return -1 - r = re.match("\s*(\d+)\s*([a-zA-Z]+)?", t) - if not r: - return -1 - if not r.group(2): - q = '' - else: - q = r.group(2).lower() - try: - mult,div = convtab[q] - except: - return -1 - return (int(r.group(1))*mult)/div -def crm_time_cmp(a, b): - return crm_msec(a) - crm_msec(b) - -class RAInfo(object): - ''' - A resource agent and whatever's useful about it. 
- ''' - ra_tab = " " # four horses - required_ops = ("start", "stop") - skip_ops = ("meta-data", "validate-all") - skip_op_attr = ("name", "depth", "role") - def __init__(self,ra_class,ra_type,ra_provider = "heartbeat"): - self.advanced_params = [] - self.ra_class = ra_class - self.ra_type = ra_type - self.ra_provider = ra_provider - if not self.ra_provider: - self.ra_provider = "heartbeat" - self.ra_node = None - def ra_string(self): - return self.ra_class == "ocf" and \ - "%s:%s:%s" % (self.ra_class, self.ra_provider, self.ra_type) or \ - "%s:%s" % (self.ra_class, self.ra_type) - def error(self, s): - common_err("%s: %s" % (self.ra_string(), s)) - def warn(self, s): - common_warn("%s: %s" % (self.ra_string(), s)) - def set_advanced_params(self, l): - self.advanced_params = l - def filter_crmd_attributes(self): - for n in self.ra_node.getElementsByTagName("parameter"): - if not n.getAttribute("name") in vars.crmd_user_attributes: - n.parentNode.removeChild(n) - def add_ra_params(self,ra): - ''' - Add parameters from another RAInfo instance. - ''' - try: - if not self.mk_ra_node() or not ra.mk_ra_node(): - return - except: - return - try: - params_node = self.doc.getElementsByTagName("parameters")[0] - except: - params_node = self.doc.createElement("parameters") - self.ra_node.appendChild(params_node) - for n in ra.ra_node.getElementsByTagName("parameter"): - params_node.appendChild(self.doc.importNode(n,1)) - def mk_ra_node(self): - ''' - Return the resource_agent node. 
- ''' - if self.ra_node: - return self.ra_node - meta = self.meta() - try: - self.doc = xml.dom.minidom.parseString('\n'.join(meta)) - except: - self.error("could not parse meta-data: %s" % '\n'.join(meta)) - self.ra_node = None - return None - try: - self.ra_node = self.doc.getElementsByTagName("resource-agent")[0] - except: - self.error("meta-data contains no resource-agent element") - self.ra_node = None - return None - if self.ra_class == "stonith": - self.add_ra_params(get_stonithd_meta()) - return self.ra_node - def param_type_default(self,n): - try: - content = n.getElementsByTagName("content")[0] - type = content.getAttribute("type") - default = content.getAttribute("default") - return type,default - except: - return None,None - def params(self): - ''' - Construct a dict of dicts: parameters are keys and - dictionary of attributes/values are values. Cached too. - ''' - id = "ra_params-%s" % self.ra_string() - if wcache.is_cached(id): - return wcache.retrieve(id) - if not self.mk_ra_node(): - return None - d = {} - for pset in self.ra_node.getElementsByTagName("parameters"): - for c in pset.getElementsByTagName("parameter"): - name = c.getAttribute("name") - if not name: - continue - required = c.getAttribute("required") - unique = c.getAttribute("unique") - type,default = self.param_type_default(c) - d[name] = { - "required": required, - "unique": unique, - "type": type, - "default": default, - } - return wcache.store(id,d) - def completion_params(self): - ''' - Extra method for completion, for we want to filter some - (advanced) parameters out. And we want this to be fast. - ''' - if not self.mk_ra_node(): - return None - return [c.getAttribute("name") - for c in self.ra_node.getElementsByTagName("parameter") - if c.getAttribute("name") - and c.getAttribute("name") not in self.advanced_params - ] - def actions(self): - ''' - Construct a dict of dicts: actions are keys and - dictionary of attributes/values are values. Cached too. 
- ''' - id = "ra_actions-%s" % self.ra_string() - if wcache.is_cached(id): - return wcache.retrieve(id) - if not self.mk_ra_node(): - return None - d = {} - for pset in self.ra_node.getElementsByTagName("actions"): - for c in pset.getElementsByTagName("action"): - name = c.getAttribute("name") - if not name or name in self.skip_ops: - continue - if name == "monitor": - name = monitor_name_node(c) - d[name] = {} - for a in c.attributes.keys(): - if a in self.skip_op_attr: - continue - v = c.getAttribute(a) - if v: - d[name][a] = v - # add monitor ops without role, if they don't already - # exist - d2 = {} - for op in d.keys(): - if re.match("monitor_[^0-9]", op): - norole_op = re.sub(r'monitor_[^0-9_]+_(.*)', r'monitor_\1', op) - if not norole_op in d: - d2[norole_op] = d[op] - d.update(d2) - return wcache.store(id,d) - def reqd_params_list(self): - ''' - List of required parameters. - ''' - d = self.params() - if not d: return [] - return [x for x in d if d[x]["required"] == '1'] - def param_default(self,pname): - ''' - Parameter's default. - ''' - d = self.params() - try: return d[pname]["default"] - except: return None - def sanity_check_params(self, id, pl): - ''' - pl is a list of (attribute,value) pairs. 
- - are all required parameters defined - - do all parameters exist - ''' - rc = 0 - d = {} - for p,v in pl: - d[p] = v - for p in self.reqd_params_list(): - if p not in d: - common_err("%s: required parameter %s not defined" % (id,p)) - rc |= user_prefs.get_check_rc() - for p in d: - if p not in self.params(): - common_err("%s: parameter %s does not exist" % (id,p)) - rc |= user_prefs.get_check_rc() - return rc - def get_adv_timeout(self, op, node = None): - if node and op == "monitor": - name = monitor_name_node(node) - else: - name = op - try: - return self.actions()[name]["timeout"] - except: - return None - def sanity_check_ops(self, id, ops, default_timeout): - ''' - ops is a dict, operation names are keys and values are - lists of (attribute,value) pairs. - - do all operations exist - - are timeouts sensible - ''' - rc = 0 - n_ops = {} - for op in ops: - n_op = op == "monitor" and monitor_name_pl(ops[op]) or op - n_ops[n_op] = {} - for p,v in ops[op]: - if p in self.skip_op_attr: - continue - n_ops[n_op][p] = v - for req_op in self.required_ops: - if req_op not in n_ops: - n_ops[req_op] = {} - for op in n_ops: - if op not in self.actions(): - common_warn("%s: action %s not advertised in meta-data, it may not be supported by the RA" % (id,op)) - rc |= 1 - continue - if "interval" in n_ops[op]: - if n_ops[op]["interval"] != "0": - if op == "start" or op == "stop": - v = n_ops[op]["interval"] - common_warn("%s: Specified interval for %s is %s, this is greater than 0 thus invalid" %(id,op,v)) - try: - adv_timeout = self.actions()[op]["timeout"] - except: - continue - if "timeout" in n_ops[op]: - v = n_ops[op]["timeout"] - timeout_string = "specified timeout" - else: - v = default_timeout - timeout_string = "default timeout" - if crm_msec(v) < 0: - continue - if crm_time_cmp(adv_timeout,v) > 0: - common_warn("%s: %s %s for %s is smaller than the advised %s" % \ - (id,timeout_string,v,op,adv_timeout)) - rc |= 1 - return rc - def meta(self): - ''' - RA meta-data as 
raw xml. - ''' - id = "ra_meta-%s" % self.ra_string() - if wcache.is_cached(id): - return wcache.retrieve(id) - if self.ra_class in vars.meta_progs: - l = prog_meta(self.ra_class) - else: - l = ra_if().meta(self.ra_class,self.ra_type,self.ra_provider) - return wcache.store(id, l) - def meta_pretty(self): - ''' - Print the RA meta-data in a human readable form. - ''' - if not self.mk_ra_node(): - return '' - l = [] - title = self.meta_title() - l.append(title) - longdesc = get_nodes_text(self.ra_node,"longdesc") - if longdesc: - l.append(longdesc) - if self.ra_class != "heartbeat": - params = self.meta_parameters() - if params: - l.append(params.rstrip()) - actions = self.meta_actions() - if actions: - l.append(actions) - return '\n\n'.join(l) - def get_shortdesc(self,n): - name = n.getAttribute("name") - shortdesc = get_nodes_text(n,"shortdesc") - longdesc = get_nodes_text(n,"longdesc") - if shortdesc and shortdesc not in (name,longdesc,self.ra_type): - return shortdesc - return '' - def meta_title(self): - s = self.ra_string() - shortdesc = self.get_shortdesc(self.ra_node) - if shortdesc: - s = "%s (%s)" % (shortdesc,s) - return s - def meta_param_head(self,n): - name = n.getAttribute("name") - if not name: - return None - s = name - if n.getAttribute("required") == "1": - s = s + "*" - type,default = self.param_type_default(n) - if type and default: - s = "%s (%s, [%s])" % (s,type,default) - elif type: - s = "%s (%s)" % (s,type) - shortdesc = self.get_shortdesc(n) - s = "%s: %s" % (s,shortdesc) - return s - def format_parameter(self,n): - l = [] - head = self.meta_param_head(n) - if not head: - self.error("no name attribute for parameter") - return "" - l.append(head) - longdesc = get_nodes_text(n,"longdesc") - if longdesc: - longdesc = self.ra_tab + longdesc.replace("\n","\n"+self.ra_tab) + '\n' - l.append(longdesc) - return '\n'.join(l) - def meta_parameter(self,param): - if not self.mk_ra_node(): - return '' - l = [] - for pset in 
self.ra_node.getElementsByTagName("parameters"): - for c in pset.getElementsByTagName("parameter"): - if c.getAttribute("name") == param: - return self.format_parameter(c) - def meta_parameters(self): - if not self.mk_ra_node(): - return '' - l = [] - for pset in self.ra_node.getElementsByTagName("parameters"): - for c in pset.getElementsByTagName("parameter"): - s = self.format_parameter(c) - if s: - l.append(s) - if l: - return "Parameters (* denotes required, [] the default):\n\n" + '\n'.join(l) - def meta_action_head(self,n): - name = n.getAttribute("name") - if not name: - return '' - if name in self.skip_ops: - return '' - if name == "monitor": - name = monitor_name_node(n) - s = "%-13s" % name - for a in n.attributes.keys(): - if a in self.skip_op_attr: - continue - v = n.getAttribute(a) - if v: - s = "%s %s=%s" % (s,a,v) - return s - def meta_actions(self): - l = [] - for aset in self.ra_node.getElementsByTagName("actions"): - for c in aset.getElementsByTagName("action"): - s = self.meta_action_head(c) - if s: - l.append(self.ra_tab + s) - if l: - return "Operations' defaults (advisory minimum):\n\n" + '\n'.join(l) - -# -# resource type definition -# -def ra_type_validate(s, ra_class, provider, rsc_type): - ''' - Only ocf ra class supports providers. 
- ''' - if not rsc_type: - common_err("bad resource type specification %s"%s) - return False - if ra_class == "ocf": - if not provider: - common_err("provider could not be determined for %s"%s) - return False - else: - if provider: - common_warn("ra class %s does not support providers"%ra_class) - return True - return True -def disambiguate_ra_type(s): - ''' - Unravel [class:[provider:]]type - ''' - l = s.split(':') - if not l or len(l) > 3: - return ["","",""] - if len(l) == 3: - return l - elif len(l) == 2: - ra_class,ra_type = l - else: - ra_class = "ocf" - ra_type = l[0] - ra_provider = '' - if ra_class == "ocf": - pl = ra_providers(ra_type,ra_class) - if pl and len(pl) == 1: - ra_provider = pl[0] - elif not pl: - ra_provider = 'heartbeat' - return ra_class,ra_provider,ra_type - -wcache = WCache.getInstance() -vars = Vars.getInstance() -# vim:ts=4:sw=4:et: diff --git a/shell/modules/singletonmixin.py b/shell/modules/singletonmixin.py deleted file mode 100644 index 68211ab385..0000000000 --- a/shell/modules/singletonmixin.py +++ /dev/null @@ -1,509 +0,0 @@ -""" -A Python Singleton mixin class that makes use of some of the ideas -found at http://c2.com/cgi/wiki?PythonSingleton. Just inherit -from it and you have a singleton. No code is required in -subclasses to create singleton behavior -- inheritance from -Singleton is all that is needed. - -Singleton creation is threadsafe. - -USAGE: - -Just inherit from Singleton. If you need a constructor, include -an __init__() method in your class as you usually would. However, -if your class is S, you instantiate the singleton using S.getInstance() -instead of S(). Repeated calls to S.getInstance() return the -originally-created instance. - -For example: - -class S(Singleton): - - def __init__(self, a, b=1): - pass - -S1 = S.getInstance(1, b=3) - - -Most of the time, that's all you need to know. However, there are some -other useful behaviors. 
Read on for a full description: - -1) Getting the singleton: - - S.getInstance() - -returns the instance of S. If none exists, it is created. - -2) The usual idiom to construct an instance by calling the class, i.e. - - S() - -is disabled for the sake of clarity. - -For one thing, the S() syntax means instantiation, but getInstance() -usually does not cause instantiation. So the S() syntax would -be misleading. - -Because of that, if S() were allowed, a programmer who didn't -happen to notice the inheritance from Singleton (or who -wasn't fully aware of what a Singleton pattern -does) might think he was creating a new instance, -which could lead to very unexpected behavior. - -So, overall, it is felt that it is better to make things clearer -by requiring the call of a class method that is defined in -Singleton. An attempt to instantiate via S() will result -in a SingletonException being raised. - -3) Use __S.__init__() for instantiation processing, -since S.getInstance() runs S.__init__(), passing it the args it has received. - -If no data needs to be passed in at instantiation time, you don't need S.__init__(). - -4) If S.__init__(.) requires parameters, include them ONLY in the -first call to S.getInstance(). If subsequent calls have arguments, -a SingletonException is raised by default. - -If you find it more convenient for subsequent calls to be allowed to -have arguments, but for those argumentsto be ignored, just include -'ignoreSubsequent = True' in your class definition, i.e.: - - class S(Singleton): - - ignoreSubsequent = True - - def __init__(self, a, b=1): - pass - -5) For testing, it is sometimes convenient for all existing singleton -instances to be forgotten, so that new instantiations can occur. For that -reason, a forgetAllSingletons() function is included. Just call - - forgetAllSingletons() - -and it is as if no earlier instantiations have occurred. 
- -6) As an implementation detail, classes that inherit -from Singleton may not have their own __new__ -methods. To make sure this requirement is followed, -an exception is raised if a Singleton subclass includ -es __new__. This happens at subclass instantiation -time (by means of the MetaSingleton metaclass. - - -By Gary Robinson, grobinson@flyfi.com. No rights reserved -- -placed in the public domain -- which is only reasonable considering -how much it owes to other people's code and ideas which are in the -public domain. The idea of using a metaclass came from -a comment on Gary's blog (see -http://www.garyrobinson.net/2004/03/python_singleto.html#comments). -Other improvements came from comments and email from other -people who saw it online. (See the blog post and comments -for further credits.) - -Not guaranteed to be fit for any particular purpose. Use at your -own risk. -""" - -import threading - -class SingletonException(Exception): - pass - -_stSingletons = set() -_lockForSingletons = threading.RLock() -_lockForSingletonCreation = threading.RLock() # Ensure only one instance of each Singleton - # class is created. This is not bound to the - # individual Singleton class since we need to - # ensure that there is only one mutex for each - # Singleton class, which would require having - # a lock when setting up the Singleton class, - # which is what this is anyway. So, when any - # Singleton is created, we lock this lock and - # then we don't need to lock it again for that - # class. - -def _createSingletonInstance(cls, lstArgs, dctKwArgs): - _lockForSingletonCreation.acquire() - try: - if cls._isInstantiated(): # some other thread got here first - return - - instance = cls.__new__(cls) - try: - instance.__init__(*lstArgs, **dctKwArgs) - except TypeError, e: - if e.message.find('__init__() takes') != -1: - raise SingletonException, 'If the singleton requires __init__ args, supply them on first call to getInstance().' 
- else: - raise - cls.cInstance = instance - _addSingleton(cls) - finally: - _lockForSingletonCreation.release() - -def _addSingleton(cls): - _lockForSingletons.acquire() - try: - assert cls not in _stSingletons - _stSingletons.add(cls) - finally: - _lockForSingletons.release() - -def _removeSingleton(cls): - _lockForSingletons.acquire() - try: - if cls in _stSingletons: - _stSingletons.remove(cls) - finally: - _lockForSingletons.release() - -def forgetAllSingletons(): - '''This is useful in tests, since it is hard to know which singletons need to be cleared to make a test work.''' - _lockForSingletons.acquire() - try: - for cls in _stSingletons.copy(): - cls._forgetClassInstanceReferenceForTesting() - - # Might have created some Singletons in the process of tearing down. - # Try one more time - there should be a limit to this. - iNumSingletons = len(_stSingletons) - if len(_stSingletons) > 0: - for cls in _stSingletons.copy(): - cls._forgetClassInstanceReferenceForTesting() - iNumSingletons -= 1 - assert iNumSingletons == len(_stSingletons), 'Added a singleton while destroying ' + str(cls) - assert len(_stSingletons) == 0, _stSingletons - finally: - _lockForSingletons.release() - -class MetaSingleton(type): - def __new__(metaclass, strName, tupBases, dct): - if dct.has_key('__new__'): - raise SingletonException, 'Can not override __new__ in a Singleton' - return super(MetaSingleton, metaclass).__new__(metaclass, strName, tupBases, dct) - - def __call__(cls, *lstArgs, **dictArgs): - raise SingletonException, 'Singletons may only be instantiated through getInstance()' - -class Singleton(object): - __metaclass__ = MetaSingleton - - def getInstance(cls, *lstArgs, **dctKwArgs): - """ - Call this to instantiate an instance or retrieve the existing instance. - If the singleton requires args to be instantiated, include them the first - time you call getInstance. 
- """ - if cls._isInstantiated(): - if (lstArgs or dctKwArgs) and not hasattr(cls, 'ignoreSubsequent'): - raise SingletonException, 'Singleton already instantiated, but getInstance() called with args.' - else: - _createSingletonInstance(cls, lstArgs, dctKwArgs) - - return cls.cInstance - getInstance = classmethod(getInstance) - - def _isInstantiated(cls): - # Don't use hasattr(cls, 'cInstance'), because that screws things up if there is a singleton that - # extends another singleton. hasattr looks in the base class if it doesn't find in subclass. - return 'cInstance' in cls.__dict__ - _isInstantiated = classmethod(_isInstantiated) - - # This can be handy for public use also - isInstantiated = _isInstantiated - - def _forgetClassInstanceReferenceForTesting(cls): - """ - This is designed for convenience in testing -- sometimes you - want to get rid of a singleton during test code to see what - happens when you call getInstance() under a new situation. - - To really delete the object, all external references to it - also need to be deleted. - """ - try: - if hasattr(cls.cInstance, '_prepareToForgetSingleton'): - # tell instance to release anything it might be holding onto. 
- cls.cInstance._prepareToForgetSingleton() - del cls.cInstance - _removeSingleton(cls) - except AttributeError: - # run up the chain of base classes until we find the one that has the instance - # and then delete it there - for baseClass in cls.__bases__: - if issubclass(baseClass, Singleton): - baseClass._forgetClassInstanceReferenceForTesting() - _forgetClassInstanceReferenceForTesting = classmethod(_forgetClassInstanceReferenceForTesting) - - -if __name__ == '__main__': - - import unittest - import time - - class singletonmixin_Public_TestCase(unittest.TestCase): - def testReturnsSameObject(self): - """ - Demonstrates normal use -- just call getInstance and it returns a singleton instance - """ - - class A(Singleton): - def __init__(self): - super(A, self).__init__() - - a1 = A.getInstance() - a2 = A.getInstance() - self.assertEquals(id(a1), id(a2)) - - def testInstantiateWithMultiArgConstructor(self): - """ - If the singleton needs args to construct, include them in the first - call to get instances. 
- """ - - class B(Singleton): - - def __init__(self, arg1, arg2): - super(B, self).__init__() - self.arg1 = arg1 - self.arg2 = arg2 - - b1 = B.getInstance('arg1 value', 'arg2 value') - b2 = B.getInstance() - self.assertEquals(b1.arg1, 'arg1 value') - self.assertEquals(b1.arg2, 'arg2 value') - self.assertEquals(id(b1), id(b2)) - - def testInstantiateWithKeywordArg(self): - - class B(Singleton): - - def __init__(self, arg1=5): - super(B, self).__init__() - self.arg1 = arg1 - - b1 = B.getInstance('arg1 value') - b2 = B.getInstance() - self.assertEquals(b1.arg1, 'arg1 value') - self.assertEquals(id(b1), id(b2)) - - def testTryToInstantiateWithoutNeededArgs(self): - - class B(Singleton): - - def __init__(self, arg1, arg2): - super(B, self).__init__() - self.arg1 = arg1 - self.arg2 = arg2 - - self.assertRaises(SingletonException, B.getInstance) - - def testPassTypeErrorIfAllArgsThere(self): - """ - Make sure the test for capturing missing args doesn't interfere with a normal TypeError. - """ - class B(Singleton): - - def __init__(self, arg1, arg2): - super(B, self).__init__() - self.arg1 = arg1 - self.arg2 = arg2 - raise TypeError, 'some type error' - - self.assertRaises(TypeError, B.getInstance, 1, 2) - - def testTryToInstantiateWithoutGetInstance(self): - """ - Demonstrates that singletons can ONLY be instantiated through - getInstance, as long as they call Singleton.__init__ during construction. - - If this check is not required, you don't need to call Singleton.__init__(). 
- """ - - class A(Singleton): - def __init__(self): - super(A, self).__init__() - - self.assertRaises(SingletonException, A) - - def testDontAllowNew(self): - - def instantiatedAnIllegalClass(): - class A(Singleton): - def __init__(self): - super(A, self).__init__() - - def __new__(metaclass, strName, tupBases, dct): - return super(MetaSingleton, metaclass).__new__(metaclass, strName, tupBases, dct) - - self.assertRaises(SingletonException, instantiatedAnIllegalClass) - - - def testDontAllowArgsAfterConstruction(self): - class B(Singleton): - - def __init__(self, arg1, arg2): - super(B, self).__init__() - self.arg1 = arg1 - self.arg2 = arg2 - - B.getInstance('arg1 value', 'arg2 value') - self.assertRaises(SingletonException, B, 'arg1 value', 'arg2 value') - - def test_forgetClassInstanceReferenceForTesting(self): - class A(Singleton): - def __init__(self): - super(A, self).__init__() - class B(A): - def __init__(self): - super(B, self).__init__() - - # check that changing the class after forgetting the instance produces - # an instance of the new class - a = A.getInstance() - assert a.__class__.__name__ == 'A' - A._forgetClassInstanceReferenceForTesting() - b = B.getInstance() - assert b.__class__.__name__ == 'B' - - # check that invoking the 'forget' on a subclass still deletes the instance - B._forgetClassInstanceReferenceForTesting() - a = A.getInstance() - B._forgetClassInstanceReferenceForTesting() - b = B.getInstance() - assert b.__class__.__name__ == 'B' - - def test_forgetAllSingletons(self): - # Should work if there are no singletons - forgetAllSingletons() - - class A(Singleton): - ciInitCount = 0 - def __init__(self): - super(A, self).__init__() - A.ciInitCount += 1 - - A.getInstance() - self.assertEqual(A.ciInitCount, 1) - - A.getInstance() - self.assertEqual(A.ciInitCount, 1) - - forgetAllSingletons() - A.getInstance() - self.assertEqual(A.ciInitCount, 2) - - def test_threadedCreation(self): - # Check that only one Singleton is created even if multiple 
- # threads try at the same time. If fails, would see assert in _addSingleton - class Test_Singleton(Singleton): - def __init__(self): - super(Test_Singleton, self).__init__() - - class Test_SingletonThread(threading.Thread): - def __init__(self, fTargetTime): - super(Test_SingletonThread, self).__init__() - self._fTargetTime = fTargetTime - self._eException = None - - def run(self): - try: - fSleepTime = self._fTargetTime - time.time() - if fSleepTime > 0: - time.sleep(fSleepTime) - Test_Singleton.getInstance() - except Exception, e: - self._eException = e - - fTargetTime = time.time() + 0.1 - lstThreads = [] - for _ in xrange(100): - t = Test_SingletonThread(fTargetTime) - t.start() - lstThreads.append(t) - eException = None - for t in lstThreads: - t.join() - if t._eException and not eException: - eException = t._eException - if eException: - raise eException - - def testNoInit(self): - """ - Demonstrates use with a class not defining __init__ - """ - - class A(Singleton): - pass - - #INTENTIONALLY UNDEFINED: - #def __init__(self): - # super(A, self).__init__() - - A.getInstance() #Make sure no exception is raised - - def testMultipleGetInstancesWithArgs(self): - - class A(Singleton): - - ignoreSubsequent = True - - def __init__(self, a, b=1): - pass - - a1 = A.getInstance(1) - a2 = A.getInstance(2) # ignores the second call because of ignoreSubsequent - - class B(Singleton): - - def __init__(self, a, b=1): - pass - - b1 = B.getInstance(1) - self.assertRaises(SingletonException, B.getInstance, 2) # No ignoreSubsequent included - - class C(Singleton): - - def __init__(self, a=1): - pass - - c1 = C.getInstance(a=1) - self.assertRaises(SingletonException, C.getInstance, a=2) # No ignoreSubsequent included - - def testInheritance(self): - """ - It's sometimes said that you can't subclass a singleton (see, for instance, - http://steve.yegge.googlepages.com/singleton-considered-stupid point e). This - test shows that at least rudimentary subclassing works fine for us. 
- """ - - class A(Singleton): - - def setX(self, x): - self.x = x - - def setZ(self, z): - raise NotImplementedError - - class B(A): - - def setX(self, x): - self.x = -x - - def setY(self, y): - self.y = y - - a = A.getInstance() - a.setX(5) - b = B.getInstance() - b.setX(5) - b.setY(50) - self.assertEqual((a.x, b.x, b.y), (5, -5, 50)) - self.assertRaises(AttributeError, eval, 'a.setY', {}, locals()) - self.assertRaises(NotImplementedError, b.setZ, 500) - - unittest.main() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/template.py b/shell/modules/template.py deleted file mode 100644 index fd4c1f0731..0000000000 --- a/shell/modules/template.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import re -from vars import Vars -from utils import * -from msg import * - -def get_var(l,key): - for s in l: - a = s.split() - if len(a) == 2 and a[0] == key: - return a[1] - return '' -def chk_var(l,key): - for s in l: - a = s.split() - if len(a) == 2 and a[0] == key and a[1]: - return True - return False -def chk_key(l,key): - for s in l: - a = s.split() - if len(a) >= 1 and a[0] == key: - return True - return False -def validate_template(l): - 'Test for required stuff in a template.' 
- if not chk_var(l,'%name'): - common_err("invalid template: missing '%name'") - return False - if not chk_key(l,'%generate'): - common_err("invalid template: missing '%generate'") - return False - g = l.index('%generate') - if not (chk_key(l[0:g],'%required') or chk_key(l[0:g],'%optional')): - common_err("invalid template: missing '%required' or '%optional'") - return False - return True -def fix_tmpl_refs(l,id,pfx): - for i in range(len(l)): - l[i] = l[i].replace(id,pfx) -def fix_tmpl_refs_re(l,regex,repl): - for i in range(len(l)): - l[i] = re.sub(regex,repl,l[i]) -class LoadTemplate(object): - ''' - Load a template and its dependencies, generate a - configuration file which should be relatively easy and - straightforward to parse. - ''' - edit_instructions = '''# Edit instructions: -# -# Add content only at the end of lines starting with '%%'. -# Only add content, don't remove or replace anything. -# The parameters following '%required' are not optional, -# unlike those following '%optional'. 
-# You may also add comments for future reference.''' - no_more_edit = '''# Don't edit anything below this line.''' - def __init__(self,name): - self.name = name - self.all_pre_gen = [] - self.all_post_gen = [] - self.all_pfx = [] - def new_pfx(self,name): - i = 1 - pfx = name - while pfx in self.all_pfx: - pfx = "%s_%d" % (name,i) - i += 1 - self.all_pfx.append(pfx) - return pfx - def generate(self): - return '\n'.join([ \ - "# Configuration: %s" % self.name, \ - '', \ - self.edit_instructions, \ - '', \ - '\n'.join(self.all_pre_gen), \ - self.no_more_edit, \ - '', \ - '%generate', \ - '\n'.join(self.all_post_gen)]) - def write_config(self,name): - try: - f = open("%s/%s" % (vars.tmpl_conf_dir, name),"w") - except IOError,msg: - common_err("open: %s"%msg) - return False - print >>f, self.generate() - f.close() - return True - def load_template(self,tmpl): - try: - f = open("%s/%s" % (vars.tmpl_dir, tmpl)) - except IOError,msg: - common_err("open: %s"%msg) - return '' - l = (''.join(f)).split('\n') - if not validate_template(l): - return '' - common_info("pulling in template %s" % tmpl) - g = l.index('%generate') - pre_gen = l[0:g] - post_gen = l[g+1:] - name = get_var(pre_gen,'%name') - for s in l[0:g]: - if s.startswith('%depends_on'): - a = s.split() - if len(a) != 2: - common_warn("%s: wrong usage" % s) - continue - tmpl_id = a[1] - tmpl_pfx = self.load_template(a[1]) - if tmpl_pfx: - fix_tmpl_refs(post_gen,'%'+tmpl_id,'%'+tmpl_pfx) - pfx = self.new_pfx(name) - fix_tmpl_refs(post_gen, '%_:', '%'+pfx+':') - # replace remaining %_, it may be useful at times - fix_tmpl_refs(post_gen, '%_', pfx) - v_idx = pre_gen.index('%required') or pre_gen.index('%optional') - pre_gen.insert(v_idx,'%pfx ' + pfx) - self.all_pre_gen += pre_gen - self.all_post_gen += post_gen - return pfx - def post_process(self, params): - pfx_re = '(%s)' % '|'.join(self.all_pfx) - for n in params: - fix_tmpl_refs(self.all_pre_gen, '%% '+n, "%% "+n+" "+params[n]) - 
fix_tmpl_refs_re(self.all_post_gen, \ - '%'+pfx_re+'([^:]|$)', r'\1\2') - # process %if ... [%else] ... %fi - rmidx_l = [] - if_seq = False - for i in range(len(self.all_post_gen)): - s = self.all_post_gen[i] - if if_seq: - a = s.split() - if len(a) >= 1 and a[0] == '%fi': - if_seq = False - rmidx_l.append(i) - elif len(a) >= 1 and a[0] == '%else': - outcome = not outcome - rmidx_l.append(i) - else: - if not outcome: - rmidx_l.append(i) - continue - if not s: - continue - a = s.split() - if len(a) == 2 and a[0] == '%if': - outcome = not a[1].startswith('%') # not replaced -> false - if_seq = True - rmidx_l.append(i) - rmidx_l.reverse() - for i in rmidx_l: - del self.all_post_gen[i] - -vars = Vars.getInstance() - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/term.py b/shell/modules/term.py deleted file mode 100644 index cc69007e7b..0000000000 --- a/shell/modules/term.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import sys -import re -from singletonmixin import Singleton - -# from: http://code.activestate.com/recipes/475116/ - -class TerminalController(Singleton): - """ - A class that can be used to portably generate formatted output to - a terminal. 
- `TerminalController` defines a set of instance variables whose - values are initialized to the control sequence necessary to - perform a given action. These can be simply included in normal - output to the terminal: - >>> term = TerminalController() - >>> print 'This is '+term.GREEN+'green'+term.NORMAL - Alternatively, the `render()` method can used, which replaces - '${action}' with the string required to perform 'action': - >>> term = TerminalController() - >>> print term.render('This is ${GREEN}green${NORMAL}') - If the terminal doesn't support a given action, then the value of - the corresponding instance variable will be set to ''. As a - result, the above code will still work on terminals that do not - support color, except that their output will not be colored. - Also, this means that you can test whether the terminal supports a - given action by simply testing the truth value of the - corresponding instance variable: - >>> term = TerminalController() - >>> if term.CLEAR_SCREEN: - ... print 'This terminal supports clearning the screen.' - Finally, if the width and height of the terminal are known, then - they will be stored in the `COLS` and `LINES` attributes. - """ - # Cursor movement: - BOL = '' #: Move the cursor to the beginning of the line - UP = '' #: Move the cursor up one line - DOWN = '' #: Move the cursor down one line - LEFT = '' #: Move the cursor left one char - RIGHT = '' #: Move the cursor right one char - # Deletion: - CLEAR_SCREEN = '' #: Clear the screen and move to home position - CLEAR_EOL = '' #: Clear to the end of the line. - CLEAR_BOL = '' #: Clear to the beginning of the line. 
- CLEAR_EOS = '' #: Clear to the end of the screen - # Output modes: - BOLD = '' #: Turn on bold mode - BLINK = '' #: Turn on blink mode - DIM = '' #: Turn on half-bright mode - REVERSE = '' #: Turn on reverse-video mode - NORMAL = '' #: Turn off all modes - # Cursor display: - HIDE_CURSOR = '' #: Make the cursor invisible - SHOW_CURSOR = '' #: Make the cursor visible - # Terminal size: - COLS = None #: Width of the terminal (None for unknown) - LINES = None #: Height of the terminal (None for unknown) - # Foreground colors: - BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = '' - # Background colors: - BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = '' - BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = '' - _STRING_CAPABILITIES = """ - BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1 - CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold - BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0 - HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split() - _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split() - _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split() - def __init__(self, term_stream=sys.stdout): - """ - Create a `TerminalController` and initialize its attributes - with appropriate values for the current terminal. - `term_stream` is the stream that will be used for terminal - output; if this stream is not a tty, then the terminal is - assumed to be a dumb terminal (i.e., have no capabilities). - """ - # Curses isn't available on all platforms - try: import curses - except: - sys.stderr.write("INFO: no curses support: you won't see colors\n") - return - # If the stream isn't a tty, then assume it has no capabilities. - if not term_stream.isatty(): return - # Check the terminal type. If we fail, then assume that the - # terminal has no capabilities. - try: curses.setupterm() - except: return - # Look up numeric capabilities. 
- self.COLS = curses.tigetnum('cols') - self.LINES = curses.tigetnum('lines') - # Look up string capabilities. - for capability in self._STRING_CAPABILITIES: - (attrib, cap_name) = capability.split('=') - setattr(self, attrib, self._tigetstr(cap_name) or '') - # Colors - set_fg = self._tigetstr('setf') - if set_fg: - for i,color in zip(range(len(self._COLORS)), self._COLORS): - setattr(self, color, curses.tparm(set_fg, i) or '') - set_fg_ansi = self._tigetstr('setaf') - if set_fg_ansi: - for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS): - setattr(self, color, curses.tparm(set_fg_ansi, i) or '') - set_bg = self._tigetstr('setb') - if set_bg: - for i,color in zip(range(len(self._COLORS)), self._COLORS): - setattr(self, 'BG_'+color, curses.tparm(set_bg, i) or '') - set_bg_ansi = self._tigetstr('setab') - if set_bg_ansi: - for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS): - setattr(self, 'BG_'+color, curses.tparm(set_bg_ansi, i) or '') - def _tigetstr(self, cap_name): - # String capabilities can include "delays" of the form "$<2>". - # For any modern terminal, we should be able to just ignore - # these, so strip them out. - import curses - cap = curses.tigetstr(cap_name) or '' - return re.sub(r'\$<\d+>[/*]?', '', cap) - def render(self, template): - """ - Replace each $-substitutions in the given template string with - the corresponding terminal control string (if it's defined) or - '' (if it's not). 
- """ - return re.sub(r'\$\$|\${\w+}', self._render_sub, template) - def _render_sub(self, match): - s = match.group() - if s == '$$': return s - else: return getattr(self, s[2:-1]) - def is_color(self, s): - try: - attr = getattr(self, s.upper()) - return attr != None - except: return False - -# vim:ts=4:sw=4:et: diff --git a/shell/modules/ui.py.in b/shell/modules/ui.py.in deleted file mode 100644 index a4ecb45f68..0000000000 --- a/shell/modules/ui.py.in +++ /dev/null @@ -1,1730 +0,0 @@ -# Copyright (C) 2008 Dejan Muhamedagic -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# - -import sys -import re -import os -import shlex -import time -import bz2 - -from help import HelpSystem, cmd_help -from vars import Vars -from levels import Levels -from cibconfig import mkset_obj, CibFactory -from cibstatus import CibStatus -from template import LoadTemplate -from cliformat import nvpairs2list -from ra import * -from msg import * -from utils import * -from xmlutil import * - -def cmd_end(cmd,dir = ".."): - "Go up one level." 
- levels.droplevel() -def cmd_exit(cmd,rc = 0): - "Exit the crm program" - cmd_end(cmd) - if options.interactive and not options.batch: - print "bye" - try: - from readline import write_history_file - write_history_file(vars.hist_file) - except: - pass - for f in vars.tmpfiles: - os.unlink(f) - sys.exit(rc) - -class UserInterface(object): - ''' - Stuff common to all user interface classes. - ''' - global_cmd_aliases = { - "quit": ("bye","exit"), - "end": ("cd","up"), - } - def __init__(self): - self.help_table = odict() - self.cmd_table = odict() - self.cmd_table["help"] = (self.help,(0,1),0,0) - self.cmd_table["quit"] = (self.exit,(0,0),0,0) - self.cmd_table["end"] = (self.end,(0,1),0,0) - self.cmd_aliases = self.global_cmd_aliases.copy() - if options.interactive: - self.help_table = help_sys.load_level(self.lvl_name) - def end_game(self, no_questions_asked = False): - pass - def help(self,cmd,topic = ''): - "usage: help []" - if not self.help_table: - self.help_table = help_sys.load_level(self.lvl_name) - setup_help_aliases(self) - cmd_help(self.help_table,topic) - def end(self,cmd,dir = ".."): - "usage: end" - self.end_game() - cmd_end(cmd,dir) - def exit(self,cmd): - "usage: exit" - self.end_game() - cmd_exit(cmd) - -class CliOptions(UserInterface): - ''' - Manage user preferences - ''' - lvl_name = "options" - desc_short = "user preferences" - desc_long = """ -Several user preferences are available. Note that it is possible -to save the preferences to a startup file. 
-""" - def __init__(self): - UserInterface.__init__(self) - self.cmd_table["skill-level"] = (self.set_skill_level,(1,1),0,0) - self.cmd_table["editor"] = (self.set_editor,(1,1),0,0) - self.cmd_table["pager"] = (self.set_pager,(1,1),0,0) - self.cmd_table["user"] = (self.set_crm_user,(0,1),0,0) - self.cmd_table["output"] = (self.set_output,(1,1),0,0) - self.cmd_table["colorscheme"] = (self.set_colors,(1,1),0,0) - self.cmd_table["check-frequency"] = (self.set_check_frequency,(1,1),0,0) - self.cmd_table["check-mode"] = (self.set_check_mode,(1,1),0,0) - self.cmd_table["sort-elements"] = (self.set_sort_elements,(1,1),0,0) - self.cmd_table["wait"] = (self.set_wait,(1,1),0,0) - self.cmd_table["save"] = (self.save_options,(0,0),0,0) - self.cmd_table["show"] = (self.show_options,(0,0),0,0) - setup_aliases(self) - def set_skill_level(self,cmd,skill_level): - """usage: skill-level - level: operator | administrator | expert""" - return user_prefs.set_skill_level(skill_level) - def set_editor(self,cmd,prog): - "usage: editor " - return user_prefs.set_editor(prog) - def set_pager(self,cmd,prog): - "usage: pager " - return user_prefs.set_pager(prog) - def set_crm_user(self,cmd,user = ''): - "usage: user []" - return user_prefs.set_crm_user(user) - def set_output(self,cmd,otypes): - "usage: output " - return user_prefs.set_output(otypes) - def set_colors(self,cmd,scheme): - "usage: colorscheme " - return user_prefs.set_colors(scheme) - def set_check_frequency(self,cmd,freq): - "usage: check-frequence " - return user_prefs.set_check_freq(freq) - def set_check_mode(self,cmd,mode): - "usage: check-mode " - return user_prefs.set_check_mode(mode) - def set_sort_elements(self,cmd,opt): - "usage: sort-elements {yes|no}" - if not verify_boolean(opt): - common_err("%s: bad boolean option"%opt) - return True - return user_prefs.set_sort_elems(opt) - def set_wait(self,cmd,opt): - "usage: wait {yes|no}" - if not verify_boolean(opt): - common_err("%s: bad boolean option"%opt) - return True - 
return user_prefs.set_wait(opt) - def show_options(self,cmd): - "usage: show" - return user_prefs.write_rc(sys.stdout) - def save_options(self,cmd): - "usage: save" - return user_prefs.save_options(vars.rc_file) - def end_game(self, no_questions_asked = False): - if no_questions_asked and not options.interactive: - self.save_options("save") - -class CibShadow(UserInterface): - ''' - CIB shadow management class - ''' - lvl_name = "cib" - desc_short = "manage shadow CIBs" - desc_long = """ -A shadow CIB is a regular cluster configuration which is kept in -a file. The CRM and the CRM tools may manage a shadow CIB in the -same way as the live CIB (i.e. the current cluster configuration). -A shadow CIB may be applied to the cluster in one step. -""" - extcmd = ">/dev/null &1" % self.extcmd) - except os.error: - no_prog_err(self.extcmd) - return False - return True - def new(self,cmd,name,*args): - "usage: new [withstatus] [force] [empty]" - if not is_filename_sane(name): - return False - for par in args: - if not par in ("force","--force","withstatus","empty"): - syntax_err((cmd,name,par), context = 'new') - return False - if "empty" in args: - new_cmd = "%s -e '%s'" % (self.extcmd,name) - else: - new_cmd = "%s -c '%s'" % (self.extcmd,name) - if user_prefs.get_force() or "force" in args or "--force" in args: - new_cmd = "%s --force" % new_cmd - if ext_cmd(new_cmd) == 0: - common_info("%s shadow CIB created"%name) - self.use("use",name) - if "withstatus" in args: - cib_status.load("shadow:%s" % name) - def _find_pe(self,infile): - 'Find a pe input' - for p in ("%s/%s", "%s/%s.bz2", "%s/pe-*-%s.bz2"): - fl = glob.glob(p % (vars.pe_dir,infile)) - if fl: - break - if not fl: - common_err("no %s pe input file"%infile) - return '' - if len(fl) > 1: - common_err("more than one %s pe input file: %s" % \ - (infile,' '.join(fl))) - return '' - return fl[0] - def pe_import(self,cmd,infile,name = None): - "usage: import {|} []" - if name and not is_filename_sane(name): - return 
False - # where's the input? - if not os.access(infile,os.F_OK): - if "/" in infile: - common_err("%s: no such file"%infile) - return False - infile = self._find_pe(infile) - if not infile: - return False - if not name: - name = os.path.basename(infile) - # read input - try: - f = open(infile) - except IOError,msg: - common_err("open: %s"%msg) - return - s = ''.join(f) - f.close() - # decompresed and rename shadow if it ends with .bz2 - if infile.endswith(".bz2"): - name = name.replace(".bz2","") - s = bz2.decompress(s) - # copy input to the shadow - try: - f = open(shadowfile(name), "w") - except IOError,msg: - common_err("open: %s"%msg) - return - f.write(s) - f.close() - # use the shadow and load the status from there - return self.use("use",name,"withstatus") - def delete(self,cmd,name): - "usage: delete " - if not is_filename_sane(name): - return False - if vars.cib_in_use == name: - common_err("%s shadow CIB is in use"%name) - return False - if ext_cmd("%s -D '%s' --force" % (self.extcmd,name)) == 0: - common_info("%s shadow CIB deleted"%name) - else: - common_err("failed to delete %s shadow CIB"%name) - return False - def reset(self,cmd,name): - "usage: reset " - if not is_filename_sane(name): - return False - if ext_cmd("%s -r '%s'" % (self.extcmd,name)) == 0: - common_info("copied live CIB to %s"%name) - else: - common_err("failed to copy live CIB to %s"%name) - return False - def commit(self,cmd,name): - "usage: commit " - if not is_filename_sane(name): - return False - if ext_cmd("%s -C '%s' --force" % (self.extcmd,name)) == 0: - common_info("commited '%s' shadow CIB to the cluster"%name) - else: - common_err("failed to commit the %s shadow CIB"%name) - return False - def diff(self,cmd): - "usage: diff" - s = get_stdout(add_sudo("%s -d" % self.extcmd_stdout)) - page_string(s) - def list(self,cmd): - "usage: list" - if options.regression_tests: - for t in listshadows(): - print t - else: - multicolumn(listshadows()) - def _use(self,name,withstatus): - # 
Choose a shadow cib for further changes. If the name - # provided is empty, then choose the live (cluster) cib. - # Don't allow ' in shadow names - if not name or name == "live": - os.unsetenv(vars.shadow_envvar) - vars.cib_in_use = "" - if withstatus: - cib_status.load("live") - else: - os.putenv(vars.shadow_envvar,name) - vars.cib_in_use = name - if withstatus: - cib_status.load("shadow:%s" % name) - def use(self,cmd,name = '', withstatus = ''): - "usage: use [] [withstatus]" - # check the name argument - if name and not is_filename_sane(name): - return False - if name and name != "live": - if not os.access(shadowfile(name),os.F_OK): - common_err("%s: no such shadow CIB"%name) - return False - if withstatus and withstatus != "withstatus": - syntax_err((cmd,withstatus), context = 'use') - return False - # If invoked from configure - # take special precautions - try: - prev_level = levels.previous().myname() - except: - prev_level = '' - if prev_level != "cibconfig": - self._use(name,withstatus) - return True - if not cib_factory.has_cib_changed(): - self._use(name,withstatus) - # new CIB: refresh the CIB factory - cib_factory.refresh() - return True - saved_cib = vars.cib_in_use - self._use(name,'') # don't load the status yet - if not cib_factory.is_current_cib_equal(silent = True): - # user made changes and now wants to switch to a - # different and unequal CIB; we refuse to cooperate - common_err("the requested CIB is different from the current one") - if user_prefs.get_force(): - common_info("CIB overwrite forced") - elif not ask("All changes will be dropped. Do you want to proceed?"): - self._use(saved_cib,'') # revert to the previous CIB - return False - self._use(name,withstatus) # now load the status too - return True - -def check_transition(inp,state,possible_l): - if not state in possible_l: - common_err("input (%s) in wrong state %s" % (inp,state)) - return False - return True -class Template(UserInterface): - ''' - Configuration templates. 
- ''' - lvl_name = "template" - def __init__(self): - UserInterface.__init__(self) - self.cmd_table["new"] = (self.new,(2,),1,0) - self.cmd_table["load"] = (self.load,(0,1),1,0) - self.cmd_table["edit"] = (self.edit,(0,1),1,0) - self.cmd_table["delete"] = (self.delete,(1,2),1,0) - self.cmd_table["show"] = (self.show,(0,1),0,0) - self.cmd_table["apply"] = (self.apply,(0,2),1,0) - self.cmd_table["list"] = (self.list,(0,1),0,0) - setup_aliases(self) - self.init_dir() - self.curr_conf = '' - def init_dir(self): - '''Create the conf directory, link to templates''' - if not os.path.isdir(vars.tmpl_conf_dir): - try: - os.makedirs(vars.tmpl_conf_dir) - except os.error,msg: - common_err("makedirs: %s"%msg) - return - def get_depends(self,tmpl): - '''return a list of required templates''' - # Not used. May need it later. - try: - tf = open("%s/%s" % (vars.tmpl_dir, tmpl),"r") - except IOError,msg: - common_err("open: %s"%msg) - return - l = [] - for s in tf: - a = s.split() - if len(a) >= 2 and a[0] == '%depends_on': - l += a[1:] - tf.close() - return l - def replace_params(self,s,user_data): - change = False - for i in range(len(s)): - word = s[i] - for p in user_data: - # is parameter in the word? - pos = word.find('%' + p) - if pos < 0: - continue - endpos = pos + len('%' + p) - # and it isn't part of another word? 
- if re.match("[A-Za-z0-9]", word[endpos:endpos+1]): - continue - # if the value contains a space or - # it is a value of an attribute - # put quotes around it - if user_data[p].find(' ') >= 0 or word[pos-1:pos] == '=': - v = '"' + user_data[p] + '"' - else: - v = user_data[p] - word = word.replace('%' + p, v) - change = True # we did replace something - if change: - s[i] = word - if 'opt' in s: - if not change: - s = [] - else: - s.remove('opt') - return s - def generate(self,l,user_data): - '''replace parameters (user_data) and generate output - ''' - l2 = [] - for piece in l: - piece2 = [] - for s in piece: - s = self.replace_params(s,user_data) - if s: - piece2.append(' '.join(s)) - if piece2: - l2.append(' \\\n\t'.join(piece2)) - return '\n'.join(l2) - def process(self,config = ''): - '''Create a cli configuration from the current config''' - try: - f = open("%s/%s" % (vars.tmpl_conf_dir, config or self.curr_conf),'r') - except IOError,msg: - common_err("open: %s"%msg) - return '' - l = [] - piece = [] - user_data = {} - # states - START = 0; PFX = 1; DATA = 2; GENERATE = 3 - state = START - err_buf.start_tmp_lineno() - rc = True - for inp in f: - err_buf.incr_lineno() - if inp.startswith('#'): - continue - if type(inp) == type(u''): - inp = inp.encode('ascii') - inp = inp.strip() - try: - s = shlex.split(inp) - except ValueError, msg: - common_err(msg) - continue - while '\n' in s: - s.remove('\n') - if not s: - if state == GENERATE and piece: - l.append(piece) - piece = [] - elif s[0] in ("%name","%depends_on","%suggests"): - continue - elif s[0] == "%pfx": - if check_transition(inp,state,(START,DATA)) and len(s) == 2: - pfx = s[1] - state = PFX - elif s[0] == "%required": - if check_transition(inp,state,(PFX,)): - state = DATA - data_reqd = True - elif s[0] == "%optional": - if check_transition(inp,state,(PFX,DATA)): - state = DATA - data_reqd = False - elif s[0] == "%%": - if state != DATA: - common_warn("user data in wrong state %s" % state) - if len(s) < 
2: - common_warn("parameter name missing") - elif len(s) == 2: - if data_reqd: - common_err("required parameter %s not set" % s[1]) - rc = False - elif len(s) == 3: - user_data["%s:%s" % (pfx,s[1])] = s[2] - else: - common_err("%s: syntax error" % inp) - elif s[0] == "%generate": - if check_transition(inp,state,(DATA,)): - state = GENERATE - piece = [] - elif state == GENERATE: - if s: - piece.append(s) - else: - common_err("<%s> unexpected" % inp) - if piece: - l.append(piece) - err_buf.stop_tmp_lineno() - f.close() - if not rc: - return '' - return self.generate(l,user_data) - def new(self,cmd,name,*args): - "usage: new