diff --git a/.gitignore b/.gitignore index 23bd6c9326..d87273f551 100644 --- a/.gitignore +++ b/.gitignore @@ -1,155 +1,156 @@ # Common GPATH GRTAGS GTAGS TAGS Makefile Makefile.in .deps .libs *.pc *.pyc *.bz2 *.rpm *.la *.lo *.o *~ # Autobuild aclocal.m4 autoconf autoheader autom4te.cache/ automake build.counter compile config.guess config.log config.status config.sub configure depcomp install-sh include/stamp-* libltdl.tar libtool libtool.m4 ltdl.m4 ltmain.sh missing py-compile m4/ltoptions.m4 m4/ltsugar.m4 m4/ltversion.m4 m4/lt~obsolete.m4 # Configure targets cts/CTSvars.py cts/LSBDummy cts/benchmark/clubench include/config.h include/config.h.in include/crm_config.h mcp/pacemaker mcp/pacemaker.service pengine/regression.core.sh publican.cfg shell/modules/help.py shell/modules/ra.py shell/modules/ui.py shell/modules/vars.py tools/coverage.sh tools/crm_report lrmd/regression.py +fencing/regression.py # Build targets *.7 *.7.xml *.7.html *.8 *.8.xml *.8.html doc/*/en-US/images/*.png doc/*/tmp/** doc/*/publish cib/cib cib/cibmon cib/cibpipe crmd/atest crmd/crmd doc/Clusters_from_Scratch.txt doc/Pacemaker_Explained.txt doc/acls.html doc/crm_fencing.html fencing/stonith-test fencing/stonith_admin fencing/stonithd fencing/stonithd.xml lrmd/lrmd lrmd/lrmd_test mcp/pacemakerd pengine/pengine pengine/pengine.xml pengine/ptest shell/regression/testcases/confbasic-xml.filter scratch tools/attrd tools/attrd_updater tools/cibadmin tools/crm_attribute tools/crm_diff tools/crm_mon tools/crm_node tools/crm_resource tools/crm_shadow tools/crm_simulate tools/crm_uuid tools/crm_verify tools/crmadmin tools/iso8601 tools/crm_ticket tools/report.collector.1 xml/crm.dtd xml/pacemaker.rng extra/rgmanager/ccs2cib extra/rgmanager/ccs_flatten extra/rgmanager/disable_rgmanager doc/Clusters_from_Scratch/en-US/Ap-Configuration.xml doc/Clusters_from_Scratch/en-US/Ap-Corosync-Conf.xml doc/Clusters_from_Scratch/en-US/Ap-Reading.xml doc/Clusters_from_Scratch/en-US/Ch-Active-Active.xml doc/Clusters_from_Scratch/en-US/Ch-Active-Passive.xml doc/Clusters_from_Scratch/en-US/Ch-Apache.xml doc/Clusters_from_Scratch/en-US/Ch-Installation.xml doc/Clusters_from_Scratch/en-US/Ch-Intro.xml doc/Clusters_from_Scratch/en-US/Ch-Shared-Storage.xml doc/Clusters_from_Scratch/en-US/Ch-Stonith.xml doc/Clusters_from_Scratch/en-US/Ch-Tools.xml doc/Clusters_from_Scratch/en-US/Ch-Verification.xml doc/Pacemaker_Explained/en-US/Ch-Basics.xml doc/Pacemaker_Explained/en-US/Ch-Options.xml lib/gnu/libgnu.a lib/gnu/stdalign.h #Other mock HTML pacemaker.spec pengine/.regression.failed.diff ClusterLabs-pacemaker-*.tar.gz coverity-* compat_reports .ABI-build abi_dumps logs *.patch *.diff *.sed *.orig *.rej *.swp pengine/test10/shadow.* diff --git a/TODO.markdown b/TODO.markdown index ae5f2a1b3f..c3096062ce 100644 --- a/TODO.markdown +++ b/TODO.markdown @@ -1,53 +1,52 @@ # Semi-random collection of tasks we'd like to get done ## Targeted for 1.2 - Move location of cib.xml to somewhere more appropriate - Avoid the use of xmlNode in fencing register_callback() call types - Need a way to indicate when unfencing operations need to be initiated from the host to be unfenced ## Targeted for 1.2.x - Check for uppercase letters in node names, warn if found - Imply startup-failure-is-fatal from on-fail="restart" - Show an english version of the config with crm_resource --rules - Convert cts/CIB.py into a supported Python API for the CIB - Use crm_log_tag() in the PE to allow per-resource trace logging - Reduce the amount of stonith-ng logging - Reduce the amount of 
attrd logging - Use dlopen for snmp in crm_mon ## Targeted for 1.4 - Support A colocated with (B || C || D) - Implement a truely atomic version of attrd - Support rolling average values in attrd - Support heartbeat with the mcp - Freeze/Thaw - Create Pacemaker plugin for snmpd - http://www.net-snmp.org/ - Investigate using a DB as the back-end for the CIB - Decide whether to fully support or drop failover domains # Testing -- Write a regression test for Stonith-NG - Convert BandwidthTest CTS test into a Scenario wrapper - find_operations() is not covered by PE regression tests - Some node states in determine_online_status_fencing() are untested by PE regression tests - no_quorum_policy==suicide is not covered by PE regression tests - parse_xml_duration() is not covered by PE regression tests - phase_of_the_moon() is not covered by PE regression tests - test_role_expression() is not covered by PE regression tests - native_parameter() is not covered by PE regression tests - clone_resource_state() is not covered by PE regression tests - clone_active() is not covered by PE regression tests - convert_non_atomic_task() in native.c is not covered by PE regression tests - clone_rsc_colocation_lh() is not covered by PE regression tests - group_rsc_colocation_lh() is not covered by PE regression tests - Test on-fail=standby # Documentation - Clusters from Scratch: Mail - Clusters from Scratch: MySQL - Document reload in Pacemaker Explained - Document advanced fencing logic in Pacemaker Explained - Use ann:defaultValue="..." instead of in the schema more often - Allow Clusters from Scratch to be built in two flavors - pcs and crm shell diff --git a/configure.ac b/configure.ac index c01fe0b558..03132082d9 100644 --- a/configure.ac +++ b/configure.ac @@ -1,1773 +1,1774 @@ dnl dnl autoconf for Pacemaker dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.59) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services AC_INIT(pacemaker, 1.1.7, pacemaker@oss.clusterlabs.org) CRM_DTD_VERSION="1.2" PCMK_FEATURES="" HB_PKG=heartbeat AC_CONFIG_AUX_DIR(.) AC_CANONICAL_HOST dnl Where #defines go (e.g. 
`AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except lha_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/crm_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/crm_config.h.in to have configure include dnl new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/crm_config.h) ALL_LINGUAS="en fr" AC_ARG_WITH(version, [ --with-version=version Override package version (if you're a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) AM_INIT_AUTOMAKE($PACKAGE_NAME, $PACKAGE_VERSION) AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$PACKAGE_VERSION", Current pacemaker version) PACKAGE_SERIES=`echo $PACKAGE_VERSION | awk -F. '{ print $1"."$2 }'` AC_SUBST(PACKAGE_SERIES) AC_SUBST(PACKAGE_VERSION) dnl automake >= 1.11 offers --enable-silent-rules for suppressing the output from dnl normal compilation. When a failure occurs, it will then display the full dnl command line dnl Wrap in m4_ifdef to avoid breaking on older platforms m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])]) dnl Example 2.4. Silent Custom Rule to Generate a File dnl %-bar.pc: %.pc dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@ CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== AC_PROG_CC dnl Can force other with environment variable "CC". AM_PROG_CC_C_O AC_PROG_CC_STDC gl_EARLY gl_INIT AC_LIBTOOL_DLOPEN dnl Enable dlopen support... AC_LIBLTDL_CONVENIENCE dnl make libltdl a convenience lib AC_PROG_LIBTOOL AC_PROG_YACC AM_PROG_LEX AC_C_STRINGIZE AC_TYPE_SIZE_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CFLAGS="$@" AC_MSG_CHECKING(whether $CC supports "$@") AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])], [RC=0; AC_MSG_RESULT(yes)],[RC=1; AC_MSG_RESULT(no)]) return $RC } try_extract_header_define() { AC_MSG_CHECKING(if $2 in $1 exists. 
If not defaulting to $3) Cfile=$srcdir/extract_define.$2.${$} printf "#include \n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) {\n" >> ${Cfile}.c printf "#ifdef %s\n" $2 >> ${Cfile}.c printf "printf(\"%%s\", %s);\n" $2 >> ${Cfile}.c printf "#endif \n return 0; }\n" >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` if test x"${value}" == x""; then value=$3 fi AC_MSG_RESULT($value) printf $value rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM ${Cfile}.gcno } extract_header_define() { AC_MSG_CHECKING(for $2 in $1) Cfile=$srcdir/extract_define.$2.${$} printf "#include \n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` AC_MSG_RESULT($value) printf $value rm -rf ${Cfile}.c ${Cfile} ${Cfile}.dSYM ${Cfile}.gcno } dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi force GCC to compile to ANSI/ANSI standard for older compilers. [default=no]]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]]) AC_ARG_ENABLE([quiet], [ --enable-quiet Supress make output unless there is an error [default=no]]) AC_ARG_ENABLE([thread-safe], [ --enable-thread-safe Enable some client libraries to be thread safe. [default=no]]) AC_ARG_ENABLE([bundled-ltdl], [ --enable-bundled-ltdl Configure, build and install the standalone ltdl library bundled with ${PACKAGE} [default=no]]) LTDL_LIBS="" AC_ARG_ENABLE([no-stack], [ --enable-no-stack Only build the Policy Engine and pieces needed to support it [default=no]]) AC_ARG_ENABLE([upstart], [ --enable-upstart Do not build support for the Upstart init system [default=yes]]) AC_ARG_ENABLE([systemd], [ --enable-systemd Do not build support for the Systemd init system [default=yes]]) AC_ARG_WITH(ais, [ --with-ais Support the Corosync messaging and membership layer ], [ SUPPORT_CS=$withval ], [ SUPPORT_CS=try ], ) AC_ARG_WITH(corosync, [ --with-corosync Support the Corosync messaging and membership layer ], [ SUPPORT_CS=$withval ] dnl initialized in AC_ARG_WITH(ais...) 
already, dnl don't reset to try if it was given as --without-ais ) AC_ARG_WITH(heartbeat, [ --with-heartbeat Support the Heartbeat messaging and membership layer ], [ SUPPORT_HEARTBEAT=$withval ], [ SUPPORT_HEARTBEAT=try ], ) AC_ARG_WITH(cman, [ --with-cman Support the consumption of membership and quorum from cman ], [ SUPPORT_CMAN=$withval ], [ SUPPORT_CMAN=try ], ) AC_ARG_WITH(cpg, [ --with-cs-quorum Support the consumption of membership and quorum from corosync ], [ SUPPORT_CS_QUORUM=$withval ], [ SUPPORT_CS_QUORUM=try ], ) AC_ARG_WITH(snmp, [ --with-snmp Support the SNMP protocol ], [ SUPPORT_SNMP=$withval ], [ SUPPORT_SNMP=try ], ) AC_ARG_WITH(esmtp, [ --with-esmtp Support the sending mail notifications with the esmtp library ], [ SUPPORT_ESMTP=$withval ], [ SUPPORT_ESMTP=try ], ) AC_ARG_WITH(acl, [ --with-acl Support CIB ACL ], [ SUPPORT_ACL=$withval ], [ SUPPORT_ACL=no ], ) CSPREFIX="" AC_ARG_WITH(ais-prefix, [ --with-ais-prefix=DIR Prefix used when Corosync was installed [$prefix]], [ CSPREFIX=$withval ], [ CSPREFIX=$prefix ]) LCRSODIR="" AC_ARG_WITH(lcrso-dir, [ --with-lcrso-dir=DIR Corosync lcrso files. ], [ LCRSODIR="$withval" ]) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]], [ INITDIR="$withval" ]) SUPPORT_PROFILING=0 AC_ARG_WITH(profiling, [ --with-profiling Support gprof profiling ], [ SUPPORT_PROFILING=$withval ]) SUPPORT_GCOV=0 AC_ARG_WITH(gcov, [ --with-gcov Support gcov coverage testing ], [ SUPPORT_GCOV=$withval ]) PUBLICAN_BRAND="common" AC_ARG_WITH(brand, [ --with-brand=brand Brand to use for generated documentation [$PUBLICAN_BRAND]], [ PUBLICAN_BRAND="$withval" ]) AC_SUBST(PUBLICAN_BRAND) dnl =============================================== dnl General Processing dnl =============================================== AC_SUBST(HB_PKG) INIT_EXT="" echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Heartbeat, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac AC_MSG_NOTICE(Sanitizing ais_prefix: ${CSPREFIX}) case $CSPREFIX in dnl For consistency with Heartbeat, map NONE->$prefix NONE) CSPREFIX=$prefix;; prefix) CSPREFIX=$prefix;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done AC_MSG_RESULT($INITDIR);; esac AC_SUBST(INITDIR) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in dnl For consistency with Heartbeat, map NONE->$prefix *prefix*|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac dnl Expand autoconf variables so that we dont end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... 
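A minimal standalone sketch of the nested eval idiom described in the comment above (the variable values here are hypothetical and not taken from this file):

[source,Bash]
--------
#!/bin/sh
# Hypothetical values: exec_prefix and bindir hold literal, unexpanded
# ${...} references, just as autoconf leaves them.
prefix=/usr
exec_prefix='${prefix}'
bindir='${exec_prefix}/bin'

# The inner eval resolves one level of indirection and the outer eval
# resolves the rest -- the same idiom applied below to each directory.
eval bindir="`eval echo ${bindir}`"

echo "$bindir"    # prints /usr/bin
--------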
eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl Home-grown variables eval INITDIR="${INITDIR}" eval docdir="`eval echo ${docdir}`" if test x"${docdir}" = x""; then docdir=${datadir}/doc/${PACKAGE}-${VERSION} #docdir=${datadir}/doc/packages/${PACKAGE} fi AC_SUBST(docdir) for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". case "$host_os" in *bsd*) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" INIT_EXT=".sh" ;; *solaris*) ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) CFLAGS="$CFLAGS -I${prefix}/include" ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac dnl Eventually remove this CFLAGS="$CFLAGS -I${prefix}/include/heartbeat" AC_SUBST(INIT_EXT) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) ac_save_CFLAGS=$CFLAGS CFLAGS="-Wall -Werror" AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [ #include #include #include ], [ int max = 512; uint64_t bignum = 42; char *buffer = malloc(max); const char *random = "random"; snprintf(buffer, max-1, "", bignum, random); fprintf(stderr, "Result: %s\n", buffer); ] )], [U64T="%lu"], [U64T="%llu"] ) CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH dnl Replacing AC_PROG_LIBTOOL with AC_CHECK_PROG because LIBTOOL dnl was NOT being expanded all the time thus causing things to fail. 
AC_CHECK_PROGS(LIBTOOL, glibtool libtool libtool15 libtool13) AM_PATH_PYTHON AC_CHECK_PROGS(MAKE, gmake make) AC_PATH_PROGS(HTML2TXT, lynx w3m) AC_PATH_PROGS(HELP2MAN, help2man) AC_PATH_PROGS(POD2MAN, pod2man, pod2man) AC_PATH_PROGS(ASCIIDOC, asciidoc) AC_PATH_PROGS(PUBLICAN, publican) AC_PATH_PROGS(INKSCAPE, inkscape) AC_PATH_PROGS(XSLTPROC, xsltproc) AC_PATH_PROGS(FOP, fop) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PKGCONFIG, pkg-config) AC_PATH_PROGS(XML2CONFIG, xml2-config) AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind) AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command) dnl Disable these until we decide if the stonith config file should be supported dnl AC_PATH_PROGS(BISON, bison) dnl AC_PATH_PROGS(FLEX, flex) dnl AC_PATH_PROGS(HAVE_YACC, $YACC) if test x"${LIBTOOL}" = x""; then AC_MSG_ERROR(You need (g)libtool installed in order to build ${PACKAGE}) fi if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"") if test x"${HELP2MAN}" != x""; then PCMK_FEATURES="$PCMK_FEATURES generated-manpages" fi MANPAGE_XSLT="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) XSLT=`find ${datadir} -name docbook.xsl` for xsl in $XSLT; do dname=`dirname $xsl` bname=`basename $dname` if test "$bname" = "manpages"; then MANPAGE_XSLT="$xsl" break fi done fi AC_MSG_RESULT($MANPAGE_XSLT) AC_SUBST(MANPAGE_XSLT) AM_CONDITIONAL(BUILD_XML_HELP, test x"${MANPAGE_XSLT}" != x"") if test x"${MANPAGE_XSLT}" != x""; then PCMK_FEATURES="$PCMK_FEATURES agent-manpages" fi AM_CONDITIONAL(BUILD_ASCIIDOC, test x"${ASCIIDOC}" != x"") if test x"${ASCIIDOC}" != x""; then PCMK_FEATURES="$PCMK_FEATURES ascii-docs" fi SUPPORT_STONITH_CONFIG=0 if test x"${HAVE_YACC}" != x"" -a x"${FLEX}" != x"" -a x"${BISON}" != x""; then SUPPORT_STONITH_CONFIG=1 PCMK_FEATURES="$PCMK_FEATURES st-conf" fi AM_CONDITIONAL(BUILD_STONITH_CONFIG, test $SUPPORT_STONITH_CONFIG = 1) AC_DEFINE_UNQUOTED(SUPPORT_STONITH_CONFIG, $SUPPORT_STONITH_CONFIG, Support a stand-alone stonith config file in addition to the CIB) AM_CONDITIONAL(BUILD_DOCBOOK, test x"${PUBLICAN}" != x"" -a x"${INKSCAPE}" != x"") if test x"${PUBLICAN}" != x"" -a x"${INKSCAPE}" != x""; then AC_MSG_NOTICE(Enabling publican) PCMK_FEATURES="$PCMK_FEATURES publican-docs" fi dnl ======================================================================== dnl checks for library functions to replace them dnl dnl NoSuchFunctionName: dnl is a dummy function which no system supplies. It is here to make dnl the system compile semi-correctly on OpenBSD which doesn't know dnl how to create an empty archive dnl dnl scandir: Only on BSD. dnl System-V systems may have it, but hidden and/or deprecated. dnl A replacement function is supplied for it. dnl dnl setenv: is some bsdish function that should also be avoided (use dnl putenv instead) dnl On the other hand, putenv doesn't provide the right API for the dnl code and has memory leaks designed in (sigh...) Fortunately this dnl A replacement function is supplied for it. dnl dnl strerror: returns a string that corresponds to an errno. dnl A replacement function is supplied for it. dnl dnl strnlen: is a gnu function similar to strlen, but safer. dnl We wrote a tolearably-fast replacement function for it. dnl dnl strndup: is a gnu function similar to strdup, but safer. 
dnl We wrote a tolearably-fast replacement function for it. AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir setenv strerror strchrnul unsetenv strnlen strndup) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(c, dlopen) dnl if dlopen is in libc... AC_CHECK_LIB(dl, dlopen) dnl -ldl (for Linux) AC_CHECK_LIB(rt, sched_getscheduler) dnl -lrt (for Tru64) AC_CHECK_LIB(gnugetopt, getopt_long) dnl -lgnugetopt ( if available ) AC_CHECK_LIB(pam, pam_start) dnl -lpam (if available) AC_CHECK_LIB(uuid, uuid_parse) dnl load the library if necessary AC_CHECK_FUNCS(uuid_unparse) dnl OSX ships uuid_* as standard functions AC_CHECK_HEADERS(uuid/uuid.h) if test "x$ac_cv_func_uuid_unparse" != xyes; then AC_MSG_ERROR(You do not have the libuuid development package installed) fi if test x"${PKGCONFIG}" = x""; then AC_MSG_ERROR(You need pkgconfig installed in order to build ${PACKAGE}) fi if test "x${enable_thread_safe}" = "xyes"; then GPKGNAME="gthread-2.0" else GPKGNAME="glib-2.0" fi if $PKGCONFIG --exists $GPKGNAME then GLIBCONFIG="$PKGCONFIG $GPKGNAME" else set -x echo PKG_CONFIG_PATH=$PKG_CONFIG_PATH $PKGCONFIG --exists $GPKGNAME; echo $? $PKGCONFIG --cflags $GPKGNAME; echo $? $PKGCONFIG $GPKGNAME; echo $? set +x AC_MSG_ERROR(You need glib2-devel installed in order to build ${PACKAGE}) fi AC_MSG_RESULT(using $GLIBCONFIG) AC_CHECK_LIB(glib-2.0, g_hash_table_get_values) if test "x$ac_cv_lib_glib_2_0_g_hash_table_get_values" != x""yes; then AC_MSG_WARN(Your version of Glib is too old, you should have at least 2.14) fi AC_CHECK_LIB(glib-2.0, g_list_free_full) if test "x$ac_cv_lib_glib_2_0_g_list_free_full" != x""yes; then AC_DEFINE_UNQUOTED(NEED_G_LIST_FREE_FULL, 1, glib-2.0 has no g_list_free_full) fi if $PKGCONFIG --exists systemd then systemdunitdir=`$PKGCONFIG --variable=systemdsystemunitdir systemd` AC_SUBST(systemdunitdir) fi AM_CONDITIONAL(HAVE_SYSTEMD, test -n "$systemdunitdir" -a "x$systemdunitdir" != xno) # # Where is dlopen? # if test "$ac_cv_lib_c_dlopen" = yes; then LIBADD_DL="" elif test "$ac_cv_lib_dl_dlopen" = yes; then LIBADD_DL=-ldl else LIBADD_DL=${lt_cv_dlopen_libs} fi dnl dnl Check for location of gettext dnl dnl On at least Solaris 2.x, where it is in libc, specifying lintl causes dnl grief. Ensure minimal result, not the sum of all possibilities. dnl And do libc first. 
dnl Known examples: dnl c: Linux, Solaris 2.6+ dnl intl: BSD, AIX AC_CHECK_LIB(c, gettext) if test x$ac_cv_lib_c_gettext != xyes; then AC_CHECK_LIB(intl, gettext) fi if test x$ac_cv_lib_c_gettext != xyes -a x$ac_cv_lib_intl_gettext != xyes; then AC_MSG_ERROR(You need gettext installed in order to build ${PACKAGE}) fi if test "X$GLIBCONFIG" != X; then AC_MSG_CHECKING(for special glib includes: ) GLIBHEAD=`$GLIBCONFIG --cflags` AC_MSG_RESULT($GLIBHEAD) CPPFLAGS="$CPPFLAGS $GLIBHEAD" AC_MSG_CHECKING(for glib library flags) GLIBLIB=`$GLIBCONFIG --libs` AC_MSG_RESULT($GLIBLIB) LIBS="$LIBS $GLIBLIB" fi dnl ======================================================================== dnl Headers dnl ======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(arpa/inet.h) AC_CHECK_HEADERS(asm/types.h) AC_CHECK_HEADERS(assert.h) AC_CHECK_HEADERS(auth-client.h) AC_CHECK_HEADERS(ctype.h) AC_CHECK_HEADERS(dirent.h) AC_CHECK_HEADERS(errno.h) AC_CHECK_HEADERS(fcntl.h) AC_CHECK_HEADERS(getopt.h) AC_CHECK_HEADERS(glib.h) AC_CHECK_HEADERS(grp.h) AC_CHECK_HEADERS(limits.h) AC_CHECK_HEADERS(linux/errqueue.h) AC_CHECK_HEADERS(malloc.h) AC_CHECK_HEADERS(netdb.h) AC_CHECK_HEADERS(netinet/in.h) AC_CHECK_HEADERS(netinet/ip.h) AC_CHECK_HEADERS(pam/pam_appl.h) AC_CHECK_HEADERS(pthread.h) AC_CHECK_HEADERS(pwd.h) AC_CHECK_HEADERS(security/pam_appl.h) AC_CHECK_HEADERS(sgtty.h) AC_CHECK_HEADERS(signal.h) AC_CHECK_HEADERS(stdarg.h) AC_CHECK_HEADERS(stddef.h) AC_CHECK_HEADERS(stdio.h) AC_CHECK_HEADERS(stdlib.h) AC_CHECK_HEADERS(string.h) AC_CHECK_HEADERS(strings.h) AC_CHECK_HEADERS(sys/dir.h) AC_CHECK_HEADERS(sys/ioctl.h) AC_CHECK_HEADERS(sys/param.h) AC_CHECK_HEADERS(sys/poll.h) AC_CHECK_HEADERS(sys/resource.h) AC_CHECK_HEADERS(sys/select.h) AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS(sys/stat.h) AC_CHECK_HEADERS(sys/time.h) AC_CHECK_HEADERS(sys/timeb.h) AC_CHECK_HEADERS(sys/types.h) AC_CHECK_HEADERS(sys/uio.h) AC_CHECK_HEADERS(sys/un.h) AC_CHECK_HEADERS(sys/utsname.h) AC_CHECK_HEADERS(sys/wait.h) AC_CHECK_HEADERS(time.h) AC_CHECK_HEADERS(unistd.h) AC_CHECK_HEADERS(winsock.h) dnl These headers need prerequisits before the tests will pass dnl AC_CHECK_HEADERS(net/if.h) dnl AC_CHECK_HEADERS(netinet/icmp6.h) dnl AC_CHECK_HEADERS(netinet/ip6.h) dnl AC_CHECK_HEADERS(netinet/ip_icmp.h) AC_MSG_CHECKING(for special libxml2 includes) if test "x$XML2CONFIG" = "x"; then AC_MSG_ERROR(libxml2 config not found) else XML2HEAD="`$XML2CONFIG --cflags`" AC_MSG_RESULT($XML2HEAD) AC_CHECK_LIB(xml2, xmlReadMemory) AC_CHECK_LIB(xslt, xsltApplyStylesheet) fi CPPFLAGS="$CPPFLAGS $XML2HEAD" AC_CHECK_HEADERS(libxml/xpath.h) AC_CHECK_HEADERS(libxslt/xslt.h) if test "$ac_cv_header_libxml_xpath_h" != "yes"; then AC_MSG_ERROR(The libxml developement headers were not found) fi if test "$ac_cv_header_libxslt_xslt_h" != "yes"; then AC_MSG_ERROR(The libxslt developement headers were not found) fi dnl ======================================================================== dnl Structures dnl ======================================================================== AC_CHECK_MEMBERS([struct tm.tm_gmtoff],,,[[#include ]]) AC_CHECK_MEMBERS([lrm_op_t.rsc_deleted],,,[[#include ]]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(g_log_set_default_handler) AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) 
AC_CHECK_FUNCS(nanosleep, AC_DEFINE(HAVE_DECL_NANOSLEEP, 1, [Have nanosleep function])) dnl ======================================================================== dnl ltdl dnl ======================================================================== AC_CHECK_LIB(ltdl, lt_dlopen, [LTDL_foo=1]) if test "x${enable_bundled_ltdl}" = "xyes"; then if test $ac_cv_lib_ltdl_lt_dlopen = yes; then AC_MSG_NOTICE([Disabling usage of installed ltdl]) fi ac_cv_lib_ltdl_lt_dlopen=no fi LIBLTDL_DIR="" if test $ac_cv_lib_ltdl_lt_dlopen != yes ; then AC_MSG_NOTICE([Installing local ltdl]) LIBLTDL_DIR=libltdl ( cd $srcdir ; $TAR -xvf libltdl.tar ) if test "$?" -ne 0; then AC_MSG_ERROR([$TAR of libltdl.tar in $srcdir failed]) fi AC_CONFIG_SUBDIRS(libltdl) else LIBS="$LIBS -lltdl" AC_MSG_NOTICE([Using installed ltdl]) INCLTDL="" LIBLTDL="" fi AC_SUBST(INCLTDL) AC_SUBST(LIBLTDL) AC_SUBST(LIBLTDL_DIR) dnl ======================================================================== dnl bzip2 dnl ======================================================================== AC_CHECK_HEADERS(bzlib.h) AC_CHECK_LIB(bz2, BZ2_bzBuffToBuffCompress) if test x$ac_cv_lib_bz2_BZ2_bzBuffToBuffCompress != xyes ; then AC_MSG_ERROR(BZ2 libraries not found) fi if test x$ac_cv_header_bzlib_h != xyes; then AC_MSG_ERROR(BZ2 Development headers not found) fi dnl ======================================================================== dnl ncurses dnl ======================================================================== dnl dnl A few OSes (e.g. Linux) deliver a default "ncurses" alongside "curses". dnl Many non-Linux deliver "curses"; sites may add "ncurses". dnl dnl However, the source-code recommendation for both is to #include "curses.h" dnl (i.e. "ncurses" still wants the include to be simple, no-'n', "curses.h"). dnl dnl ncurse takes precedence. dnl AC_CHECK_HEADERS(curses.h) AC_CHECK_HEADERS(curses/curses.h) AC_CHECK_HEADERS(ncurses.h) AC_CHECK_HEADERS(ncurses/ncurses.h) dnl Although n-library is preferred, only look for it if the n-header was found. CURSESLIBS='' if test "$ac_cv_header_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)] ) fi if test "$ac_cv_header_ncurses_ncurses_h" = "yes"; then AC_CHECK_LIB(ncurses, printw, [CURSESLIBS='-lncurses'; AC_DEFINE(HAVE_LIBNCURSES,1, have ncurses library)] ) fi dnl Only look for non-n-library if there was no n-library. if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)] ) fi dnl Only look for non-n-library if there was no n-library. 
if test X"$CURSESLIBS" = X"" -a "$ac_cv_header_curses_curses_h" = "yes"; then AC_CHECK_LIB(curses, printw, [CURSESLIBS='-lcurses'; AC_DEFINE(HAVE_LIBCURSES,1, have curses library)] ) fi if test "x$CURSESLIBS" != "x"; then PCMK_FEATURES="$PCMK_FEATURES ncurses" fi dnl Check for printw() prototype compatibility if test X"$CURSESLIBS" != X"" && cc_supports_flag -Wcast-qual && cc_supports_flag -Werror; then AC_MSG_CHECKING(whether printw() requires argument of "const char *") ac_save_LIBS=$LIBS LIBS="$CURSESLIBS $LIBS" ac_save_CFLAGS=$CFLAGS CFLAGS="-Wcast-qual -Werror" AC_LINK_IFELSE( [AC_LANG_PROGRAM( [ #if defined(HAVE_CURSES_H) # include #elif defined(HAVE_NCURSES_H) # include #endif ], [printw((const char *)"Test");] )], [ac_cv_compatible_printw=yes], [ac_cv_compatible_printw=no] ) LIBS=$ac_save_LIBS CFLAGS=$ac_save_CFLAGS AC_MSG_RESULT([$ac_cv_compatible_printw]) if test "$ac_cv_compatible_printw" = no; then AC_MSG_WARN([The printw() function of your ncurses or curses library is old, we will disable usage of the library. If you want to use this library anyway, please update to newer version of the library, ncurses 5.4 or later is recommended. You can get the library from http://www.gnu.org/software/ncurses/.]) AC_MSG_NOTICE([Disabling curses]) AC_DEFINE(HAVE_INCOMPATIBLE_PRINTW, 1, [Do we have incompatible printw() in curses library?]) fi fi AC_SUBST(CURSESLIBS) dnl ======================================================================== dnl Profiling and GProf dnl ======================================================================== case $SUPPORT_PROFILING in 1|yes|true) SUPPORT_PROFILING=1 dnl Enable gprof #LIBS="$LIBS -pg" #CFLAGS="$CFLAGS -pg" dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fno-omit-frame-pointer" #CFLAGS="$CFLAGS -fno-inline-functions -fno-inline-functions-called-once -fno-optimize-sibling-calls" dnl CFLAGS="$CFLAGS -fno-default-inline -fno-inline" dnl Update features PCMK_FEATURES="$PCMK_FEATURES gprof" ;; *) SUPPORT_PROFILING=0;; esac AC_DEFINE_UNQUOTED(SUPPORT_PROFILING, $SUPPORT_PROFILING, Support for gprof profiling) case $SUPPORT_GCOV in 1|yes|true) SUPPORT_GCOV=1 dnl Enable gprof #LIBS="$LIBS -pg" #CFLAGS="$CFLAGS -pg" dnl Disable various compiler optimizations CFLAGS="$CFLAGS -fprofile-arcs -ftest-coverage -fno-inline" dnl Turn off optimization so code coverage tool dnl can get accurate line numbers AC_MSG_NOTICE(Old CFLAGS: $CFLAGS) CFLAGS=`echo $CFLAGS | sed -e 's/-O.\ //g' -e 's/-Wp,-D_FORTIFY_SOURCE=.\ //g'` CFLAGS="$CFLAGS -O0" AC_MSG_NOTICE(New CFLAGS: $CFLAGS) dnl Update features PCMK_FEATURES="$PCMK_FEATURES gcov" ;; *) SUPPORT_PROFILING=0;; esac AC_DEFINE_UNQUOTED(SUPPORT_GCOV, $SUPPORT_GCOV, Support for gcov coverage testing) dnl ======================================================================== dnl Cluster infrastructure - Heartbeat / LibQB dnl ======================================================================== dnl Compatability checks AC_CHECK_MEMBERS([struct lrm_ops.fail_rsc],,,[[#include ]]) if test x${enable_no_stack} = xyes; then SUPPORT_HEARTBEAT=no SUPPORT_CS=no fi PKG_CHECK_MODULES(libqb, libqb, HAVE_libqb=1, HAVE_libqb=0) AC_CHECK_HEADERS(qb/qbipc_common.h) AC_CHECK_LIB(qb, qb_ipcc_is_connected) AC_CHECK_FUNCS(qb_ipcc_is_connected) LIBQB_LOG=1 PCMK_FEATURES="$PCMK_FEATURES libqb-logging libqb-ipc" if test $ac_cv_lib_qb_qb_ipcc_is_connected != yes; then AC_MSG_FAILURE(Version of IPC in libqb is not new enough) fi AC_DEFINE_UNQUOTED(LIBQB_LOGGING, $LIBQB_LOG, Use libqb for logging) AC_DEFINE_UNQUOTED(LIBQB_IPC, 0, 
Use libqb for IPC) LIBS="$LIBS $libqb_LIBS" AC_CHECK_HEADERS(heartbeat/hb_config.h) AC_CHECK_HEADERS(heartbeat/glue_config.h) AC_CHECK_HEADERS(stonith/stonith.h) AC_CHECK_HEADERS(agent_config.h) GLUE_HEADER=none HAVE_GLUE=0 if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then GLUE_HEADER=glue_config.h HAVE_GLUE=1 elif test "$ac_cv_header_heartbeat_hb_config_h" = "yes"; then GLUE_HEADER=hb_config.h HAVE_GLUE=1 else AC_MSG_WARN(cluster-glue development headers were not found) fi if test "$ac_cv_header_stonith_stonith_h" = "yes"; then PCMK_FEATURES="$PCMK_FEATURES lha-fencing" fi if test $HAVE_GLUE = 1; then dnl On Debian, AC_CHECK_LIBS fail if a library has any unresolved symbols dnl So check for all the depenancies (so they're added to LIBS) before checking for -lplumb AC_CHECK_LIB(pils, PILLoadPlugin) AC_CHECK_LIB(plumb, G_main_add_IPC_Channel) fi dnl =============================================== dnl Variables needed for substitution dnl =============================================== CRM_DTD_DIRECTORY="${datadir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DTD_DIRECTORY,"$CRM_DTD_DIRECTORY", Location for the Pacemaker Relax-NG Schema) AC_SUBST(CRM_DTD_DIRECTORY) AC_DEFINE_UNQUOTED(CRM_DTD_VERSION,"$CRM_DTD_VERSION", Current version of the Pacemaker Relax-NG Schema) AC_SUBST(CRM_DTD_VERSION) CRM_CORE_DIR=`try_extract_header_define $GLUE_HEADER HA_COREDIR ${localstatedir}/lib/heartbeat/cores` AC_DEFINE_UNQUOTED(CRM_CORE_DIR,"$CRM_CORE_DIR", Location to store core files produced by Pacemaker daemons) AC_SUBST(CRM_CORE_DIR) CRM_DAEMON_USER=`try_extract_header_define $GLUE_HEADER HA_CCMUSER hacluster` AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_USER) CRM_DAEMON_GROUP=`try_extract_header_define $GLUE_HEADER HA_APIGROUP haclient` AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as) AC_SUBST(CRM_DAEMON_GROUP) CRM_STATE_DIR=${localstatedir}/run/crm AC_DEFINE_UNQUOTED(CRM_STATE_DIR,"$CRM_STATE_DIR", Where to keep state files and sockets) AC_SUBST(CRM_STATE_DIR) CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps) AC_SUBST(CRM_BLACKBOX_DIR) PE_STATE_DIR="${localstatedir}/lib/pengine" AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep PEngine outputs) AC_SUBST(PE_STATE_DIR) dnl Eventually move out of the heartbeat dir tree and create compatability code CRM_CONFIG_DIR="${localstatedir}/lib/heartbeat/crm" AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep CIB configuration files) AC_SUBST(CRM_CONFIG_DIR) CRM_DAEMON_DIR="${libexecdir}/pacemaker" AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons) AC_SUBST(CRM_DAEMON_DIR) HB_DAEMON_DIR=`try_extract_header_define $GLUE_HEADER HA_LIBHBDIR $libdir/heartbeat` AC_DEFINE_UNQUOTED(HB_DAEMON_DIR,"$HB_DAEMON_DIR", Location for Heartbeat expects Pacemaker daemons to be in) AC_SUBST(HB_DAEMON_DIR) dnl Needed so that the Corosync plugin can clear out the directory as Heartbeat does HA_STATE_DIR=`try_extract_header_define $GLUE_HEADER HA_VARRUNDIR ${localstatedir}/run` AC_DEFINE_UNQUOTED(HA_STATE_DIR,"$HA_STATE_DIR", Where Heartbeat keeps state files and sockets) AC_SUBST(HA_STATE_DIR) CRM_RSCTMP_DIR=`try_extract_header_define agent_config.h HA_RSCTMPDIR $HA_STATE_DIR/heartbeat/rsctmp` AC_MSG_CHECKING(Scratch dir for resource agents) AC_MSG_RESULT($CRM_RSCTMP_DIR) AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where 
resource agents should keep state files) AC_SUBST(CRM_RSCTMP_DIR) dnl Needed for the location of hostcache in CTS.py HA_VARLIBHBDIR=`try_extract_header_define $GLUE_HEADER HA_VARLIBHBDIR ${localstatedir}/lib/heartbeat` AC_SUBST(HA_VARLIBHBDIR) AC_DEFINE_UNQUOTED(UUID_FILE,"$localstatedir/lib/heartbeat/hb_uuid", Location of Heartbeat's UUID file) OCF_ROOT_DIR=`try_extract_header_define $GLUE_HEADER OCF_ROOT_DIR /usr/lib/ocf` if test "X$OCF_ROOT_DIR" = X; then AC_MSG_ERROR(Could not locate OCF directory) fi AC_SUBST(OCF_ROOT_DIR) OCF_RA_DIR=`try_extract_header_define $GLUE_HEADER OCF_RA_DIR $OCF_ROOT_DIR/resource.d` AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) RH_STONITH_DIR="$sbindir" AC_DEFINE_UNQUOTED(RH_STONITH_DIR,"$RH_STONITH_DIR", Location for Red Hat Stonith agents) RH_STONITH_PREFIX="fence_" AC_DEFINE_UNQUOTED(RH_STONITH_PREFIX,"$RH_STONITH_PREFIX", Prefix for Red Hat Stonith agents) AC_PATH_PROGS(GIT, git false) AC_MSG_CHECKING(build version) -BUILD_VERSION=$Format:%H$ -if test $BUILD_VERSION != ":%H$"; then +BUILD_VERSION=$Format:%h$ +if test $BUILD_VERSION != ":%h$"; then AC_MSG_RESULT(archive hash: $BUILD_VERSION) elif test -x $GIT -a -d .git; then BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1` AC_MSG_RESULT(git hash: $BUILD_VERSION) else # The current directory name make a reasonable default # Most generated archives will include the hash or tag BASE=`basename $PWD` BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::` AC_MSG_RESULT(directory based hash: $BUILD_VERSION) fi AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version) AC_SUBST(BUILD_VERSION) HAVE_gio=1 HAVE_upstart=0 HAVE_systemd=0 PKG_CHECK_MODULES(GIO, gio-2.0, ,HAVE_gio=0) AC_CHECK_TYPE([GDBusProxy],,,[[#include ]]) if test x$ac_cv_type_GDBusProxy != xyes; then HAVE_gio=0 AC_MSG_WARN(Unable to support systemd/upstart. 
You need to use glib >= 2.26) fi if test $HAVE_gio = 1 -a "x${enable_upstart}" != xno; then HAVE_upstart=1 PCMK_FEATURES="$PCMK_FEATURES upstart" fi AC_DEFINE_UNQUOTED(SUPPORT_UPSTART, $HAVE_upstart, Support upstart based system services) AM_CONDITIONAL(BUILD_UPSTART, test $HAVE_upstart = 1) if test $HAVE_gio = 1 -a "x${enable_systemd}" != xno; then HAVE_systemd=1 PCMK_FEATURES="$PCMK_FEATURES systemd" fi AC_DEFINE_UNQUOTED(SUPPORT_SYSTEMD, $HAVE_systemd, Support systemd based system services) AM_CONDITIONAL(BUILD_SYSTEMD, test $HAVE_systemd = 1) STACKS="" CLUSTERLIBS="" dnl ======================================================================== dnl Cluster stack - Heartbeat dnl ======================================================================== case $SUPPORT_HEARTBEAT in 1|yes|true|try) AC_MSG_CHECKING(for heartbeat support) AC_CHECK_LIB(hbclient, ll_cluster_new, [SUPPORT_HEARTBEAT=1], [if $SUPPORT_HEARTBEAT != try; then AC_MSG_FAILURE(Unable to support Heartbeat: client libraries not found) fi]) if test $SUPPORT_HEARTBEAT = 1 ; then STACKS="$STACKS heartbeat" AC_DEFINE_UNQUOTED(CCM_LIBRARY, "libccmclient.so.1", Library to load for ccm support) AC_DEFINE_UNQUOTED(HEARTBEAT_LIBRARY, "libhbclient.so.1", Library to load for heartbeat support) else SUPPORT_HEARTBEAT=0 fi ;; *) SUPPORT_HEARTBEAT=0;; esac AM_CONDITIONAL(BUILD_HEARTBEAT_SUPPORT, test $SUPPORT_HEARTBEAT = 1) AC_DEFINE_UNQUOTED(SUPPORT_HEARTBEAT, $SUPPORT_HEARTBEAT, Support the Heartbeat messaging and membership layer) AC_SUBST(SUPPORT_HEARTBEAT) dnl ======================================================================== dnl Cluster stack - Corosync dnl ======================================================================== dnl Normalize the values case $SUPPORT_CS in 1|yes|true) SUPPORT_CS=yes missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_CS=no;; esac AC_MSG_CHECKING(for native corosync) COROSYNC_LIBS="" CS_USES_LIBQB=0 PCMK_SERVICE_ID=9 LCRSODIR="$libdir" if test $SUPPORT_CS = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_CS=0 else AC_MSG_RESULT($SUPPORT_CS, with '$CSPREFIX') PKG_CHECK_MODULES(cpg, libcpg) dnl Fatal PKG_CHECK_MODULES(cfg, libcfg) dnl Fatal PKG_CHECK_MODULES(cmap, libcmap, HAVE_cmap=1, HAVE_cmap=0) PKG_CHECK_MODULES(cman, libcman, HAVE_cman=1, HAVE_cman=0) PKG_CHECK_MODULES(confdb, libconfdb, HAVE_confdb=1, HAVE_confdb=0) PKG_CHECK_MODULES(fenced, libfenced, HAVE_fenced=1, HAVE_fenced=0) PKG_CHECK_MODULES(quorum, libquorum, HAVE_quorum=1, HAVE_quorum=0) PKG_CHECK_MODULES(oldipc, libcoroipcc, HAVE_oldipc=1, HAVE_oldipc=0) if test $HAVE_oldipc = 1; then SUPPORT_CS=1 CFLAGS="$CFLAGS $oldipc_FLAGS $cpg_FLAGS $cfg_FLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $oldipc_LIBS $cpg_LIBS $cfg_LIBS" elif test $HAVE_libqb = 1; then SUPPORT_CS=1 CS_USES_LIBQB=1 CFLAGS="$CFLAGS $libqb_FLAGS $cpg_FLAGS $cfg_FLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $libqb_LIBS $cpg_LIBS $cfg_LIBS" AC_CHECK_LIB(corosync_common, cs_strerror) else aisreason="corosync/libqb IPC libraries not found by pkg_config" fi AC_DEFINE_UNQUOTED(HAVE_CONFDB, $HAVE_confdb, Have the old herarchial Corosync config API) AC_DEFINE_UNQUOTED(HAVE_CMAP, $HAVE_cmap, Have the new non-herarchial Corosync config API) fi if test $SUPPORT_CS = 1 -a x$HAVE_oldipc = x0 ; then dnl Support for plugins was removed about the time the IPC was dnl moved to libqb. 
dnl The only option now is the built-in quorum API CFLAGS="$CFLAGS $cmap_CFLAGS $quorum_CFLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $cmap_LIBS $quorum_LIBS" STACKS="$STACKS corosync-native" AC_DEFINE_UNQUOTED(SUPPORT_CS_QUORUM, 1, Support the consumption of membership and quorum from corosync) fi if test $SUPPORT_CS = 1 -a x$HAVE_confdb = x1; then dnl Need confdb to support cman and the plugins LCRSODIR=`$PKGCONFIG corosync --variable=lcrsodir` STACKS="$STACKS corosync-plugin" COROSYNC_LIBS="$COROSYNC_LIBS $confdb_LIBS" if test $SUPPORT_CMAN != no; then if test $HAVE_cman = 1 -a $HAVE_fenced = 1; then SUPPORT_CMAN=1 STACKS="$STACKS cman" CFLAGS="$CFLAGS $cman_FLAGS $fenced_FLAGS" COROSYNC_LIBS="$COROSYNC_LIBS $cman_LIBS $fenced_LIBS" fi fi fi dnl Normalize SUPPORT_CS and SUPPORT_CMAN for use with #if directives if test $SUPPORT_CMAN != 1; then SUPPORT_CMAN=0 fi if test $SUPPORT_CS = 1; then CLUSTERLIBS="$CLUSTERLIBS $COROSYNC_LIBS" elif test $SUPPORT_CS != 0; then SUPPORT_CS=0 if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support Corosync: $aisreason) else AC_MSG_FAILURE(Unable to support Corosync: $aisreason) fi fi AC_DEFINE_UNQUOTED(SUPPORT_COROSYNC, $SUPPORT_CS, Support the Corosync messaging and membership layer) AC_DEFINE_UNQUOTED(SUPPORT_CMAN, $SUPPORT_CMAN, Support the consumption of membership and quorum from cman) AC_DEFINE_UNQUOTED(CS_USES_LIBQB, $CS_USES_LIBQB, Does corosync use libqb for its ipc) AC_DEFINE_UNQUOTED(PCMK_SERVICE_ID, $PCMK_SERVICE_ID, Corosync service number) AM_CONDITIONAL(BUILD_CS_SUPPORT, test $SUPPORT_CS = 1) AM_CONDITIONAL(BUILD_CS_PLUGIN, test $HAVE_confdb = 1) dnl confdb went away at about the same time as plugins AC_SUBST(SUPPORT_CMAN) AC_SUBST(SUPPORT_CS) dnl dnl Cluster stack - Sanity dnl if test x${enable_no_stack} = xyes; then AC_MSG_NOTICE(No cluster stack supported. Just building the Policy Engine) PCMK_FEATURES="$PCMK_FEATURES no-cluster-stack" else AC_MSG_CHECKING(for supported stacks) if test x"$STACKS" = x; then AC_MSG_FAILURE(You must support at least one cluster stack (heartbeat or corosync) ) fi AC_MSG_RESULT($STACKS) PCMK_FEATURES="$PCMK_FEATURES $STACKS" fi AC_SUBST(CLUSTERLIBS) AC_SUBST(LCRSODIR) dnl ======================================================================== dnl SNMP dnl ======================================================================== case $SUPPORT_SNMP in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_SNMP=no;; esac SNMPLIBS="" AC_MSG_CHECKING(for snmp support) if test $SUPPORT_SNMP = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_SNMP=0 else SNMPCONFIG="" AC_MSG_RESULT($SUPPORT_SNMP) AC_CHECK_HEADERS(net-snmp/net-snmp-config.h) if test "x${ac_cv_header_net_snmp_net_snmp_config_h}" != "xyes"; then SUPPORT_SNMP="no" fi if test $SUPPORT_SNMP != no; then AC_PATH_PROGS(SNMPCONFIG, net-snmp-config) if test "X${SNMPCONFIG}" = "X"; then AC_MSG_RESULT(You need the net_snmp development package to continue.) SUPPORT_SNMP=no fi fi if test $SUPPORT_SNMP != no; then AC_MSG_CHECKING(for special snmp libraries) SNMPLIBS=`$SNMPCONFIG --agent-libs` AC_MSG_RESULT($SNMPLIBS) fi if test $SUPPORT_SNMP != no; then savedLibs=$LIBS LIBS="$LIBS $SNMPLIBS" dnl On many systems libcrypto is needed when linking against libsnmp. dnl Check to see if it exists, and if so use it. 
dnl AC_CHECK_LIB(crypto, CRYPTO_free, CRYPTOLIB="-lcrypto",) dnl AC_SUBST(CRYPTOLIB) AC_CHECK_FUNCS(netsnmp_transport_open_client) if test $ac_cv_func_netsnmp_transport_open_client != yes; then AC_CHECK_FUNCS(netsnmp_tdomain_transport) if test $ac_cv_func_netsnmp_tdomain_transport != yes; then SUPPORT_SNMP=no else AC_DEFINE_UNQUOTED(NETSNMPV53, 1, [Use the older 5.3 version of the net-snmp API]) fi fi LIBS=$savedLibs fi if test $SUPPORT_SNMP = no; then SNMPLIBS="" SUPPORT_SNMP=0 if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support SNMP) else AC_MSG_FAILURE(Unable to support SNMP) fi else SUPPORT_SNMP=1 fi fi if test $SUPPORT_SNMP = 1; then PCMK_FEATURES="$PCMK_FEATURES snmp" fi AC_SUBST(SNMPLIBS) AM_CONDITIONAL(ENABLE_SNMP, test "$SUPPORT_SNMP" = "1") AC_DEFINE_UNQUOTED(ENABLE_SNMP, $SUPPORT_SNMP, Build in support for sending SNMP traps) dnl ======================================================================== dnl ESMTP dnl ======================================================================== case $SUPPORT_ESMTP in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_ESMTP=no;; esac ESMTPLIB="" AC_MSG_CHECKING(for esmtp support) if test $SUPPORT_ESMTP = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ESMTP=0 else ESMTPCONFIG="" AC_MSG_RESULT($SUPPORT_ESMTP) AC_CHECK_HEADERS(libesmtp.h) if test "x${ac_cv_header_libesmtp_h}" != "xyes"; then ENABLE_ESMTP="no" fi if test $SUPPORT_ESMTP != no; then AC_PATH_PROGS(ESMTPCONFIG, libesmtp-config) if test "X${ESMTPCONFIG}" = "X"; then AC_MSG_RESULT(You need the libesmtp development package to continue.) SUPPORT_ESMTP=no fi fi if test $SUPPORT_ESMTP != no; then AC_MSG_CHECKING(for special esmtp libraries) ESMTPLIBS=`$ESMTPCONFIG --libs | tr '\n' ' '` AC_MSG_RESULT($ESMTPLIBS) fi if test $SUPPORT_ESMTP = no; then SUPPORT_ESMTP=0 if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ESMTP) else AC_MSG_FAILURE(Unable to support ESMTP) fi else SUPPORT_ESMTP=1 PCMK_FEATURES="$PCMK_FEATURES libesmtp" fi fi AC_SUBST(ESMTPLIBS) AM_CONDITIONAL(ENABLE_ESMTP, test "$SUPPORT_ESMTP" = "1") AC_DEFINE_UNQUOTED(ENABLE_ESMTP, $SUPPORT_ESMTP, Build in support for sending mail notifications with ESMTP) dnl ======================================================================== dnl ACL dnl ======================================================================== case $SUPPORT_ACL in 1|yes|true) missingisfatal=1;; try) missingisfatal=0;; *) SUPPORT_ACL=no;; esac AC_MSG_CHECKING(for acl support) if test $SUPPORT_ACL = no; then AC_MSG_RESULT(no (disabled)) SUPPORT_ACL=0 else AC_MSG_RESULT($SUPPORT_ACL) AC_CHECK_LIB(qb, qb_ipcs_connection_auth_set) if test $ac_cv_lib_qb_qb_ipcs_connection_auth_set != yes; then SUPPORT_ACL=0 else SUPPORT_ACL=1 fi if test $SUPPORT_ACL = 0; then if test $missingisfatal = 0; then AC_MSG_WARN(Unable to support ACL. You need to use libqb > 0.13.0) else AC_MSG_FAILURE(Unable to support ACL. You need to use libqb > 0.13.0) fi fi fi if test $SUPPORT_ACL = 1; then PCMK_FEATURES="$PCMK_FEATURES acls" fi AM_CONDITIONAL(ENABLE_ACL, test "$SUPPORT_ACL" = "1") AC_DEFINE_UNQUOTED(ENABLE_ACL, $SUPPORT_ACL, Build in support for CIB ACL) dnl ======================================================================== dnl GnuTLS dnl ======================================================================== AC_CHECK_HEADERS(gnutls/gnutls.h) AC_CHECK_HEADERS(security/pam_appl.h pam/pam_appl.h) dnl GNUTLS library: Attempt to determine by 'libgnutls-config' program. dnl If no 'libgnutls-config', try traditional autoconf means. 
AC_PATH_PROGS(LIBGNUTLS_CONFIG, libgnutls-config) if test -n "$LIBGNUTLS_CONFIG"; then AC_MSG_CHECKING(for gnutls header flags) GNUTLSHEAD="`$LIBGNUTLS_CONFIG --cflags`"; AC_MSG_RESULT($GNUTLSHEAD) AC_MSG_CHECKING(for gnutls library flags) GNUTLSLIBS="`$LIBGNUTLS_CONFIG --libs`"; AC_MSG_RESULT($GNUTLSLIBS) fi AC_CHECK_LIB(gnutls, gnutls_init) AC_CHECK_FUNCS(gnutls_priority_set_direct) AC_SUBST(GNUTLSHEAD) AC_SUBST(GNUTLSLIBS) dnl ======================================================================== dnl System Health dnl ======================================================================== dnl Check if servicelog development package is installed SERVICELOG=servicelog-1 SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG packages) if $PKGCONFIG --exists $SERVICELOG then PKG_CHECK_MODULES([SERVICELOG], [servicelog-1]) SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_SERVICELOG, test "$SERVICELOG_EXISTS" = "yes") dnl Check if OpenIMPI packages and servicelog are installed OPENIPMI="OpenIPMI OpenIPMIposix" OPENIPMI_SERVICELOG_EXISTS="no" AC_MSG_CHECKING(for $SERVICELOG $OPENIPMI packages) if $PKGCONFIG --exists $OPENIPMI $SERVICELOG then PKG_CHECK_MODULES([OPENIPMI_SERVICELOG],[OpenIPMI OpenIPMIposix]) OPENIPMI_SERVICELOG_EXISTS="yes" fi AC_MSG_RESULT($OPENIPMI_SERVICELOG_EXISTS) AM_CONDITIONAL(BUILD_OPENIPMI_SERVICELOG, test "$OPENIPMI_SERVICELOG_EXISTS" = "yes") dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. 
CC_ERRORS="" CC_EXTRAS="" if export | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" enable_fatal_warnings=no else CFLAGS="$CFLAGS -ggdb" # We had to eliminate -Wnested-externs because of libtool changes EXTRA_FLAGS="-fgnu89-inline -fstack-protector-all -Wall -Waggregate-return -Wbad-function-cast -Wcast-align -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat=2 -Wformat-security -Wformat-nonliteral -Wmissing-prototypes -Wmissing-declarations -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wno-unused-but-set-variable -Wpointer-arith -Wstrict-prototypes -Wunsigned-char -Wwrite-strings" # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $j then CC_EXTRAS="$CC_EXTRAS $j" fi done dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'` AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4) dnl System specific options case "$host_os" in *linux*|*bsd*) if test "${enable_fatal_warnings}" = "unknown"; then enable_fatal_warnings=yes fi ;; esac if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then enable_fatal_warnings=yes else enable_fatal_warnings=no fi if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LIBADD_DL) dnl extra flags for dynamic linking libraries AC_SUBST(LIBADD_INTL) dnl extra flags for GNU gettext stuff... 
AC_SUBST(LOCALE) dnl Options for cleaning up the compiler output QUIET_LIBTOOL_OPTS="" QUIET_MAKE_OPTS="" if test "x${enable_quiet}" = "xyes"; then QUIET_LIBTOOL_OPTS="--quiet" QUIET_MAKE_OPTS="--quiet" fi AC_MSG_RESULT(Supress make details: ${enable_quiet}) dnl Put the above variables to use LIBTOOL="${LIBTOOL} --tag=CC \$(QUIET_LIBTOOL_OPTS)" MAKE="${MAKE} \$(QUIET_MAKE_OPTS)" AC_SUBST(CC) AC_SUBST(MAKE) AC_SUBST(LIBTOOL) AC_SUBST(QUIET_MAKE_OPTS) AC_SUBST(QUIET_LIBTOOL_OPTS) AC_DEFINE_UNQUOTED(CRM_FEATURES, "$PCMK_FEATURES", Set of enabled features) AC_SUBST(PCMK_FEATURES) dnl The Makefiles and shell scripts we output AC_CONFIG_FILES(Makefile \ cts/Makefile \ cts/CTSvars.py \ cts/LSBDummy \ cts/benchmark/Makefile \ cts/benchmark/clubench \ cib/Makefile \ crmd/Makefile \ pengine/Makefile \ pengine/regression.core.sh \ doc/Makefile \ doc/Pacemaker_Explained/publican.cfg \ doc/Clusters_from_Scratch/publican.cfg \ include/Makefile \ include/crm/Makefile \ include/crm/cib/Makefile \ include/crm/common/Makefile \ include/crm/cluster/Makefile \ include/crm/fencing/Makefile \ include/crm/pengine/Makefile \ replace/Makefile \ lib/Makefile \ lib/pcmk.pc \ lib/pcmk-pe.pc \ lib/pcmk-cib.pc \ lib/ais/Makefile \ lib/common/Makefile \ lib/cluster/Makefile \ lib/cib/Makefile \ lib/pengine/Makefile \ lib/transition/Makefile \ lib/fencing/Makefile \ lib/lrmd/Makefile \ lib/services/Makefile \ mcp/Makefile \ mcp/pacemaker \ mcp/pacemaker.service \ fencing/Makefile \ + fencing/regression.py \ lrmd/Makefile \ lrmd/regression.py \ extra/Makefile \ extra/resources/Makefile \ extra/rgmanager/Makefile \ tools/Makefile \ tools/crm_report \ tools/coverage.sh \ xml/Makefile \ lib/gnu/Makefile \ ) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION} (Build: $BUILD_VERSION)]) AC_MSG_RESULT([ Features =${PCMK_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([ Corosync Plugins = ${LCRSODIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Use system LTDL = ${ac_cv_lib_ltdl_lt_dlopen}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ HA group name = ${CRM_DAEMON_GROUP}]) AC_MSG_RESULT([ HA user name = ${CRM_DAEMON_USER}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff --git a/doc/Pacemaker_Explained/en-US/Ch-Basics.txt b/doc/Pacemaker_Explained/en-US/Ch-Basics.txt index e5df1c439f..b23597679e 100644 --- a/doc/Pacemaker_Explained/en-US/Ch-Basics.txt +++ b/doc/Pacemaker_Explained/en-US/Ch-Basics.txt @@ -1,349 +1,349 @@ = Configuration Basics = == Configuration Layout == The cluster is written using XML notation and divided into two main sections: configuration and status. The status section contains the history of each resource on each node and based on this data, the cluster can construct the complete current state of the cluster. 
The authoritative source for the status section is the local resource manager (lrmd) process on each cluster node and the cluster will occasionally repopulate the entire section. For this reason it is never written to disk and administrators are advised against modifying it in any way. The configuration section contains the more traditional information like cluster options, lists of resources and indications of where they should be placed. The configuration section is the primary focus of this document. The configuration section itself is divided into four parts: * Configuration options (called +crm_config+) * Nodes * Resources * Resource relationships (called +constraints+) .An empty configuration [source,XML] ------- - + ------- == The Current State of the Cluster == Before one starts to configure a cluster, it is worth explaining how to view the finished product. For this purpose we have created the pass:[crm_mon] utility that will display the current state of an active cluster. It can show the cluster status by node or by resource and can be used in either single-shot or dynamically-updating mode. There are also modes for displaying a list of the operations performed (grouped by node and resource) as well as information about failures. Using this tool, you can examine the state of the cluster for irregularities and see how it responds when you cause or simulate failures. Details on all the available options can be obtained using the pass:[crm_mon --help] command. .Sample output from crm_mon ------- ============ Last updated: Fri Nov 23 15:26:13 2007 Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec) 3 Nodes configured. 5 Resources configured. ============ Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1 192.168.100.182 (heartbeat:IPaddr): Started sles-1 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1 rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1 child_DoFencing:2 (stonith:external/vmware): Started sles-1 Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3 rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3 child_DoFencing:0 (stonith:external/vmware): Started sles-3 ------- .Sample output from crm_mon -n ------- ============ Last updated: Fri Nov 23 15:26:13 2007 Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec) 3 Nodes configured. 5 Resources configured. ============ Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online Resource Group: group-1 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1 192.168.100.182 (heartbeat:IPaddr): Started sles-1 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1 rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1 rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3 rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3 Clone Set: DoFencing child_DoFencing:0 (stonith:external/vmware): Started sles-3 child_DoFencing:1 (stonith:external/vmware): Stopped child_DoFencing:2 (stonith:external/vmware): Started sles-1 ------- The DC (Designated Controller) node is where all the decisions are made and if the current DC fails a new one is elected from the remaining cluster nodes. The choice of DC is of no significance to an administrator beyond the fact that its logs will generally be more interesting. 
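For example, a single snapshot of the current status (rather than the dynamically-updating display) can be combined with the by-node view shown above. This is a minimal sketch that assumes crm_mon's usual +-1+ (one-shot) option; check pass:[crm_mon --help] for the exact options available in your version:

.Taking a one-shot look at the cluster status
[source,Bash]
--------
# crm_mon -1
# crm_mon -1 -n
--------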
== How Should the Configuration be Updated? == There are three basic rules for updating the cluster configuration: * Rule 1 - Never edit the cib.xml file manually. Ever. I'm not making this up. * Rule 2 - Read Rule 1 again. * Rule 3 - The cluster will notice if you ignored rules 1 & 2 and refuse to use the configuration. Now that it is clear how NOT to update the configuration, we can begin to explain how you should. The most powerful tool for modifying the configuration is the +cibadmin+ command which talks to a running cluster. With +cibadmin+, the user can query, add, remove, update or replace any part of the configuration; all changes take effect immediately, so there is no need to perform a reload-like operation. The simplest way of using cibadmin is to use it to save the current configuration to a temporary file, edit that file with your favorite text or XML editor and then upload the revised configuration. .Safely using an editor to modify the cluster configuration [source,Bash] -------- # cibadmin --query > tmp.xml # vi tmp.xml # cibadmin --replace --xml-file tmp.xml -------- Some of the better XML editors can make use of a Relax NG schema to help make sure any changes you make are valid. The schema describing the configuration can normally be found in pass:[/usr/lib/heartbeat/pacemaker.rng] on most systems. If you only wanted to modify the resources section, you could instead do .Safely using an editor to modify a subsection of the cluster configuration [source,Bash] -------- # cibadmin --query --obj_type resources > tmp.xml -# vi tmp.xml] +# vi tmp.xml # cibadmin --replace --obj_type resources --xml-file tmp.xml -------- to avoid modifying any other part of the configuration. == Quickly Deleting Part of the Configuration == Identify the object you wish to delete. Eg. run .Searching for STONITH related configuration items [source,Bash] -------- -# cibadmin -Q | grep stonith +# cibadmin -Q | grep stonith -------- Next identify the resource's tag name and id (in this case we'll choose +primitive+ and +child_DoFencing+). Then simply execute: -pass:[cibadmin --delete --crm_xml ‘<primitive id="child_DoFencing"/>'] +pass:[cibadmin --delete --crm_xml '<primitive id="child_DoFencing"/>'] == Updating the Configuration Without Using XML == Some common tasks can also be performed with one of the higher level tools that avoid the need to read or edit XML. To enable stonith for example, one could run: pass:[crm_attribute --attr-name stonith-enabled --attr-value true] Or, to see if +somenode+ is allowed to run resources, there is: pass:[crm_standby --get-value --node-uname somenode] Or, to find the current location of +my-test-rsc+, one can use: pass:[crm_resource --locate --resource my-test-rsc] [[s-config-sandboxes]] == Making Configuration Changes in a Sandbox == Often it is desirable to preview the effects of a series of changes before updating the configuration atomically. For this purpose we have created pass:[crm_shadow] which creates a "shadow" copy of the configuration and arranges for all the command line tools to use it. To begin, simply invoke pass:[crm_shadow] and give it the name of a configuration to create footnote:[Shadow copies are identified with a name, making it possible to have more than one.] ; be sure to follow the simple on-screen instructions. -Read the above carefully, failure to do so could result in you -destroying the cluster's active configuration! 
+WARNING: Read the above carefully, failure to do so could result in you +destroying the cluster's active configuration! .Creating and displaying the active sandbox [source,Bash] -------- # crm_shadow --create test Setting up shadow instance Type Ctrl-D to exit the crm_shadow shell shadow[test]: shadow[test] # crm_shadow --which test -------- From this point on, all cluster commands will automatically use the shadow copy instead of talking to the cluster's active configuration. Once you have finished experimenting, you can either commit the changes, or discard them as shown below. Again, be sure to follow the on-screen instructions carefully. For a full list of pass:[crm_shadow] options and commands, invoke it with the --help option. .Using a sandbox to make multiple changes atomically [source,Bash] -------- shadow[test] # crm_failcount -G -r rsc_c001n01 name=fail-count-rsc_c001n01 value=0 shadow[test] # crm_standby -v on -n c001n02 shadow[test] # crm_standby -G -n c001n02 name=c001n02 scope=nodes value=on shadow[test] # cibadmin --erase --force shadow[test] # cibadmin --query shadow[test] # crm_shadow --delete test --force Now type Ctrl-D to exit the crm_shadow shell shadow[test] # exit # crm_shadow --which No shadow instance provided # cibadmin -Q -------- Making changes in a sandbox and verifying the real configuration is untouched [[s-config-testing-changes]] == Testing Your Configuration Changes == We saw previously how to make a series of changes to a "shadow" copy of the configuration. Before loading the changes back into the cluster (eg. pass:[crm_shadow --commit mytest --force]), it is often advisable to simulate the effect of the changes with +ptest+, eg. pass:[ptest --live-check -VVVVV --save-graph tmp.graph --save-dotfile tmp.dot] The tool uses the same library as the live cluster to show what it would have done given the supplied input. It's output, in addition to a significant amount of logging, is stored in two files +tmp.graph+ and +tmp.dot+, both are representations of the same thing -- the cluster's response to your changes. In the graph file is stored the complete transition, containing a list of all the actions, their parameters and their pre-requisites. Because the transition graph is not terribly easy to read, the tool also generates a Graphviz dot-file representing the same information. == Interpreting the Graphviz output == * Arrows indicate ordering dependencies * Dashed-arrows indicate dependencies that are not present in the transition graph * Actions with a dashed border of any color do not form part of the transition graph * Actions with a green border form part of the transition graph * Actions with a red border are ones the cluster would like to execute but cannot run * Actions with a blue border are ones the cluster does not feel need to be executed * Actions with orange text are pseudo/pretend actions that the cluster uses to simplify the graph * Actions with black text are sent to the LRM * Resource actions have text of the form pass:[rsc]_pass:[action]_pass:[interval] pass:[node] * Any action depending on an action with a red border will not be able to execute. * Loops are _really_ bad. Please report them to the development team. 
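To make the dot-file easier to inspect, it can be rendered to an image with the Graphviz tools. The following sketch assumes the Graphviz pass:[dot] utility is installed; it reuses the tmp.graph and tmp.dot file names from the ptest example above, and tmp.png is an arbitrary output name.

.Rendering the simulated transition graph (illustrative sketch)
[source,Bash]
--------
# Simulate the effect of the pending changes and save both representations
ptest --live-check -VVVVV --save-graph tmp.graph --save-dotfile tmp.dot

# Render the Graphviz dot-file as a PNG image for easier reading
dot -Tpng tmp.dot -o tmp.png
--------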
=== Small Cluster Transition === image::images/Policy-Engine-small.png["An example transition graph as represented by Graphviz",width="16cm",height="6cm",align="center"] In the above example, it appears that a new node, +node2+, has come online and that the cluster is checking to make sure +rsc1+, +rsc2+ and +rsc3+ are not already running there (Indicated by the +*_monitor_0+ entries). Once it did that, and assuming the resources were not active there, it would have liked to stop +rsc1+ and +rsc2+ on +node1+ and move them to +node2+. However, there appears to be some problem and the cluster cannot or is not permitted to perform the stop actions which implies it also cannot perform the start actions. For some reason the cluster does not want to start +rsc3+ anywhere. For information on the options supported by ptest, use pass:[ptest --help]. === Complex Cluster Transition === image::images/Policy-Engine-big.png["Another, slightly more complex, transition graph that you're not expected to be able to read",width="16cm",height="20cm",align="center"] == Do I Need to Update the Configuration on all Cluster Nodes? == No. Any changes are immediately synchronized to the other active members of the cluster. To reduce bandwidth, the cluster only broadcasts the incremental updates that result from your changes and uses MD5 checksums to ensure that each copy is completely consistent. diff --git a/doc/Pacemaker_Explained/pot/Ch-Basics.pot b/doc/Pacemaker_Explained/pot/Ch-Basics.pot index e5d9931dcc..4954f2d95b 100644 --- a/doc/Pacemaker_Explained/pot/Ch-Basics.pot +++ b/doc/Pacemaker_Explained/pot/Ch-Basics.pot @@ -1,513 +1,513 @@ # # AUTHOR , YEAR. # msgid "" msgstr "" "Project-Id-Version: 0\n" "POT-Creation-Date: 2012-02-27T09:17:56\n" "PO-Revision-Date: 2012-02-27T09:17:56\n" "Last-Translator: Automatically generated\n" "Language-Team: None\n" "MIME-Version: 1.0\n" "Content-Type: application/x-publican; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: title #, no-c-format msgid "Configuration Basics" msgstr "" #. Tag: title #, no-c-format msgid "Configuration Layout" msgstr "" #. Tag: para #, no-c-format msgid "The cluster is written using XML notation and divided into two main sections: configuration and status." msgstr "" #. Tag: para #, no-c-format msgid "The status section contains the history of each resource on each node and based on this data, the cluster can construct the complete current state of the cluster. The authoritative source for the status section is the local resource manager (lrmd) process on each cluster node and the cluster will occasionally repopulate the entire section. For this reason it is never written to disk and administrators are advised against modifying it in any way." msgstr "" #. Tag: para #, no-c-format msgid "The configuration section contains the more traditional information like cluster options, lists of resources and indications of where they should be placed. The configuration section is the primary focus of this document." msgstr "" #. Tag: para #, no-c-format msgid "The configuration section itself is divided into four parts:" msgstr "" #. Tag: para #, no-c-format msgid "Configuration options (called crm_config)" msgstr "" #. Tag: para #, no-c-format msgid "Nodes" msgstr "" #. Tag: para #, no-c-format msgid "Resources" msgstr "" #. Tag: para #, no-c-format msgid "Resource relationships (called constraints)" msgstr "" #. 
Tag: programlisting #, no-c-format -msgid " <cib generated=\"true\" admin_epoch=\"0\" epoch=\"0\" num_updates=\"0\" have-quorum=\"false\">\n" +msgid " <cib admin_epoch=\"0\" epoch=\"0\" num_updates=\"0\" have-quorum=\"false\">\n" " <configuration>\n" " <crm_config/>\n" " <nodes/>\n" " <resources/>\n" " <constraints/>\n" " </configuration>\n" " <status/>\n" " </cib>" msgstr "" #. Tag: title #, no-c-format msgid "The Current State of the Cluster" msgstr "" #. Tag: para #, no-c-format msgid "Before one starts to configure a cluster, it is worth explaining how to view the finished product. For this purpose we have created the crm_mon utility that will display the current state of an active cluster. It can show the cluster status by node or by resource and can be used in either single-shot or dynamically-updating mode. There are also modes for displaying a list of the operations performed (grouped by node and resource) as well as information about failures." msgstr "" #. Tag: para #, no-c-format msgid "Using this tool, you can examine the state of the cluster for irregularities and see how it responds when you cause or simulate failures." msgstr "" #. Tag: para #, no-c-format msgid "Details on all the available options can be obtained using the crm_mon --help command." msgstr "" #. Tag: title #, no-c-format msgid "Sample output from crm_mon" msgstr "" #. Tag: screen #, no-c-format msgid " ============\n" " Last updated: Fri Nov 23 15:26:13 2007\n" " Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec)\n" " 3 Nodes configured.\n" " 5 Resources configured.\n" " ============\n" "\n" " Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online\n" " 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1\n" " 192.168.100.182 (heartbeat:IPaddr): Started sles-1\n" " 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1\n" " child_DoFencing:2 (stonith:external/vmware): Started sles-1\n" " Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby\n" " Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online\n" " rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3\n" " rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3\n" " child_DoFencing:0 (stonith:external/vmware): Started sles-3" msgstr "" #. Tag: title #, no-c-format msgid "Sample output from crm_mon -n" msgstr "" #. Tag: screen #, no-c-format msgid " ============\n" " Last updated: Fri Nov 23 15:26:13 2007\n" " Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec)\n" " 3 Nodes configured.\n" " 5 Resources configured.\n" " ============\n" "\n" " Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online\n" " Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby\n" " Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online\n" "\n" " Resource Group: group-1\n" " 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1\n" " 192.168.100.182 (heartbeat:IPaddr): Started sles-1\n" " 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3\n" " rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3\n" " Clone Set: DoFencing\n" " child_DoFencing:0 (stonith:external/vmware): Started sles-3\n" " child_DoFencing:1 (stonith:external/vmware): Stopped\n" " child_DoFencing:2 (stonith:external/vmware): Started sles-1" msgstr "" #. 
Tag: para #, no-c-format msgid "The DC (Designated Controller) node is where all the decisions are made and if the current DC fails a new one is elected from the remaining cluster nodes. The choice of DC is of no significance to an administrator beyond the fact that its logs will generally be more interesting." msgstr "" #. Tag: title #, no-c-format msgid "How Should the Configuration be Updated?" msgstr "" #. Tag: para #, no-c-format msgid "There are three basic rules for updating the cluster configuration:" msgstr "" #. Tag: para #, no-c-format msgid "Rule 1 - Never edit the cib.xml file manually. Ever. I’m not making this up." msgstr "" #. Tag: para #, no-c-format msgid "Rule 2 - Read Rule 1 again." msgstr "" #. Tag: para #, no-c-format msgid "Rule 3 - The cluster will notice if you ignored rules 1 & 2 and refuse to use the configuration." msgstr "" #. Tag: para #, no-c-format msgid "Now that it is clear how NOT to update the configuration, we can begin to explain how you should." msgstr "" #. Tag: para #, no-c-format msgid "The most powerful tool for modifying the configuration is the cibadmin command which talks to a running cluster. With cibadmin, the user can query, add, remove, update or replace any part of the configuration; all changes take effect immediately, so there is no need to perform a reload-like operation." msgstr "" #. Tag: para #, no-c-format msgid "The simplest way of using cibadmin is to use it to save the current configuration to a temporary file, edit that file with your favorite text or XML editor and then upload the revised configuration." msgstr "" #. Tag: programlisting #, no-c-format msgid "# cibadmin --query > tmp.xml\n" "# vi tmp.xml\n" "# cibadmin --replace --xml-file tmp.xml" msgstr "" #. Tag: para #, no-c-format msgid "Some of the better XML editors can make use of a Relax NG schema to help make sure any changes you make are valid. The schema describing the configuration can normally be found in /usr/lib/heartbeat/pacemaker.rng on most systems." msgstr "" #. Tag: para #, no-c-format msgid "If you only wanted to modify the resources section, you could instead do" msgstr "" #. Tag: programlisting #, no-c-format msgid "# cibadmin --query --obj_type resources > tmp.xml\n" -"# vi tmp.xml</command>]\n" +"# vi tmp.xml\n" "# cibadmin --replace --obj_type resources --xml-file tmp.xml" msgstr "" #. Tag: para #, no-c-format msgid "to avoid modifying any other part of the configuration." msgstr "" #. Tag: title #, no-c-format msgid "Quickly Deleting Part of the Configuration" msgstr "" #. Tag: para #, no-c-format msgid "Identify the object you wish to delete. Eg. run" msgstr "" #. 
Tag: programlisting #, no-c-format -msgid "# cibadmin -Q | grep stonith</command>\n" +msgid "# cibadmin -Q | grep stonith\n" " <nvpair id=\"cib-bootstrap-options-stonith-action\" name=\"stonith-action\" value=\"reboot\"/>\n" " <nvpair id=\"cib-bootstrap-options-stonith-enabled\" name=\"stonith-enabled\" value=\"1\"/>\n" " <primitive id=\"child_DoFencing\" class=\"stonith\" type=\"external/vmware\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:1\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:2\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:3\" type=\"external/vmware\" class=\"stonith\">" msgstr "" #. Tag: para #, no-c-format msgid "Next identify the resource’s tag name and id (in this case we’ll choose primitive and child_DoFencing). Then simply execute:" msgstr "" #. Tag: para #, no-c-format -msgid "cibadmin --delete --crm_xml ‘<primitive id=\"child_DoFencing\"/>'" +msgid "cibadmin --delete --crm_xml '<primitive id=\"child_DoFencing\"/>'" msgstr "" #. Tag: title #, no-c-format msgid "Updating the Configuration Without Using XML" msgstr "" #. Tag: para #, no-c-format msgid "Some common tasks can also be performed with one of the higher level tools that avoid the need to read or edit XML." msgstr "" #. Tag: para #, no-c-format msgid "To enable stonith for example, one could run:" msgstr "" #. Tag: para #, no-c-format msgid "crm_attribute --attr-name stonith-enabled --attr-value true" msgstr "" #. Tag: para #, no-c-format msgid "Or, to see if somenode is allowed to run resources, there is:" msgstr "" #. Tag: para #, no-c-format msgid "crm_standby --get-value --node-uname somenode" msgstr "" #. Tag: para #, no-c-format msgid "Or, to find the current location of my-test-rsc, one can use:" msgstr "" #. Tag: para #, no-c-format msgid "crm_resource --locate --resource my-test-rsc" msgstr "" #. Tag: title #, no-c-format msgid "Making Configuration Changes in a Sandbox" msgstr "" #. Tag: para #, no-c-format msgid "Often it is desirable to preview the effects of a series of changes before updating the configuration atomically. For this purpose we have created crm_shadow which creates a \"shadow\" copy of the configuration and arranges for all the command line tools to use it." msgstr "" #. Tag: para #, no-c-format msgid "To begin, simply invoke crm_shadow and give it the name of a configuration to create Shadow copies are identified with a name, making it possible to have more than one. ; be sure to follow the simple on-screen instructions." msgstr "" #. Tag: para #, no-c-format -msgid "Read the above carefully, failure to do so could result in you destroying the cluster’s active configuration!</warning>" +msgid "Read the above carefully, failure to do so could result in you destroying the cluster’s active configuration!" msgstr "" #. Tag: programlisting #, no-c-format msgid " # crm_shadow --create test\n" " Setting up shadow instance\n" " Type Ctrl-D to exit the crm_shadow shell\n" " shadow[test]:\n" " shadow[test] # crm_shadow --which\n" " test" msgstr "" #. 
Tag: para #, no-c-format msgid "From this point on, all cluster commands will automatically use the shadow copy instead of talking to the cluster’s active configuration. Once you have finished experimenting, you can either commit the changes, or discard them as shown below. Again, be sure to follow the on-screen instructions carefully." msgstr "" #. Tag: para #, no-c-format msgid "For a full list of crm_shadow options and commands, invoke it with the <parameter>--help</parameter> option." msgstr "" #. Tag: programlisting #, no-c-format msgid " shadow[test] # crm_failcount -G -r rsc_c001n01\n" " name=fail-count-rsc_c001n01 value=0\n" " shadow[test] # crm_standby -v on -n c001n02\n" " shadow[test] # crm_standby -G -n c001n02\n" " name=c001n02 scope=nodes value=on\n" " shadow[test] # cibadmin --erase --force\n" " shadow[test] # cibadmin --query\n" " <cib cib_feature_revision=\"1\" validate-with=\"pacemaker-1.0\" admin_epoch=\"0\" crm_feature_set=\"3.0\" have-quorum=\"1\" epoch=\"112\"\n" " dc-uuid=\"c001n01\" num_updates=\"1\" cib-last-written=\"Fri Jun 27 12:17:10 2008\">\n" " <configuration>\n" " <crm_config/>\n" " <nodes/>\n" " <resources/>\n" " <constraints/>\n" " </configuration>\n" " <status/>\n" " </cib>\n" " shadow[test] # crm_shadow --delete test --force\n" " Now type Ctrl-D to exit the crm_shadow shell\n" " shadow[test] # exit\n" " # crm_shadow --which\n" " No shadow instance provided\n" " # cibadmin -Q\n" " <cib cib_feature_revision=\"1\" validate-with=\"pacemaker-1.0\" admin_epoch=\"0\" crm_feature_set=\"3.0\" have-quorum=\"1\" epoch=\"110\"\n" " dc-uuid=\"c001n01\" num_updates=\"551\">\n" " <configuration>\n" " <crm_config>\n" " <cluster_property_set id=\"cib-bootstrap-options\">\n" " <nvpair id=\"cib-bootstrap-1\" name=\"stonith-enabled\" value=\"1\"/>\n" " <nvpair id=\"cib-bootstrap-2\" name=\"pe-input-series-max\" value=\"30000\"/>" msgstr "" #. Tag: para #, no-c-format msgid "Making changes in a sandbox and verifying the real configuration is untouched" msgstr "" #. Tag: title #, no-c-format msgid "Testing Your Configuration Changes" msgstr "" #. Tag: para #, no-c-format msgid "We saw previously how to make a series of changes to a \"shadow\" copy of the configuration. Before loading the changes back into the cluster (eg. crm_shadow --commit mytest --force), it is often advisable to simulate the effect of the changes with ptest, eg." msgstr "" #. Tag: para #, no-c-format msgid "ptest --live-check -VVVVV --save-graph tmp.graph --save-dotfile tmp.dot" msgstr "" #. Tag: para #, no-c-format msgid "The tool uses the same library as the live cluster to show what it would have done given the supplied input. It’s output, in addition to a significant amount of logging, is stored in two files tmp.graph and tmp.dot, both are representations of the same thing — the cluster’s response to your changes." msgstr "" #. Tag: para #, no-c-format msgid "In the graph file is stored the complete transition, containing a list of all the actions, their parameters and their pre-requisites. Because the transition graph is not terribly easy to read, the tool also generates a Graphviz dot-file representing the same information." msgstr "" #. Tag: title #, no-c-format msgid "Interpreting the Graphviz output" msgstr "" #. Tag: para #, no-c-format msgid "Arrows indicate ordering dependencies" msgstr "" #. Tag: para #, no-c-format msgid "Dashed-arrows indicate dependencies that are not present in the transition graph" msgstr "" #. 
Tag: para #, no-c-format msgid "Actions with a dashed border of any color do not form part of the transition graph" msgstr "" #. Tag: para #, no-c-format msgid "Actions with a green border form part of the transition graph" msgstr "" #. Tag: para #, no-c-format msgid "Actions with a red border are ones the cluster would like to execute but cannot run" msgstr "" #. Tag: para #, no-c-format msgid "Actions with a blue border are ones the cluster does not feel need to be executed" msgstr "" #. Tag: para #, no-c-format msgid "Actions with orange text are pseudo/pretend actions that the cluster uses to simplify the graph" msgstr "" #. Tag: para #, no-c-format msgid "Actions with black text are sent to the LRM" msgstr "" #. Tag: para #, no-c-format msgid "Resource actions have text of the form rsc_action_interval node" msgstr "" #. Tag: para #, no-c-format msgid "Any action depending on an action with a red border will not be able to execute." msgstr "" #. Tag: para #, no-c-format msgid "Loops are really bad. Please report them to the development team." msgstr "" #. Tag: title #, no-c-format msgid "Small Cluster Transition" msgstr "" #. Tag: phrase #, no-c-format msgid "An example transition graph as represented by Graphviz" msgstr "" #. Tag: para #, no-c-format msgid "In the above example, it appears that a new node, node2, has come online and that the cluster is checking to make sure rsc1, rsc2 and rsc3 are not already running there (Indicated by the *_monitor_0 entries). Once it did that, and assuming the resources were not active there, it would have liked to stop rsc1 and rsc2 on node1 and move them to node2. However, there appears to be some problem and the cluster cannot or is not permitted to perform the stop actions which implies it also cannot perform the start actions. For some reason the cluster does not want to start rsc3 anywhere." msgstr "" #. Tag: para #, no-c-format msgid "For information on the options supported by ptest, use ptest --help." msgstr "" #. Tag: title #, no-c-format msgid "Complex Cluster Transition" msgstr "" #. Tag: phrase #, no-c-format msgid "Another, slightly more complex, transition graph that you're not expected to be able to read" msgstr "" #. Tag: title #, no-c-format msgid "Do I Need to Update the Configuration on all Cluster Nodes?" msgstr "" #. Tag: para #, no-c-format msgid "No. Any changes are immediately synchronized to the other active members of the cluster." msgstr "" #. Tag: para #, no-c-format msgid "To reduce bandwidth, the cluster only broadcasts the incremental updates that result from your changes and uses MD5 checksums to ensure that each copy is completely consistent." msgstr "" diff --git a/doc/Pacemaker_Explained/ro-RO/Ch-Basics.po b/doc/Pacemaker_Explained/ro-RO/Ch-Basics.po index d5a0ff2093..76bc686f6a 100644 --- a/doc/Pacemaker_Explained/ro-RO/Ch-Basics.po +++ b/doc/Pacemaker_Explained/ro-RO/Ch-Basics.po @@ -1,665 +1,665 @@ msgid "" msgstr "" "Project-Id-Version: Pacemaker 1.1\n" "POT-Creation-Date: 2012-01-01T17:48:32\n" "PO-Revision-Date: 2012-01-01T17:48:32\n" "Last-Translator: Dan Frîncu \n" "Language-Team: None\n" "MIME-Version: 1.0\n" "Content-Type: application/x-publican; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" #. Tag: title #, no-c-format msgid "Configuration Basics" msgstr "Bazele Configurării" #. Tag: title #, no-c-format msgid "Configuration Layout" msgstr "Aşezarea Configuraţiei" #. 
Tag: para #, no-c-format msgid "The cluster is written using XML notation and divided into two main sections: configuration and status." msgstr "Clusterul este scris folosind notaţie XML şi este împărţit în două secţiuni principale: configurare şi status." #. Tag: para #, no-c-format msgid "The status section contains the history of each resource on each node and based on this data, the cluster can construct the complete current state of the cluster. The authoritative source for the status section is the local resource manager (lrmd) process on each cluster node and the cluster will occasionally repopulate the entire section. For this reason it is never written to disk and administrators are advised against modifying it in any way." msgstr "Secţiunea de status conţine istoricul fiecărei resurse de pe fiecare nod şi pe baza acestor date, clusterul poate construi starea curentă completă a clusterului. Sursa autoritativă pentru secţiunea de status este procesul managerului de resurse local (lrmd) pe fiecare nod din cluster iar clusterul va repopula în mod ocazional întreaga secţiune. Din acest motiv nu este scris niciodată pe disc iar administratorii sunt sfătuiţi împotriva modificării în orice fel a acestuia." #. Tag: para #, no-c-format msgid "The configuration section contains the more traditional information like cluster options, lists of resources and indications of where they should be placed. The configuration section is the primary focus of this document." msgstr "Secţiunea de configurare conţine informaţiile mai tradiţionale precum opţiuni ale clusterului, liste de resurse şi indicaţii despre unde ar trebui acestea plasate. Secţiunea de configurare este scopul primar al acestui document." #. Tag: para #, no-c-format msgid "The configuration section itself is divided into four parts:" msgstr "Secţiunea de configurare în sine este împărţită în patru părţi:" #. Tag: para #, no-c-format msgid "Configuration options (called crm_config)" msgstr "Opţiuni de configurare (numite crm_config)" #. Tag: para #, no-c-format msgid "Nodes" msgstr "Noduri" #. Tag: para #, no-c-format msgid "Resources" msgstr "Resurse" #. Tag: para #, no-c-format msgid "Resource relationships (called constraints)" msgstr "Relaţii între resurse (numite restricţii)" #. Tag: title #, no-c-format msgid "An empty configuration" msgstr "O configuraţie goală" #. Tag: programlisting #, no-c-format msgid "\n" -" <cib generated=\"true\" admin_epoch=\"0\" epoch=\"0\" num_updates=\"0\" have-quorum=\"false\">\n" +" <cib admin_epoch=\"0\" epoch=\"0\" num_updates=\"0\" have-quorum=\"false\">\n" " <configuration>\n" " <crm_config/>\n" " <nodes/>\n" " <resources/>\n" " <constraints/>\n" " </configuration>\n" " <status/>\n" " </cib>\n" "\n" " " msgstr "" "\n" -" <cib generated=\"true\" admin_epoch=\"0\" epoch=\"0\" num_updates=\"0\" have-quorum=\"false\">\n" +" <cib admin_epoch=\"0\" epoch=\"0\" num_updates=\"0\" have-quorum=\"false\">\n" " <configuration>\n" " <crm_config/>\n" " <nodes/>\n" " <resources/>\n" " <constraints/>\n" " </configuration>\n" " <status/>\n" " </cib>\n" "\n" " " #. Tag: title #, no-c-format msgid "The Current State of the Cluster" msgstr "Starea Curentă a Clusterului" #. Tag: para #, no-c-format msgid "Before one starts to configure a cluster, it is worth explaining how to view the finished product. For this purpose we have created the crm_mon utility that will display the current state of an active cluster. 
It can show the cluster status by node or by resource and can be used in either single-shot or dynamically-updating mode. There are also modes for displaying a list of the operations performed (grouped by node and resource) as well as information about failures." msgstr "Înainte de a începe să configurăm clusterul, merită explicat cum să vizualizăm produsul finit. Pentru acest scop am creat utilitarul crm_mon care va arăta starea curentă a unui cluster activ. Poate arăta statusul clusterului după nod sau după resurse şi poate fi folosit fie în mod single-shot (o singură listare) sau dynamically-updating (updatându-se dinamic). Sunt de asemenea moduri pentru prezentarea unei liste de operaţiuni efectuate (grupată după nod si resursă) precum şi informaţii despre eşecuri." #. Tag: para #, no-c-format msgid "Using this tool, you can examine the state of the cluster for irregularities and see how it responds when you cause or simulate failures." msgstr "Folosind acest utilitar, puteţi examina starea clusterului pentru neconcordanţe şi pentru a vedea cum răspunde atunci când provocaţi sau simulaţi eşecuri." #. Tag: para #, no-c-format msgid "Details on all the available options can be obtained using the crm_mon --help command." msgstr "Detalii despre toate opţiunile disponibile pot fi obţinute folosind comanda crm_mon --help." #. Tag: title #, no-c-format msgid "Sample output from crm_mon" msgstr "Exemplu de rezultat obţinut din crm_mon" #. Tag: screen #, no-c-format msgid "# crm_mon\n" " ============\n" " Last updated: Fri Nov 23 15:26:13 2007\n" " Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec)\n" " 3 Nodes configured.\n" " 5 Resources configured.\n" " ============\n" "\n" " Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online\n" " 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1\n" " 192.168.100.182 (heartbeat:IPaddr): Started sles-1\n" " 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1\n" " child_DoFencing:2 (stonith:external/vmware): Started sles-1\n" " Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby\n" " Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online\n" " rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3\n" " rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3\n" " child_DoFencing:0 (stonith:external/vmware): Started sles-3" msgstr "" "# crm_mon\n" " ============\n" " Last updated: Fri Nov 23 15:26:13 2007\n" " Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec)\n" " 3 Nodes configured.\n" " 5 Resources configured.\n" " ============\n" "\n" " Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online\n" " 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1\n" " 192.168.100.182 (heartbeat:IPaddr): Started sles-1\n" " 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1\n" " child_DoFencing:2 (stonith:external/vmware): Started sles-1\n" " Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby\n" " Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online\n" " rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3\n" " rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3\n" " child_DoFencing:0 (stonith:external/vmware): Started sles-3" #. Tag: title #, no-c-format msgid "Sample output from crm_mon -n" msgstr "Exemplu de rezultat obţinut din crm_mon -n" #. 
Tag: screen #, no-c-format msgid "# crm_mon -n\n" " ============\n" " Last updated: Fri Nov 23 15:26:13 2007\n" " Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec)\n" " 3 Nodes configured.\n" " 5 Resources configured.\n" " ============\n" "\n" " Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online\n" " Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby\n" " Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online\n" "\n" " Resource Group: group-1\n" " 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1\n" " 192.168.100.182 (heartbeat:IPaddr): Started sles-1\n" " 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3\n" " rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3\n" " Clone Set: DoFencing\n" " child_DoFencing:0 (stonith:external/vmware): Started sles-3\n" " child_DoFencing:1 (stonith:external/vmware): Stopped\n" " child_DoFencing:2 (stonith:external/vmware): Started sles-1" msgstr "" "\n" "# crm_mon -n\n" " ============\n" " Last updated: Fri Nov 23 15:26:13 2007\n" " Current DC: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec)\n" " 3 Nodes configured.\n" " 5 Resources configured.\n" " ============\n" "\n" " Node: sles-1 (1186dc9a-324d-425a-966e-d757e693dc86): online\n" " Node: sles-2 (02fb99a8-e30e-482f-b3ad-0fb3ce27d088): standby\n" " Node: sles-3 (2298606a-6a8c-499a-9d25-76242f7006ec): online\n" "\n" " Resource Group: group-1\n" " 192.168.100.181 (heartbeat::ocf:IPaddr): Started sles-1\n" " 192.168.100.182 (heartbeat:IPaddr): Started sles-1\n" " 192.168.100.183 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-1 (heartbeat::ocf:IPaddr): Started sles-1\n" " rsc_sles-2 (heartbeat::ocf:IPaddr): Started sles-3\n" " rsc_sles-3 (heartbeat::ocf:IPaddr): Started sles-3\n" " Clone Set: DoFencing\n" " child_DoFencing:0 (stonith:external/vmware): Started sles-3\n" " child_DoFencing:1 (stonith:external/vmware): Stopped\n" " child_DoFencing:2 (stonith:external/vmware): Started sles-1" #. Tag: para #, no-c-format msgid "The DC (Designated Controller) node is where all the decisions are made and if the current DC fails a new one is elected from the remaining cluster nodes. The choice of DC is of no significance to an administrator beyond the fact that its logs will generally be more interesting." msgstr "Nodul DC (Designated Controller - Controller Desemnat) este locul unde toate deciziile sunt luate şi dacă DC-ul curent eşuează unul nou este ales din nodurile rămase în cluster. Alegerea unui DC nu are nici o semnificaţie pentru administrator dincolo de faptul că logurile acestuia vor fi în general mai interesante." #. Tag: title #, no-c-format msgid "How Should the Configuration be Updated?" msgstr "Cum Ar Trebui să fie Actualizată Configuraţia" #. Tag: para #, no-c-format msgid "There are three basic rules for updating the cluster configuration:" msgstr "Sunt trei reguli de bază pentru actualizarea configuraţiei clusterului:" #. Tag: para #, no-c-format msgid "Rule 1 - Never edit the cib.xml file manually. Ever. I'm not making this up." msgstr "Regula 1 - Nu editaţi niciodată fişierul cib.xml manual. Niciodată. Nu inventez acest lucru." #. Tag: para #, no-c-format msgid "Rule 2 - Read Rule 1 again." msgstr "Regula 2 - Citiţi Regula 1 din nou." #. Tag: para #, no-c-format msgid "Rule 3 - The cluster will notice if you ignored rules 1 & 2 and refuse to use the configuration." 
msgstr "Regula 3 - Clusterul va detecta că aţi ignorat regulile 1 & 2 şi va refuza să folosească configuraţia." #. Tag: para #, no-c-format msgid "Now that it is clear how NOT to update the configuration, we can begin to explain how you should." msgstr "Acum că este clar cum să NU actualizăm configuraţia, putem începe să explicăm cum ar trebui să realizăm acest lucru." #. Tag: para #, no-c-format msgid "The most powerful tool for modifying the configuration is the cibadmin command which talks to a running cluster. With cibadmin, the user can query, add, remove, update or replace any part of the configuration; all changes take effect immediately, so there is no need to perform a reload-like operation." msgstr "Cea mai puternică unealtă pentru modificarea configuraţiei este comanda cibadmin care comunică cu un cluster funcţional. Cu cibadmin, utilizatorul poate interoga, adăuga, înlătura, actualiza sau înlocui orice parte a configuraţiei; toate modificările iau efect imediat aşa că nu este necesar să executaţi operaţiuni de tip reîncărcare." #. Tag: para #, no-c-format msgid "The simplest way of using cibadmin is to use it to save the current configuration to a temporary file, edit that file with your favorite text or XML editor and then upload the revised configuration." msgstr "Cel mai simplu mod de a folosi cibadmin este de a-l folosi pentru a salva configuraţia curentă într-un fişier temporar, să editaţi acel fişier cu editorul de text sau XML favorit şi apoi să încărcaţi configuraţia revizuită." #. Tag: title #, no-c-format msgid "Safely using an editor to modify the cluster configuration" msgstr "Folosind cu siguranţă un editor pentru a modifica configuraţia clusterului" #. Tag: programlisting #, no-c-format msgid "cibadmin --query > tmp.xml\n" " vi tmp.xml\n" " cibadmin --replace --xml-file tmp.xml\n" " " msgstr "" "cibadmin --query > tmp.xml\n" " vi tmp.xml\n" " cibadmin --replace --xml-file tmp.xml\n" " " #. Tag: para #, no-c-format msgid "Some of the better XML editors can make use of a Relax NG schema to help make sure any changes you make are valid. The schema describing the configuration can normally be found in /usr/lib/heartbeat/pacemaker.rng on most systems." msgstr "Unele dintre editoarele mai bune de XML pot folosi schema Relax NG pentru a vă ajuta să fiţi siguri că modificările pe care le realizaţi sunt valide. Schema care descrie configuraţia poate fi găsită în mod normal în /usr/lib/heartbeat/pacemaker.rng pe majoritatea sistemelor." #. Tag: para #, no-c-format msgid "If you only wanted to modify the resources section, you could instead do" msgstr "Dacă aţi dorit să modificaţi doar secţiunea de resurse, aţi putea alternativ să executaţi" #. Tag: title #, no-c-format msgid "Safely using an editor to modify a subsection of the cluster configuration" msgstr "Folosind cu siguranţă un editor pentru a modifica o subsecţiune din configuraţia clusterlui" #. Tag: programlisting #, no-c-format msgid "cibadmin --query --obj_type resources > tmp.xml\n" " vi tmp.xml\n" " cibadmin --replace --obj_type resources --xml-file tmp.xml\n" " " msgstr "" "cibadmin --query --obj_type resources > tmp.xml\n" " vi tmp.xml\n" " cibadmin --replace --obj_type resources --xml-file tmp.xml\n" " " #. Tag: para #, no-c-format msgid "to avoid modifying any other part of the configuration." msgstr "pentru a evita modificările survenite la orice altă parte a configuraţiei." #. 
Tag: title #, no-c-format msgid "Quickly Deleting Part of the Configuration" msgstr "Ştergerea Rapidă a unei Părţi din Configuraţie" #. Tag: para #, no-c-format msgid "Identify the object you wish to delete. Eg. do" msgstr "Identificaţi obiectul pe care doriţi să îl ştergeţi. ex." #. Tag: title #, no-c-format msgid "Searching for STONITH related configuration items" msgstr "Căutând pentru elemente de configurare legate de STONITH" #. Tag: screen #, no-c-format -msgid "# cibadmin -Q | grep stonith\n" +msgid "# cibadmin -Q | grep stonith\n" "\n" " <nvpair id=\"cib-bootstrap-options-stonith-action\" name=\"stonith-action\" value=\"reboot\"/>\n" " <nvpair id=\"cib-bootstrap-options-stonith-enabled\" name=\"stonith-enabled\" value=\"1\"/>\n" " <primitive id=\"child_DoFencing\" class=\"stonith\" type=\"external/vmware\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:1\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:2\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:3\" type=\"external/vmware\" class=\"stonith\">\n" "" msgstr "" -"# cibadmin -Q | grep stonith\n" +"# cibadmin -Q | grep stonith\n" "\n" " <nvpair id=\"cib-bootstrap-options-stonith-action\" name=\"stonith-action\" value=\"reboot\"/>\n" " <nvpair id=\"cib-bootstrap-options-stonith-enabled\" name=\"stonith-enabled\" value=\"1\"/>\n" " <primitive id=\"child_DoFencing\" class=\"stonith\" type=\"external/vmware\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:1\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:2\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:0\" type=\"external/vmware\" class=\"stonith\">\n" " <lrm_resource id=\"child_DoFencing:3\" type=\"external/vmware\" class=\"stonith\">\n" #. Tag: para #, no-c-format msgid "Next identify the resource's tag name and id (in this case we'll choose primitive and child_DoFencing). Then simply execute:" msgstr "Apoi identificaţi numele tag-ului resursei şi id-ul (în acest caz vom alege primitive şi child_DoFencing). Apoi executaţi:" #. Tag: para #, no-c-format -msgid "cibadmin --delete --crm_xml ‘<primitive id=\"child_DoFencing\"/>'" -msgstr "cibadmin --delete --crm_xml ‘<primitive id=\"child_DoFencing\"/>'" +msgid "cibadmin --delete --crm_xml '<primitive id=\"child_DoFencing\"/>'" +msgstr "cibadmin --delete --crm_xml '<primitive id=\"child_DoFencing\"/>'" #. Tag: title #, no-c-format msgid "Updating the Configuration Without Using XML" msgstr "Updatând Configuraţia Fără să Folosim XML" #. Tag: para #, no-c-format msgid "Some common tasks can also be performed with one of the higher level tools that avoid the need to read or edit XML." msgstr "Câteva task-uri comune pot fi efectuate de asemenea cu unul dintre utilitarele de nivel înalt pentru a evita nevoia de a citi sau edita XML." #. 
Tag: para #, no-c-format msgid "To enable stonith for example, one could run:" msgstr "Pentru a activa stonith de exemplu, aţi putea rula:" #. Tag: para #, no-c-format msgid "crm_attribute --attr-name stonith-enabled --attr-value true" msgstr "crm_attribute --attr-name stonith-enabled --attr-value true" #. Tag: para #, no-c-format msgid "Or, to see if somenode is allowed to run resources, there is:" msgstr "Sau pentru a vedea dacă somenode îi este permis să ruleze resurse, există:" #. Tag: para #, no-c-format msgid "crm_standby --get-value --node-uname somenode" msgstr "crm_standby --get-value --node-uname somenode" #. Tag: para #, no-c-format msgid "Or, to find the current location of my-test-rsc, one can use:" msgstr "Sau pentru a afla locaţia curentă a my-test-rsc, aţi putea folosi:" #. Tag: para #, no-c-format msgid "crm_resource --locate --resource my-test-rsc" msgstr "crm_resource --locate --resource my-test-rsc" #. Tag: title #, no-c-format msgid "Making Configuration Changes in a Sandbox" msgstr "Realizând Modificări de Configurare într-un Sandbox" #. Tag: para #, no-c-format msgid "Often it is desirable to preview the effects of a series of changes before updating the configuration atomically. For this purpose we have created crm_shadow which creates a \"shadow\" copy of the configuration and arranges for all the command line tools to use it." msgstr "Deseori este de dorit să previzualizaţi efectele unei serii de modificări înainte să actualizaţi configuraţia în mod atomic. Pentru acest scop am creat crm_shadow care creează o copie \"ascunsă\" a configuraţiei şi aranjează astfel încât toate utilitarele din linia de comandă să o folosească." #. Tag: para #, no-c-format msgid "To begin, simply invoke crm_shadow and give it the name of a configuration to create Shadow copies are identified with a name, making it possible to have more than one. ; be sure to follow the simple on-screen instructions." msgstr "Pentru a începe, invocaţi pur şi simplu crm_shadow şi daţi-i numele unei configuraţii pe care să o creeze Copiile ascunse sunt identificate cu un nume, fiind posibil să avem mai mult de una. ; asiguraţi-vă că urmaţi simplele instrucţiuni de pe ecran." #. Tag: para #, no-c-format msgid "Read the above carefully, failure to do so could result in you destroying the cluster's active configuration!" msgstr "Citiţi cele de mai sus cu atenţie, nerespectarea acestora poate rezulta în distrugerea de către voi a configuraţiei active a clusterului!" #. Tag: title #, no-c-format msgid "Creating and displaying the active sandbox" msgstr "Crearea şi prezentarea sandbox-ului activ" #. Tag: programlisting #, no-c-format msgid "# crm_shadow --create test\n" " Setting up shadow instance\n" " Type Ctrl-D to exit the crm_shadow shell\n" " shadow[test]: \n" " shadow[test] # crm_shadow --which\n" " test" msgstr "" " # crm_shadow --create test\n" " Setting up shadow instance\n" " Type Ctrl-D to exit the crm_shadow shell\n" " shadow[test]: \n" " shadow[test] # crm_shadow --which\n" " test" #. Tag: para #, no-c-format msgid "From this point on, all cluster commands will automatically use the shadow copy instead of talking to the cluster's active configuration. Once you have finished experimenting, you can either commit the changes, or discard them as shown below. Again, be sure to follow the on-screen instructions carefully." msgstr "Din acest punct înainte, toate comenzile clusterului vor folosi în mod automat copia ascunsă în loc să comunice cu copia activă a configuraţiei clusterului. 
Odată ce aţi terminat de experimentat, puteţi fie să aplicaţi modificările sau să le înlăturaţi după cum este prezentat mai jos. Din nou, asiguraţi-vă că urmaţi instrucţiunile de pe ecran cu grijă." #. Tag: para #, no-c-format msgid "For a full list of crm_shadow options and commands, invoke it with the --help option." msgstr "Pentru o listă completă de opţiuni şi comenzi ale crm_shadow, apelaţi-l cu opţiunea --help." #. Tag: title #, no-c-format msgid "Using a sandbox to make multiple changes atomically" msgstr "Folosirea unui sandbox pentru a realiza mai multe modificări în mod atomic" #. Tag: programlisting #, no-c-format msgid "shadow[test] # crm_failcount -G -r rsc_c001n01\n" " name=fail-count-rsc_c001n01 value=0\n" " shadow[test] # crm_standby -v on -n c001n02\n" " shadow[test] # crm_standby -G -n c001n02\n" " name=c001n02 scope=nodes value=on\n" " shadow[test] # cibadmin --erase --force\n" " shadow[test] # cibadmin --query\n" "\n" " <cib cib_feature_revision=\"1\" validate-with=\"pacemaker-1.0\" admin_epoch=\"0\" crm_feature_set=\"3.0\" have-quorum=\"1\" epoch=\"112\"\n" " dc-uuid=\"c001n01\" num_updates=\"1\" cib-last-written=\"Fri Jun 27 12:17:10 2008\">\n" " <configuration>\n" " <crm_config/>\n" " <nodes/>\n" " <resources/>\n" " <constraints/>\n" " </configuration>\n" " <status/>\n" " </cib>\n" "\n" " shadow[test] # crm_shadow --delete test --force\n" " Now type Ctrl-D to exit the crm_shadow shell\n" " shadow[test] # exit\n" " # crm_shadow --which\n" " No shadow instance provided\n" " # cibadmin -Q\n" "\n" " <cib cib_feature_revision=\"1\" validate-with=\"pacemaker-1.0\" admin_epoch=\"0\" crm_feature_set=\"3.0\" have-quorum=\"1\" epoch=\"110\"\n" " dc-uuid=\"c001n01\" num_updates=\"551\">\n" " <configuration>\n" " <crm_config>\n" " <cluster_property_set id=\"cib-bootstrap-options\">\n" " <nvpair id=\"cib-bootstrap-1\" name=\"stonith-enabled\" value=\"1\"/>\n" " <nvpair id=\"cib-bootstrap-2\" name=\"pe-input-series-max\" value=\"30000\"/>\n" "\n" " " msgstr "" "shadow[test] # crm_failcount -G -r rsc_c001n01\n" " name=fail-count-rsc_c001n01 value=0\n" " shadow[test] # crm_standby -v on -n c001n02\n" " shadow[test] # crm_standby -G -n c001n02\n" " name=c001n02 scope=nodes value=on\n" " shadow[test] # cibadmin --erase --force\n" " shadow[test] # cibadmin --query\n" "\n" " <cib cib_feature_revision=\"1\" validate-with=\"pacemaker-1.0\" admin_epoch=\"0\" crm_feature_set=\"3.0\" have-quorum=\"1\" epoch=\"112\"\n" " dc-uuid=\"c001n01\" num_updates=\"1\" cib-last-written=\"Fri Jun 27 12:17:10 2008\">\n" " <configuration>\n" " <crm_config/>\n" " <nodes/>\n" " <resources/>\n" " <constraints/>\n" " </configuration>\n" " <status/>\n" " </cib>\n" "\n" " shadow[test] # crm_shadow --delete test --force\n" " Now type Ctrl-D to exit the crm_shadow shell\n" " shadow[test] # exit\n" " # crm_shadow --which\n" " No shadow instance provided\n" " # cibadmin -Q\n" "\n" " <cib cib_feature_revision=\"1\" validate-with=\"pacemaker-1.0\" admin_epoch=\"0\" crm_feature_set=\"3.0\" have-quorum=\"1\" epoch=\"110\"\n" " dc-uuid=\"c001n01\" num_updates=\"551\">\n" " <configuration>\n" " <crm_config>\n" " <cluster_property_set id=\"cib-bootstrap-options\">\n" " <nvpair id=\"cib-bootstrap-1\" name=\"stonith-enabled\" value=\"1\"/>\n" " <nvpair id=\"cib-bootstrap-2\" name=\"pe-input-series-max\" value=\"30000\"/>\n" "\n" " " #. 
Tag: para #, no-c-format msgid "Making changes in a sandbox and verifying the real configuration is untouched" msgstr "Realizarea de modificări într-un sandbox şi apoi verificarea că, configuraţia reală nu este atinsă." #. Tag: title #, no-c-format msgid "Testing Your Configuration Changes" msgstr "Testarea Modificărilor Voastre de Configurare" #. Tag: para #, no-c-format msgid "We saw previously how to make a series of changes to a \"shadow\" copy of the configuration. Before loading the changes back into the cluster (eg. crm_shadow --commit mytest --force), it is often advisable to simulate the effect of the changes with ptest, eg." msgstr "Am văzut anterior cum să realizăm o serie de modificări într-o copie \"ascunsă\" a configuraţiei. Înainte de a încărca modificările înapoi în cluster (ex. crm_shadow --commit mytest --force), este adeseori recomandat să simulaţi efectul modificărilor cu ptest, ex." #. Tag: programlisting #, no-c-format msgid "ptest --live-check -VVVVV --save-graph tmp.graph --save-dotfile tmp.dot" msgstr "ptest --live-check -VVVVV --save-graph tmp.graph --save-dotfile tmp.dot" #. Tag: para #, no-c-format msgid "The tool uses the same library as the live cluster to show what it would have done given the supplied input. It's output, in addition to a significant amount of logging, is stored in two files tmp.graph and tmp.dot, both are representations of the same thing -- the cluster's response to your changes. In the graph file is stored the complete transition, containing a list of all the actions, their parameters and their pre-requisites. Because the transition graph is not terribly easy to read, the tool also generates a Graphviz dot-file representing the same information." msgstr "Utilitarul foloseşte aceeaşi bibliotecă precum clusterul activ pentru a arăta ce ar fi realizat cu informaţiile primite. Rezultatul de ieşire, suplimentar la o cantitate semnificativă de loguri, este stocat în două fişiere tmp.graph şi tmp.dot, ambele sunt reprezentări ale aceluiaşi lucru -- răspunsul clusterului la modificările voastre. În fişierul graf este stocată tranziţia completă, conţinând o listă cu toate acţiunile, parametrii acestora şi cerinţele premergătoare ale acestora. Pentru că graful de tranziţie nu este foarte uşor de citit, utilitarul generează un fişier Graphviz dot de asemenea reprezentând aceleaşi informaţii." #. Tag: title #, no-c-format msgid "Small Cluster Transition" msgstr "Tranziţia unui Cluster Mic" #. Tag: caption #, no-c-format msgid "An example transition graph as represented by Graphviz" msgstr "Un exemplu de graf de tranziţie aşa cum este reprezentat de Graphviz" #. Tag: title #, no-c-format msgid "Interpreting the Graphviz output" msgstr "Interpretarea rezultatului de ieşire al Graphviz" #. Tag: para #, no-c-format msgid "Arrows indicate ordering dependencies" msgstr "Săgeţile indică dependinţele de ordonare" #. Tag: para #, no-c-format msgid "Dashed-arrows indicate dependencies that are not present in the transition graph" msgstr "Săgeţile întrerupte de cratime indică dependinţe care nu sunt prezente în graful de tranziţie" #. Tag: para #, no-c-format msgid "Actions with a dashed border of any color do not form part of the transition graph" msgstr "Acţiunile cu o margine întreruptă cu cratime de orice culoare nu se formează ca parte a grafului de tranziţie" #. Tag: para #, no-c-format msgid "Actions with a green border form part of the transition graph" msgstr "Acţiunile cu o margine verde se formează ca parte a grafului de tranziţie" #. 
Tag: para #, no-c-format msgid "Actions with a red border are ones the cluster would like to execute but cannot run" msgstr "Acţiunile cu o margine roşie sunt cele pe care clusterul ar dori să le execute dar sunt neexecutabile" #. Tag: para #, no-c-format msgid "Actions with a blue border are ones the cluster does not feel need to be executed" msgstr "Acţiunile cu o margine albastră sunt cele pe care clusterul nu consideră că trebuie executate" #. Tag: para #, no-c-format msgid "Actions with orange text are pseudo/pretend actions that the cluster uses to simplify the graph" msgstr "Acţiunile cu text portocaliu sunt acţiuni pseudo/de prefacere pe care clusterul le foloseşte pentru a simplifica graful" #. Tag: para #, no-c-format msgid "Actions with black text are sent to the LRM" msgstr "Acţiunile cu text negru sunt trimise la LRM" #. Tag: para #, no-c-format msgid "Resource actions have text of the form rsc_action_interval node" msgstr "Acţiunile resurselor au textul de forma rsc_action_intervalnode" #. Tag: para #, no-c-format msgid "Any action depending on an action with a red border will not be able to execute." msgstr "Orice acţiune care depine de o acţiune cu o margine roşie nu va putea să se execute." #. Tag: para #, no-c-format msgid "Loops are really bad. Please report them to the development team." msgstr "Buclele de execuţie sunt foarte rele. Vă rugăm să le raportaţi echipei de dezvoltare." #. Tag: para #, no-c-format msgid "In the above example, it appears that a new node, node2, has come online and that the cluster is checking to make sure rsc1, rsc2 and rsc3 are not already running there (Indicated by the *_monitor_0 entries). Once it did that, and assuming the resources were not active there, it would have liked to stop rsc1 and rsc2 on node1 and move them to node2. However, there appears to be some problem and the cluster cannot or is not permitted to perform the stop actions which implies it also cannot perform the start actions. For some reason the cluster does not want to start rsc3 anywhere." msgstr "În exemplul de mai sus, se pare că un nod nou, node2, a ajuns online şi clusterul verifică să se asigure că rsc1, rsc2 şi rsc3, nu rulează deja acolo (aspect indicat de intrările *_monitor_0). Odată ce a realizat acest lucru şi mergând pe presupunerea că resursele nu erau active acolo, ar fi preferat să oprească rsc1 şi rsc2 pe node1 şi să le mute pe node2. Totuşi se pare că există o problemă şi clusterul nu poate sau nu îi este permis să execute operaţiunile de oprire, fapt care implică neputinţa de a efectua acţiunile de pornire de asemenea. Pentru un motiv anume clusterul nu vrea să pornească rsc3 nicăieri." #. Tag: para #, no-c-format msgid "For information on the options supported by ptest, use ptest --help." msgstr "Pentru informaţii legate de opţiunile suportate de ptest, folosiţi ptest --help" #. Tag: title #, no-c-format msgid "Complex Cluster Transition" msgstr "Tranziţii Complexe ale Clusterului" #. Tag: caption #, no-c-format msgid "Another, slightly more complex, transition graph that you're not expected to be able to read" msgstr "Un alt graf de tranziţie, ceva mai complex, pe care nu sunteţi aşteaptaţi să îl puteţi citi" #. Tag: title #, no-c-format msgid "Do I Need to Update the Configuration on all Cluster Nodes?" msgstr "Trebuie să Actualizez Configuraţia pe toate Nodurile Clusterului?" #. Tag: para #, no-c-format msgid "No. Any changes are immediately synchronized to the other active members of the cluster." msgstr "Nu. 
Orice modificări sunt sincronizate imediat către ceilalţi membri activi ai clusterului." #. Tag: para #, no-c-format msgid "To reduce bandwidth, the cluster only broadcasts the incremental updates that result from your changes and uses MD5 checksums to ensure that each copy is completely consistent." msgstr "Pentru a reduce consumul de lăţime de bandă, clusterul transmite numai actualizările incrementale care rezultă din modificările realizate de voi şi foloseşte hash-uri MD5 pentru a se asigura că fiecare copie este complet consistentă." diff --git a/fencing/Makefile.am b/fencing/Makefile.am index 05e92d37a8..eed59048ac 100644 --- a/fencing/Makefile.am +++ b/fencing/Makefile.am @@ -1,82 +1,85 @@ # Author: Sun Jiang Dong # Copyright (c) 2004 International Business Machines # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # MAINTAINERCLEANFILES = Makefile.in SUBDIRS = ## binary progs +testdir = $(datadir)/$(PACKAGE)/tests/fencing +test_SCRIPTS = regression.py + halibdir = $(CRM_DAEMON_DIR) halib_PROGRAMS = stonithd stonith-test sbin_PROGRAMS = stonith_admin sbin_SCRIPTS = fence_legacy fence_pcmk man7_MANS = man8_MANS = if BUILD_XML_HELP man7_MANS += stonithd.7 stonithd.xml: stonithd $(top_builddir)/fencing/$< metadata | $(XSLTPROC) --nonet --novalid --stringparam man.name $< $(top_srcdir)/xml/ocf-meta2man.xsl - > $(top_builddir)/fencing/$@ stonithd.7: stonithd.xml $(XSLTPROC) $(MANPAGE_XSLT) $(top_builddir)/fencing/$< endif if BUILD_HELP man8_MANS += $(sbin_PROGRAMS:%=%.8) $(sbin_SCRIPTS:%=%.8) %.8: % echo Creating $@ chmod a+x $< $(HELP2MAN) --output $@ --no-info --section 8 --name "Part of the Pacemaker cluster resource manager" $(top_builddir)/fencing/$< endif stonith_test_SOURCES = test.c stonith_test_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/cluster/libcrmcluster.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(CRYPTOLIB) $(CLUSTERLIBS) stonith_admin_SOURCES = admin.c stonith_admin_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/cib/libcib.la \ $(top_builddir)/lib/pengine/libpe_status.la \ $(top_builddir)/lib/cluster/libcrmcluster.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(CRYPTOLIB) $(CLUSTERLIBS) stonithd_SOURCES = main.c commands.c remote.c if BUILD_STONITH_CONFIG BUILT_SOURCES = standalone_config.h stonithd_SOURCES += standalone_config.c config.y config.l stonithd_AM_LFLAGS = -o$(LEX_OUTPUT_ROOT).c # lex/yacc issues: endif stonithd_YFLAGS = -d stonithd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \ $(top_builddir)/lib/cluster/libcrmcluster.la \ $(top_builddir)/lib/fencing/libstonithd.la \ $(CRYPTOLIB) $(CLUSTERLIBS) CFLAGS = $(CFLAGS_COPY:-Werror=) diff --git a/fencing/fence_false b/fencing/fence_false index af9409e893..ae7c2d15e2 100644 --- a/fencing/fence_false +++ b/fencing/fence_false @@ -1,75 +1,76 @@ #!/usr/bin/python # The Following Agent Has Been Tested On: # # 
Virsh 0.3.3 on RHEL 5.2 with xen-3.0.3-51 # import sys, time sys.path.append("/usr/share/fence") from fencing import * #BEGIN_VERSION_GENERATION RELEASE_VERSION="3.1.6" BUILD_DATE="(built Mon Oct 24 12:14:08 UTC 2011)" REDHAT_COPYRIGHT="Copyright (C) Red Hat, Inc. 2004-2010 All rights reserved." #END_VERSION_GENERATION plug_status="on" def get_outlets_status(conn, options): result={} # This fake agent has no port data to list, so we have to make # something up for the list action. if options.has_key("-o") and options["-o"] == "list": result["fake_port_1"]=[plug_status, "fake"] result["fake_port_2"]=[plug_status, "fake"] elif (options.has_key("-n") == 0): fail_usage("Failed: You have to enter existing machine!") else: port=options["-n"] result[port]=[plug_status, "fake"] return result def get_power_status(conn, options): outlets=get_outlets_status(conn,options) if len(outlets) == 0 or options.has_key("-n") == 0: fail_usage("Failed: You have to enter existing machine!") else: return outlets[options["-n"]][0] def set_power_status(conn, options): global plug_status plug_status = "unknown" + exit(1) def main(): device_opt = [ "help", "version", "agent", "quiet", "verbose", "debug", "action", "port", "no_password", "power_wait", "power_timeout", ] atexit.register(atexit_handler) pinput = process_input(device_opt) # Fake options to keep the library happy #pinput["-p"] = "none" pinput["-a"] = "localhost" pinput["-C"] = "," options = check_input(device_opt, pinput) if options.has_key("-o") and (options["-o"] == "monitor"): sys.exit(0) ## Defaults for fence agent docs = { } docs["shortdesc"] = "Fake fence agent" docs["longdesc"] = "fence_true is a fake Fencing agent which always reports success without doing anything." show_docs(options, docs) ## Operate the fencing device result = fence_action(None, options, set_power_status, get_power_status, get_outlets_status) sys.exit(result) if __name__ == "__main__": main() diff --git a/fencing/main.c b/fencing/main.c index 502bfe1a57..bf6bfb89cd 100644 --- a/fencing/main.c +++ b/fencing/main.c @@ -1,864 +1,872 @@ /* * Copyright (C) 2009 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
* * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include char *stonith_our_uname = NULL; GMainLoop *mainloop = NULL; GHashTable *client_list = NULL; gboolean stand_alone = FALSE; +gboolean no_cib_connect = FALSE; gboolean stonith_shutdown_flag = FALSE; qb_ipcs_service_t *ipcs = NULL; #if SUPPORT_HEARTBEAT ll_cluster_t *hb_conn = NULL; #endif static void stonith_shutdown(int nsig); static void stonith_cleanup(void); static int32_t st_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid) { crm_trace("Connecting %p for uid=%d gid=%d", c, uid, gid); if(stonith_shutdown_flag) { crm_info("Ignoring new client [%d] during shutdown", crm_ipcs_client_pid(c)); return -EPERM; } return 0; } static void st_ipc_created(qb_ipcs_connection_t *c) { stonith_client_t *new_client = NULL; #if 0 struct qb_ipcs_stats srv_stats; qb_ipcs_stats_get(s1, &srv_stats, QB_FALSE); qb_log(LOG_INFO, "Connection created (active:%d, closed:%d)", srv_stats.active_connections, srv_stats.closed_connections); #endif new_client = calloc(1, sizeof(stonith_client_t)); new_client->channel = c; new_client->channel_name = strdup("ipc"); CRM_CHECK(new_client->id == NULL, free(new_client->id)); new_client->id = crm_generate_uuid(); crm_trace("Created channel %p for client %s", c, new_client->id); /* make sure we can find ourselves later for sync calls * redirected to the master instance */ g_hash_table_insert(client_list, new_client->id, new_client); qb_ipcs_context_set(c, new_client); CRM_ASSERT(qb_ipcs_context_get(c) != NULL); } /* Exit code means? */ static int32_t st_ipc_dispatch(qb_ipcs_connection_t *c, void *data, size_t size) { xmlNode *request = NULL; stonith_client_t *client = (stonith_client_t*)qb_ipcs_context_get(c); request = crm_ipcs_recv(c, data, size); if (request == NULL) { return 0; } CRM_CHECK(client != NULL, goto cleanup); if(client->name == NULL) { const char *value = crm_element_value(request, F_STONITH_CLIENTNAME); if(value == NULL) { client->name = crm_itoa(crm_ipcs_client_pid(c)); } else { client->name = strdup(value); } } CRM_CHECK(client->id != NULL, crm_err("Invalid client: %p/%s", client, client->name); goto cleanup); crm_xml_add(request, F_STONITH_CLIENTID, client->id); crm_xml_add(request, F_STONITH_CLIENTNAME, client->name); crm_log_xml_trace(request, "Client[inbound]"); stonith_command(client, request, NULL); cleanup: if(client == NULL || client->id == NULL) { crm_log_xml_notice(request, "Invalid client"); } free_xml(request); return 0; } /* Error code means? 
*/ static int32_t st_ipc_closed(qb_ipcs_connection_t *c) { stonith_client_t *client = (stonith_client_t*)qb_ipcs_context_get(c); #if 0 qb_ipcs_stats_get(s1, &srv_stats, QB_FALSE); qb_ipcs_connection_stats_get(c, &stats, QB_FALSE); qb_log(LOG_INFO, "Connection to pid:%d destroyed (active:%d, closed:%d)", stats.client_pid, srv_stats.active_connections, srv_stats.closed_connections); qb_log(LOG_DEBUG, " Requests %"PRIu64"", stats.requests); qb_log(LOG_DEBUG, " Responses %"PRIu64"", stats.responses); qb_log(LOG_DEBUG, " Events %"PRIu64"", stats.events); qb_log(LOG_DEBUG, " Send retries %"PRIu64"", stats.send_retries); qb_log(LOG_DEBUG, " Recv retries %"PRIu64"", stats.recv_retries); qb_log(LOG_DEBUG, " FC state %d", stats.flow_control_state); qb_log(LOG_DEBUG, " FC count %"PRIu64"", stats.flow_control_count); #endif if (client == NULL) { crm_err("No client"); return 0; } crm_trace("Cleaning up after client disconnect: %p/%s/%s", client, crm_str(client->name), client->id); if(client->id != NULL) { g_hash_table_remove(client_list, client->id); } /* 0 means: yes, go ahead and destroy the connection */ return 0; } static void st_ipc_destroy(qb_ipcs_connection_t *c) { stonith_client_t *client = (stonith_client_t*)qb_ipcs_context_get(c); /* Make sure the connection is fully cleaned up */ st_ipc_closed(c); if(client == NULL) { crm_trace("Nothing to destroy"); return; } crm_trace("Destroying %s (%p)", client->name, client); free(client->name); free(client->id); free(client); crm_trace("Done"); return; } static void stonith_peer_callback(xmlNode * msg, void* private_data) { const char *remote = crm_element_value(msg, F_ORIG); crm_log_xml_trace(msg, "Peer[inbound]"); stonith_command(NULL, msg, remote); } #if SUPPORT_HEARTBEAT static void stonith_peer_hb_callback(HA_Message * msg, void* private_data) { xmlNode *xml = convert_ha_message(NULL, msg, __FUNCTION__); stonith_peer_callback(xml, private_data); free_xml(xml); } static void stonith_peer_hb_destroy(gpointer user_data) { if(stonith_shutdown_flag) { crm_info("Heartbeat disconnection complete... exiting"); } else { crm_err("Heartbeat connection lost! Exiting."); } stonith_shutdown(0); } #endif #if SUPPORT_COROSYNC static gboolean stonith_peer_ais_callback( AIS_Message *wrapper, char *data, int sender) { xmlNode *xml = NULL; if(wrapper->header.id == crm_class_cluster) { xml = string2xml(data); if(xml == NULL) { goto bail; } crm_xml_add(xml, F_ORIG, wrapper->sender.uname); crm_xml_add_int(xml, F_SEQ, wrapper->id); stonith_peer_callback(xml, NULL); } free_xml(xml); return TRUE; bail: crm_err("Invalid XML: '%.120s'", data); return TRUE; } static void stonith_peer_ais_destroy(gpointer user_data) { crm_err("AIS connection terminated"); stonith_shutdown(0); } #endif void do_local_reply(xmlNode *notify_src, const char *client_id, gboolean sync_reply, gboolean from_peer) { /* send callback to originating child */ stonith_client_t *client_obj = NULL; int local_rc = pcmk_ok; crm_trace("Sending response"); if(client_id != NULL) { client_obj = g_hash_table_lookup(client_list, client_id); } else { crm_trace("No client to sent the response to." 
" F_STONITH_CLIENTID not set."); } crm_trace("Sending callback to request originator"); if(client_obj == NULL) { local_rc = -1; } else { crm_trace("Sending %ssync response to %s %s", sync_reply?"":"an a-", client_obj->name, from_peer?"(originator of delegated request)":""); local_rc = crm_ipcs_send(client_obj->channel, notify_src, !sync_reply); } if(local_rc < pcmk_ok && client_obj != NULL) { crm_warn("%sSync reply to %s failed: %s", sync_reply?"":"A-", client_obj?client_obj->name:"", pcmk_strerror(local_rc)); } } long long get_stonith_flag(const char *name) { if(safe_str_eq(name, T_STONITH_NOTIFY_FENCE)) { return 0x01; } else if(safe_str_eq(name, STONITH_OP_DEVICE_ADD)) { return 0x04; } else if(safe_str_eq(name, STONITH_OP_DEVICE_DEL)) { return 0x10; } return 0; } static void stonith_notify_client(gpointer key, gpointer value, gpointer user_data) { xmlNode *update_msg = user_data; stonith_client_t *client = value; const char *type = NULL; CRM_CHECK(client != NULL, return); CRM_CHECK(update_msg != NULL, return); type = crm_element_value(update_msg, F_SUBTYPE); CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return); if(client->channel == NULL) { crm_trace("Skipping client with NULL channel"); return; } else if(client->name == NULL) { crm_trace("Skipping unnammed client / comamnd channel"); return; } if(client->flags & get_stonith_flag(type)) { crm_trace("Sending %s-notification to client %s/%s", type, client->name, client->id); if(crm_ipcs_send(client->channel, update_msg, ipcs_send_event|ipcs_send_error) <= 0) { crm_warn("%s-Notification of client %s/%s failed", type, client->name, client->id); } } } void do_stonith_notify( int options, const char *type, int result, xmlNode *data, const char *remote) { /* TODO: Standardize the contents of data */ xmlNode *update_msg = create_xml_node(NULL, "notify"); CRM_CHECK(type != NULL, ;); crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY); crm_xml_add(update_msg, F_SUBTYPE, type); crm_xml_add(update_msg, F_STONITH_OPERATION, type); crm_xml_add_int(update_msg, F_STONITH_RC, result); if(data != NULL) { add_message_xml(update_msg, F_STONITH_CALLDATA, data); } crm_trace("Notifying clients"); g_hash_table_foreach(client_list, stonith_notify_client, update_msg); free_xml(update_msg); crm_trace("Notify complete"); } static stonith_key_value_t *parse_device_list(const char *devices) { int lpc = 0; int max = 0; int last = 0; stonith_key_value_t *output = NULL; if(devices == NULL) { return output; } max = strlen(devices); for(lpc = 0; lpc <= max; lpc++) { if(devices[lpc] == ',' || devices[lpc] == 0) { char *line = NULL; line = calloc(1, 2 + lpc - last); snprintf(line, 1 + lpc - last, "%s", devices+last); output = stonith_key_value_add(output, NULL, line); free(line); last = lpc + 1; } } return output; } static void topology_remove_helper(const char *node, int level) { int rc; char *desc = NULL; xmlNode *data = create_xml_node(NULL, F_STONITH_LEVEL); xmlNode *notify_data = create_xml_node(NULL, STONITH_OP_LEVEL_DEL); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add_int(data, XML_ATTR_ID, level); crm_xml_add(data, F_STONITH_TARGET, node); rc = stonith_level_remove(data, &desc); crm_xml_add(notify_data, F_STONITH_DEVICE, desc); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology)); do_stonith_notify(0, STONITH_OP_LEVEL_DEL, rc, notify_data, NULL); free_xml(notify_data); free_xml(data); free(desc); } static void topology_register_helper(const char *node, int level, stonith_key_value_t *device_list) { int rc; char *desc = 
NULL; xmlNode *notify_data = create_xml_node(NULL, STONITH_OP_LEVEL_ADD); xmlNode *data = create_level_registration_xml(node, level, device_list); rc = stonith_level_register(data, &desc); crm_xml_add(notify_data, F_STONITH_DEVICE, desc); crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology)); do_stonith_notify(0, STONITH_OP_LEVEL_ADD, rc, notify_data, NULL); free_xml(notify_data); free_xml(data); free(desc); } static void remove_fencing_topology(xmlXPathObjectPtr xpathObj) { int max = 0, lpc = 0; if(xpathObj && xpathObj->nodesetval) { max = xpathObj->nodesetval->nodeNr; } for(lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); CRM_CHECK(match != NULL, continue); if(crm_element_value(match, XML_DIFF_MARKER)) { /* Deletion */ int index = 0; const char *target = crm_element_value(match, XML_ATTR_STONITH_TARGET); crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); if(target == NULL) { crm_err("Invalid fencing target in element %s", ID(match)); } else if(index <= 0) { crm_err("Invalid level for %s in element %s", target, ID(match)); } else { topology_remove_helper(target, index); } /* } else { Deal with modifications during the 'addition' stage */ } } } static void register_fencing_topology(xmlXPathObjectPtr xpathObj, gboolean force) { int max = 0, lpc = 0; if(xpathObj && xpathObj->nodesetval) { max = xpathObj->nodesetval->nodeNr; } for(lpc = 0; lpc < max; lpc++) { int index = 0; const char *target; const char *dev_list; stonith_key_value_t *devices = NULL; xmlNode *match = getXpathResult(xpathObj, lpc); CRM_CHECK(match != NULL, continue); crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index); target = crm_element_value(match, XML_ATTR_STONITH_TARGET); dev_list = crm_element_value(match, XML_ATTR_STONITH_DEVICES); devices = parse_device_list(dev_list); crm_trace("Updating %s[%d] (%s) to %s", target, index, ID(match), dev_list); if(target == NULL) { crm_err("Invalid fencing target in element %s", ID(match)); } else if(index <= 0) { crm_err("Invalid level for %s in element %s", target, ID(match)); } else if(force == FALSE && crm_element_value(match, XML_DIFF_MARKER)) { /* Addition */ topology_register_helper(target, index, devices); } else { /* Modification */ /* Remove then re-add */ topology_remove_helper(target, index); topology_register_helper(target, index, devices); } stonith_key_value_freeall(devices, 1, 1); } } /* Fencing */ static void fencing_topology_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data) { xmlXPathObjectPtr xpathObj = NULL; const char *xpath = "//" XML_TAG_FENCING_LEVEL; crm_trace("Pushing in stonith topology"); /* Grab everything */ xpathObj = xpath_search(msg, xpath); register_fencing_topology(xpathObj, TRUE); if(xpathObj) { xmlXPathFreeObject(xpathObj); } } static void update_fencing_topology(const char *event, xmlNode * msg) { const char *xpath; xmlXPathObjectPtr xpathObj = NULL; /* Process deletions (only) */ xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL; xpathObj = xpath_search(msg, xpath); remove_fencing_topology(xpathObj); if(xpathObj) { xmlXPathFreeObject(xpathObj); } /* Process additions and changes */ xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL; xpathObj = xpath_search(msg, xpath); register_fencing_topology(xpathObj, FALSE); if(xpathObj) { xmlXPathFreeObject(xpathObj); } } static void stonith_shutdown(int nsig) { stonith_shutdown_flag = TRUE; crm_info("Terminating with %d clients", 
g_hash_table_size(client_list)); if(mainloop != NULL && g_main_is_running(mainloop)) { g_main_quit(mainloop); } else { stonith_cleanup(); exit(EX_OK); } } cib_t *cib = NULL; static void stonith_cleanup(void) { if(cib) { cib->cmds->signoff(cib); } qb_ipcs_destroy(ipcs); crm_peer_destroy(); g_hash_table_destroy(client_list); free(stonith_our_uname); #if HAVE_LIBXML2 crm_xml_cleanup(); #endif } /* *INDENT-OFF* */ static struct crm_option long_options[] = { - {"stand-alone", 0, 0, 's'}, + {"stand-alone", 0, 0, 's'}, + {"stand-alone-w-cpg", 0, 0, 'c'}, {"verbose", 0, 0, 'V'}, {"version", 0, 0, '$'}, {"help", 0, 0, '?'}, {0, 0, 0, 0} }; /* *INDENT-ON* */ static void setup_cib(void) { static void *cib_library = NULL; static cib_t *(*cib_new_fn)(void) = NULL; static const char *(*cib_err_fn)(int) = NULL; int rc, retries = 0; if(cib_library == NULL) { cib_library = dlopen(CIB_LIBRARY, RTLD_LAZY); } if(cib_library && cib_new_fn == NULL) { cib_new_fn = dlsym(cib_library, "cib_new"); } if(cib_library && cib_err_fn == NULL) { cib_err_fn = dlsym(cib_library, "pcmk_strerror"); } if(cib_new_fn != NULL) { cib = (*cib_new_fn)(); } if(cib == NULL) { crm_err("No connection to the CIB"); return; } do { sleep(retries); rc = cib->cmds->signon(cib, CRM_SYSTEM_CRMD, cib_command); } while(rc == -ENOTCONN && ++retries < 5); if (rc != pcmk_ok) { crm_err("Could not connect to the CIB service: %s", (*cib_err_fn)(rc)); } else if (pcmk_ok != cib->cmds->add_notify_callback( cib, T_CIB_DIFF_NOTIFY, update_fencing_topology)) { crm_err("Could not set CIB notification callback"); } else { rc = cib->cmds->query(cib, NULL, NULL, cib_scope_local); add_cib_op_callback(cib, rc, FALSE, NULL, fencing_topology_callback); crm_notice("Watching for stonith topology changes"); } } struct qb_ipcs_service_handlers ipc_callbacks = { .connection_accept = st_ipc_accept, .connection_created = st_ipc_created, .msg_process = st_ipc_dispatch, .connection_closed = st_ipc_closed, .connection_destroyed = st_ipc_destroy }; int main(int argc, char ** argv) { int flag; int rc = 0; int lpc = 0; int argerr = 0; int option_index = 0; const char *actions[] = { "reboot", "poweroff", "list", "monitor", "status" }; crm_log_init("stonith-ng", LOG_INFO, TRUE, FALSE, argc, argv, FALSE); crm_set_options(NULL, "mode [options]", long_options, "Provides a summary of cluster's current state." "\n\nOutputs varying levels of detail in a number of different formats.\n"); while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) { break; } switch(flag) { case 'V': crm_bump_log_level(); break; case 's': stand_alone = TRUE; break; + case 'c': + stand_alone = FALSE; + no_cib_connect = TRUE; + break; case '$': case '?': crm_help(flag, EX_OK); break; default: ++argerr; break; } } if(argc - optind == 1 && safe_str_eq("metadata", argv[optind])) { printf("\n"); printf("\n"); printf(" 1.0\n"); printf(" This is a fake resource that details the instance attributes handled by stonithd.\n"); printf(" Options available for all stonith resources\n"); printf(" \n"); printf(" \n"); printf(" How long to wait for the STONITH action to complete.\n"); printf(" Overrides the stonith-timeout cluster property\n"); printf(" \n"); printf(" \n"); printf(" \n"); printf(" The priority of the stonith resource. 
The lower the number, the higher the priority.\n"); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_HOSTARG); printf(" Advanced use only: An alternate parameter to supply instead of 'port'\n"); printf(" Some devices do not support the standard 'port' parameter or may provide additional ones.\n" "Use this to specify an alternate, device-specific, parameter that should indicate the machine to be fenced.\n" "A value of 'none' can be used to tell the cluster not to supply any additional parameters.\n" " \n"); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_HOSTMAP); printf(" A mapping of host names to ports numbers for devices that do not support host names.\n"); printf(" Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2\n"); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_HOSTLIST); printf(" A list of machines controlled by this device (Optional unless %s=static-list).\n", STONITH_ATTR_HOSTCHECK); printf(" \n"); printf(" \n"); printf(" \n", STONITH_ATTR_HOSTCHECK); printf(" How to determin which machines are controlled by the device.\n"); printf(" Allowed values: dynamic-list (query the device), static-list (check the %s attribute), none (assume every device can fence every machine)\n", STONITH_ATTR_HOSTLIST); printf(" \n"); printf(" \n"); for(lpc = 0; lpc < DIMOF(actions); lpc++) { printf(" \n", actions[lpc]); printf(" Advanced use only: An alternate command to run instead of '%s'\n", actions[lpc]); printf(" Some devices do not support the standard commands or may provide additional ones.\n" "Use this to specify an alternate, device-specific, command that implements the '%s' action.\n", actions[lpc]); printf(" \n", actions[lpc]); printf(" \n"); } printf(" \n"); printf("\n"); return 0; } if (optind != argc) { ++argerr; } if (argerr) { crm_help('?', EX_USAGE); } mainloop_add_signal(SIGTERM, stonith_shutdown); crm_peer_init(); client_list = g_hash_table_new(crm_str_hash, g_str_equal); if(stand_alone == FALSE) { void *dispatch = NULL; void *destroy = NULL; #if SUPPORT_HEARTBEAT dispatch = stonith_peer_hb_callback; destroy = stonith_peer_hb_destroy; #endif if(is_openais_cluster()) { #if SUPPORT_COROSYNC destroy = stonith_peer_ais_destroy; dispatch = stonith_peer_ais_callback; #endif } if(crm_cluster_connect(&stonith_our_uname, NULL, dispatch, destroy, #if SUPPORT_HEARTBEAT &hb_conn #else NULL #endif ) == FALSE) { crm_crit("Cannot sign in to the cluster... terminating"); exit(100); } - setup_cib(); + if (no_cib_connect == FALSE) { + setup_cib(); + } } else { stonith_our_uname = strdup("localhost"); } device_list = g_hash_table_new_full( crm_str_hash, g_str_equal, NULL, free_device); topology = g_hash_table_new_full( crm_str_hash, g_str_equal, NULL, free_topology_entry); ipcs = mainloop_add_ipc_server("stonith-ng", QB_IPC_NATIVE, &ipc_callbacks); #if SUPPORT_STONITH_CONFIG if (((stand_alone == TRUE)) && !(standalone_cfg_read_file(STONITH_NG_CONF_FILE))) { standalone_cfg_commit(); } #endif if(ipcs != NULL) { /* Create the mainloop and run it... 
*/ mainloop = g_main_new(FALSE); crm_info("Starting %s mainloop", crm_system_name); g_main_run(mainloop); } else { crm_err("Couldnt start all communication channels, exiting."); } stonith_cleanup(); #if SUPPORT_HEARTBEAT if(hb_conn) { hb_conn->llc_ops->delete(hb_conn); } #endif crm_info("Done"); qb_log_fini(); return rc; } diff --git a/fencing/regression.py.in b/fencing/regression.py.in new file mode 100644 index 0000000000..ea508b7c96 --- /dev/null +++ b/fencing/regression.py.in @@ -0,0 +1,496 @@ +#!/usr/bin/python + +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + + +import os +import sys +import subprocess +import shlex +import time + +def output_from_command(command): + test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE) + test.wait() + + return test.communicate()[0].split("\n") + +class Test: + def __init__(self, name, description, verbose = 0, with_cpg = 0): + self.name = name + self.description = description + self.cmds = [] + self.verbose = verbose + + self.result_txt = "" + self.cmd_tool_output = "" + self.result_exitcode = 0; + + self.stonith_options = "-s" + self.enable_corosync = 0 + if with_cpg: + self.stonith_options = "-c" + self.enable_corosync = 1 + + self.stonith_process = None + self.stonith_output = "" + self.stonith_patterns = [] + + self.executed = 0 + + rsc_classes = output_from_command("crm_resource --list-standards") + + self.has_systemd = 0 + if "systemd" in rsc_classes: + self.has_systemd = 1 + + def __new_cmd(self, cmd, args, exitcode, stdout_match = "", no_wait = 0, stdout_negative_match = "", kill=None): + self.cmds.append( + { + "cmd" : cmd, + "kill" : kill, + "args" : args, + "expected_exitcode" : exitcode, + "stdout_match" : stdout_match, + "stdout_negative_match" : stdout_negative_match, + "no_wait" : no_wait, + } + ) + + def start_corosync(self): + if self.enable_corosync == 0: + return + + if self.has_systemd: + cmd = shlex.split("systemctl start corosync.service") + else: + cmd = shlex.split("service corosync start") + + test = subprocess.Popen(cmd, stdout=subprocess.PIPE) + test.wait() + + def stop_corosync(self): + if self.enable_corosync == 0: + return + + if self.has_systemd: + cmd = shlex.split("systemctl stop corosync.service") + else: + cmd = shlex.split("service corosync stop") + test = subprocess.Popen(cmd, stdout=subprocess.PIPE) + test.wait() + + def stop_pacemaker(self): + if self.has_systemd: + cmd = shlex.split("systemctl stop pacemaker.service") + else: + cmd = shlex.split("service pacemaker stop") + test = subprocess.Popen(cmd, stdout=subprocess.PIPE) + test.wait() + + def start_environment(self): + ### make sure we are in full control here ### + self.stop_pacemaker() + self.stop_corosync() + + cmd = shlex.split("killall -q -9 stonithd") + test = subprocess.Popen(cmd, stdout=subprocess.PIPE) + test.wait() + + self.start_corosync() + + 
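For orientation, each step queued through __new_cmd() above is stored as a plain dictionary that run_cmd() later turns into a subprocess call. A registration step like the ones built further down would be recorded roughly as follows (a sketch of the in-memory shape, not output captured from the harness):

    # Rough shape of one entry in Test.cmds after a call such as
    # test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"")
    step = {
        "cmd": "stonith_admin",
        "args": "-R true1 -a fence_true -o \"pcmk_host_list=node3\"",
        "expected_exitcode": 0,       # add_cmd() always expects success
        "stdout_match": "",           # no required stdout substring
        "stdout_negative_match": "",  # no forbidden stdout substring
        "no_wait": 0,                 # wait for the command to finish
        "kill": None,                 # no extra command to run alongside it
    }
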
self.stonith_process = subprocess.Popen( + shlex.split("@CRM_DAEMON_DIR@/stonithd %s -V" % self.stonith_options), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + time.sleep(1) + + def clean_environment(self): + if self.stonith_process: + self.stonith_process.kill() + + self.stonith_output = self.stonith_process.communicate()[1] + self.stonith_process = None + + self.stop_corosync() + + def add_stonith_log_pattern(self, pattern): + self.stonith_patterns.append(pattern) + + def add_cmd(self, cmd, args): + self.__new_cmd(cmd, args, 0, "") + + def add_cmd_no_wait(self, cmd, args): + self.__new_cmd(cmd, args, 0, "", 1) + + def add_cmd_check_stdout(self, cmd, args, match, no_match = ""): + self.__new_cmd(cmd, args, 0, match, 0, no_match) + + def add_expected_fail_cmd(self, cmd, args, exitcode = 255): + self.__new_cmd(cmd, args, exitcode, "") + + def get_exitcode(self): + return self.result_exitcode + + def print_result(self, filler): + print "%s%s" % (filler, self.result_txt) + + def run_cmd(self, args): + cmd = shlex.split(args['args']) + cmd.insert(0, args['cmd']) + + if self.verbose: + print "\n\nRunning: "+" ".join(cmd) + test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + if args['kill']: + if self.verbose: + print "Also running: "+args['kill'] + subprocess.Popen(shlex.split(args['kill'])) + + if args['no_wait'] == 0: + test.wait() + else: + return 0 + + output = test.communicate()[0] + + if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0: + test.returncode = -2 + print "STDOUT string '%s' was not found in cmd output: %s" % (args['stdout_match'], output) + + if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0: + test.returncode = -2 + print "STDOUT string '%s' was found in cmd output: %s" % (args['stdout_negative_match'], output) + + return test.returncode; + + def match_stonith_patterns(self): + cur = 0 + total_patterns = len(self.stonith_patterns) + + if len(self.stonith_patterns) == 0: + return + + for line in self.stonith_output.split("\n"): + if line.count(self.stonith_patterns[cur]): + cur = cur + 1 + if cur == total_patterns: + break + + if cur != len(self.stonith_patterns): + for item in range(total_patterns): + if self.verbose and item > (cur -1): + print "Pattern Not Matched = '%s'" % self.stonith_patterns[item] + + self.result_txt = "FAILURE - '%s' failed. %d patterns out of %d not matched" % (self.name, total_patterns - cur, total_patterns) + self.result_exitcode = -1 + + def run(self): + res = 0 + i = 1 + self.start_environment() + + if self.verbose: + print "\n--- START TEST - %s" % self.name + + self.result_txt = "SUCCESS - '%s'" % (self.name) + self.result_exitcode = 0 + for cmd in self.cmds: + res = self.run_cmd(cmd) + if res != cmd['expected_exitcode']: + print "Step %d FAILED - command returned %d, expected %d" % (i, res, cmd['expected_exitcode']) + self.result_txt = "FAILURE - '%s' failed at step %d. 
Command: lrmd_test %s" % (self.name, i, cmd['args']) + self.result_exitcode = -1 + break + else: + if self.verbose: + print "Step %d SUCCESS" % (i) + i = i + 1 + self.clean_environment() + + if self.result_exitcode == 0: + self.match_stonith_patterns() + + print self.result_txt + if self.verbose: + print "--- END TEST - %s\n" % self.name + + self.executed = 1 + return res + +class Tests: + def __init__(self, verbose = 0): + self.tests = [] + self.verbose = verbose + + def new_test(self, name, description, with_cpg = 0): + test = Test(name, description, self.verbose, with_cpg) + self.tests.append(test) + return test + + def print_list(self): + print "\n==== %d TESTS FOUND ====" % (len(self.tests)) + print "%35s - %s" % ("TEST NAME", "TEST DESCRIPTION") + print "%35s - %s" % ("--------------------", "--------------------") + for test in self.tests: + print "%35s - %s" % (test.name, test.description) + print "==== END OF LIST ====\n" + + def run_single(self, name): + for test in self.tests: + if test.name == name: + test.run() + break; + + def run_tests_matching(self, pattern): + for test in self.tests: + if test.name.count(pattern) != 0: + test.run() + + def run_tests(self): + for test in self.tests: + test.run() + + def print_results(self): + failures = 0; + success = 0; + print "\n\n======= FINAL RESULTS ==========" + print "\n--- FAILURE RESULTS:" + for test in self.tests: + if test.executed == 0: + continue + + if test.get_exitcode() != 0: + failures = failures + 1 + test.print_result(" ") + else: + success = success + 1 + + if failures == 0: + print " None" + + print "\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures) + def build_api_sanity_tests(self): + verbose_arg = "" + if self.verbose: + verbose_arg = "-V" + + test = self.new_test("api_sanity_test", "Sanity test client api.") + test.add_cmd("@CRM_DAEMON_DIR@/stonith-test", "-t %s" % (verbose_arg)) + + def build_standalone_tests(self): + test_types = [ + { + "prefix" : "standalone" , + "use_cpg" : 0, + }, + { + "prefix" : "cpg" , + "use_cpg" : 1, + }, + ] + + # test what happens when multiple devices can fence a node, but the first device fails. + for test_type in test_types: + test = self.new_test("%s_fence_device_failure" % test_type["prefix"], + "Verify that when one fence device fails for a node, the others are tried.", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-F node3 -t 5") + + # simple topology test for one device + for test_type in test_types: + if test_type["use_cpg"] == 0: + continue + + test = self.new_test("%s_topology_simple" % test_type["prefix"], + "Verify all fencing devices at a level are used.", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R true -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + + test.add_cmd("stonith_admin", "-r node3 -i 1 -v true") + test.add_cmd("stonith_admin", "-F node3 -t 5") + + test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") + + # test what happens when the first fencing level has multiple devices. 
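The helper methods above (new_test(), add_cmd(), add_expected_fail_cmd(), add_cmd_check_stdout() and add_stonith_log_pattern()) are the whole interface the scenarios use: a test is just an ordered list of stonith_admin calls plus the daemon log lines it is expected to produce. As a sketch, an additional scenario could be registered like this; the method name, test name, device name and expected log line are illustrative only, following the conventions of the surrounding tests:

    # Illustrative only: a hypothetical extra scenario using the same helpers.
    def build_example_test(self):
        test = self.new_test("standalone_example_fence",
                             "Sketch: fence node3 through a single working device.")
        # Register a fake agent that can fence node3
        test.add_cmd("stonith_admin",
                     "-R true1 -a fence_true -o \"pcmk_host_list=node3\"")
        # Fence node3 with a 5 second timeout
        test.add_cmd("stonith_admin", "-F node3 -t 5")
        # Querying a device that was never registered should fail
        # (rc 237, as in the monitor/register tests below)
        test.add_expected_fail_cmd("stonith_admin", "-Q no_such_device", 237)
        # stonithd's captured log would be expected to record the call via 'true1'
        test.add_stonith_log_pattern("for host 'node3' with device 'true1' returned: 0")
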
+ for test_type in test_types: + if test_type["use_cpg"] == 0: + continue + + test = self.new_test("%s_topology_device_fails" % test_type["prefix"], + "Verify if one device in a level fails, the other is tried.", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R false -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R true -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + + test.add_cmd("stonith_admin", "-r node3 -i 1 -v false") + test.add_cmd("stonith_admin", "-r node3 -i 2 -v true") + test.add_cmd("stonith_admin", "-F node3 -t 5") + + test.add_stonith_log_pattern("for host 'node3' with device 'false' returned: -1001") + test.add_stonith_log_pattern("for host 'node3' with device 'true' returned: 0") + + # test what happens when the first fencing level fails. + for test_type in test_types: + if test_type["use_cpg"] == 0: + continue + + test = self.new_test("%s_topology_multi_level_fails" % test_type["prefix"], + "Verify if one level fails, the next leve is tried.", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R true2 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R true3 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R true4 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R false2 -a fence_false -o \"pcmk_host_list=node1 node2 node3\"") + + test.add_cmd("stonith_admin", "-r node3 -i 1 -v false1") + test.add_cmd("stonith_admin", "-r node3 -i 1 -v true1") + test.add_cmd("stonith_admin", "-r node3 -i 2 -v true2") + test.add_cmd("stonith_admin", "-r node3 -i 2 -v false2") + test.add_cmd("stonith_admin", "-r node3 -i 3 -v true3") + test.add_cmd("stonith_admin", "-r node3 -i 3 -v true4") + + test.add_cmd("stonith_admin", "-F node3 -t 5") + + + test.add_stonith_log_pattern("for host 'node3' with device 'false1' returned: -1001") + test.add_stonith_log_pattern("for host 'node3' with device 'false2' returned: -1001") + test.add_stonith_log_pattern("for host 'node3' with device 'true3' returned: 0") + test.add_stonith_log_pattern("for host 'node3' with device 'true4' returned: 0") + + # test the stonith builds the correct list of devices that can fence a node. 
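The topology scenarios above rest on the level semantics stonithd is expected to implement: levels are tried in ascending order, a level succeeds only if every device registered at it succeeds, and any failure moves the operation on to the next level (hence the expected -1001 results for false1 and false2, and success only at level 3). A rough Python sketch of that selection rule, for orientation only; the real logic lives in the daemon, not in this harness:

    # Conceptual model of the traversal exercised by *_topology_multi_level_fails.
    # 'levels' maps a level index to callables standing in for fence devices.
    def fence_with_topology(levels):
        for index in sorted(levels):
            # A level succeeds only if every device at that level succeeds.
            if all(device() for device in levels[index]):
                return True
            # Otherwise fall through to the next level.
        return False

    levels = {
        1: [lambda: False, lambda: True],   # false1 fails, so level 1 fails
        2: [lambda: True, lambda: False],   # false2 fails, so level 2 fails
        3: [lambda: True, lambda: True],    # true3 and true4 both succeed
    }
    assert fence_with_topology(levels)      # fencing completes at level 3
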
+ for test_type in test_types: + test = self.new_test("%s_list_devices" % test_type["prefix"], + "Verify list of devices that can fence a node is correct", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") + test.add_cmd("stonith_admin", "-R true2 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + test.add_cmd("stonith_admin", "-R true3 -a fence_true -o \"pcmk_host_list=node1 node2 node3\"") + + test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true2", "true1") + test.add_cmd_check_stdout("stonith_admin", "-l node1 -V", "true3", "true1") + + # simple test of device monitor + for test_type in test_types: + test = self.new_test("%s_monitor" % test_type["prefix"], + "Verify device is reachable", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") + test.add_cmd("stonith_admin", "-R false1 -a fence_false -o \"pcmk_host_list=node3\"") + + test.add_cmd("stonith_admin", "-Q true1") + test.add_cmd("stonith_admin", "-Q false1") + test.add_expected_fail_cmd("stonith_admin", "-Q true2", 237) + + # simple register test + for test_type in test_types: + test = self.new_test("%s_register" % test_type["prefix"], + "Verify devices can be registered and un-registered", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") + + test.add_cmd("stonith_admin", "-Q true1") + + test.add_cmd("stonith_admin", "-D true1") + + test.add_expected_fail_cmd("stonith_admin", "-Q true1", 237) + + # test fencing history. + for test_type in test_types: + if test_type["use_cpg"] == 0: + continue + test = self.new_test("%s_fence_history" % test_type["prefix"], + "Verify last fencing operation is returned.", test_type["use_cpg"]) + test.add_cmd("stonith_admin", "-R true1 -a fence_true -o \"pcmk_host_list=node3\"") + + test.add_cmd("stonith_admin", "-F node3 -t 5 -V") + + test.add_cmd_check_stdout("stonith_admin", "-H node3", "was able to turn off node node3", "") + +class TestOptions: + def __init__(self): + self.options = {} + self.options['list-tests'] = 0 + self.options['run-all'] = 1 + self.options['run-only'] = "" + self.options['run-only-pattern'] = "" + self.options['verbose'] = 0 + self.options['invalid-arg'] = "" + self.options['show-usage'] = 0 + + def build_options(self, argv): + args = argv[1:] + skip = 0 + for i in range(0, len(args)): + if skip: + skip = 0 + continue + elif args[i] == "-h" or args[i] == "--help": + self.options['show-usage'] = 1 + elif args[i] == "-l" or args[i] == "--list-tests": + self.options['list-tests'] = 1 + elif args[i] == "-V" or args[i] == "--verbose": + self.options['verbose'] = 1 + elif args[i] == "-r" or args[i] == "--run-only": + self.options['run-only'] = args[i+1] + skip = 1 + elif args[i] == "-p" or args[i] == "--run-only-pattern": + self.options['run-only-pattern'] = args[i+1] + skip = 1 + + def show_usage(self): + print "usage: " + sys.argv[0] + " [options]" + print "If no options are provided, all tests will run" + print "Options:" + print "\t [--help | -h] Show usage" + print "\t [--list-tests | -l] Print out all registered tests." 
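TestOptions.build_options() above scans argv positionally rather than using an option parser, so a flag that takes a value consumes the following argument via the skip flag. A quick usage sketch of what a typical invocation would leave in the options dictionary, assuming the parser shown above:

    # e.g. "python ./regression.py -V -p topology"
    o = TestOptions()
    o.build_options(["regression.py", "-V", "-p", "topology"])
    # Expected result, given the parser above:
    #   o.options['verbose']          == 1
    #   o.options['run-only-pattern'] == "topology"
    #   o.options['run-only']         == ""   (unchanged default)
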
+ print "\t [--run-only | -r 'testname'] Run a specific test" + print "\t [--verbose | -V] Verbose output" + print "\t [--run-only-pattern | -p 'string'] Run only tests containing the string value" + print "\n\tExample: Run only the test 'start_top'" + print "\t\t python ./regression.py --run-only start_stop" + print "\n\tExample: Run only the tests with the string 'systemd' present in them" + print "\t\t python ./regression.py --run-only-pattern systemd" + +def main(argv): + o = TestOptions() + o.build_options(argv) + + tests = Tests(o.options['verbose']) + tests.build_standalone_tests() + tests.build_api_sanity_tests() + + os.system("cp /usr/share/pacemaker/tests/cts/fence_false /usr/sbin/fence_false") + os.system("cp /usr/share/pacemaker/tests/cts/fence_true /usr/sbin/fence_true") + + print "Starting ..." + + if o.options['list-tests']: + tests.print_list() + elif o.options['show-usage']: + o.show_usage() + elif o.options['run-only-pattern'] != "": + tests.run_tests_matching(o.options['run-only-pattern']) + tests.print_results() + elif o.options['run-only'] != "": + tests.run_single(o.options['run-only']) + tests.print_results() + else: + tests.run_tests() + tests.print_results() + +if __name__=="__main__": + main(sys.argv) diff --git a/fencing/test.c b/fencing/test.c index cb26bd56da..be9b5df1e8 100644 --- a/fencing/test.c +++ b/fencing/test.c @@ -1,209 +1,378 @@ /* * Copyright (C) 2009 Andrew Beekhof * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include - /* *INDENT-OFF* */ +enum test_modes { + /* class dev test using a very specific environment */ + test_standard = 0, + /* watch notifications only */ + test_passive, + /* sanity test stonith client api using fence_true and fence_false */ + test_api_sanity, +}; + static struct crm_option long_options[] = { {"verbose", 0, 0, 'V'}, {"version", 0, 0, '$'}, {"help", 0, 0, '?'}, {"passive", 0, 0, 'p'}, + {"api_test", 0, 0, 't'}, {0, 0, 0, 0} }; /* *INDENT-ON* */ +stonith_t *st = NULL; +struct pollfd pollfd; int st_opts = st_opt_sync_call; +int expected_notifications = 0; +int verbose = 0; + +static void dispatch_helper(int timeout) +{ + int rc; + crm_debug("Looking for notification"); + pollfd.events = POLLIN; + while(true) { + rc = poll( &pollfd, 1, timeout); /* wait 10 minutes, -1 forever */ + if (rc > 0 ) { + stonith_dispatch( st ); + } else { + break; + } + } +} static void st_callback(stonith_t *st, stonith_event_t *e) { if(st->state == stonith_disconnected) { exit(1); } crm_notice("Operation %s requested by %s %s for peer %s. %s reported: %s (ref=%s)", e->operation, e->origin, e->result == pcmk_ok?"completed":"failed", e->target, e->executioner ? 
e->executioner : "", pcmk_strerror(e->result), e->id); + + if (expected_notifications) { + expected_notifications--; + } } static void st_global_callback(stonith_t * stonith, const xmlNode * msg, int call_id, int rc, xmlNode * output, void *userdata) { crm_log_xml_notice((xmlNode*)msg, "Event"); } +static void +passive_test(void) +{ + st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback); + st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback); + st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback); + st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback); + st->cmds->register_callback(st, 0, 120, FALSE, NULL, "st_global_callback", st_global_callback); + + dispatch_helper(600 * 1000); +} + +#define single_test(cmd, str, num_notifications, expected_rc) \ +{ \ + int rc = 0; \ + rc = cmd; \ + expected_notifications = 0; \ + if (num_notifications) { \ + expected_notifications = num_notifications; \ + dispatch_helper(500); \ + } \ + if (rc != expected_rc) { \ + crm_info("FAILURE - expected rc %d != %d(%s) for cmd - %s\n", expected_rc, rc, pcmk_strerror(rc), str); \ + exit(-1); \ + } else if (expected_notifications) { \ + crm_info("FAILURE - expected %d notifications, got only %d for cmd - %s\n", \ + num_notifications, num_notifications - expected_notifications, str); \ + exit(-1); \ + } else { \ + if (verbose) { \ + crm_info("SUCCESS - %s: %d", str, rc); \ + } else { \ + crm_debug("SUCCESS - %s: %d", str, rc); \ + } \ + } \ +}\ + +static void +run_fence_failure_test(void) +{ + stonith_key_value_t *params = NULL; + + params = stonith_key_value_add(params, "pcmk_host_map", "pcmk-1=1,2 pcmk-2=3,4"); + + single_test(st->cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_false", params), + "Register device1 for failure test", 1, 0); + + single_test(st->cmds->fence(st, st_opts, "pcmk-2", "off", 3), + "Fence failure results off", 1, -1001); + + single_test(st->cmds->fence(st, st_opts, "pcmk-2", "reboot", 3), + "Fence failure results reboot", 1, -1001); + + single_test(st->cmds->remove_device(st, st_opts, "test-id1"), + "Remove device1 for failure test", 1, 0); + + stonith_key_value_freeall(params, 1, 1); +} + +static void +run_fence_failure_rollover_test(void) +{ + stonith_key_value_t *params = NULL; + + params = stonith_key_value_add(params, "pcmk_host_map", "pcmk-1=1,2 pcmk-2=3,4"); + + single_test(st->cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_false", params), + "Register device1 for rollover test", 1, 0); + + single_test(st->cmds->register_device(st, st_opts, "test-id2", "stonith-ng", "fence_true", params), + "Register device2 for rollover test", 1, 0); + + single_test(st->cmds->fence(st, st_opts, "pcmk-2", "off", 3), + "Fence rollover results off", 1, 0); + + single_test(st->cmds->fence(st, st_opts, "pcmk-2", "on", 3), + "Fence rollover results on", 1, 0); + + single_test(st->cmds->remove_device(st, st_opts, "test-id1"), + "Remove device1 for rollover tests", 1, 0); + + single_test(st->cmds->remove_device(st, st_opts, "test-id2"), + "Remove device2 for rollover tests", 1, 0); + + stonith_key_value_freeall(params, 1, 1); +} + +static void +run_standard_test(void) +{ + stonith_key_value_t *params = NULL; + + params = stonith_key_value_add(params, "pcmk_host_map", "pcmk-1=1,2 pcmk-2=3,4"); + + single_test(st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_true", params), + "Register", 1, 0); + + single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), + 
"list", 1, 0); + + single_test(st->cmds->monitor(st, st_opts, "test-id", 1), + "Monitor", 1, 0); + + single_test(st->cmds->status(st, st_opts, "test-id", "pcmk-2", 1), + "Status pcmk-2", 1, 0); + + single_test(st->cmds->status(st, st_opts, "test-id", "pcmk-1", 1), + "Status pcmk-1", 1, 0); + + single_test(st->cmds->fence(st, st_opts, "unknown-host", "off", 1), + "Fence unknown-host (expected failure)", 0, -113); + + single_test(st->cmds->fence(st, st_opts, "pcmk-1", "off", 1), + "Fence pcmk-1", 1, 0); + + single_test(st->cmds->fence(st, st_opts, "pcmk-1", "on", 1), + "Unfence pcmk-1", 1, 0); + + single_test(st->cmds->remove_device(st, st_opts, "test-id"), + "Remove test-id", 1, 0); + + stonith_key_value_freeall(params, 1, 1); +} + +static void +sanity_tests(void) +{ + st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback); + st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback); + st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback); + st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback); + st->cmds->register_callback(st, 0, 120, FALSE, NULL, "st_global_callback", st_global_callback); + + crm_info("Starting API Sanity Tests"); + run_standard_test(); + run_fence_failure_test(); + run_fence_failure_rollover_test(); + crm_info("Sanity Tests Passed"); +} + +static void +standard_dev_test(void) +{ + int rc = 0; + char *tmp = NULL; + stonith_key_value_t *params = NULL; + + params = stonith_key_value_add(params, "pcmk_host_map", "some-host=pcmk-7 pcmk-3=3,4"); + + rc = st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_xvm", params); + crm_debug("Register: %d", rc); + + rc = st->cmds->list(st, st_opts, "test-id", &tmp, 10); + crm_debug("List: %d output: %s\n", rc, tmp ? 
tmp : ""); + + rc = st->cmds->monitor(st, st_opts, "test-id", 10); + crm_debug("Monitor: %d", rc); + + rc = st->cmds->status(st, st_opts, "test-id", "pcmk-2", 10); + crm_debug("Status pcmk-2: %d", rc); + + rc = st->cmds->status(st, st_opts, "test-id", "pcmk-1", 10); + crm_debug("Status pcmk-1: %d", rc); + + rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60); + crm_debug("Fence unknown-host: %d", rc); + + rc = st->cmds->status(st, st_opts, "test-id", "pcmk-1", 10); + crm_debug("Status pcmk-1: %d", rc); + + rc = st->cmds->fence(st, st_opts, "pcmk-1", "off", 60); + crm_debug("Fence pcmk-1: %d", rc); + + rc = st->cmds->status(st, st_opts, "test-id", "pcmk-1", 10); + crm_debug("Status pcmk-1: %d", rc); + + rc = st->cmds->fence(st, st_opts, "pcmk-1", "on", 10); + crm_debug("Unfence pcmk-1: %d", rc); + + rc = st->cmds->status(st, st_opts, "test-id", "pcmk-1", 10); + crm_debug("Status pcmk-1: %d", rc); + + rc = st->cmds->fence(st, st_opts, "some-host", "off", 10); + crm_debug("Fence alias: %d", rc); + + rc = st->cmds->status(st, st_opts, "test-id", "some-host", 10); + crm_debug("Status alias: %d", rc); + + rc = st->cmds->fence(st, st_opts, "pcmk-1", "on", 10); + crm_debug("Unfence pcmk-1: %d", rc); + + rc = st->cmds->remove_device(st, st_opts, "test-id"); + crm_debug("Remove test-id: %d", rc); + + stonith_key_value_freeall(params, 1, 1); +} int main(int argc, char ** argv) { int argerr = 0; int flag; int option_index = 0; int rc = 0; - char *tmp = NULL; - struct pollfd pollfd; - stonith_t *st = NULL; + enum test_modes mode = test_standard; - stonith_key_value_t *params = NULL; - gboolean passive_mode = FALSE; - - crm_log_cli_init("stonith-test"); crm_set_options(NULL, "mode [options]", long_options, "Provides a summary of cluster's current state." "\n\nOutputs varying levels of detail in a number of different formats.\n"); - params = stonith_key_value_add(params, "pcmk_host_map", "some-host=pcmk-7 pcmk-3=3,4"); - while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) { break; } switch(flag) { case 'V': - crm_bump_log_level(); + verbose = 1; break; case '$': case '?': crm_help(flag, EX_OK); break; case 'p': - passive_mode = TRUE; + mode = test_passive; break; - default: + case 't': + mode = test_api_sanity; + break; + default: ++argerr; break; } } + crm_log_init("stonith-test", LOG_INFO, TRUE, verbose ? TRUE : FALSE, argc, argv, FALSE); + if (optind > argc) { ++argerr; } if (argerr) { crm_help('?', EX_USAGE); } crm_debug("Create"); st = stonith_api_new(); rc = st->cmds->connect(st, crm_system_name, &pollfd.fd); crm_debug("Connect: %d", rc); - rc = st->cmds->register_notification(st, T_STONITH_NOTIFY_DISCONNECT, st_callback); - - if(passive_mode) { - rc = st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, st_callback); - rc = st->cmds->register_notification(st, STONITH_OP_DEVICE_ADD, st_callback); - rc = st->cmds->register_notification(st, STONITH_OP_DEVICE_DEL, st_callback); - - st->cmds->register_callback(st, 0, 120, FALSE, NULL, "st_global_callback", st_global_callback); - - crm_info("Looking for notification"); - pollfd.events = POLLIN; - while(true) { - rc = poll( &pollfd, 1, 600 * 1000 ); /* wait 10 minutes, -1 forever */ - if (rc > 0 ) - stonith_dispatch( st ); - else - break; - } - - } else { - rc = st->cmds->register_device(st, st_opts, "test-id", "stonith-ng", "fence_xvm", params); - crm_debug("Register: %d", rc); - - rc = st->cmds->list(st, st_opts, "test-id", &tmp, 10); - crm_debug("List: %d output: %s\n", rc, tmp ? 
tmp : ""); - - rc = st->cmds->monitor(st, st_opts, "test-id", 10); - crm_debug("Monitor: %d", rc); - - rc = st->cmds->status(st, st_opts, "test-id", "pcmk-2", 10); - crm_debug("Status pcmk-2: %d", rc); - - rc = st->cmds->status(st, st_opts, "test-id", "pcmk-1", 10); - crm_debug("Status pcmk-1: %d", rc); - - rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60); - crm_debug("Fence unknown-host: %d", rc); - - rc = st->cmds->status(st, st_opts, "test-id", "pcmk-1", 10); - crm_debug("Status pcmk-1: %d", rc); - - rc = st->cmds->fence(st, st_opts, "pcmk-1", "off", 60); - crm_debug("Fence pcmk-1: %d", rc); - - rc = st->cmds->status(st, st_opts, "test-id", "pcmk-1", 10); - crm_debug("Status pcmk-1: %d", rc); - - rc = st->cmds->fence(st, st_opts, "pcmk-1", "on", 10); - crm_debug("Unfence pcmk-1: %d", rc); - - rc = st->cmds->status(st, st_opts, "test-id", "pcmk-1", 10); - crm_debug("Status pcmk-1: %d", rc); - - rc = st->cmds->fence(st, st_opts, "some-host", "off", 10); - crm_debug("Fence alias: %d", rc); - - rc = st->cmds->status(st, st_opts, "test-id", "some-host", 10); - crm_debug("Status alias: %d", rc); - - rc = st->cmds->fence(st, st_opts, "pcmk-1", "on", 10); - crm_debug("Unfence pcmk-1: %d", rc); - - rc = st->cmds->remove_device(st, st_opts, "test-id"); - crm_debug("Remove test-id: %d", rc); + switch (mode) { + case test_standard: + standard_dev_test(); + break; + case test_passive: + passive_test(); + break; + case test_api_sanity: + sanity_tests(); + break; } - stonith_key_value_freeall(params, 1, 1); - rc = st->cmds->disconnect(st); crm_debug("Disconnect: %d", rc); crm_debug("Destroy"); stonith_api_delete(st); return rc; } diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c index 833e1e7c17..04976f666f 100644 --- a/lib/fencing/st_client.c +++ b/lib/fencing/st_client.c @@ -1,2008 +1,2018 @@ /* * Copyright (c) 2004 Andrew Beekhof * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include #include #include #include #include /* Add it for compiling on OSX */ #include #include #include #include #include #ifdef HAVE_STONITH_STONITH_H # include # define LHA_STONITH_LIBRARY "libstonith.so.1" static void *lha_agents_lib = NULL; #endif #include CRM_TRACE_INIT_DATA(stonith); typedef struct stonith_private_s { char *token; crm_ipc_t *ipc; mainloop_io_t *source; GHashTable *stonith_op_callback_table; GList *notify_list; void (*op_callback) (stonith_t * st, const xmlNode * msg, int call, int rc, xmlNode * output, void *userdata); } stonith_private_t; typedef struct stonith_notify_client_s { const char *event; const char *obj_id; /* implement one day */ const char *obj_type; /* implement one day */ void (*notify) (stonith_t * st, stonith_event_t *e); } stonith_notify_client_t; typedef struct stonith_callback_client_s { void (*callback) (stonith_t * st, const xmlNode * msg, int call, int rc, xmlNode * output, void *userdata); const char *id; void *user_data; gboolean only_success; struct timer_rec_s *timer; } stonith_callback_client_t; struct notify_blob_s { stonith_t *stonith; xmlNode *xml; }; struct timer_rec_s { int call_id; int timeout; guint ref; stonith_t *stonith; }; typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *, xmlNode *, xmlNode *, xmlNode **, xmlNode **); static const char META_TEMPLATE[] = "\n" "\n" "\n" " 1.0\n" " \n" "%s\n" " \n" " %s\n" "%s\n" " \n" " \n" " \n" " \n" " \n" " \n" " \n" " \n" " 2.0\n" " \n" "\n"; bool stonith_dispatch(stonith_t * st); int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata); void stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc); xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options); int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data, int call_options, int timeout); static void stonith_connection_destroy(gpointer user_data); static void stonith_send_notification(gpointer data, gpointer user_data); static void stonith_connection_destroy(gpointer user_data) { stonith_t *stonith = user_data; stonith_private_t *native = NULL; struct notify_blob_s blob; crm_trace("Sending destroyed notification"); blob.stonith = stonith; blob.xml = create_xml_node(NULL, "notify"); native = stonith->private; native->ipc = NULL; native->source = NULL; stonith->state = stonith_disconnected; crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY); crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT); g_list_foreach(native->notify_list, stonith_send_notification, &blob); free_xml(blob.xml); } xmlNode * create_device_registration_xml(const char *id, const char *namespace, const char *agent, stonith_key_value_t * params) { xmlNode *data = create_xml_node(NULL, F_STONITH_DEVICE); xmlNode *args = create_xml_node(data, XML_TAG_ATTRS); crm_xml_add(data, XML_ATTR_ID, id); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, "agent", agent); crm_xml_add(data, "namespace", namespace); for (; params; params = params->next) { hash2field((gpointer) params->key, (gpointer) params->value, args); } return data; } static int stonith_api_register_device(stonith_t * st, int call_options, const 
char *id, const char *namespace, const char *agent, stonith_key_value_t * params) { int rc = 0; - xmlNode *data = create_device_registration_xml(id, namespace, agent, params); + xmlNode *data = NULL; + +#if HAVE_STONITH_STONITH_H + namespace = get_stonith_provider(agent, namespace); + if (strcmp(namespace, "heartbeat") == 0) { + stonith_key_value_add(params, "plugin", agent); + agent = "fence_legacy"; + } +#endif + + data = create_device_registration_xml(id, namespace, agent, params); rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0); free_xml(data); return rc; } static int stonith_api_remove_device(stonith_t * st, int call_options, const char *name) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, XML_ATTR_ID, name); rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0); free_xml(data); return rc; } static int stonith_api_remove_level(stonith_t * st, int options, const char *node, int level) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_LEVEL); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, node); crm_xml_add_int(data, XML_ATTR_ID, level); rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0); free_xml(data); return rc; } xmlNode * create_level_registration_xml(const char *node, int level, stonith_key_value_t * device_list) { xmlNode *data = create_xml_node(NULL, F_STONITH_LEVEL); crm_xml_add_int(data, XML_ATTR_ID, level); crm_xml_add(data, F_STONITH_TARGET, node); crm_xml_add(data, "origin", __FUNCTION__); for (; device_list; device_list = device_list->next) { xmlNode *dev = create_xml_node(data, F_STONITH_DEVICE); crm_xml_add(dev, XML_ATTR_ID, device_list->value); } return data; } static int stonith_api_register_level(stonith_t * st, int options, const char *node, int level, stonith_key_value_t * device_list) { int rc = 0; xmlNode *data = create_level_registration_xml(node, level, device_list); rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0); free_xml(data); return rc; } static void append_arg(gpointer key, gpointer value, gpointer user_data) { int len = 3; /* =, \n, \0 */ int last = 0; char **args = user_data; CRM_CHECK(key != NULL, return); CRM_CHECK(value != NULL, return); if (strstr(key, "pcmk_")) { return; } else if (strstr(key, CRM_META)) { return; } else if (safe_str_eq(key, "crm_feature_set")) { return; } len += strlen(key); len += strlen(value); if (*args != NULL) { last = strlen(*args); } *args = realloc(*args, last + len); crm_trace("Appending: %s=%s", (char *)key, (char *)value); sprintf((*args) + last, "%s=%s\n", (char *)key, (char *)value); } static void append_const_arg(const char *key, const char *value, char **arg_list) { char *glib_sucks_key = strdup(key); char *glib_sucks_value = strdup(value); append_arg(glib_sucks_key, glib_sucks_value, arg_list); free(glib_sucks_value); free(glib_sucks_key); } static void append_host_specific_args(const char *victim, const char *map, GHashTable * params, char **arg_list) { char *name = NULL; int last = 0, lpc = 0, max = 0; if (map == NULL) { /* The best default there is for now... 
*/ crm_debug("Using default arg map: port=uname"); append_const_arg("port", victim, arg_list); return; } max = strlen(map); crm_debug("Processing arg map: %s", map); for (; lpc < max + 1; lpc++) { if (isalpha(map[lpc])) { /* keep going */ } else if (map[lpc] == '=' || map[lpc] == ':') { free(name); name = calloc(1, 1 + lpc - last); memcpy(name, map + last, lpc - last); crm_debug("Got name: %s", name); last = lpc + 1; } else if (map[lpc] == 0 || map[lpc] == ',' || isspace(map[lpc])) { char *param = NULL; const char *value = NULL; param = calloc(1, 1 + lpc - last); memcpy(param, map + last, lpc - last); last = lpc + 1; crm_debug("Got key: %s", param); if (name == NULL) { crm_err("Misparsed '%s', found '%s' without a name", map, param); free(param); continue; } if (safe_str_eq(param, "uname")) { value = victim; } else { char *key = crm_meta_name(param); value = g_hash_table_lookup(params, key); free(key); } if (value) { crm_debug("Setting '%s'='%s' (%s) for %s", name, value, param, victim); append_const_arg(name, value, arg_list); } else { crm_err("No node attribute '%s' for '%s'", name, victim); } free(name); name = NULL; free(param); if (map[lpc] == 0) { break; } } else if (isspace(map[lpc])) { last = lpc; } } free(name); } static char * make_args(const char *action, const char *victim, GHashTable * device_args, GHashTable * port_map) { char buffer[512]; char *arg_list = NULL; const char *value = NULL; CRM_CHECK(action != NULL, return NULL); if (device_args) { g_hash_table_foreach(device_args, append_arg, &arg_list); } buffer[511] = 0; snprintf(buffer, 511, "pcmk_%s_action", action); if (device_args) { value = g_hash_table_lookup(device_args, buffer); } if (value == NULL && device_args) { /* Legacy support for early 1.1 releases - Remove for 1.2 */ snprintf(buffer, 511, "pcmk_%s_cmd", action); value = g_hash_table_lookup(device_args, buffer); } if (value) { crm_info("Substituting action '%s' for requested operation '%s'", value, action); action = value; } append_const_arg(STONITH_ATTR_ACTION_OP, action, &arg_list); if (victim && device_args) { const char *alias = victim; const char *param = g_hash_table_lookup(device_args, STONITH_ATTR_HOSTARG); if (port_map && g_hash_table_lookup(port_map, victim)) { alias = g_hash_table_lookup(port_map, victim); } /* Always supply the node's name too: * https://fedorahosted.org/cluster/wiki/FenceAgentAPI */ append_const_arg("nodename", victim, &arg_list); /* Check if we need to supply the victim in any other form */ if (param == NULL) { const char *map = g_hash_table_lookup(device_args, STONITH_ATTR_ARGMAP); if (map == NULL) { param = "port"; value = g_hash_table_lookup(device_args, param); } else { /* Legacy handling */ append_host_specific_args(alias, map, device_args, &arg_list); value = map; /* Nothing more to do */ } } else if (safe_str_eq(param, "none")) { value = param; /* Nothing more to do */ } else { value = g_hash_table_lookup(device_args, param); } /* Don't overwrite explictly set values for $param */ if (value == NULL || safe_str_eq(value, "dynamic")) { crm_debug("Performing %s action for node '%s' as '%s=%s'", action, victim, param, alias); append_const_arg(param, alias, &arg_list); } } crm_trace("Calculated: %s", arg_list); return arg_list; } static gboolean st_child_term(gpointer data) { int rc = 0; async_command_t * track = data; crm_info("Child %d timed out, sending SIGTERM", track->pid); track->timer_sigterm = 0; rc = kill(track->pid, SIGTERM); if(rc < 0) { crm_perror(LOG_ERR, "Couldn't send SIGTERM to %d", track->pid); } return FALSE; 
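The arg-map parsing and make_args() above serialize the device parameters, the (possibly remapped) action, and the victim's alias into the newline-delimited key=value blob that RHCS-style fence agents read from standard input (see the FenceAgentAPI link in the code). A minimal sketch of driving an agent by hand with such a blob; "fence_example" and the exact key names are illustrative assumptions rather than anything defined in this file:

    /* Sketch: feed a make_args()-style key=value blob to a fence agent's stdin.
     * "fence_example" is a placeholder agent; "action" is assumed to be what
     * STONITH_ATTR_ACTION_OP expands to. */
    #include <stdio.h>

    int main(void)
    {
        const char *blob = "action=reboot\nport=node1\nnodename=node1\n";
        FILE *agent = popen("fence_example", "w");   /* agent parses keys from stdin */

        if (agent == NULL) {
            return 1;
        }
        fputs(blob, agent);
        return pclose(agent) == 0 ? 0 : 1;           /* agent exit status 0 == success */
    }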
} static gboolean st_child_kill(gpointer data) { int rc = 0; async_command_t * track = data; crm_info("Child %d timed out, sending SIGKILL", track->pid); track->timer_sigkill = 0; rc = kill(track->pid, SIGKILL); if(rc < 0) { crm_perror(LOG_ERR, "Couldn't send SIGKILL to %d", track->pid); } return FALSE; } /* Borrowed from libfence and extended */ int run_stonith_agent(const char *agent, const char *action, const char *victim, GHashTable * device_args, GHashTable * port_map, int *agent_result, char **output, async_command_t * track) { char *args = make_args(action, victim, device_args, port_map); int pid, status, len, rc = -EPROTO; int p_read_fd, p_write_fd; /* parent read/write file descriptors */ int c_read_fd, c_write_fd; /* child read/write file descriptors */ int fd1[2]; int fd2[2]; c_read_fd = c_write_fd = p_read_fd = p_write_fd = -1; if (args == NULL || agent == NULL) goto fail; len = strlen(args); if (pipe(fd1)) goto fail; p_read_fd = fd1[0]; c_write_fd = fd1[1]; if (pipe(fd2)) goto fail; c_read_fd = fd2[0]; p_write_fd = fd2[1]; crm_debug("forking"); pid = fork(); if (pid < 0) { rc = -ECHILD; goto fail; } if (pid) { /* parent */ int ret; int total = 0; fcntl(p_read_fd, F_SETFL, fcntl(p_read_fd, F_GETFL, 0) | O_NONBLOCK); do { crm_debug("sending args"); ret = write(p_write_fd, args + total, len - total); if (ret > 0) { total += ret; } } while (errno == EINTR && total < len); if (total != len) { crm_perror(LOG_ERR, "Sent %d not %d bytes", total, len); if (ret >= 0) { rc = -EREMOTEIO; } goto fail; } close(p_write_fd); if (track && track->done) { track->stdout = p_read_fd; g_child_watch_add(pid, track->done, track); crm_trace("Op: %s on %s, pid: %d, timeout: %ds", action, agent, pid, track->timeout); if (track->timeout) { track->pid = pid; track->timer_sigterm = g_timeout_add(1000*track->timeout, st_child_term, track); track->timer_sigkill = g_timeout_add(1000*(track->timeout+5), st_child_kill, track); } else { crm_err("No timeout set for stonith operation %s with device %s", action, agent); } close(c_write_fd); close(c_read_fd); free(args); return pid; } else { pid_t p; do { p = waitpid(pid, &status, 0); } while (p < 0 && errno == EINTR); if (p < 0) { crm_perror(LOG_ERR, "waitpid(%d)", pid); } else if (p != pid) { crm_err("Waited for %d, got %d", pid, p); } if (output != NULL) { char *local_copy; int lpc = 0, last = 0, more; len = 0; do { char buf[500]; ret = read(p_read_fd, buf, 500); if (ret > 0) { buf[ret] = 0; *output = realloc(*output, len + ret + 1); sprintf((*output) + len, "%s", buf); crm_trace("%d: %s", ret, (*output) + len); len += ret; } } while (ret == 500 || (ret < 0 && errno == EINTR)); if (*output) { local_copy = strdup(*output); more = strlen(local_copy); for(lpc = 0; lpc < more; lpc++) { if(local_copy[lpc] == '\n' || local_copy[lpc] == 0) { local_copy[lpc] = 0; crm_debug("%s: %s", agent, local_copy+last); last = lpc+1; } } crm_debug("%s: %s (total %d bytes)", agent, local_copy+last, more); free(local_copy); } } rc = -ECONNABORTED; *agent_result = -ECONNABORTED; if (WIFEXITED(status)) { crm_debug("result = %d", WEXITSTATUS(status)); *agent_result = -WEXITSTATUS(status); rc = 0; } else if (WIFSIGNALED(status)) { crm_err("call %s for %s exited due to signal %d", action, agent, WTERMSIG(status)); } else { crm_err("call %s for %s exited abnormally. 
stopped=%d, continued=%d", action, agent, WIFSTOPPED(status), WIFCONTINUED(status)); } } } else { /* child */ close(1); /* coverity[leaked_handle] False positive */ if (dup(c_write_fd) < 0) goto fail; close(2); /* coverity[leaked_handle] False positive */ if (dup(c_write_fd) < 0) goto fail; close(0); /* coverity[leaked_handle] False positive */ if (dup(c_read_fd) < 0) goto fail; /* keep c_write_fd open so parent can report all errors. */ close(c_read_fd); close(p_read_fd); close(p_write_fd); execlp(agent, agent, NULL); exit(EXIT_FAILURE); } fail: free(args); if (p_read_fd >= 0) { close(p_read_fd); } if (p_write_fd >= 0) { close(p_write_fd); } if (c_read_fd >= 0) { close(c_read_fd); } if (c_write_fd >= 0) { close(c_write_fd); } return rc; } static int stonith_api_device_list(stonith_t * stonith, int call_options, const char *namespace, stonith_key_value_t ** devices, int timeout) { int count = 0; if (devices == NULL) { crm_err("Parameter error: stonith_api_device_list"); return -EFAULT; } /* Include Heartbeat agents */ if (namespace == NULL || safe_str_eq("heartbeat", namespace)) { #if HAVE_STONITH_STONITH_H static gboolean need_init = TRUE; char **entry = NULL; char **type_list = NULL; static char **(*type_list_fn) (void) = NULL; static void (*type_free_fn) (char **) = NULL; if(need_init) { need_init = FALSE; type_list_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_types", FALSE); type_free_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_free_hostlist", FALSE); } if(type_list_fn) { type_list = (*type_list_fn)(); } for (entry = type_list; entry != NULL && *entry; ++entry) { crm_trace("Added: %s", *entry); *devices = stonith_key_value_add(*devices, NULL, *entry); count++; } if (type_list && type_free_fn) { (*type_free_fn)(type_list); } #else if(namespace != NULL) { return -EINVAL; /* Heartbeat agents not supported */ } #endif } /* Include Red Hat agents, basically: ls -1 @sbin_dir@/fence_* */ if (namespace == NULL || safe_str_eq("redhat", namespace)) { struct dirent **namelist; int file_num = scandir(RH_STONITH_DIR, &namelist, 0, alphasort); if (file_num > 0) { struct stat prop; char buffer[FILENAME_MAX + 1]; while (file_num--) { if ('.' == namelist[file_num]->d_name[0]) { free(namelist[file_num]); continue; } else if (0 != strncmp(RH_STONITH_PREFIX, namelist[file_num]->d_name, strlen(RH_STONITH_PREFIX))) { free(namelist[file_num]); continue; } snprintf(buffer, FILENAME_MAX, "%s/%s", RH_STONITH_DIR, namelist[file_num]->d_name); if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) { *devices = stonith_key_value_add(*devices, NULL, namelist[file_num]->d_name); count++; } free(namelist[file_num]); } free(namelist); } } return count; } static int stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *agent, const char *namespace, char **output, int timeout) { int rc = 0; char *buffer = NULL; const char *provider = get_stonith_provider(agent, namespace); crm_trace("looking up %s/%s metadata", agent, provider); /* By having this in a library, we can access it from stonith_admin * when neither lrmd or stonith-ng are running * Important for the crm shell's validations... 
*/ if (safe_str_eq(provider, "redhat")) { int exec_rc = run_stonith_agent(agent, "metadata", NULL, NULL, NULL, &rc, &buffer, NULL); if (exec_rc < 0 || rc != 0 || buffer == NULL) { crm_debug("Query failed: %d %d: %s", exec_rc, rc, crm_str(buffer)); free(buffer); /* Just in case */ return -EINVAL; } else { xmlNode *xml = string2xml(buffer); xmlNode *actions = NULL; xmlXPathObject *xpathObj = NULL; xpathObj = xpath_search(xml, "//actions"); if (xpathObj && xpathObj->nodesetval->nodeNr > 0) { actions = getXpathResult(xpathObj, 0); } /* Now fudge the metadata so that the start/stop actions appear */ xpathObj = xpath_search(xml, "//action[@name='stop']"); if (xpathObj == NULL || xpathObj->nodesetval->nodeNr <= 0) { xmlNode *tmp = NULL; tmp = create_xml_node(actions, "action"); crm_xml_add(tmp, "name", "stop"); crm_xml_add(tmp, "timeout", "20s"); tmp = create_xml_node(actions, "action"); crm_xml_add(tmp, "name", "start"); crm_xml_add(tmp, "timeout", "20s"); } /* Now fudge the metadata so that the port isn't required in the configuration */ xpathObj = xpath_search(xml, "//parameter[@name='port']"); if (xpathObj && xpathObj->nodesetval->nodeNr > 0) { /* We'll fill this in */ xmlNode *tmp = getXpathResult(xpathObj, 0); crm_xml_add(tmp, "required", "0"); } free(buffer); buffer = dump_xml_formatted(xml); free_xml(xml); } } else { #if !HAVE_STONITH_STONITH_H return -EINVAL; /* Heartbeat agents not supported */ #else int bufferlen = 0; char *xml_meta_longdesc = NULL; char *xml_meta_shortdesc = NULL; char *meta_param = NULL; char *meta_longdesc = NULL; char *meta_shortdesc = NULL; static const char *no_parameter_info = ""; Stonith *stonith_obj = NULL; static gboolean need_init = TRUE; static Stonith *(*st_new_fn) (const char *) = NULL; static const char *(*st_info_fn) (Stonith *, int) = NULL; static void (*st_del_fn) (Stonith *) = NULL; if(need_init) { need_init = FALSE; st_new_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_new", FALSE); st_del_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_delete", FALSE); st_info_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_get_info", FALSE); } if (lha_agents_lib && st_new_fn && st_del_fn && st_info_fn) { stonith_obj = (*st_new_fn) (agent); if(stonith_obj) { meta_longdesc = strdup((*st_info_fn)(stonith_obj, ST_DEVICEDESCR)); if (meta_longdesc == NULL) { crm_warn("no long description in %s's metadata.", agent); meta_longdesc = strdup(no_parameter_info); } meta_shortdesc = strdup((*st_info_fn)(stonith_obj, ST_DEVICEID)); if (meta_shortdesc == NULL) { crm_warn("no short description in %s's metadata.", agent); meta_shortdesc = strdup(no_parameter_info); } meta_param = strdup((*st_info_fn)(stonith_obj, ST_CONF_XML)); if (meta_param == NULL) { crm_warn("no list of parameters in %s's metadata.", agent); meta_param = strdup(no_parameter_info); } (*st_del_fn)(stonith_obj); } else { return -EINVAL; /* Heartbeat agents not supported */ } } xml_meta_longdesc = (char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_longdesc); xml_meta_shortdesc = (char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_shortdesc); bufferlen = strlen(META_TEMPLATE) + strlen(agent) + strlen(xml_meta_longdesc) + strlen(xml_meta_shortdesc) + strlen(meta_param) + 1; buffer = calloc(1, bufferlen); snprintf(buffer, bufferlen - 1, META_TEMPLATE, agent, xml_meta_longdesc, xml_meta_shortdesc, meta_param); xmlFree(xml_meta_longdesc); xmlFree(xml_meta_shortdesc); free(meta_shortdesc); 
free(meta_longdesc); free(meta_param); #endif } if (output) { *output = buffer; } else { free(buffer); } return rc; } static int stonith_api_query(stonith_t * stonith, int call_options, const char *target, stonith_key_value_t ** devices, int timeout) { int rc = 0, lpc = 0, max = 0; xmlNode *data = NULL; xmlNode *output = NULL; xmlXPathObjectPtr xpathObj = NULL; CRM_CHECK(devices != NULL, return -EINVAL); data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, target); rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout); if (rc < 0) { return rc; } xpathObj = xpath_search(output, "//@agent"); if (xpathObj) { max = xpathObj->nodesetval->nodeNr; for (lpc = 0; lpc < max; lpc++) { xmlNode *match = getXpathResult(xpathObj, lpc); CRM_CHECK(match != NULL, continue); crm_info("%s[%d] = %s", "//@agent", lpc, xmlGetNodePath(match)); *devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID)); } } free_xml(output); free_xml(data); return max; } static int stonith_api_call(stonith_t * stonith, int call_options, const char *id, const char *action, const char *victim, int timeout, xmlNode **output) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, F_STONITH_DEVICE); crm_xml_add(data, "origin", __FUNCTION__); crm_xml_add(data, F_STONITH_DEVICE, id); crm_xml_add(data, F_STONITH_ACTION, action); crm_xml_add(data, F_STONITH_TARGET, victim); rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout); free_xml(data); return rc; } static int stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info, int timeout) { int rc; xmlNode *output = NULL; rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output); if (output && list_info) { const char *list_str; list_str = crm_element_value(output, "st_output"); if (list_str) { *list_info = strdup(list_str); } } if (output) { free_xml(output); } return rc; } static int stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout) { return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL); } static int stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port, int timeout) { return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL); } static int stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action, int timeout) { int rc = 0; xmlNode *data = NULL; data = create_xml_node(NULL, __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, node); crm_xml_add(data, F_STONITH_ACTION, action); crm_xml_add_int(data, F_STONITH_TIMEOUT, timeout); rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout); free_xml(data); return rc; } static int stonith_api_confirm(stonith_t * stonith, int call_options, const char *target) { return stonith_api_fence(stonith, call_options | st_opt_manual_ack, target, "off", 0); } static int stonith_api_history(stonith_t * stonith, int call_options, const char *node, stonith_history_t ** history, int timeout) { int rc = 0; xmlNode *data = NULL; xmlNode *output = NULL; stonith_history_t *last = NULL; *history = NULL; if (node) { data = create_xml_node(NULL, __FUNCTION__); crm_xml_add(data, F_STONITH_TARGET, node); } rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output, call_options | st_opt_sync_call, timeout); free_xml(data); if (rc 
== 0) { xmlNode *op = NULL; xmlNode *reply = get_xpath_object("//" F_STONITH_HISTORY_LIST, output, LOG_ERR); for (op = __xml_first_child(reply); op != NULL; op = __xml_next(op)) { stonith_history_t *kvp; kvp = calloc(1, sizeof(stonith_history_t)); kvp->target = crm_element_value_copy(op, F_STONITH_TARGET); kvp->action = crm_element_value_copy(op, F_STONITH_ACTION); kvp->origin = crm_element_value_copy(op, F_STONITH_ORIGIN); kvp->delegate = crm_element_value_copy(op, F_STONITH_DELEGATE); crm_element_value_int(op, F_STONITH_DATE, &kvp->completed); crm_element_value_int(op, F_STONITH_STATE, &kvp->state); if (last) { last->next = kvp; } else { *history = kvp; } last = kvp; } } return rc; } gboolean is_redhat_agent(const char *agent) { int rc = 0; struct stat prop; char buffer[FILENAME_MAX + 1]; snprintf(buffer, FILENAME_MAX, "%s/%s", RH_STONITH_DIR, agent); rc = stat(buffer, &prop); if (rc >= 0 && S_ISREG(prop.st_mode)) { return TRUE; } return FALSE; } const char * get_stonith_provider(const char *agent, const char *provider) { /* This function sucks */ if (is_redhat_agent(agent)) { return "redhat"; #if HAVE_STONITH_STONITH_H } else { Stonith *stonith_obj = NULL; static gboolean need_init = TRUE; static Stonith *(*st_new_fn) (const char *) = NULL; static void (*st_del_fn) (Stonith *) = NULL; if(need_init) { need_init = FALSE; st_new_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_new", FALSE); st_del_fn = find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_delete", FALSE); } if (lha_agents_lib && st_new_fn && st_del_fn) { stonith_obj = (*st_new_fn) (agent); if(stonith_obj) { (*st_del_fn)(stonith_obj); return "heartbeat"; } } #endif } crm_err("No such device: %s", agent); return NULL; } static gint stonithlib_GCompareFunc(gconstpointer a, gconstpointer b) { int rc = 0; const stonith_notify_client_t *a_client = a; const stonith_notify_client_t *b_client = b; CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0); rc = strcmp(a_client->event, b_client->event); if (rc == 0) { if (a_client->notify == NULL || b_client->notify == NULL) { return 0; } else if (a_client->notify == b_client->notify) { return 0; } else if (((long)a_client->notify) < ((long)b_client->notify)) { crm_err("callbacks for %s are not equal: %p vs. %p", a_client->event, a_client->notify, b_client->notify); return -1; } crm_err("callbacks for %s are not equal: %p vs. 
%p", a_client->event, a_client->notify, b_client->notify); return 1; } return rc; } xmlNode * stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options) { xmlNode *op_msg = create_xml_node(NULL, "stonith_command"); CRM_CHECK(op_msg != NULL, return NULL); CRM_CHECK(token != NULL, return NULL); crm_xml_add(op_msg, F_XML_TAGNAME, "stonith_command"); crm_xml_add(op_msg, F_TYPE, T_STONITH_NG); crm_xml_add(op_msg, F_STONITH_CALLBACK_TOKEN, token); crm_xml_add(op_msg, F_STONITH_OPERATION, op); crm_xml_add_int(op_msg, F_STONITH_CALLID, call_id); crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options); crm_xml_add_int(op_msg, F_STONITH_CALLOPTS, call_options); if (data != NULL) { add_message_xml(op_msg, F_STONITH_CALLDATA, data); } return op_msg; } static void stonith_destroy_op_callback(gpointer data) { stonith_callback_client_t *blob = data; if (blob->timer && blob->timer->ref > 0) { g_source_remove(blob->timer->ref); } free(blob->timer); free(blob); } static int stonith_api_signoff(stonith_t * stonith) { stonith_private_t *native = stonith->private; crm_debug("Signing out of the STONITH Service"); if (native->ipc != NULL) { /* If attached to mainloop and it is active, _close() will result in: * - the source being removed from mainloop * - the dnotify callback being invoked * Otherwise, we are at least correctly disconnecting IPC */ crm_ipc_close(native->ipc); crm_ipc_destroy(native->ipc); native->source = NULL; native->ipc = NULL; } stonith->state = stonith_disconnected; return pcmk_ok; } static int stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd) { int rc = pcmk_ok; stonith_private_t *native = stonith->private; static struct ipc_client_callbacks st_callbacks = { .dispatch = stonith_dispatch_internal, .destroy = stonith_connection_destroy }; crm_trace("Connecting command channel"); stonith->state = stonith_connected_command; if(stonith_fd) { /* No mainloop */ native->ipc = crm_ipc_new("stonith-ng", 0); if(native->ipc && crm_ipc_connect(native->ipc)) { *stonith_fd = crm_ipc_get_fd(native->ipc); } else if(native->ipc) { rc = -ENOTCONN; } } else { /* With mainloop */ native->source = mainloop_add_ipc_client("stonith-ng", 0, stonith, &st_callbacks); native->ipc = mainloop_get_ipc_client(native->source); } if (native->ipc == NULL) { crm_debug("Could not connect to the Stonith API"); rc = -ENOTCONN; } if (rc == pcmk_ok) { xmlNode *reply = NULL; xmlNode *hello = create_xml_node(NULL, "stonith_command"); crm_xml_add(hello, F_TYPE, T_STONITH_NG); crm_xml_add(hello, F_STONITH_OPERATION, CRM_OP_REGISTER); crm_xml_add(hello, F_STONITH_CLIENTNAME, name); rc = crm_ipc_send(native->ipc, hello, &reply, -1); if(rc < 0) { crm_perror(LOG_DEBUG, "Couldn't complete registration with the fencing API: %d", rc); rc = -ECOMM; } else if(reply == NULL) { crm_err("Did not receive registration reply"); rc = -EPROTO; } else { const char *msg_type = crm_element_value(reply, F_STONITH_OPERATION); const char *tmp_ticket = crm_element_value(reply, F_STONITH_CLIENTID); if (safe_str_neq(msg_type, CRM_OP_REGISTER)) { crm_err("Invalid registration message: %s", msg_type); crm_log_xml_err(reply, "Bad reply"); rc = -EPROTO; } else if (tmp_ticket == NULL) { crm_err("No registration token provided"); crm_log_xml_err(reply, "Bad reply"); rc = -EPROTO; } else { crm_trace("Obtained registration token: %s", tmp_ticket); native->token = strdup(tmp_ticket); rc = pcmk_ok; } } free_xml(reply); free_xml(hello); } if (rc == pcmk_ok) { #if 
HAVE_MSGFROMIPC_TIMEOUT stonith->call_timeout = MAX_IPC_DELAY; #endif crm_debug("Connection to STONITH successful"); return pcmk_ok; } crm_debug("Connection to STONITH failed: %s", pcmk_strerror(rc)); stonith->cmds->disconnect(stonith); return rc; } static int stonith_set_notification(stonith_t * stonith, const char *callback, int enabled) { xmlNode *notify_msg = create_xml_node(NULL, __FUNCTION__); stonith_private_t *native = stonith->private; if (stonith->state != stonith_disconnected) { int rc; crm_xml_add(notify_msg, F_STONITH_OPERATION, T_STONITH_NOTIFY); if (enabled) { crm_xml_add(notify_msg, F_STONITH_NOTIFY_ACTIVATE, callback); } else { crm_xml_add(notify_msg, F_STONITH_NOTIFY_DEACTIVATE, callback); } rc = crm_ipc_send(native->ipc, notify_msg, NULL, -1); if(rc < 0) { crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc); rc = -ECOMM; } } free_xml(notify_msg); return pcmk_ok; } static int stonith_api_add_notification(stonith_t * stonith, const char *event, void (*callback) (stonith_t * stonith, stonith_event_t *e)) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; private = stonith->private; crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list)); new_client = calloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = callback; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); if (list_item != NULL) { crm_warn("Callback already present"); free(new_client); return -ENOTUNIQ; } else { private->notify_list = g_list_append(private->notify_list, new_client); stonith_set_notification(stonith, event, 1); crm_trace("Callback added (%d)", g_list_length(private->notify_list)); } return pcmk_ok; } static int stonith_api_del_notification(stonith_t * stonith, const char *event) { GList *list_item = NULL; stonith_notify_client_t *new_client = NULL; stonith_private_t *private = NULL; crm_debug("Removing callback for %s events", event); private = stonith->private; new_client = calloc(1, sizeof(stonith_notify_client_t)); new_client->event = event; new_client->notify = NULL; list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc); stonith_set_notification(stonith, event, 0); if (list_item != NULL) { stonith_notify_client_t *list_client = list_item->data; private->notify_list = g_list_remove(private->notify_list, list_client); free(list_client); crm_trace("Removed callback"); } else { crm_trace("Callback not present"); } free(new_client); return pcmk_ok; } static gboolean stonith_async_timeout_handler(gpointer data) { struct timer_rec_s *timer = data; crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout); stonith_perform_callback(timer->stonith, NULL, timer->call_id, -ETIME); /* Always return TRUE, never remove the handler * We do that in stonith_del_callback() */ return TRUE; } static int stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, bool only_success, void *user_data, const char *callback_name, void (*callback) (stonith_t * st, const xmlNode * msg, int call, int rc, xmlNode * output, void *userdata)) { stonith_callback_client_t *blob = NULL; stonith_private_t *private = NULL; CRM_CHECK(stonith != NULL, return -EINVAL); CRM_CHECK(stonith->private != NULL, return -EINVAL); private = stonith->private; if (call_id == 0) { private->op_callback = callback; } else if (call_id < 0) { if (only_success == FALSE) { crm_trace("Call failed, 
calling %s: %s", callback_name, pcmk_strerror(call_id)); callback(stonith, NULL, call_id, call_id, NULL, user_data); } else { crm_warn("STONITH call failed: %s", pcmk_strerror(call_id)); } return FALSE; } blob = calloc(1, sizeof(stonith_callback_client_t)); blob->id = callback_name; blob->only_success = only_success; blob->user_data = user_data; blob->callback = callback; if (timeout > 0) { struct timer_rec_s *async_timer = NULL; async_timer = calloc(1, sizeof(struct timer_rec_s)); blob->timer = async_timer; async_timer->stonith = stonith; async_timer->call_id = call_id; /* Allow a fair bit of grace to allow the server to tell us of a timeout * This is only a fallback */ async_timer->timeout = (timeout + 60) * 1000; async_timer->ref = g_timeout_add(async_timer->timeout, stonith_async_timeout_handler, async_timer); } g_hash_table_insert(private->stonith_op_callback_table, GINT_TO_POINTER(call_id), blob); crm_trace("Added callback to %s for call %d", callback_name, call_id); return TRUE; } static int stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks) { stonith_private_t *private = stonith->private; if (all_callbacks) { private->op_callback = NULL; g_hash_table_destroy(private->stonith_op_callback_table); private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, stonith_destroy_op_callback); } else if (call_id == 0) { private->op_callback = NULL; } else { g_hash_table_remove(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); } return pcmk_ok; } static void stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data) { int call = GPOINTER_TO_INT(key); stonith_callback_client_t *blob = value; crm_debug("Call %d (%s): pending", call, crm_str(blob->id)); } void stonith_dump_pending_callbacks(stonith_t * stonith) { stonith_private_t *private = stonith->private; if (private->stonith_op_callback_table == NULL) { return; } return g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL); } void stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc) { xmlNode *output = NULL; stonith_private_t *private = NULL; stonith_callback_client_t *blob = NULL; stonith_callback_client_t local_blob; CRM_CHECK(stonith != NULL, return); CRM_CHECK(stonith->private != NULL, return); private = stonith->private; local_blob.id = NULL; local_blob.callback = NULL; local_blob.user_data = NULL; local_blob.only_success = FALSE; if (msg != NULL) { crm_element_value_int(msg, F_STONITH_RC, &rc); crm_element_value_int(msg, F_STONITH_CALLID, &call_id); output = get_message_xml(msg, F_STONITH_CALLDATA); } CRM_CHECK(call_id > 0, crm_log_xml_err(msg, "Bad result")); blob = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id)); if (blob != NULL) { local_blob = *blob; blob = NULL; stonith_api_del_callback(stonith, call_id, FALSE); } else { crm_trace("No callback found for call %d", call_id); local_blob.callback = NULL; } if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) { crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id); local_blob.callback(stonith, msg, call_id, rc, output, local_blob.user_data); } else if (private->op_callback == NULL && rc != pcmk_ok) { crm_warn("STONITH command failed: %s", pcmk_strerror(rc)); crm_log_xml_debug(msg, "Failed STONITH Update"); } if (private->op_callback != NULL) { crm_trace("Invoking global callback for call %d", call_id); private->op_callback(stonith, msg, 
call_id, rc, output, NULL); } crm_trace("OP callback activated."); } /* */ static stonith_event_t * xml_to_event(xmlNode *msg) { stonith_event_t *event = calloc(1, sizeof(stonith_event_t)); const char *ntype = crm_element_value(msg, F_SUBTYPE); char *data_addr = g_strdup_printf("//%s", ntype); xmlNode *data = get_xpath_object(data_addr, msg, LOG_ERR); crm_log_xml_trace(msg, "stonith_notify"); crm_element_value_int(msg, F_STONITH_RC, &(event->result)); if(safe_str_eq(ntype, T_STONITH_NOTIFY_FENCE)) { event->origin = crm_element_value_copy(data, F_STONITH_ORIGIN); event->target = crm_element_value_copy(data, F_STONITH_TARGET); event->operation = crm_element_value_copy(msg, F_STONITH_OPERATION); event->executioner = crm_element_value_copy(data, F_STONITH_DELEGATE); event->id = crm_element_value_copy(data, F_STONITH_REMOTE); } g_free(data_addr); return event; } static void stonith_send_notification(gpointer data, gpointer user_data) { struct notify_blob_s *blob = user_data; stonith_notify_client_t *entry = data; stonith_event_t *st_event = NULL; const char *event = NULL; if (blob->xml == NULL) { crm_warn("Skipping callback - NULL message"); return; } event = crm_element_value(blob->xml, F_SUBTYPE); if (entry == NULL) { crm_warn("Skipping callback - NULL callback client"); return; } else if (entry->notify == NULL) { crm_warn("Skipping callback - NULL callback"); return; } else if (safe_str_neq(entry->event, event)) { crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event); return; } st_event = xml_to_event(blob->xml); crm_trace("Invoking callback for %p/%s event...", entry, event); entry->notify(blob->stonith, st_event); crm_trace("Callback invoked..."); } int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data, int call_options, int timeout) { int rc = 0; int reply_id = -1; xmlNode *op_msg = NULL; xmlNode *op_reply = NULL; stonith_private_t *native = stonith->private; if (stonith->state == stonith_disconnected) { return -ENOTCONN; } if (output_data != NULL) { *output_data = NULL; } if (op == NULL) { crm_err("No operation specified"); return -EINVAL; } stonith->call_id++; /* prevent call_id from being negative (or zero) and conflicting * with the stonith_errors enum * use 2 because we use it as (stonith->call_id - 1) below */ if (stonith->call_id < 1) { stonith->call_id = 1; } CRM_CHECK(native->token != NULL,;); op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options); if (op_msg == NULL) { return -EINVAL; } crm_xml_add_int(op_msg, F_STONITH_TIMEOUT, timeout); crm_trace("Sending %s message to STONITH service, Timeout: %ds", op, timeout); rc = crm_ipc_send(native->ipc, op_msg, &op_reply, 1000*(timeout + 60)); free_xml(op_msg); if(rc < 0) { crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc); rc = -ECOMM; goto done; } crm_log_xml_trace(op_reply, "Reply"); if (!(call_options & st_opt_sync_call)) { crm_trace("Async call %d, returning", stonith->call_id); CRM_CHECK(stonith->call_id != 0, return -EPROTO); free_xml(op_reply); return stonith->call_id; } rc = pcmk_ok; crm_element_value_int(op_reply, F_STONITH_CALLID, &reply_id); if (reply_id == stonith->call_id) { crm_trace("Syncronous reply %d received", reply_id); if (crm_element_value_int(op_reply, F_STONITH_RC, &rc) != 0) { rc = -ENOMSG; } if ((call_options & st_opt_discard_reply) || output_data == NULL) { crm_trace("Discarding reply"); } else { *output_data = op_reply; op_reply = NULL; /* Prevent subsequent 
free */ } } else if (reply_id <= 0) { crm_err("Received bad reply: No id set"); crm_log_xml_err(op_reply, "Bad reply"); free_xml(op_reply); rc = -ENOMSG; } else { crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id); crm_log_xml_err(op_reply, "Old reply"); free_xml(op_reply); rc = -ENOMSG; } done: if (crm_ipc_connected(native->ipc) == FALSE) { crm_err("STONITH disconnected"); stonith->state = stonith_disconnected; } free_xml(op_reply); return rc; } /* Not used with mainloop */ bool stonith_dispatch(stonith_t * st) { gboolean stay_connected = TRUE; stonith_private_t *private = NULL; CRM_ASSERT(st != NULL); private = st->private; while(crm_ipc_ready(private->ipc)) { if(crm_ipc_read(private->ipc) > 0) { const char *msg = crm_ipc_buffer(private->ipc); stonith_dispatch_internal(msg, strlen(msg), st); } if(crm_ipc_connected(private->ipc) == FALSE) { crm_err("Connection closed"); stay_connected = FALSE; } } return stay_connected; } int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata) { const char *type = NULL; struct notify_blob_s blob; stonith_t * st = userdata; stonith_private_t *private = NULL; CRM_ASSERT(st != NULL); private = st->private; blob.stonith = st; blob.xml = string2xml(buffer); if (blob.xml == NULL) { crm_warn("Received a NULL msg from STONITH service: %s.", buffer); return 0; } /* do callbacks */ type = crm_element_value(blob.xml, F_TYPE); crm_trace("Activating %s callbacks...", type); if (safe_str_eq(type, T_STONITH_NG)) { stonith_perform_callback(st, blob.xml, 0, 0); } else if (safe_str_eq(type, T_STONITH_NOTIFY)) { g_list_foreach(private->notify_list, stonith_send_notification, &blob); } else { crm_err("Unknown message type: %s", type); crm_log_xml_warn(blob.xml, "BadReply"); } free_xml(blob.xml); return 1; } static int stonith_api_free(stonith_t * stonith) { int rc = pcmk_ok; if (stonith->state != stonith_disconnected) { rc = stonith->cmds->disconnect(stonith); } if (stonith->state == stonith_disconnected) { stonith_private_t *private = stonith->private; g_hash_table_destroy(private->stonith_op_callback_table); free(private->token); free(stonith->private); free(stonith->cmds); free(stonith); } return rc; } void stonith_api_delete(stonith_t * stonith) { stonith_private_t *private = stonith->private; GList *list = private->notify_list; while (list != NULL) { stonith_notify_client_t *client = g_list_nth_data(list, 0); list = g_list_remove(list, client); free(client); } stonith->cmds->free(stonith); stonith = NULL; } stonith_t * stonith_api_new(void) { stonith_t *new_stonith = NULL; stonith_private_t *private = NULL; new_stonith = calloc(1, sizeof(stonith_t)); private = calloc(1, sizeof(stonith_private_t)); new_stonith->private = private; private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, stonith_destroy_op_callback); private->notify_list = NULL; new_stonith->call_id = 1; new_stonith->state = stonith_disconnected; new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t)); /* *INDENT-OFF* */ new_stonith->cmds->free = stonith_api_free; new_stonith->cmds->connect = stonith_api_signon; new_stonith->cmds->disconnect = stonith_api_signoff; new_stonith->cmds->list = stonith_api_list; new_stonith->cmds->monitor = stonith_api_monitor; new_stonith->cmds->status = stonith_api_status; new_stonith->cmds->fence = stonith_api_fence; new_stonith->cmds->confirm = stonith_api_confirm; new_stonith->cmds->history = stonith_api_history; new_stonith->cmds->list_agents = stonith_api_device_list;
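The operations table being populated here is the public client API; everything earlier in the file sits behind these function pointers. A hedged usage sketch, patterned on stonith_api_kick() further below; the client name, node name, timeouts, and the header path are placeholders/assumptions, not taken from this file:

    /* Sketch: connect, list the available agents, request a synchronous
     * reboot, then tear the connection down.  Error handling is abbreviated. */
    #include <crm/stonith-ng.h>    /* assumed public header declaring stonith_t */

    int fence_some_node(void)
    {
        stonith_t *st = stonith_api_new();
        stonith_key_value_t *devices = NULL;
        int rc = st->cmds->connect(st, "stonith-example", NULL);

        if (rc == pcmk_ok) {
            st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, 10);
            stonith_key_value_freeall(devices, 1, 1);
            rc = st->cmds->fence(st, st_opt_sync_call, "some-node", "reboot", 120);
        }
        st->cmds->disconnect(st);
        stonith_api_delete(st);
        return rc;
    }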
new_stonith->cmds->metadata = stonith_api_device_metadata; new_stonith->cmds->query = stonith_api_query; new_stonith->cmds->remove_device = stonith_api_remove_device; new_stonith->cmds->register_device = stonith_api_register_device; new_stonith->cmds->remove_level = stonith_api_remove_level; new_stonith->cmds->register_level = stonith_api_register_level; new_stonith->cmds->remove_callback = stonith_api_del_callback; new_stonith->cmds->register_callback = stonith_api_add_callback; new_stonith->cmds->remove_notification = stonith_api_del_notification; new_stonith->cmds->register_notification = stonith_api_add_notification; /* *INDENT-ON* */ return new_stonith; } stonith_key_value_t * stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value) { stonith_key_value_t *p, *end; p = calloc(1, sizeof(stonith_key_value_t)); if (key) { p->key = strdup(key); } if (value) { p->value = strdup(value); } end = head; while (end && end->next) { end = end->next; } if (end) { end->next = p; } else { head = p; } return head; } void stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values) { stonith_key_value_t *p; while (head) { p = head->next; if (keys) { free(head->key); } if (values) { free(head->value); } free(head); head = p; } } int stonith_api_kick(int nodeid, const char *uname, int timeout, bool off) { char *name = NULL; const char *action = "reboot"; int rc = -EPROTO; stonith_t *st = NULL; enum stonith_call_options opts = st_opt_sync_call | st_opt_allow_suicide; st = stonith_api_new(); if (st) { rc = st->cmds->connect(st, "stonith-api", NULL); } if (uname != NULL) { name = strdup(uname); } else if (nodeid > 0) { opts |= st_opt_cs_nodeid; name = crm_itoa(nodeid); } if (off) { action = "off"; } if (rc == pcmk_ok) { rc = st->cmds->fence(st, opts, name, action, timeout); } if (st) { st->cmds->disconnect(st); stonith_api_delete(st); } free(name); return rc; } time_t stonith_api_time(int nodeid, const char *uname, bool in_progress) { int rc = 0; char *name = NULL; time_t when = 0; time_t progress = 0; stonith_t *st = NULL; stonith_history_t *history, *hp = NULL; enum stonith_call_options opts = st_opt_sync_call; st = stonith_api_new(); if (st) { rc = st->cmds->connect(st, "stonith-api", NULL); } if (uname != NULL) { name = strdup(uname); } else if (nodeid > 0) { opts |= st_opt_cs_nodeid; name = crm_itoa(nodeid); } if (st && rc == pcmk_ok) { st->cmds->history(st, st_opt_sync_call | st_opt_cs_nodeid, name, &history, 120); for (hp = history; hp; hp = hp->next) { if (in_progress) { if (hp->state != st_done && hp->state != st_failed) { progress = time(NULL); } } else if (hp->state == st_done) { when = hp->completed; } } } if (progress) { when = progress; } if (st) { st->cmds->disconnect(st); stonith_api_delete(st); } free(name); return when; } diff --git a/lrmd/regression.py.in b/lrmd/regression.py.in index a4619cdc2b..553e7b09ec 100755 --- a/lrmd/regression.py.in +++ b/lrmd/regression.py.in @@ -1,783 +1,780 @@ #!/usr/bin/python # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. import os import sys import subprocess import shlex import time def output_from_command(command, no_wait=0): test = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE) if no_wait == 0: test.wait() else: return 0 return test.communicate()[0].split("\n") class Test: def __init__(self, name, description, verbose = 0): self.name = name self.description = description self.cmds = [] self.daemon_location = "@CRM_DAEMON_DIR@/lrmd" self.test_tool_location = "@CRM_DAEMON_DIR@/lrmd_test" self.verbose = verbose self.result_txt = "" self.cmd_tool_output = "" self.result_exitcode = 0; self.lrmd_process = None self.stonith_process = None self.executed = 0 def __new_cmd(self, cmd, args, exitcode, stdout_match = "", no_wait = 0, stdout_negative_match = "", kill=None): if self.verbose and cmd == self.test_tool_location: args = args + " -V " self.cmds.append( { "cmd" : cmd, "kill" : kill, "args" : args, "expected_exitcode" : exitcode, "stdout_match" : stdout_match, "stdout_negative_match" : stdout_negative_match, "no_wait" : no_wait, "cmd_output" : "", } ) def start_environment(self): ### make sure we are in full control here ### cmd = shlex.split("killall -q -9 stonithd lrmd lt-lrmd lrmd_test lt-lrmd_test") test = subprocess.Popen(cmd, stdout=subprocess.PIPE) test.wait() self.stonith_process = subprocess.Popen(shlex.split("@CRM_DAEMON_DIR@/stonithd -s")) if self.verbose: self.lrmd_process = subprocess.Popen(shlex.split("%s -VVV -l /tmp/lrmd-regression.log" % self.daemon_location)) else: self.lrmd_process = subprocess.Popen(shlex.split("%s -l /tmp/lrmd-regression.log" % self.daemon_location)) time.sleep(1) def clean_environment(self): if self.lrmd_process: self.lrmd_process.kill() if self.stonith_process: self.stonith_process.kill() self.lrmd_process = None self.stonith_process = None def add_sys_cmd(self, cmd, args): self.__new_cmd(cmd, args, 0, "") def add_sys_cmd_no_wait(self, cmd, args): self.__new_cmd(cmd, args, 0, "", 1) def add_cmd_check_stdout(self, args, match, no_match = ""): self.__new_cmd(self.test_tool_location, args, 0, match, 0, no_match) def add_cmd(self, args): self.__new_cmd(self.test_tool_location, args, 0, "") def add_cmd_and_kill(self, killProc, args): self.__new_cmd(self.test_tool_location, args, 0, "", kill=killProc) def add_expected_fail_cmd(self, args): self.__new_cmd(self.test_tool_location, args, 255, "") def get_exitcode(self): return self.result_exitcode def print_result(self, filler): print "%s%s" % (filler, self.result_txt) def run_cmd(self, args): cmd = shlex.split(args['args']) cmd.insert(0, args['cmd']) if self.verbose: print "\n\nRunning: "+" ".join(cmd) test = subprocess.Popen(cmd, stdout=subprocess.PIPE) if args['kill']: if self.verbose: print "Also running: "+args['kill'] subprocess.Popen(shlex.split(args['kill'])) if args['no_wait'] == 0: test.wait() else: return 0 output = test.communicate()[0] if args['stdout_match'] != "" and output.count(args['stdout_match']) == 0: test.returncode = -2 print "STDOUT string '%s' was not found in cmd output" % (args['stdout_match']) if args['stdout_negative_match'] != "" and output.count(args['stdout_negative_match']) != 0: test.returncode = -2 print "STDOUT string '%s' was found in cmd output" % (args['stdout_negative_match']) args['cmd_output'] = output return test.returncode; def run(self): res = 0 i = 1 
self.start_environment() if self.verbose: print "\n--- START TEST - %s" % self.name self.result_txt = "SUCCESS - '%s'" % (self.name) self.result_exitcode = 0 for cmd in self.cmds: res = self.run_cmd(cmd) if res != cmd['expected_exitcode']: print cmd['cmd_output'] print "Step %d FAILED - command returned %d, expected %d" % (i, res, cmd['expected_exitcode']) self.result_txt = "FAILURE - '%s' failed at step %d. Command: lrmd_test %s" % (self.name, i, cmd['args']) self.result_exitcode = -1 break else: if self.verbose: print cmd['cmd_output'].strip() print "Step %d SUCCESS" % (i) i = i + 1 self.clean_environment() print self.result_txt if self.verbose: print "--- END TEST - %s\n" % self.name self.executed = 1 return res class Tests: def __init__(self, verbose = 0): self.tests = [] self.verbose = verbose self.rsc_classes = output_from_command("crm_resource --list-standards") self.rsc_classes = self.rsc_classes[:-1] # Strip trailing empty line print "Testing "+repr(self.rsc_classes) self.common_cmds = { "ocf_reg_line" : "-c register_rsc -r ocf_test_rsc -t 1000 -C ocf -P pacemaker -T Dummy", "ocf_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"", "ocf_unreg_line" : "-c unregister_rsc -r \"ocf_test_rsc\" -t 1000", "ocf_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:ocf_test_rsc action:none rc:ok op_status:complete\"", "ocf_start_line" : "-c exec -r \"ocf_test_rsc\" -a \"start\" -t 1000 ", "ocf_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:start rc:ok op_status:complete\" ", "ocf_stop_line" : "-c exec -r \"ocf_test_rsc\" -a \"stop\" -t 1000 ", "ocf_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:stop rc:ok op_status:complete\" ", "ocf_monitor_line" : "-c exec -r \"ocf_test_rsc\" -a \"monitor\" -i \"1000\" -t 1000", "ocf_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:complete\" -t 2000", "ocf_cancel_line" : "-c cancel -r \"ocf_test_rsc\" -a \"monitor\" -i \"1000\" -t \"1000\" ", "ocf_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:ocf_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "systemd_reg_line" : "-c register_rsc -r systemd_test_rsc -t 1000 -C systemd -T lrmd_dummy_daemon", "systemd_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"", "systemd_unreg_line" : "-c unregister_rsc -r \"systemd_test_rsc\" -t 1000", "systemd_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:systemd_test_rsc action:none rc:ok op_status:complete\"", "systemd_start_line" : "-c exec -r \"systemd_test_rsc\" -a \"start\" -t 1000 ", "systemd_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:start rc:ok op_status:complete\" ", "systemd_stop_line" : "-c exec -r \"systemd_test_rsc\" -a \"stop\" -t 1000 ", "systemd_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:stop rc:ok op_status:complete\" ", "systemd_monitor_line" : "-c exec -r \"systemd_test_rsc\" -a \"monitor\" -i \"1000\" -t 1000", "systemd_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:complete\" -t 2000", "systemd_cancel_line" : "-c cancel -r \"systemd_test_rsc\" -a \"monitor\" -i \"1000\" -t \"1000\" ", "systemd_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:systemd_test_rsc action:monitor rc:ok op_status:Cancelled\" ", 
"upstart_reg_line" : "-c register_rsc -r upstart_test_rsc -t 1000 -C upstart -T lrmd_dummy_daemon", "upstart_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"", "upstart_unreg_line" : "-c unregister_rsc -r \"upstart_test_rsc\" -t 1000", "upstart_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:upstart_test_rsc action:none rc:ok op_status:complete\"", "upstart_start_line" : "-c exec -r \"upstart_test_rsc\" -a \"start\" -t 1000 ", "upstart_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:start rc:ok op_status:complete\" ", "upstart_stop_line" : "-c exec -r \"upstart_test_rsc\" -a \"stop\" -t 1000 ", "upstart_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:stop rc:ok op_status:complete\" ", "upstart_monitor_line" : "-c exec -r \"upstart_test_rsc\" -a \"monitor\" -i \"1000\" -t 1000", "upstart_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:complete\" -t 2000", "upstart_cancel_line" : "-c cancel -r \"upstart_test_rsc\" -a \"monitor\" -i \"1000\" -t \"1000\" ", "upstart_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:upstart_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "service_reg_line" : "-c register_rsc -r service_test_rsc -t 1000 -C service -T lrmd_dummy_daemon", "service_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:service_test_rsc action:none rc:ok op_status:complete\"", "service_unreg_line" : "-c unregister_rsc -r \"service_test_rsc\" -t 1000", "service_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:service_test_rsc action:none rc:ok op_status:complete\"", "service_start_line" : "-c exec -r \"service_test_rsc\" -a \"start\" -t 1000 ", "service_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:start rc:ok op_status:complete\" ", "service_stop_line" : "-c exec -r \"service_test_rsc\" -a \"stop\" -t 1000 ", "service_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:stop rc:ok op_status:complete\" ", "service_monitor_line" : "-c exec -r \"service_test_rsc\" -a \"monitor\" -i \"1000\" -t 1000", "service_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:complete\" -t 2000", "service_cancel_line" : "-c cancel -r \"service_test_rsc\" -a \"monitor\" -i \"1000\" -t \"1000\" ", "service_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:service_test_rsc action:monitor rc:ok op_status:Cancelled\" ", "lsb_reg_line" : "-c register_rsc -r lsb_test_rsc -t 1000 -C lsb -T LSBDummy", "lsb_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\" ", "lsb_unreg_line" : "-c unregister_rsc -r \"lsb_test_rsc\" -t 1000", "lsb_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\"", "lsb_start_line" : "-c exec -r \"lsb_test_rsc\" -a \"start\" -t 1000 ", "lsb_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:start rc:ok op_status:complete\" ", "lsb_stop_line" : "-c exec -r \"lsb_test_rsc\" -a \"stop\" -t 1000 ", "lsb_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:stop rc:ok op_status:complete\" ", "lsb_monitor_line" : "-c exec -r \"lsb_test_rsc\" -a status -i \"1000\" -t 1000", "lsb_monitor_event" : "-l \"NEW_EVENT 
event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:complete\" -t 2000", "lsb_cancel_line" : "-c cancel -r \"lsb_test_rsc\" -a \"status\" -i \"1000\" -t \"1000\" ", "lsb_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:lsb_test_rsc action:status rc:ok op_status:Cancelled\" ", "stonith_reg_line" : "-c register_rsc -r stonith_test_rsc -t 1000 -C stonith -P pacemaker -T fence_dummy_monitor", "stonith_reg_event" : "-l \"NEW_EVENT event_type:register rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\" ", "stonith_unreg_line" : "-c unregister_rsc -r \"stonith_test_rsc\" -t 1000", "stonith_unreg_event" : "-l \"NEW_EVENT event_type:unregister rsc_id:stonith_test_rsc action:none rc:ok op_status:complete\"", "stonith_start_line" : "-c exec -r \"stonith_test_rsc\" -a \"start\" -t 1000 ", "stonith_start_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:start rc:ok op_status:complete\" ", "stonith_stop_line" : "-c exec -r \"stonith_test_rsc\" -a \"stop\" -t 1000 ", "stonith_stop_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:stop rc:ok op_status:complete\" ", "stonith_monitor_line" : "-c exec -r \"stonith_test_rsc\" -a \"monitor\" -i \"1000\" -t 1000", "stonith_monitor_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:complete\" -t 3000", "stonith_cancel_line" : "-c cancel -r \"stonith_test_rsc\" -a \"monitor\" -i \"1000\" -t \"1000\" ", "stonith_cancel_event" : "-l \"NEW_EVENT event_type:exec_complete rsc_id:stonith_test_rsc action:monitor rc:ok op_status:Cancelled\" ", } def new_test(self, name, description): test = Test(name, description, self.verbose) self.tests.append(test) return test def setup_test_environment(self): self.cleanup_test_environment() ### Make fake systemd daemon and unit file ### dummy_daemon = "#!/bin/bash\nwhile true\ndo\nsleep 5\ndone" dummy_service_file = ("[Unit]\n" "Description=Dummy Resource\n" "[Service]\n" "Type=simple\n" "ExecStart=/usr/sbin/lrmd_dummy_daemon\n") dummy_upstart_job = (""" description "Dummy service for regression tests" exec dd if=/dev/random of=/dev/null """) dummy_fence_agent = ("""#!/usr/bin/python import sys def main(): for line in sys.stdin.readlines(): if line.count("monitor") > 0: sys.exit(0) if line.count("metadata") > 0: print '' print ' dummy description.' 
print ' http://www.example.com' print ' ' print ' ' print ' ' print ' ' print ' Fencing Action' print ' ' print ' ' print ' ' print ' ' print ' Physical plug number or name of virtual machine' print ' ' print ' ' print ' ' print ' ' print ' ' print ' ' print ' ' print ' ' print '' sys.exit(0) sys.exit(-1) if __name__ == "__main__": main() """) os.system("cat <<-END >>/etc/init/lrmd_dummy_daemon.conf\n%s\nEND" % (dummy_upstart_job)) os.system("cat <<-END >>/usr/sbin/lrmd_dummy_daemon\n%s\nEND" % (dummy_daemon)) os.system("cat <<-END >>/lib/systemd/system/lrmd_dummy_daemon.service\n%s\nEND" % (dummy_service_file)) os.system("chmod u+x /usr/sbin/lrmd_dummy_daemon") os.system("cat <<-END >>/usr/sbin/fence_dummy_monitor\n%s\nEND" % (dummy_fence_agent)) os.system("chmod 711 /usr/sbin/fence_dummy_monitor") os.system("cp /usr/share/pacemaker/tests/cts/LSBDummy /etc/init.d/LSBDummy") os.system("mkdir -p @CRM_CORE_DIR@/root") os.system("systemctl daemon-reload") def cleanup_test_environment(self): os.system("rm -f /lib/systemd/system/lrmd_dummy_daemon.service") os.system("rm -f /usr/sbin/lrmd_dummy_daemon") os.system("rm -f /usr/sbin/fence_dummy_monitor") os.system("rm -f /etc/init.d/LSBDummy") os.system("systemctl daemon-reload") ### These are tests that should apply to all resource classes ### def build_generic_tests(self): common_cmds = self.common_cmds ### register/unregister tests ### for rsc in self.rsc_classes: test = self.new_test("generic_registration_%s" % (rsc), "Simple resource registration test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### start/stop tests ### for rsc in self.rsc_classes: test = self.new_test("generic_start_stop_%s" % (rsc), "Simple start and stop test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### monitor cancel test ### for rsc in self.rsc_classes: test = self.new_test("generic_monitor_cancel_%s" % (rsc), "Simple monitor cancel test for %s standard" % (rsc)) test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor may not be getting rescheduled #### test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. ### test.add_expected_fail_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this happens the monitor did not actually cancel correctly. 
### test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### These are complex tests that involve managing multiple resources of different types ### def build_multi_rsc_tests(self): common_cmds = self.common_cmds # do not use service and systemd at the same time; they are the same resource. ### register start monitor stop unregister resources of each type at the same time. ### test = self.new_test("multi_rsc_start_stop_all", "Start, monitor, and stop resources of multiple types and classes") for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_reg_line" % (rsc)] + " " + common_cmds["%s_reg_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_start_line" % (rsc)] + " " + common_cmds["%s_start_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_monitor_line" % (rsc)] + " " + common_cmds["%s_monitor_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_monitor_event" % (rsc)]) ### If this fails, that means the monitor is not being rescheduled #### for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_cancel_line" % (rsc)] + " " + common_cmds["%s_cancel_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_stop_line" % (rsc)] + " " + common_cmds["%s_stop_event" % (rsc)]) for rsc in self.rsc_classes: test.add_cmd(common_cmds["%s_unreg_line" % (rsc)] + " " + common_cmds["%s_unreg_event" % (rsc)]) ### These are tests related to how the lrmd handles failures. ### def build_negative_tests(self): ### start timeout test ### test = self.new_test("start_timeout", "Force start timeout to occur, verify start failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -k \"op_sleep\" -v \"3\" -t 1000 -w") test.add_cmd("-l " "\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:Timed Out\" -t 3000") test.add_cmd("-c exec -r test_rsc -a stop -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### monitor fail for ocf resources ### test = self.new_test("monitor_fail_ocf", "Force ocf monitor to fail, verify failure is reported.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 2000") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 2000")
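### The two listeners above only match if the lrmd keeps rescheduling the recurring monitor. ###
### The add_cmd_and_kill() step below appears to inject the failure by deleting the Dummy agent's state file out of band, so the next recurring monitor should report rc:not running, which is the event that step then waits for. ###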
test.add_cmd_and_kill("rm -f /var/run/Dummy-test_rsc.state", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 2000") test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"1000\" " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 1000") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 1000") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### monitor fail for systemd resource ### if "systemd" in self.rsc_classes: test = self.new_test("monitor_fail_systemd", "Force systemd monitor to fail, verify failure is reported.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T lrmd_dummy_daemon -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 2000") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 2000") test.add_cmd_and_kill("killall -9 -q lrmd_dummy_daemon", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 5000") test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"1000\" " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 1000") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 1000") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### monitor fail for upstart resource ### if "upstart" in self.rsc_classes: test = self.new_test("monitor_fail_upstart", "Force upstart monitor to fail, verify failure is reported.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T lrmd_dummy_daemon -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor
rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 2000") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 2000") test.add_cmd_and_kill("killall -9 -q dd", "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 5000") test.add_cmd("-c cancel -r \"test_rsc\" -a \"monitor\" -i \"100\" -t \"1000\" " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:complete\" -t 1000") test.add_expected_fail_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 1000") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Cancel non-existent operation on a resource ### test = self.new_test("cancel_non_existent_op", "Attempt to cancel the wrong monitor operation, verify expected failure") test.add_cmd("-c register_rsc -r \"test_rsc\" -C \"ocf\" -P \"pacemaker\" -T \"Dummy\" -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"monitor\" -i \"100\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_cmd("-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" -t 2000") test.add_expected_fail_cmd("-c cancel -r test_rsc -a \"monitor\" -i 1234 -t \"1000\" " ### interval is wrong, should fail "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_expected_fail_cmd("-c cancel -r test_rsc -a stop -i 100 -t \"1000\" " ### action name is wrong, should fail "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:not running op_status:Cancelled\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Attempt to invoke non-existent rsc id ### test = self.new_test("invoke_non_existent_rsc", "Attempt to perform operations on a non-existent rsc id.") test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:unknown error op_status:complete\" ") test.add_expected_fail_cmd("-c exec -r test_rsc -a stop -t 1000" "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_expected_fail_cmd("-c exec -r test_rsc -a monitor -i 1000 -t 1000" "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:monitor rc:ok op_status:complete\" ") test.add_expected_fail_cmd("-c cancel -r test_rsc -a start -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ") test.add_cmd("-c unregister_rsc -r 
\"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register and start a resource that doesn't exist, systemd ### if "systemd" in self.rsc_classes: test = self.new_test("start_uninstalled_systemd", "Register uninstalled systemd agent, try to start, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C systemd -T this_is_fake1234 -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:complete\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") if "upstart" in self.rsc_classes: test = self.new_test("start_uninstalled_upstart", "Register uninstalled upstart agent, try to start, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C upstart -T this_is_fake1234 -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:complete\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register and start a resource that doesn't exist, ocf ### test = self.new_test("start_uninstalled_ocf", "Register uninstalled ocf agent, try to start, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pacemaker -T this_is_fake1234 -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:complete\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register ocf with non-existent provider ### test = self.new_test("start_ocf_bad_provider", "Register ocf agent with a non-existent provider, verify expected failure.") test.add_cmd("-c register_rsc -r \"test_rsc\" -C ocf -P pancakes -T Dummy -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:not installed op_status:complete\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register ocf with empty provider field ### test = self.new_test("start_ocf_no_provider", "Register ocf agent with no provider, verify expected failure.") test.add_expected_fail_cmd("-c register_rsc -r \"test_rsc\" -C ocf -T Dummy -t 1000 " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" ") test.add_expected_fail_cmd("-c exec -r \"test_rsc\" -a \"start\" -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Error\" ") test.add_cmd("-c unregister_rsc -r \"test_rsc\" -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok
op_status:complete\" ") ### These are tests that target specific cases ### def build_custom_tests(self): ### start delay then stop test ### test = self.new_test("start_delay", "Verify start delay works as expected.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" -t 1000") test.add_cmd("-c exec -r test_rsc -s 2000 -a start -w -t 1000") test.add_expected_fail_cmd("-l " "\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 1000") test.add_cmd("-l " "\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 3000") test.add_cmd("-c exec -r test_rsc -a stop -t 1000" "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:stop rc:ok op_status:complete\" ") test.add_cmd("-c unregister_rsc -r test_rsc -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### start delay, but cancel before it gets a chance to start. ### test = self.new_test("start_delay_cancel", "Using start_delay, start a rsc, but cancel the start op before execution.") test.add_cmd("-c register_rsc -r test_rsc -P pacemaker -C ocf -T Dummy " "-l \"NEW_EVENT event_type:register rsc_id:test_rsc action:none rc:ok op_status:complete\" -t 1000") test.add_cmd("-c exec -r test_rsc -s 2000 -a start -w -t 1000") test.add_cmd("-c cancel -r test_rsc -a start -t 1000 " "-l \"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:Cancelled\" ") test.add_expected_fail_cmd("-l " "\"NEW_EVENT event_type:exec_complete rsc_id:test_rsc action:start rc:ok op_status:complete\" -t 3000") test.add_cmd("-c unregister_rsc -r test_rsc -t 1000 " "-l \"NEW_EVENT event_type:unregister rsc_id:test_rsc action:none rc:ok op_status:complete\" ") ### Register a bunch of resources, verify we can get info on them ### test = self.new_test("verify_get_rsc_info", "Register multiple resources, verify retrieval of rsc info.") if "systemd" in self.rsc_classes: test.add_cmd("-c register_rsc -r rsc1 -C systemd -T lrmd_dummy_daemon -t 1000 ") test.add_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c unregister_rsc -r rsc1 -t 1000 ") test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ") if "upstart" in self.rsc_classes: test.add_cmd("-c register_rsc -r rsc1 -C upstart -T lrmd_dummy_daemon -t 1000 ") test.add_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c unregister_rsc -r rsc1 -t 1000 ") test.add_expected_fail_cmd("-c get_rsc_info -r rsc1 ") test.add_cmd("-c register_rsc -r rsc2 -C ocf -T Dummy -P pacemaker -t 1000 ") test.add_cmd("-c get_rsc_info -r rsc2 ") test.add_cmd("-c unregister_rsc -r rsc2 -t 1000 ") test.add_expected_fail_cmd("-c get_rsc_info -r rsc2 ") ### get metadata ### test = self.new_test("get_ocf_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Dummy\"" ,"resource-agent name=\"Dummy\"") test.add_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"Stateful\"") test.add_expected_fail_cmd("-c metadata -P \"pacemaker\" -T \"Stateful\"") test.add_expected_fail_cmd("-c metadata -C \"ocf\" -P \"pacemaker\" -T \"fake_agent\"") ### get metadata ### test = self.new_test("get_lsb_metadata", "Retrieve metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"lsb\" -T \"LSBDummy\"" ,"resource-agent name=\"LSBDummy\"") ### get stonith metadata ### test = self.new_test("get_stonith_metadata", "Retrieve stonith 
metadata for a resource") test.add_cmd_check_stdout("-c metadata -C \"stonith\" -P \"pacemaker\" -T \"fence_dummy_monitor\"", "resource-agent name=\"fence_dummy_monitor\"") ### get ocf providers ### test = self.new_test("list_ocf_providers", "Retrieve list of available resource providers, verifies pacemaker is a provider.") test.add_cmd_check_stdout("-c list_ocf_providers ", "pacemaker") test.add_cmd_check_stdout("-c list_ocf_providers -T ping", "pacemaker") ### Verify agents only exist in their lists ### test = self.new_test("verify_agent_lists", "Verify the agent lists contain the right data.") test.add_cmd_check_stdout("-c list_agents ", "Stateful") ### ocf ### test.add_cmd_check_stdout("-c list_agents -C ocf", "Stateful") test.add_cmd_check_stdout("-c list_agents -C lsb", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C service", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents ", "LSBDummy") ### init.d ### test.add_cmd_check_stdout("-c list_agents -C lsb", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C service", "LSBDummy") test.add_cmd_check_stdout("-c list_agents -C ocf", "", "LSBDummy") ### should not exist test.add_cmd_check_stdout("-c list_agents -C lsb", "", "lrmd_dummy_daemon") ### should not exist test.add_cmd_check_stdout("-c list_agents -C ocf", "", "lrmd_dummy_daemon") ### should not exist test.add_cmd_check_stdout("-c list_agents -C lsb", "", "fence_dummy_monitor") ### should not exist test.add_cmd_check_stdout("-c list_agents -C service", "", "fence_dummy_monitor") ### should not exist test.add_cmd_check_stdout("-c list_agents -C ocf", "", "fence_dummy_monitor") ### should not exist if "systemd" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon") ### systemd ### test.add_cmd_check_stdout("-c list_agents -C service", "lrmd_dummy_daemon") test.add_cmd_check_stdout("-c list_agents -C systemd", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C systemd", "lrmd_dummy_daemon") test.add_cmd_check_stdout("-c list_agents -C systemd", "", "fence_dummy_monitor") ### should not exist test.add_cmd_check_stdout("-c list_agents -C systemd", "", "LSBDummy") ### should not exist if "upstart" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents ", "lrmd_dummy_daemon") ### upstart ### test.add_cmd_check_stdout("-c list_agents -C service", "lrmd_dummy_daemon") test.add_cmd_check_stdout("-c list_agents -C upstart", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents -C upstart", "lrmd_dummy_daemon") test.add_cmd_check_stdout("-c list_agents -C upstart", "", "fence_dummy_monitor") ### should not exist test.add_cmd_check_stdout("-c list_agents -C upstart", "", "LSBDummy") ### should not exist if "stonith" in self.rsc_classes: test.add_cmd_check_stdout("-c list_agents -C stonith", "", "LSBDummy") ### should not exist test.add_cmd_check_stdout("-c list_agents -C stonith", "fence_dummy_monitor") ### stonith ### test.add_cmd_check_stdout("-c list_agents -C stonith", "", "lrmd_dummy_daemon") ### should not exist test.add_cmd_check_stdout("-c list_agents -C stonith", "", "Stateful") ### should not exist test.add_cmd_check_stdout("-c list_agents ", "fence_dummy_monitor") def print_list(self): print "\n==== %d TESTS FOUND ====" % (len(self.tests)) print "%35s - %s" % ("TEST NAME", "TEST DESCRIPTION") print "%35s - %s" % ("--------------------", "--------------------") for test in self.tests: print "%35s - %s" % (test.name, 
test.description) print "==== END OF LIST ====\n" def run_single(self, name): for test in self.tests: if test.name == name: test.run() break def run_tests_matching(self, pattern): for test in self.tests: if test.name.count(pattern) != 0: test.run() def run_tests(self): for test in self.tests: test.run() def print_results(self): failures = 0; success = 0; print "\n\n======= FINAL RESULTS ==========" print "\n--- FAILURE RESULTS:" for test in self.tests: if test.executed == 0: continue if test.get_exitcode() != 0: failures = failures + 1 test.print_result(" ") else: success = success + 1 if failures == 0: print " None" print "\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures) class TestOptions: def __init__(self): self.options = {} self.options['list-tests'] = 0 self.options['run-all'] = 1 self.options['run-only'] = "" self.options['run-only-pattern'] = "" self.options['verbose'] = 0 self.options['invalid-arg'] = "" self.options['show-usage'] = 0 def build_options(self, argv): args = argv[1:] skip = 0 for i in range(0, len(args)): if skip: skip = 0 continue elif args[i] == "-h" or args[i] == "--help": self.options['show-usage'] = 1 elif args[i] == "-l" or args[i] == "--list-tests": self.options['list-tests'] = 1 elif args[i] == "-V" or args[i] == "--verbose": self.options['verbose'] = 1 elif args[i] == "-r" or args[i] == "--run-only": self.options['run-only'] = args[i+1] skip = 1 elif args[i] == "-p" or args[i] == "--run-only-pattern": self.options['run-only-pattern'] = args[i+1] skip = 1 def show_usage(self): print "usage: " + sys.argv[0] + " [options]" print "If no options are provided, all tests will run" print "Options:" print "\t [--help | -h] Show usage" print "\t [--list-tests | -l] Print out all registered tests." print "\t [--run-only | -r 'testname'] Run a specific test" print "\t [--verbose | -V] Verbose output" print "\t [--run-only-pattern | -p 'string'] Run only tests containing the string value" print "\n\tExample: Run only the test 'start_stop'" print "\t\t python ./regression.py --run-only start_stop" print "\n\tExample: Run only the tests with the string 'systemd' present in them" print "\t\t python ./regression.py --run-only-pattern systemd" def main(argv): - lrmd_loc = argv[0].replace("regression.py", "lrmd") - test_loc = argv[0].replace("regression.py", "lrmd_test") - o = TestOptions() o.build_options(argv) tests = Tests(o.options['verbose']) tests.build_generic_tests() tests.build_multi_rsc_tests() tests.build_negative_tests() tests.build_custom_tests() tests.setup_test_environment() print "Starting ..." if o.options['list-tests']: tests.print_list() elif o.options['show-usage']: o.show_usage() elif o.options['run-only-pattern'] != "": tests.run_tests_matching(o.options['run-only-pattern']) tests.print_results() elif o.options['run-only'] != "": tests.run_single(o.options['run-only']) tests.print_results() else: tests.run_tests() tests.print_results() tests.cleanup_test_environment() if __name__=="__main__": main(sys.argv) diff --git a/lrmd/test.c b/lrmd/test.c index 74f75ea7a8..c09b1f53f5 100644 --- a/lrmd/test.c +++ b/lrmd/test.c @@ -1,573 +1,565 @@ /* * Copyright (c) 2012 David Vossel * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version.
* * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include #include #include #include #include #include #include #include #include /* *INDENT-OFF* */ static struct crm_option long_options[] = { {"help", 0, 0, '?'}, {"verbose", 0, 0, 'V', "\t\tPrint out logs and events to screen"}, {"quiet", 0, 0, 'Q', "\t\tSuppress all output to screen"}, - /* just incase we have to add data to events, - * we don't want break a billion regression tests. Instead - * we'll create different versions */ {"listen", 1, 0, 'l', "\tListen for a specific event string"}, - {"event-ver", 1, 0, 'e', "\tVersion of event to listen to"}, {"api-call", 1, 0, 'c', "\tDirectly relates to lrmd api functions"}, {"no-wait", 0, 0, 'w', "\tMake api call and do not wait for result."}, {"is-running", 0, 0, 'R', "\tDetermine if a resource is registered and running."}, {"-spacer-", 1, 0, '-', "\nParameters for api-call option"}, {"action", 1, 0, 'a'}, {"rsc-id", 1, 0, 'r'}, {"cancel-call-id", 1, 0, 'x'}, {"provider", 1, 0, 'P'}, {"class", 1, 0, 'C'}, {"type", 1, 0, 'T'}, {"interval", 1, 0, 'i'}, {"timeout", 1, 0, 't'}, {"start-delay", 1, 0, 's'}, {"param-key", 1, 0, 'k'}, {"param-val", 1, 0, 'v'}, {"-spacer-", 1, 0, '-'}, {0, 0, 0, 0} }; /* *INDENT-ON* */ cib_t *cib_conn = NULL; static int exec_call_id = 0; static int exec_call_opts = 0; extern void cleanup_alloc_calculations(pe_working_set_t * data_set); static struct { int verbose; int quiet; int print; int interval; int timeout; int start_delay; int cancel_call_id; - int event_version; int no_wait; int is_running; int no_connect; const char *api_call; const char *rsc_id; const char *provider; const char *class; const char *type; const char *action; const char *listen; lrmd_key_value_t *params; } options; GMainLoop *mainloop = NULL; lrmd_t *lrmd_conn = NULL; static char event_buf_v0[1024]; #define print_result(result) \ if (!options.quiet) { \ result; \ } \ #define report_event(event) \ snprintf(event_buf_v0, sizeof(event_buf_v0), "NEW_EVENT event_type:%s rsc_id:%s action:%s rc:%s op_status:%s", \ lrmd_event_type2str(event->type), \ event->rsc_id, \ event->op_type ? 
event->op_type : "none", \ lrmd_event_rc2str(event->rc), \ services_lrm_status_str(event->op_status)); \ crm_info("%s", event_buf_v0);; static void test_shutdown(int nsig) { lrmd_api_delete(lrmd_conn); } static void read_events(lrmd_event_data_t * event) { report_event(event); if (options.listen) { if (safe_str_eq(options.listen, event_buf_v0)) { print_result(printf("LISTEN EVENT SUCCESSFUL\n")); exit(0); } } if (exec_call_id && (event->call_id == exec_call_id)) { if (event->op_status == 0 && event->rc == 0) { print_result(printf("API-CALL SUCCESSFUL for 'exec'\n")); } else { print_result(printf("API-CALL FAILURE for 'exec', rc:%d lrmd_op_status:%s\n", event->rc, services_lrm_status_str(event->op_status))); exit(-1); } if (!options.listen) { exit(0); } } } static gboolean timeout_err(gpointer data) { print_result(printf("LISTEN EVENT FAILURE - timeout occurred, never found.\n")); exit(-1); return FALSE; } static void try_connect(void) { int tries = 10; int i = 0; int rc = 0; for (i = 0; i < tries; i++) { rc = lrmd_conn->cmds->connect(lrmd_conn, "lrmd", NULL); if (!rc) { crm_info("lrmd client connection established"); return; } else { crm_info("lrmd client connection failed"); } sleep(1); } print_result(printf("API CONNECTION FAILURE\n")); exit(-1); } static gboolean start_test(gpointer user_data) { int rc = 0; if (!options.no_connect) { try_connect(); } lrmd_conn->cmds->set_callback(lrmd_conn, read_events); if (options.timeout) { g_timeout_add(options.timeout, timeout_err, NULL); } if (!options.api_call) { return 0; } if (safe_str_eq(options.api_call, "exec")) { rc = lrmd_conn->cmds->exec(lrmd_conn, options.rsc_id, options.action, NULL, options.interval, options.timeout, options.start_delay, exec_call_opts, options.params); if (rc > 0) { exec_call_id = rc; print_result(printf("API-CALL 'exec' action pending, waiting on response\n")); } } else if (safe_str_eq(options.api_call, "register_rsc")) { rc = lrmd_conn->cmds->register_rsc(lrmd_conn, options.rsc_id, options.class, options.provider, options.type, 0); } else if (safe_str_eq(options.api_call, "get_rsc_info")) { lrmd_rsc_info_t *rsc_info; rsc_info = lrmd_conn->cmds->get_rsc_info(lrmd_conn, options.rsc_id, 0); if (rsc_info) { print_result(printf("RSC_INFO: id:%s class:%s provider:%s type:%s\n", rsc_info->id, rsc_info->class, rsc_info->provider ? 
rsc_info->provider : "", rsc_info->type)); lrmd_free_rsc_info(rsc_info); rc = pcmk_ok; } else { rc = -1; } } else if (safe_str_eq(options.api_call, "unregister_rsc")) { rc = lrmd_conn->cmds->unregister_rsc(lrmd_conn, options.rsc_id, 0); } else if (safe_str_eq(options.api_call, "cancel")) { rc = lrmd_conn->cmds->cancel(lrmd_conn, options.rsc_id, options.action, options.interval); } else if (safe_str_eq(options.api_call, "metadata")) { char *output = NULL; rc = lrmd_conn->cmds->get_metadata(lrmd_conn, options.class, options.provider, options.type, &output, 0); if (rc == pcmk_ok) { print_result(printf("%s", output)); free(output); } } else if (safe_str_eq(options.api_call, "list_agents")) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, options.class, options.provider); if (rc > 0) { print_result(printf("%d agents found\n", rc)); for (iter = list; iter != NULL; iter = iter->next) { print_result(printf("%s\n", iter->val)); } lrmd_list_freeall(list); rc = 0; } else { print_result(printf("API_CALL FAILURE - no agents found\n")); rc = -1; } } else if (safe_str_eq(options.api_call, "list_ocf_providers")) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, options.type, &list); if (rc > 0) { print_result(printf("%d providers found\n", rc)); for (iter = list; iter != NULL; iter = iter->next) { print_result(printf("%s\n", iter->val)); } lrmd_list_freeall(list); rc = 0; } else { print_result(printf("API_CALL FAILURE - no providers found\n")); rc = -1; } } else if (safe_str_eq(options.api_call, "list_standards")) { lrmd_list_t *list = NULL; lrmd_list_t *iter = NULL; rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list); if (rc > 0) { print_result(printf("%d standards found\n", rc)); for (iter = list; iter != NULL; iter = iter->next) { print_result(printf("%s\n", iter->val)); } lrmd_list_freeall(list); rc = 0; } else { print_result(printf("API_CALL FAILURE - no providers found\n")); rc = -1; } } else if (options.api_call) { print_result(printf("API-CALL FAILURE unknown action '%s'\n", options.action)); exit(-1); } if (rc < 0) { print_result(printf("API-CALL FAILURE for '%s' api_rc:%d\n", options.api_call, rc)); exit(-1); } if (options.api_call && rc == pcmk_ok) { print_result(printf("API-CALL SUCCESSFUL for '%s'\n", options.api_call)); if (!options.listen) { exit(0); } } if (options.no_wait) { /* just make the call and exit regardless of anything else. 
*/ exit(0); } return 0; } static resource_t * find_rsc_or_clone(const char *rsc, pe_working_set_t * data_set) { resource_t *the_rsc = pe_find_resource(data_set->resources, rsc); if (the_rsc == NULL) { char *as_clone = crm_concat(rsc, "0", ':'); the_rsc = pe_find_resource(data_set->resources, as_clone); free(as_clone); } return the_rsc; } static int generate_params(void) { int rc = 0; pe_working_set_t data_set; xmlNode *cib_xml_copy = NULL; resource_t *rsc = NULL; GHashTable *params = NULL; GHashTable *meta = NULL; GHashTableIter iter; if (options.params) { return 0; } set_working_set_defaults(&data_set); cib_conn = cib_new(); rc = cib_conn->cmds->signon(cib_conn, "lrmd_test", cib_query); if (rc != pcmk_ok) { crm_err("Error signing on to the CIB service: %s\n", pcmk_strerror(rc)); rc = -1; goto param_gen_bail; } cib_xml_copy = get_cib_copy(cib_conn); if (!cib_xml_copy) { crm_err("Error retrieving cib copy."); rc = -1; goto param_gen_bail; } if (cli_config_update(&cib_xml_copy, NULL, FALSE) == FALSE) { crm_err("Error updating cib configuration"); rc = -1; goto param_gen_bail; } data_set.input = cib_xml_copy; data_set.now = new_ha_date(TRUE); cluster_status(&data_set); if (options.rsc_id) { rsc = find_rsc_or_clone(options.rsc_id, &data_set); } if (!rsc) { crm_err("Resource does not exist in config"); rc = -1; goto param_gen_bail; } params = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); meta = g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str); get_rsc_attributes(params, rsc, NULL, &data_set); get_meta_attributes(meta, rsc, NULL, &data_set); if (params) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, params); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { options.params = lrmd_key_value_add(options.params, key, value); } g_hash_table_destroy(params); } if (meta) { char *key = NULL; char *value = NULL; g_hash_table_iter_init(&iter, meta); while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) { char *crm_name = crm_meta_name(key); options.params = lrmd_key_value_add(options.params, crm_name, value); free(crm_name); } g_hash_table_destroy(meta); } param_gen_bail: cleanup_alloc_calculations(&data_set); return rc; } int main(int argc, char **argv) { int option_index = 0; int argerr = 0; int flag; char *key = NULL; char *val = NULL; crm_trigger_t *trig; crm_set_options(NULL, "mode [options]", long_options, "Inject commands into the lrmd and watch for events\n"); while (1) { flag = crm_get_option(argc, argv, &option_index); if (flag == -1) break; switch (flag) { case '?': crm_help(flag, EX_OK); break; case 'V': options.verbose = 1; break; case 'Q': options.quiet = 1; options.verbose = 0; break; - case 'e': - options.event_version = atoi(optarg); - break; case 'l': options.listen = optarg; break; case 'w': options.no_wait = 1; break; case 'R': options.is_running = 1; break; case 'c': options.api_call = optarg; break; case 'a': options.action = optarg; break; case 'r': options.rsc_id = optarg; break; case 'x': options.cancel_call_id = atoi(optarg); break; case 'P': options.provider = optarg; break; case 'C': options.class = optarg; break; case 'T': options.type = optarg; break; case 'i': options.interval = atoi(optarg); break; case 't': options.timeout = atoi(optarg); break; case 's': options.start_delay = atoi(optarg); break; case 'k': key = optarg; if (key && val) { options.params = lrmd_key_value_add(options.params, key, val); key = val 
= NULL; } break; case 'v': val = optarg; if (key && val) { options.params = lrmd_key_value_add(options.params, key, val); key = val = NULL; } break; default: ++argerr; break; } } if (argerr) { crm_help('?', EX_USAGE); } if (optind > argc) { ++argerr; } if (!options.listen && (safe_str_eq(options.api_call, "metadata") || safe_str_eq(options.api_call, "list_agents") || safe_str_eq(options.api_call, "list_standards") || safe_str_eq(options.api_call, "list_ocf_providers"))) { options.no_connect = 1; } crm_log_init("lrmd_ctest", LOG_INFO, TRUE, options.verbose ? TRUE : FALSE, argc, argv, FALSE); if (options.is_running) { if (!options.timeout) { options.timeout = 30000; } options.interval = 0; if (!options.rsc_id) { crm_err("rsc-id must be given when is-running is used"); exit(-1); } if (generate_params()) { print_result(printf ("Failed to retrieve rsc parameters from cib, can not determine if rsc is running.\n")); exit(-1); } options.api_call = "exec"; options.action = "monitor"; exec_call_opts = lrmd_opt_notify_orig_only; } /* if we can't perform an api_call or listen for events, * there is nothing to do */ if (!options.api_call && !options.listen) { crm_err("Nothing to be done. Please specify 'api-call' and/or 'listen'"); return 0; } lrmd_conn = lrmd_api_new(); trig = mainloop_add_trigger(G_PRIORITY_HIGH, start_test, NULL); mainloop_set_trigger(trig); mainloop_add_signal(SIGTERM, test_shutdown); crm_info("Starting"); mainloop = g_main_new(FALSE); g_main_run(mainloop); lrmd_api_delete(lrmd_conn); if (cib_conn != NULL) { cib_conn->cmds->signoff(cib_conn); cib_delete(cib_conn); } return 0; }
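For reference, each step in the regression harness above reduces to one lrmd_test invocation built from a *_line fragment (the api-call options) and a *_event fragment (the expected NEW_EVENT listener). The minimal sketch below shows how such a pair could be driven by hand; the ./lrmd_test path, an already-running lrmd, and the zero-exit-code pass convention are assumptions for illustration only, not part of this patch.

#!/usr/bin/python
# Hypothetical usage sketch (not part of the patch): run one common_cmds pair manually.
# Assumes lrmd is already running and lrmd_test was built in the current directory.
import os

reg_line = "-c register_rsc -r lsb_test_rsc -t 1000 -C lsb -T LSBDummy"
reg_event = "-l \"NEW_EVENT event_type:register rsc_id:lsb_test_rsc action:none rc:ok op_status:complete\""

# The harness concatenates the api-call options with the expected-event listener and shells out;
# lrmd_test exits 0 only when the call succeeds and the listened-for event arrives before the timeout.
rc = os.system("./lrmd_test " + reg_line + " " + reg_event)
print "register step %s" % ("passed" if rc == 0 else "failed (rc=%d)" % rc)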