diff --git a/configure.ac b/configure.ac index c6b04e7f..dc374ca1 100644 --- a/configure.ac +++ b/configure.ac @@ -1,651 +1,651 @@ # -*- Autoconf -*- # Process this file with autoconf to produce a configure script. # bootstrap / init AC_PREREQ([2.61]) AC_INIT([corosync], m4_esyscmd([build-aux/git-version-gen .tarball-version]), [discuss@lists.corosync.org]) AM_INIT_AUTOMAKE([-Wno-portability]) AM_SILENT_RULES([yes]) AC_CONFIG_SRCDIR([lib/cpg.c]) AC_CONFIG_HEADER([include/corosync/config.h]) AC_CANONICAL_HOST AC_LANG([C]) dnl Fix default variables - "prefix" variable if not specified if test "$prefix" = "NONE"; then prefix="/usr" dnl Fix "localstatedir" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi dnl Fix "sysconfdir" variable if not specified if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi dnl Fix "libdir" variable if not specified if test "$libdir" = "\${exec_prefix}/lib"; then if test -e /usr/lib64; then libdir="/usr/lib64" else libdir="/usr/lib" fi fi fi if test "$srcdir" = "."; then AC_MSG_NOTICE([building in place srcdir:$srcdir]) AC_DEFINE([BUILDING_IN_PLACE], 1, [building in place]) else AC_MSG_NOTICE([building out of tree srcdir:$srcdir]) fi # Checks for programs. # check stolen from gnulib/m4/gnu-make.m4 if ! ${MAKE-make} --version /cannot/make/this >/dev/null 2>&1; then AC_MSG_ERROR([you don't seem to have GNU make; it is required]) fi sinclude(coroysync-default.m4) AC_PROG_CC AC_PROG_INSTALL AC_PROG_LN_S AC_PROG_MAKE_SET AC_PROG_RANLIB AC_CHECK_PROGS([GROFF], [groff]) AC_CHECK_PROGS([PKGCONFIG], [pkg-config]) AC_CHECK_PROGS([AUGTOOL], [augtool]) AC_CHECK_PROGS([DOT], [dot]) AC_CHECK_PROGS([DOXYGEN], [doxygen]) AC_CHECK_PROGS([AWK], [awk]) # Checks for libraries. 
AC_CHECK_LIB([dl], [dlopen]) AC_CHECK_LIB([pthread], [pthread_create]) AC_CHECK_LIB([socket], [socket]) AC_CHECK_LIB([nsl], [t_open]) AC_CHECK_LIB([rt], [sched_getscheduler]) PKG_CHECK_MODULES([LIBQB], [libqb]) AC_CHECK_LIB([qb], [qb_log_thread_priority_set], \ have_qb_log_thread_priority_set="yes", \ have_qb_log_thread_priority_set="no") if test "x${have_qb_log_thread_priority_set}" = xyes; then AC_DEFINE_UNQUOTED([HAVE_QB_LOG_THREAD_PRIORITY_SET], 1, [have qb_log_thread_priority_set]) fi PKG_CHECK_MODULES([nss],[nss]) # Checks for header files. AC_FUNC_ALLOCA AC_HEADER_DIRENT AC_HEADER_STDC AC_HEADER_SYS_WAIT AC_CHECK_HEADERS([arpa/inet.h fcntl.h limits.h netdb.h netinet/in.h stdint.h \ stdlib.h string.h sys/ioctl.h sys/param.h sys/socket.h \ sys/time.h syslog.h unistd.h sys/types.h getopt.h malloc.h \ sys/sockio.h utmpx.h ifaddrs.h]) # Checks for typedefs, structures, and compiler characteristics. AC_C_CONST AC_TYPE_UID_T AC_C_INLINE AC_TYPE_INT16_T AC_TYPE_INT32_T AC_TYPE_INT64_T AC_TYPE_INT8_T AC_TYPE_SIZE_T AC_TYPE_SSIZE_T AC_HEADER_TIME AC_TYPE_UINT16_T AC_TYPE_UINT32_T AC_TYPE_UINT64_T AC_TYPE_UINT8_T AC_C_VOLATILE # Checks for library functions. 
AC_FUNC_CLOSEDIR_VOID AC_FUNC_ERROR_AT_LINE AC_REPLACE_FNMATCH AC_FUNC_FORK AC_PROG_GCC_TRADITIONAL AC_FUNC_MALLOC AC_FUNC_MEMCMP AC_FUNC_REALLOC AC_FUNC_SELECT_ARGTYPES AC_TYPE_SIGNAL AC_FUNC_VPRINTF AC_CHECK_FUNCS([alarm alphasort atexit bzero dup2 endgrent endpwent fcntl \ getcwd getpeerucred getpeereid gettimeofday inet_ntoa memmove \ memset mkdir scandir select socket strcasecmp strchr strdup \ strerror strrchr strspn strstr pthread_setschedparam \ sched_get_priority_max sched_setscheduler getifaddrs]) AC_CONFIG_FILES([Makefile exec/Makefile include/Makefile init/Makefile lib/Makefile common_lib/Makefile man/Makefile pkgconfig/Makefile test/Makefile cts/Makefile cts/agents/Makefile cts/CTSvars.py tools/Makefile conf/Makefile Doxyfile]) ### Local business dnl =============================================== dnl Functions / global M4 variables dnl =============================================== dnl Global list of LIB names m4_define([local_soname_list], [])dnl dnl Upcase parameter m4_define([local_upcase], [translit([$*], [a-z], [A-Z])])dnl dnl M4 macro for include lib/lib$1.soname and subst that m4_define([LIB_SONAME_IMPORT],[dnl m4_define([local_libname], local_upcase($1)[_SONAME])dnl m4_define([local_soname], translit(m4_sinclude(lib/lib$1.verso), [ ], []))dnl local_libname="local_soname"dnl m4_define([local_soname_list], m4_defn([local_soname_list])[,]local_libname[,]local_upcase($1))dnl AC_SUBST(local_libname)dnl ])dnl dnl M4 macro for print padspaces (used in LIB_MSG_RESULT). 
It takes 2 arguments, length of string to pad and desired dnl (padded) length m4_define([m4_printpadspace],[ifelse(m4_eval([$2 - $1 < 1]),[1],,[ ][m4_printpadspace([$1],m4_eval([$2 - 1]))])])dnl dnl Show AC_MSG_RESULT for specific libraries m4_define([LIB_MSG_RESULT], [ifelse([$#], [1], ,[dnl AC_MSG_RESULT([ $2 Library SONAME m4_printpadspace(len($2),8) = ${$1}]) LIB_MSG_RESULT(m4_shift(m4_shift($@)))dnl ])])dnl # =============================================== # Helpers # =============================================== ## helper for CC stuff cc_supports_flag() { local CPPFLAGS="$CPPFLAGS $@" AC_MSG_CHECKING([whether $CC supports "$@"]) AC_PREPROC_IFELSE([AC_LANG_PROGRAM([])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } ## cleanup AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr/local;; esac AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Corosync, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac ## local defines PACKAGE_FEATURES="" LINT_FLAGS="-weak -unrecog +posixlib +ignoresigns -fcnuse \ -badflag -D__gnuc_va_list=va_list -D__attribute\(x\)=" # default libraries SONAME -SOMAJOR="4" +SOMAJOR="5" SOMINOR="0" SOMICRO="0" SONAME="${SOMAJOR}.${SOMINOR}.${SOMICRO}" # specific libraries SONAME LIB_SONAME_IMPORT([cfg]) LIB_SONAME_IMPORT([cpg]) LIB_SONAME_IMPORT([quorum]) LIB_SONAME_IMPORT([sam]) LIB_SONAME_IMPORT([votequorum]) LIB_SONAME_IMPORT([cmap]) # local options AC_ARG_ENABLE([ansi], [ --enable-ansi : force to build with ANSI standards. ], [ default="no" ]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings : enable fatal warnings. ], [ default="no" ]) AC_ARG_ENABLE([debug], [ --enable-debug : enable debug build. ], [ default="no" ]) AC_ARG_ENABLE([user-flags], [ --enable-user-flags : rely on user environment. ], [ default="no" ]) AC_ARG_ENABLE([coverage], [ --enable-coverage : coverage analysis of the codebase. 
], [ default="no" ]) AC_ARG_ENABLE([small-memory-footprint], [ --enable-small-memory-footprint : Use small message queues and small messages sizes. ], [ default="no" ]) AC_ARG_ENABLE([dbus], [ --enable-dbus : dbus events. ],, [ enable_dbus="no" ]) AC_ARG_ENABLE([testagents], [ --enable-testagents : Install Test Agents. ],, [ default="no" ]) AC_ARG_ENABLE([rdma], [ --enable-rdma : Infiniband RDMA transport support ],, [ enable_rdma="no" ]) AM_CONDITIONAL(BUILD_RDMA, test x$enable_rdma = xyes) AC_ARG_ENABLE([monitoring], [ --enable-monitoring : resource monitoring ],, [ default="no" ]) AM_CONDITIONAL(BUILD_MONITORING, test x$enable_monitoring = xyes) AC_ARG_ENABLE([watchdog], [ --enable-watchdog : Watchdog support ],, [ default="no" ]) AM_CONDITIONAL(BUILD_WATCHDOG, test x$enable_watchdog = xyes) AC_ARG_ENABLE([augeas], [ --enable-augeas : Install the augeas lens for corosync.conf ],, [ enable_augeas="no" ]) AM_CONDITIONAL(INSTALL_AUGEAS, test x$enable_augeas = xyes) AC_ARG_ENABLE([systemd], [ --enable-systemd : Install systemd service files],, [ enable_systemd="no" ]) AM_CONDITIONAL(INSTALL_SYSTEMD, test x$enable_systemd = xyes) AC_ARG_WITH([initddir], [ --with-initddir=DIR : path to init script directory. ], [ INITDDIR="$withval" ], [ INITDDIR="$sysconfdir/init.d" ]) AC_ARG_ENABLE([snmp], [ --enable-snmp : SNMP protocol support ], [ default="no" ]) AC_ARG_ENABLE([xmlconf], [ --enable-xmlconf : XML configuration support ],, [ enable_xmlconf="no" ]) AM_CONDITIONAL(INSTALL_XMLCONF, test x$enable_xmlconf = xyes) # OS detection # THIS SECTION MUST DIE! 
CP=cp OS_LDL="-ldl" have_linux="no" case "$host_os" in *linux*) AC_DEFINE_UNQUOTED([COROSYNC_LINUX], [1], [Compiling for Linux platform]) OS_CFLAGS="" OS_CPPFLAGS="-D_GNU_SOURCE" OS_LDFLAGS="" OS_DYFLAGS="-rdynamic" DARWIN_OPTS="" have_linux="yes" ;; darwin*) AC_DEFINE_UNQUOTED([COROSYNC_DARWIN], [1], [Compiling for Darwin platform]) CP=rsync OS_CFLAGS="" OS_CPPFLAGS="" OS_LDFLAGS="" OS_DYFLAGS="" DARWIN_OPTS="-dynamiclib -bind_at_load \ -current_version ${SONAME} \ -compatibility_version ${SONAME} -install_name \$(libdir)/\$(@)" AC_DEFINE_UNQUOTED([MAP_ANONYMOUS], [MAP_ANON], [Shared memory define for Darwin platform]) AC_DEFINE_UNQUOTED([PATH_MAX], [4096], [Number of chars in a path name including nul]) AC_DEFINE_UNQUOTED([NAME_MAX], [255], [Number of chars in a file name]) ;; *bsd*) AC_DEFINE_UNQUOTED([COROSYNC_BSD], [1], [Compiling for BSD platform]) AC_DEFINE_UNQUOTED([MAP_ANONYMOUS], [MAP_ANON], [Shared memory define for Darwin platform]) OS_CFLAGS="" OS_CPPFLAGS="-I/usr/local/include" OS_LDFLAGS="-L/usr/local/lib" OS_DYFLAGS="-export-dynamic" DARWIN_OPTS="" OS_LDL="" case "$host_os" in *freebsd[[234567]]*) ;; *freebsd*) AC_DEFINE_UNQUOTED([COROSYNC_FREEBSD_GE_8], [1], [Compiling for FreeBSD >= 8 platform]) ;; esac ;; *solaris*) AC_DEFINE_UNQUOTED([COROSYNC_SOLARIS], [1], [Compiling for Solaris platform]) AC_DEFINE_UNQUOTED([TS_CLASS], [1], [Prevent being scheduled RR]) AC_DEFINE_UNQUOTED([_SEM_SEMUN_UNDEFINED], [1], [The semun structure is undefined]) CP=rsync OS_CFLAGS="" OS_CPPFLAGS="-D_REENTRANT" OS_LDFLAGS="" OS_DYFLAGS="-Wl,-z,lazyload" DARWIN_OPTS="" SOLARIS_OPTS=" " ;; *) AC_MSG_ERROR([Unsupported OS? 
hmmmm]) ;; esac AC_SUBST(CP) # *FLAGS handling goes here ENV_CFLAGS="$CFLAGS" ENV_CPPFLAGS="$CPPFLAGS" ENV_LDFLAGS="$LDFLAGS" # debug build stuff if test "x${enable_debug}" = xyes; then AC_DEFINE_UNQUOTED([DEBUG], [1], [Compiling Debugging code]) OPT_CFLAGS="-O0" PACKAGE_FEATURES="$PACKAGE_FEATURES debug" else OPT_CFLAGS="-O3" fi # gdb flags if test "x${GCC}" = xyes; then GDB_FLAGS="-ggdb3" else GDB_FLAGS="-g" fi # Look for dbus-1 if test "x${enable_dbus}" = xyes; then PKG_CHECK_MODULES([DBUS],[dbus-1]) AC_DEFINE_UNQUOTED([HAVE_DBUS], 1, [have dbus]) PACKAGE_FEATURES="$PACKAGE_FEATURES dbus" fi if test "x${enable_testagents}" = xyes; then AC_DEFINE_UNQUOTED([HAVE_TESTAGENTS], 1, [have testagents]) PACKAGE_FEATURES="$PACKAGE_FEATURES testagents" fi if test "x${enable_rdma}" = xyes; then PKG_CHECK_MODULES([rdmacm],[rdmacm]) PKG_CHECK_MODULES([ibverbs],[ibverbs]) AC_DEFINE_UNQUOTED([HAVE_RDMA], 1, [have rdmacm]) PACKAGE_FEATURES="$PACKAGE_FEATURES rdma" fi if test "x${enable_monitoring}" = xyes; then AC_CHECK_LIB([statgrab], [sg_get_mem_stats], have_libstatgrab="yes", have_libstatgrab="no") if test "x${have_libstatgrab}" = xyes; then AC_DEFINE_UNQUOTED([HAVE_LIBSTATGRAB], 1, [have libstatgrab]) statgrab_LIBS="-lstatgrab" else if test "x${have_linux}" = xno; then AC_MSG_ERROR(monitoring requires libstatgrab on non-linux systems) fi fi AC_SUBST([statgrab_LIBS]) AC_DEFINE_UNQUOTED([HAVE_MONITORING], 1, [have resource monitoring]) PACKAGE_FEATURES="$PACKAGE_FEATURES monitoring" fi if test "x${enable_watchdog}" = xyes; then AC_CHECK_HEADER(linux/watchdog.h,,AC_MSG_ERROR(watchdog requires linux/watchdog.h)) AC_CHECK_HEADER(linux/reboot.h,,AC_MSG_ERROR(watchdog requires linux/reboot.h)) AC_DEFINE_UNQUOTED([HAVE_WATCHDOG], 1, [have watchdog]) PACKAGE_FEATURES="$PACKAGE_FEATURES watchdog" fi if test "x${enable_augeas}" = xyes; then PACKAGE_FEATURES="$PACKAGE_FEATURES augeas" fi if test "x${enable_systemd}" = xyes; then PACKAGE_FEATURES="$PACKAGE_FEATURES systemd" fi if test 
"x${enable_xmlconf}" = xyes; then PACKAGE_FEATURES="$PACKAGE_FEATURES xmlconf" fi if test "x${enable_snmp}" = xyes; then SNMPCONFIG="" AC_CHECK_HEADERS(net-snmp/net-snmp-config.h) if test "x${ac_cv_header_net_snmp_net_snmp_config_h}" != "xyes"; then enable_snmp=no fi if test $enable_snmp != no; then AC_PATH_PROGS(SNMPCONFIG, net-snmp-config) if test "X${SNMPCONFIG}" = "X"; then AC_MSG_RESULT(You need the net_snmp development package to continue.) enable_snmp=no fi fi if test $enable_snmp != no; then AC_MSG_CHECKING(for special snmp libraries) SNMPLIBS=`$SNMPCONFIG --libs` AC_MSG_RESULT($SNMPLIBS) fi if test $enable_snmp != no; then savedLibs=$LIBS LIBS="$LIBS $SNMPLIBS" AC_CHECK_FUNCS(netsnmp_transport_open_client) if test $ac_cv_func_netsnmp_transport_open_client != yes; then AC_CHECK_FUNCS(netsnmp_tdomain_transport) if test $ac_cv_func_netsnmp_tdomain_transport != yes; then enable_snmp=no fi else AC_DEFINE_UNQUOTED([NETSNMPV54], $NETSNMP_NEW_SUPPORT, [have net-snmp5.4 over]) fi LIBS=$savedLibs fi AC_MSG_CHECKING(for snmp) AC_MSG_RESULT($enable_snmp) if test $enable_snmp = no; then enable_snmp=0 AC_MSG_ERROR(Unable to support SNMP) else enable_snmp=1 PACKAGE_FEATURES="$PACKAGE_FEATURES snmp" AC_DEFINE_UNQUOTED([ENABLE_SNMP], $enable_snmp, [Build in support for sending SNMP traps]) fi else enable_snmp=0 fi AC_SUBST([SNMPLIBS]) AM_CONDITIONAL(BUILD_SNMP, test "${enable_snmp}" = "1") # extra warnings EXTRA_WARNINGS="" WARNLIST=" all shadow missing-prototypes missing-declarations strict-prototypes declaration-after-statement pointer-arith write-strings cast-align bad-function-cast missing-format-attribute format=2 format-security format-nonliteral no-long-long unsigned-char gnu89-inline no-strict-aliasing " for j in $WARNLIST; do if cc_supports_flag -W$j; then EXTRA_WARNINGS="$EXTRA_WARNINGS -W$j"; fi done if test "x${enable_coverage}" = xyes && \ cc_supports_flag -ftest-coverage && \ cc_supports_flag -fprofile-arcs ; then AC_MSG_NOTICE([Enabling Coverage (enable -O0 
by default)]) OPT_CFLAGS="-O0" COVERAGE_CFLAGS="-ftest-coverage -fprofile-arcs" COVERAGE_LDFLAGS="-ftest-coverage -fprofile-arcs" PACKAGE_FEATURES="$PACKAGE_FEATURES coverage" else COVERAGE_CFLAGS="" COVERAGE_LDFLAGS="" fi if test "x${enable_small_memory_footprint}" = xyes ; then AC_DEFINE_UNQUOTED([HAVE_SMALL_MEMORY_FOOTPRINT], 1, [have small_memory_footprint]) PACKAGE_FEATURES="$PACKAGE_FEATURES small-memory-footprint" fi if test "x${enable_ansi}" = xyes && \ cc_supports_flag -std=iso9899:199409 ; then AC_MSG_NOTICE([Enabling ANSI Compatibility]) ANSI_CPPFLAGS="-ansi -D_GNU_SOURCE -DANSI_ONLY" PACKAGE_FEATURES="$PACKAGE_FEATURES ansi" else ANSI_CPPFLAGS="" fi if test "x${enable_fatal_warnings}" = xyes && \ cc_supports_flag -Werror ; then AC_MSG_NOTICE([Enabling Fatal Warnings (-Werror)]) WERROR_CFLAGS="-Werror" PACKAGE_FEATURES="$PACKAGE_FEATURES fatal-warnings" else WERROR_CFLAGS="" fi # don't add addtional cflags if test "x${enable_user_flags}" = xyes; then OPT_CFLAGS="" GDB_FLAGS="" EXTRA_WARNINGS="" fi # final build of *FLAGS CFLAGS="$ENV_CFLAGS $OPT_CFLAGS $GDB_FLAGS $OS_CFLAGS \ $COVERAGE_CFLAGS $EXTRA_WARNINGS $WERROR_CFLAGS $NSS_CFLAGS" CPPFLAGS="$ENV_CPPFLAGS $ANSI_CPPFLAGS $OS_CPPFLAGS" LDFLAGS="$ENV_LDFLAGS $COVERAGE_LDFLAGS $OS_LDFLAGS" # substitute what we need: AC_SUBST([INITDDIR]) AC_SUBST([SOMAJOR]) AC_SUBST([SOMINOR]) AC_SUBST([SOMICRO]) AC_SUBST([SONAME]) AC_SUBST([OS_DYFLAGS]) AC_SUBST([OS_LDL]) AM_CONDITIONAL(INSTALL_TESTAGENTS, test -n "${enable_testagents}") AM_CONDITIONAL(INSTALL_MIB, test "${enable_snmp}" = "1") AM_CONDITIONAL(INSTALL_DBUSCONF, test "${enable_dbus}" = "1") AM_CONDITIONAL(AUGTOOL, test -n "${AUGTOOL}") AC_SUBST([NSS_LDFLAGS]) AM_CONDITIONAL(BUILD_DARWIN, test -n "${DARWIN_OPTS}") AM_CONDITIONAL(BUILD_SOLARIS, test -n "${SOLARIS_OPTS}") AC_SUBST([DARWIN_OPTS]) AC_SUBST([SOLARIS_OPTS]) AM_CONDITIONAL(BUILD_HTML_DOCS, test -n "${GROFF}") AC_SUBST([LINT_FLAGS]) AC_DEFINE_UNQUOTED([LOCALSTATEDIR], "$(eval echo 
${localstatedir})", [localstate directory]) COROSYSCONFDIR=${sysconfdir}/corosync AC_SUBST([COROSYSCONFDIR]) AC_DEFINE_UNQUOTED([COROSYSCONFDIR], "$(eval echo ${COROSYSCONFDIR})", [corosync config directory]) AC_DEFINE_UNQUOTED([PACKAGE_FEATURES], "${PACKAGE_FEATURES}", [corosync built-in features]) AC_OUTPUT AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION}]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Doc dir = ${docdir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([ System init.d directory = ${INITDDIR}]) AC_MSG_RESULT([ corosync config dir = ${COROSYSCONFDIR}]) AC_MSG_RESULT([ Features =${PACKAGE_FEATURES}]) AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE build info:]) AC_MSG_RESULT([ Library SONAME = ${SONAME}]) LIB_MSG_RESULT(m4_shift(local_soname_list))dnl AC_MSG_RESULT([ Default optimization = ${OPT_CFLAGS}]) AC_MSG_RESULT([ Default debug options = ${GDB_CFLAGS}]) AC_MSG_RESULT([ Extra compiler warnings = ${EXTRA_WARNING}]) AC_MSG_RESULT([ Env. defined CFLAG = ${ENV_CFLAGS}]) AC_MSG_RESULT([ Env. defined CPPFLAGS = ${ENV_CPPFLAGS}]) AC_MSG_RESULT([ Env. defined LDFLAGS = ${ENV_LDFLAGS}]) AC_MSG_RESULT([ OS defined CFLAGS = ${OS_CFLAGS}]) AC_MSG_RESULT([ OS defined CPPFLAGS = ${OS_CPPFLAGS}]) AC_MSG_RESULT([ OS defined LDFLAGS = ${OS_LDFLAGS}]) AC_MSG_RESULT([ OS defined LDL = ${OS_LDL}]) AC_MSG_RESULT([ OS defined DYFLAGS = ${OS_DYFLAGS}]) AC_MSG_RESULT([ ANSI defined CPPFLAGS = ${ANSI_CPPFLAGS}]) AC_MSG_RESULT([ Coverage CFLAGS = ${COVERAGE_CFLAGS}]) AC_MSG_RESULT([ Coverage LDFLAGS = ${COVERAGE_LDFLAGS}]) AC_MSG_RESULT([ Fatal War. 
CFLAGS = ${WERROR_CFLAGS}]) AC_MSG_RESULT([ Final CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ Final CPPFLAGS = ${CPPFLAGS}]) AC_MSG_RESULT([ Final LDFLAGS = ${LDFLAGS}]) diff --git a/exec/totemiba.c b/exec/totemiba.c index 0bed4cfb..a419d1a5 100644 --- a/exec/totemiba.c +++ b/exec/totemiba.c @@ -1,1563 +1,1564 @@ /* * Copyright (c) 2009-2012 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LOGSYS_UTILS_ONLY 1 #include #include "totemiba.h" #define COMPLETION_QUEUE_ENTRIES 100 #define TOTAL_READ_POSTS 100 #define MAX_MTU_SIZE 4096 struct totemiba_instance { struct sockaddr bind_addr; struct sockaddr send_token_bind_addr; struct sockaddr mcast_addr; struct sockaddr token_addr; struct sockaddr local_mcast_bind_addr; struct totem_interface *totem_interface; struct totem_config *totem_config; void (*totemiba_iface_change_fn) ( void *context, const struct totem_ip_address *iface_address); void (*totemiba_deliver_fn) ( void *context, const void *msg, unsigned int msg_len); void (*totemiba_target_set_completed) ( void *context); void *rrp_context; qb_loop_timer_handle timer_netif_check_timeout; qb_loop_t *totemiba_poll_handle; struct totem_ip_address my_id; struct rdma_event_channel *mcast_channel; struct rdma_cm_id *mcast_cma_id; struct ibv_pd *mcast_pd; struct sockaddr mcast_dest_addr; uint32_t mcast_qpn; uint32_t mcast_qkey; struct ibv_ah *mcast_ah; struct ibv_comp_channel *mcast_send_completion_channel; struct ibv_comp_channel *mcast_recv_completion_channel; struct ibv_cq *mcast_send_cq; struct ibv_cq *mcast_recv_cq; int recv_token_accepted; struct rdma_event_channel *recv_token_channel; struct rdma_event_channel *listen_recv_token_channel; struct rdma_cm_id *listen_recv_token_cma_id; struct rdma_cm_id *recv_token_cma_id; struct ibv_pd *recv_token_pd; struct sockaddr recv_token_dest_addr; struct ibv_comp_channel *recv_token_send_completion_channel; struct ibv_comp_channel *recv_token_recv_completion_channel; struct ibv_cq *recv_token_send_cq; struct ibv_cq *recv_token_recv_cq; int send_token_bound; struct 
rdma_event_channel *send_token_channel; struct rdma_cm_id *send_token_cma_id; struct ibv_pd *send_token_pd; struct sockaddr send_token_dest_addr; uint32_t send_token_qpn; uint32_t send_token_qkey; struct ibv_ah *send_token_ah; struct ibv_comp_channel *send_token_send_completion_channel; struct ibv_comp_channel *send_token_recv_completion_channel; struct ibv_cq *send_token_send_cq; struct ibv_cq *send_token_recv_cq; void (*totemiba_log_printf) ( int level, int subsys, const char *function, const char *file, int line, const char *format, ...)__attribute__((format(printf, 6, 7))); int totemiba_subsys_id; struct list_head mcast_send_buf_free; struct list_head token_send_buf_free; struct list_head mcast_send_buf_head; struct list_head token_send_buf_head; struct list_head recv_token_recv_buf_head; }; union u { uint64_t wr_id; void *v; }; #define log_printf(level, format, args...) \ do { \ instance->totemiba_log_printf ( \ level, \ instance->totemiba_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ (const char *)format, ##args); \ } while (0); struct recv_buf { struct list_head list_all; struct ibv_recv_wr recv_wr; struct ibv_sge sge; struct ibv_mr *mr; char buffer[MAX_MTU_SIZE]; }; struct send_buf { struct list_head list_free; struct list_head list_all; struct ibv_mr *mr; char buffer[MAX_MTU_SIZE]; }; static hdb_handle_t void2wrid (void *v) { union u u; u.v = v; return u.wr_id; } static void * wrid2void (uint64_t wr_id) { union u u; u.wr_id = wr_id; return u.v; } static void totemiba_instance_initialize (struct totemiba_instance *instance) { memset (instance, 0, sizeof (struct totemiba_instance)); list_init (&instance->mcast_send_buf_free); list_init (&instance->token_send_buf_free); list_init (&instance->mcast_send_buf_head); list_init (&instance->token_send_buf_head); list_init (&instance->recv_token_recv_buf_head); } static inline struct send_buf *mcast_send_buf_get ( struct totemiba_instance *instance) { struct send_buf *send_buf; if (list_empty 
(&instance->mcast_send_buf_free) == 0) { send_buf = list_entry (instance->mcast_send_buf_free.next, struct send_buf, list_free); list_del (&send_buf->list_free); return (send_buf); } send_buf = malloc (sizeof (struct send_buf)); if (send_buf == NULL) { return (NULL); } send_buf->mr = ibv_reg_mr (instance->mcast_pd, send_buf->buffer, 2048, IBV_ACCESS_LOCAL_WRITE); if (send_buf->mr == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't register memory range"); free (send_buf); return (NULL); } list_init (&send_buf->list_all); list_add_tail (&send_buf->list_all, &instance->mcast_send_buf_head); return (send_buf); } static inline void mcast_send_buf_put ( struct totemiba_instance *instance, struct send_buf *send_buf) { list_init (&send_buf->list_free); list_add_tail (&send_buf->list_free, &instance->mcast_send_buf_free); } static inline struct send_buf *token_send_buf_get ( struct totemiba_instance *instance) { struct send_buf *send_buf; if (list_empty (&instance->token_send_buf_free) == 0) { send_buf = list_entry (instance->token_send_buf_free.next, struct send_buf, list_free); list_del (&send_buf->list_free); return (send_buf); } send_buf = malloc (sizeof (struct send_buf)); if (send_buf == NULL) { return (NULL); } send_buf->mr = ibv_reg_mr (instance->send_token_pd, send_buf->buffer, 2048, IBV_ACCESS_LOCAL_WRITE); if (send_buf->mr == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't register memory range"); free (send_buf); return (NULL); } list_init (&send_buf->list_all); list_add_tail (&send_buf->list_all, &instance->token_send_buf_head); return (send_buf); } static inline void token_send_buf_destroy (struct totemiba_instance *instance) { struct list_head *list; struct send_buf *send_buf; for (list = instance->token_send_buf_head.next; list != &instance->token_send_buf_head;) { send_buf = list_entry (list, struct send_buf, list_all); list = list->next; ibv_dereg_mr (send_buf->mr); free (send_buf); } list_init (&instance->token_send_buf_free); list_init 
(&instance->token_send_buf_head); } static inline void token_send_buf_put ( struct totemiba_instance *instance, struct send_buf *send_buf) { list_init (&send_buf->list_free); list_add_tail (&send_buf->list_free, &instance->token_send_buf_free); } static inline struct recv_buf *recv_token_recv_buf_create ( struct totemiba_instance *instance) { struct recv_buf *recv_buf; recv_buf = malloc (sizeof (struct recv_buf)); if (recv_buf == NULL) { return (NULL); } recv_buf->mr = ibv_reg_mr (instance->recv_token_pd, &recv_buf->buffer, 2048, IBV_ACCESS_LOCAL_WRITE); recv_buf->recv_wr.next = NULL; recv_buf->recv_wr.sg_list = &recv_buf->sge; recv_buf->recv_wr.num_sge = 1; recv_buf->recv_wr.wr_id = (uintptr_t)recv_buf; recv_buf->sge.length = 2048; recv_buf->sge.lkey = recv_buf->mr->lkey; recv_buf->sge.addr = (uintptr_t)recv_buf->buffer; list_init (&recv_buf->list_all); list_add (&recv_buf->list_all, &instance->recv_token_recv_buf_head); return (recv_buf); } static inline int recv_token_recv_buf_post (struct totemiba_instance *instance, struct recv_buf *recv_buf) { struct ibv_recv_wr *fail_recv; int res; res = ibv_post_recv (instance->recv_token_cma_id->qp, &recv_buf->recv_wr, &fail_recv); return (res); } static inline void recv_token_recv_buf_post_initial (struct totemiba_instance *instance) { struct recv_buf *recv_buf; unsigned int i; for (i = 0; i < TOTAL_READ_POSTS; i++) { recv_buf = recv_token_recv_buf_create (instance); recv_token_recv_buf_post (instance, recv_buf); } } static inline void recv_token_recv_buf_post_destroy ( struct totemiba_instance *instance) { struct recv_buf *recv_buf; struct list_head *list; for (list = instance->recv_token_recv_buf_head.next; list != &instance->recv_token_recv_buf_head;) { recv_buf = list_entry (list, struct recv_buf, list_all); list = list->next; ibv_dereg_mr (recv_buf->mr); free (recv_buf); } list_init (&instance->recv_token_recv_buf_head); } static inline struct recv_buf *mcast_recv_buf_create (struct totemiba_instance *instance) { 
struct recv_buf *recv_buf; struct ibv_mr *mr; recv_buf = malloc (sizeof (struct recv_buf)); if (recv_buf == NULL) { return (NULL); } mr = ibv_reg_mr (instance->mcast_pd, &recv_buf->buffer, 2048, IBV_ACCESS_LOCAL_WRITE); recv_buf->recv_wr.next = NULL; recv_buf->recv_wr.sg_list = &recv_buf->sge; recv_buf->recv_wr.num_sge = 1; recv_buf->recv_wr.wr_id = (uintptr_t)recv_buf; recv_buf->sge.length = 2048; recv_buf->sge.lkey = mr->lkey; recv_buf->sge.addr = (uintptr_t)recv_buf->buffer; return (recv_buf); } static inline int mcast_recv_buf_post (struct totemiba_instance *instance, struct recv_buf *recv_buf) { struct ibv_recv_wr *fail_recv; int res; res = ibv_post_recv (instance->mcast_cma_id->qp, &recv_buf->recv_wr, &fail_recv); return (res); } static inline void mcast_recv_buf_post_initial (struct totemiba_instance *instance) { struct recv_buf *recv_buf; unsigned int i; for (i = 0; i < TOTAL_READ_POSTS; i++) { recv_buf = mcast_recv_buf_create (instance); mcast_recv_buf_post (instance, recv_buf); } } static inline void iba_deliver_fn (struct totemiba_instance *instance, uint64_t wr_id, uint32_t bytes) { const char *addr; const struct recv_buf *recv_buf; recv_buf = wrid2void(wr_id); addr = &recv_buf->buffer[sizeof (struct ibv_grh)]; instance->totemiba_deliver_fn (instance->rrp_context, addr, bytes); } static int mcast_cq_send_event_fn (int events, int suck, void *context) { struct totemiba_instance *instance = (struct totemiba_instance *)context; struct ibv_wc wc[32]; struct ibv_cq *ev_cq; void *ev_ctx; int res; int i; ibv_get_cq_event (instance->mcast_send_completion_channel, &ev_cq, &ev_ctx); ibv_ack_cq_events (ev_cq, 1); res = ibv_req_notify_cq (ev_cq, 0); res = ibv_poll_cq (instance->mcast_send_cq, 32, wc); if (res > 0) { for (i = 0; i < res; i++) { mcast_send_buf_put (instance, wrid2void(wc[i].wr_id)); } } return (0); } static int mcast_cq_recv_event_fn (int events, int suck, void *context) { struct totemiba_instance *instance = (struct totemiba_instance *)context; 
struct ibv_wc wc[64]; struct ibv_cq *ev_cq; void *ev_ctx; int res; int i; ibv_get_cq_event (instance->mcast_recv_completion_channel, &ev_cq, &ev_ctx); ibv_ack_cq_events (ev_cq, 1); res = ibv_req_notify_cq (ev_cq, 0); res = ibv_poll_cq (instance->mcast_recv_cq, 64, wc); if (res > 0) { for (i = 0; i < res; i++) { iba_deliver_fn (instance, wc[i].wr_id, wc[i].byte_len); mcast_recv_buf_post (instance, wrid2void(wc[i].wr_id)); } } return (0); } static int mcast_rdma_event_fn (int events, int suck, void *context) { struct totemiba_instance *instance = (struct totemiba_instance *)context; struct rdma_cm_event *event; int res; res = rdma_get_cm_event (instance->mcast_channel, &event); if (res != 0) { return (0); } switch (event->event) { /* * occurs when we resolve the multicast address */ case RDMA_CM_EVENT_ADDR_RESOLVED: rdma_join_multicast (instance->mcast_cma_id, &instance->mcast_addr, instance); break; /* * occurs when the CM joins the multicast group */ case RDMA_CM_EVENT_MULTICAST_JOIN: instance->mcast_qpn = event->param.ud.qp_num; instance->mcast_qkey = event->param.ud.qkey; instance->mcast_ah = ibv_create_ah (instance->mcast_pd, &event->param.ud.ah_attr); instance->totemiba_iface_change_fn (instance->rrp_context, &instance->my_id); break; case RDMA_CM_EVENT_ADDR_ERROR: case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_MULTICAST_ERROR: log_printf (LOGSYS_LEVEL_ERROR, "multicast error"); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: break; default: log_printf (LOGSYS_LEVEL_ERROR, "default %d", event->event); break; } rdma_ack_cm_event (event); return (0); } static int recv_token_cq_send_event_fn ( int fd, int revents, void *context) { struct totemiba_instance *instance = (struct totemiba_instance *)context; struct ibv_wc wc[32]; struct ibv_cq *ev_cq; void *ev_ctx; int res; int i; ibv_get_cq_event (instance->recv_token_send_completion_channel, &ev_cq, &ev_ctx); ibv_ack_cq_events (ev_cq, 1); res = ibv_req_notify_cq (ev_cq, 0); res = ibv_poll_cq 
(instance->recv_token_send_cq, 32, wc); if (res > 0) { for (i = 0; i < res; i++) { iba_deliver_fn (instance, wc[i].wr_id, wc[i].byte_len); ibv_dereg_mr (wrid2void(wc[i].wr_id)); } } return (0); } static int recv_token_cq_recv_event_fn (int events, int suck, void *context) { struct totemiba_instance *instance = (struct totemiba_instance *)context; struct ibv_wc wc[32]; struct ibv_cq *ev_cq; void *ev_ctx; int res; int i; ibv_get_cq_event (instance->recv_token_recv_completion_channel, &ev_cq, &ev_ctx); ibv_ack_cq_events (ev_cq, 1); res = ibv_req_notify_cq (ev_cq, 0); res = ibv_poll_cq (instance->recv_token_recv_cq, 32, wc); if (res > 0) { for (i = 0; i < res; i++) { iba_deliver_fn (instance, wc[i].wr_id, wc[i].byte_len); recv_token_recv_buf_post (instance, wrid2void(wc[i].wr_id)); } } return (0); } static int recv_token_accept_destroy (struct totemiba_instance *instance) { if (instance->recv_token_accepted == 0) { return (0); } rdma_destroy_qp (instance->recv_token_cma_id); recv_token_recv_buf_post_destroy (instance); ibv_destroy_cq (instance->recv_token_send_cq); ibv_destroy_cq (instance->recv_token_recv_cq); ibv_destroy_comp_channel (instance->recv_token_send_completion_channel); ibv_destroy_comp_channel (instance->recv_token_recv_completion_channel); ibv_dealloc_pd (instance->recv_token_pd); rdma_destroy_id (instance->recv_token_cma_id); qb_loop_poll_del ( instance->totemiba_poll_handle, instance->recv_token_recv_completion_channel->fd); qb_loop_poll_del ( instance->totemiba_poll_handle, instance->recv_token_send_completion_channel->fd); return (0); } static int recv_token_accept_setup (struct totemiba_instance *instance) { struct ibv_qp_init_attr init_qp_attr; int res = 0; /* * Allocate the protection domain */ instance->recv_token_pd = ibv_alloc_pd (instance->recv_token_cma_id->verbs); /* * Create a completion channel */ instance->recv_token_recv_completion_channel = ibv_create_comp_channel (instance->recv_token_cma_id->verbs); if 
(instance->recv_token_recv_completion_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion channel"); return (-1); } /* * Create the completion queue */ instance->recv_token_recv_cq = ibv_create_cq (instance->recv_token_cma_id->verbs, COMPLETION_QUEUE_ENTRIES, instance, instance->recv_token_recv_completion_channel, 0); if (instance->recv_token_recv_cq == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion queue"); return (-1); } res = ibv_req_notify_cq (instance->recv_token_recv_cq, 0); if (res != 0) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't request notifications of the completion queue"); return (-1); } /* * Create a completion channel */ instance->recv_token_send_completion_channel = ibv_create_comp_channel (instance->recv_token_cma_id->verbs); if (instance->recv_token_send_completion_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion channel"); return (-1); } /* * Create the completion queue */ instance->recv_token_send_cq = ibv_create_cq (instance->recv_token_cma_id->verbs, COMPLETION_QUEUE_ENTRIES, instance, instance->recv_token_send_completion_channel, 0); if (instance->recv_token_send_cq == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion queue"); return (-1); } res = ibv_req_notify_cq (instance->recv_token_send_cq, 0); if (res != 0) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't request notifications of the completion queue"); return (-1); } memset (&init_qp_attr, 0, sizeof (struct ibv_qp_init_attr)); init_qp_attr.cap.max_send_wr = 50; init_qp_attr.cap.max_recv_wr = TOTAL_READ_POSTS; init_qp_attr.cap.max_send_sge = 1; init_qp_attr.cap.max_recv_sge = 1; init_qp_attr.qp_context = instance; init_qp_attr.sq_sig_all = 0; init_qp_attr.qp_type = IBV_QPT_UD; init_qp_attr.send_cq = instance->recv_token_send_cq; init_qp_attr.recv_cq = instance->recv_token_recv_cq; res = rdma_create_qp (instance->recv_token_cma_id, instance->recv_token_pd, &init_qp_attr); if (res != 0) { log_printf 
(LOGSYS_LEVEL_ERROR, "couldn't create queue pair"); return (-1); } recv_token_recv_buf_post_initial (instance); qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->recv_token_recv_completion_channel->fd, POLLIN, instance, recv_token_cq_recv_event_fn); qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->recv_token_send_completion_channel->fd, POLLIN, instance, recv_token_cq_send_event_fn); instance->recv_token_accepted = 1; return (res); }; static int recv_token_rdma_event_fn (int events, int suck, void *context) { struct totemiba_instance *instance = (struct totemiba_instance *)context; struct rdma_cm_event *event; struct rdma_conn_param conn_param; int res; res = rdma_get_cm_event (instance->listen_recv_token_channel, &event); if (res != 0) { return (0); } switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: recv_token_accept_destroy (instance); instance->recv_token_cma_id = event->id; recv_token_accept_setup (instance); memset (&conn_param, 0, sizeof (struct rdma_conn_param)); conn_param.qp_num = instance->recv_token_cma_id->qp->qp_num; res = rdma_accept (instance->recv_token_cma_id, &conn_param); break; default: log_printf (LOGSYS_LEVEL_ERROR, "default %d", event->event); break; } res = rdma_ack_cm_event (event); return (0); } static int send_token_cq_send_event_fn (int events, int suck, void *context) { struct totemiba_instance *instance = (struct totemiba_instance *)context; struct ibv_wc wc[32]; struct ibv_cq *ev_cq; void *ev_ctx; int res; int i; ibv_get_cq_event (instance->send_token_send_completion_channel, &ev_cq, &ev_ctx); ibv_ack_cq_events (ev_cq, 1); res = ibv_req_notify_cq (ev_cq, 0); res = ibv_poll_cq (instance->send_token_send_cq, 32, wc); if (res > 0) { for (i = 0; i < res; i++) { token_send_buf_put (instance, wrid2void(wc[i].wr_id)); } } return (0); } static int send_token_cq_recv_event_fn (int events, int suck, void *context) { struct totemiba_instance *instance = (struct totemiba_instance 
*)context; struct ibv_wc wc[32]; struct ibv_cq *ev_cq; void *ev_ctx; int res; int i; ibv_get_cq_event (instance->send_token_recv_completion_channel, &ev_cq, &ev_ctx); ibv_ack_cq_events (ev_cq, 1); res = ibv_req_notify_cq (ev_cq, 0); res = ibv_poll_cq (instance->send_token_recv_cq, 32, wc); if (res > 0) { for (i = 0; i < res; i++) { iba_deliver_fn (instance, wc[i].wr_id, wc[i].byte_len); } } return (0); } static int send_token_rdma_event_fn (int events, int suck, void *context) { struct totemiba_instance *instance = (struct totemiba_instance *)context; struct rdma_cm_event *event; struct rdma_conn_param conn_param; int res; res = rdma_get_cm_event (instance->send_token_channel, &event); if (res != 0) { return (0); } switch (event->event) { /* * occurs when we resolve the multicast address */ case RDMA_CM_EVENT_ADDR_RESOLVED: res = rdma_resolve_route (instance->send_token_cma_id, 2000); break; /* * occurs when the CM joins the multicast group */ case RDMA_CM_EVENT_ROUTE_RESOLVED: memset (&conn_param, 0, sizeof (struct rdma_conn_param)); conn_param.private_data = NULL; conn_param.private_data_len = 0; res = rdma_connect (instance->send_token_cma_id, &conn_param); break; case RDMA_CM_EVENT_ESTABLISHED: instance->send_token_qpn = event->param.ud.qp_num; instance->send_token_qkey = event->param.ud.qkey; instance->send_token_ah = ibv_create_ah (instance->send_token_pd, &event->param.ud.ah_attr); instance->totemiba_target_set_completed (instance->rrp_context); break; case RDMA_CM_EVENT_ADDR_ERROR: case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_MULTICAST_ERROR: log_printf (LOGSYS_LEVEL_ERROR, "send_token_rdma_event_fn multicast error"); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: break; case RDMA_CM_EVENT_UNREACHABLE: log_printf (LOGSYS_LEVEL_ERROR, "send_token_rdma_event_fn unreachable"); break; default: log_printf (LOGSYS_LEVEL_ERROR, "send_token_rdma_event_fn unknown event %d", event->event); break; } rdma_ack_cm_event (event); return (0); } static int send_token_bind 
(struct totemiba_instance *instance) { int res; struct ibv_qp_init_attr init_qp_attr; instance->send_token_channel = rdma_create_event_channel(); if (instance->send_token_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create rdma channel"); return (-1); } res = rdma_create_id (instance->send_token_channel, &instance->send_token_cma_id, NULL, RDMA_PS_UDP); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error creating send_token_cma_id"); return (-1); } res = rdma_bind_addr (instance->send_token_cma_id, &instance->send_token_bind_addr); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error doing rdma_bind_addr for send token"); return (-1); } /* * Resolve the send_token address into a GUID */ res = rdma_resolve_addr (instance->send_token_cma_id, &instance->bind_addr, &instance->token_addr, 2000); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error resolving send token address %d %d", res, errno); return (-1); } /* * Allocate the protection domain */ instance->send_token_pd = ibv_alloc_pd (instance->send_token_cma_id->verbs); /* * Create a completion channel */ instance->send_token_recv_completion_channel = ibv_create_comp_channel (instance->send_token_cma_id->verbs); if (instance->send_token_recv_completion_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion channel"); return (-1); } /* * Create the completion queue */ instance->send_token_recv_cq = ibv_create_cq (instance->send_token_cma_id->verbs, COMPLETION_QUEUE_ENTRIES, instance, instance->send_token_recv_completion_channel, 0); if (instance->send_token_recv_cq == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion queue"); return (-1); } res = ibv_req_notify_cq (instance->send_token_recv_cq, 0); if (res != 0) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't request notifications of the completion queue"); return (-1); } /* * Create a completion channel */ instance->send_token_send_completion_channel = ibv_create_comp_channel (instance->send_token_cma_id->verbs); if 
(instance->send_token_send_completion_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion channel"); return (-1); } /* * Create the completion queue */ instance->send_token_send_cq = ibv_create_cq ( instance->send_token_cma_id->verbs, COMPLETION_QUEUE_ENTRIES, instance, instance->send_token_send_completion_channel, 0); if (instance->send_token_send_cq == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion queue"); return (-1); } res = ibv_req_notify_cq (instance->send_token_send_cq, 0); if (res != 0) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't request notifications of the completion queue"); return (-1); } memset (&init_qp_attr, 0, sizeof (struct ibv_qp_init_attr)); init_qp_attr.cap.max_send_wr = 50; init_qp_attr.cap.max_recv_wr = TOTAL_READ_POSTS; init_qp_attr.cap.max_send_sge = 1; init_qp_attr.cap.max_recv_sge = 1; init_qp_attr.qp_context = instance; init_qp_attr.sq_sig_all = 0; init_qp_attr.qp_type = IBV_QPT_UD; init_qp_attr.send_cq = instance->send_token_send_cq; init_qp_attr.recv_cq = instance->send_token_recv_cq; res = rdma_create_qp (instance->send_token_cma_id, instance->send_token_pd, &init_qp_attr); if (res != 0) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create queue pair"); return (-1); } qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->send_token_recv_completion_channel->fd, POLLIN, instance, send_token_cq_recv_event_fn); qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->send_token_send_completion_channel->fd, POLLIN, instance, send_token_cq_send_event_fn); qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->send_token_channel->fd, POLLIN, instance, send_token_rdma_event_fn); instance->send_token_bound = 1; return (0); } static int send_token_unbind (struct totemiba_instance *instance) { if (instance->send_token_bound == 0) { return (0); } qb_loop_poll_del ( instance->totemiba_poll_handle, instance->send_token_recv_completion_channel->fd); 
qb_loop_poll_del ( instance->totemiba_poll_handle, instance->send_token_send_completion_channel->fd); qb_loop_poll_del ( instance->totemiba_poll_handle, instance->send_token_channel->fd); rdma_destroy_qp (instance->send_token_cma_id); ibv_destroy_cq (instance->send_token_send_cq); ibv_destroy_cq (instance->send_token_recv_cq); ibv_destroy_comp_channel (instance->send_token_send_completion_channel); ibv_destroy_comp_channel (instance->send_token_recv_completion_channel); token_send_buf_destroy (instance); ibv_dealloc_pd (instance->send_token_pd); rdma_destroy_id (instance->send_token_cma_id); rdma_destroy_event_channel (instance->send_token_channel); return (0); } static int recv_token_bind (struct totemiba_instance *instance) { int res; instance->listen_recv_token_channel = rdma_create_event_channel(); if (instance->listen_recv_token_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create rdma channel"); return (-1); } res = rdma_create_id (instance->listen_recv_token_channel, &instance->listen_recv_token_cma_id, NULL, RDMA_PS_UDP); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error creating recv_token_cma_id"); return (-1); } res = rdma_bind_addr (instance->listen_recv_token_cma_id, &instance->bind_addr); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error doing rdma_bind_addr for recv token"); return (-1); } /* * Resolve the recv_token address into a GUID */ res = rdma_listen (instance->listen_recv_token_cma_id, 10); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error listening %d %d", res, errno); return (-1); } qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->listen_recv_token_channel->fd, POLLIN, instance, recv_token_rdma_event_fn); return (0); } static int mcast_bind (struct totemiba_instance *instance) { int res; struct ibv_qp_init_attr init_qp_attr; instance->mcast_channel = rdma_create_event_channel(); if (instance->mcast_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create rdma channel"); return (-1); } res = 
rdma_create_id (instance->mcast_channel, &instance->mcast_cma_id, NULL, RDMA_PS_UDP); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error creating mcast_cma_id"); return (-1); } res = rdma_bind_addr (instance->mcast_cma_id, &instance->local_mcast_bind_addr); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error doing rdma_bind_addr for mcast"); return (-1); } /* * Resolve the multicast address into a GUID */ res = rdma_resolve_addr (instance->mcast_cma_id, &instance->local_mcast_bind_addr, &instance->mcast_addr, 5000); if (res) { log_printf (LOGSYS_LEVEL_ERROR, "error resolving multicast address %d %d", res, errno); return (-1); } /* * Allocate the protection domain */ instance->mcast_pd = ibv_alloc_pd (instance->mcast_cma_id->verbs); /* * Create a completion channel */ instance->mcast_recv_completion_channel = ibv_create_comp_channel (instance->mcast_cma_id->verbs); if (instance->mcast_recv_completion_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion channel"); return (-1); } /* * Create the completion queue */ instance->mcast_recv_cq = ibv_create_cq (instance->mcast_cma_id->verbs, COMPLETION_QUEUE_ENTRIES, instance, instance->mcast_recv_completion_channel, 0); if (instance->mcast_recv_cq == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion queue"); return (-1); } res = ibv_req_notify_cq (instance->mcast_recv_cq, 0); if (res != 0) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't request notifications of the completion queue"); return (-1); } /* * Create a completion channel */ instance->mcast_send_completion_channel = ibv_create_comp_channel (instance->mcast_cma_id->verbs); if (instance->mcast_send_completion_channel == NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion channel"); return (-1); } /* * Create the completion queue */ instance->mcast_send_cq = ibv_create_cq (instance->mcast_cma_id->verbs, COMPLETION_QUEUE_ENTRIES, instance, instance->mcast_send_completion_channel, 0); if (instance->mcast_send_cq == 
NULL) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create completion queue"); return (-1); } res = ibv_req_notify_cq (instance->mcast_send_cq, 0); if (res != 0) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't request notifications of the completion queue"); return (-1); } memset (&init_qp_attr, 0, sizeof (struct ibv_qp_init_attr)); init_qp_attr.cap.max_send_wr = 50; init_qp_attr.cap.max_recv_wr = TOTAL_READ_POSTS; init_qp_attr.cap.max_send_sge = 1; init_qp_attr.cap.max_recv_sge = 1; init_qp_attr.qp_context = instance; init_qp_attr.sq_sig_all = 0; init_qp_attr.qp_type = IBV_QPT_UD; init_qp_attr.send_cq = instance->mcast_send_cq; init_qp_attr.recv_cq = instance->mcast_recv_cq; res = rdma_create_qp (instance->mcast_cma_id, instance->mcast_pd, &init_qp_attr); if (res != 0) { log_printf (LOGSYS_LEVEL_ERROR, "couldn't create queue pair"); return (-1); } mcast_recv_buf_post_initial (instance); qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->mcast_recv_completion_channel->fd, POLLIN, instance, mcast_cq_recv_event_fn); qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->mcast_send_completion_channel->fd, POLLIN, instance, mcast_cq_send_event_fn); qb_loop_poll_add ( instance->totemiba_poll_handle, QB_LOOP_MED, instance->mcast_channel->fd, POLLIN, instance, mcast_rdma_event_fn); return (0); } static void timer_function_netif_check_timeout ( void *data) { struct totemiba_instance *instance = (struct totemiba_instance *)data; int res; int interface_up; int interface_num; int addr_len; totemip_iface_check (&instance->totem_interface->bindnet, &instance->totem_interface->boundto, &interface_up, &interface_num, instance->totem_config->clear_node_high_bit); totemip_totemip_to_sockaddr_convert(&instance->totem_interface->boundto, instance->totem_interface->ip_port, (struct sockaddr_storage *)&instance->bind_addr, &addr_len); totemip_totemip_to_sockaddr_convert(&instance->totem_interface->boundto, 0, (struct sockaddr_storage 
*)&instance->send_token_bind_addr, &addr_len); totemip_totemip_to_sockaddr_convert(&instance->totem_interface->boundto, 0, (struct sockaddr_storage *)&instance->local_mcast_bind_addr, &addr_len); totemip_totemip_to_sockaddr_convert(&instance->totem_interface->boundto, instance->totem_interface->ip_port, (struct sockaddr_storage *)&instance->my_id, &addr_len); totemip_sockaddr_to_totemip_convert( (const struct sockaddr_storage *)&instance->bind_addr, &instance->my_id); memcpy (&instance->my_id, &instance->totem_interface->boundto, sizeof (struct totem_ip_address)); totemip_totemip_to_sockaddr_convert(&instance->totem_interface->mcast_addr, instance->totem_interface->ip_port, (struct sockaddr_storage *)&instance->mcast_addr, &addr_len); res = recv_token_bind (instance); res = mcast_bind (instance); } int totemiba_crypto_set ( void *iba_context, - unsigned int type) + const char *cipher_type, + const char *hash_type) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; instance = NULL; return (res); } int totemiba_finalize ( void *iba_context) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; instance = NULL; return (res); } /* * Create an instance */ int totemiba_initialize ( qb_loop_t *qb_poll_handle, void **iba_context, struct totem_config *totem_config, int interface_no, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_address), void (*target_set_completed) ( void *context)) { struct totemiba_instance *instance; int res = 0; instance = malloc (sizeof (struct totemiba_instance)); if (instance == NULL) { return (-1); } totemiba_instance_initialize (instance); instance->totem_interface = &totem_config->interfaces[interface_no]; instance->totemiba_poll_handle = qb_poll_handle; instance->totem_interface->bindnet.nodeid = totem_config->node_id; 
instance->totemiba_deliver_fn = deliver_fn; instance->totemiba_target_set_completed = target_set_completed; instance->totemiba_iface_change_fn = iface_change_fn; instance->totem_config = totem_config; instance->rrp_context = context; qb_loop_timer_add (instance->totemiba_poll_handle, QB_LOOP_MED, 100*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_netif_check_timeout, &instance->timer_netif_check_timeout); instance->totemiba_subsys_id = totem_config->totem_logging_configuration.log_subsys_id; instance->totemiba_log_printf = totem_config->totem_logging_configuration.log_printf; *iba_context = instance; return (res); } void *totemiba_buffer_alloc (void) { return malloc (MAX_MTU_SIZE); } void totemiba_buffer_release (void *ptr) { return free (ptr); } int totemiba_processor_count_set ( void *iba_context, int processor_count) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; instance = NULL; return (res); } int totemiba_recv_flush (void *iba_context) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; instance = NULL; return (res); } int totemiba_send_flush (void *iba_context) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; instance = NULL; return (res); } int totemiba_token_send ( void *iba_context, const void *ms, unsigned int msg_len) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; struct ibv_send_wr send_wr, *failed_send_wr; struct ibv_sge sge; void *msg; struct send_buf *send_buf; send_buf = token_send_buf_get (instance); if (send_buf == NULL) { return (-1); } msg = send_buf->buffer; memcpy (msg, ms, msg_len); send_wr.next = NULL; send_wr.sg_list = &sge; send_wr.num_sge = 1; send_wr.opcode = IBV_WR_SEND; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr_id = void2wrid(send_buf); send_wr.imm_data = 0; send_wr.wr.ud.ah = instance->send_token_ah; send_wr.wr.ud.remote_qpn = 
instance->send_token_qpn; send_wr.wr.ud.remote_qkey = instance->send_token_qkey; sge.length = msg_len; sge.lkey = send_buf->mr->lkey; sge.addr = (uintptr_t)msg; res = ibv_post_send (instance->send_token_cma_id->qp, &send_wr, &failed_send_wr); return (res); } int totemiba_mcast_flush_send ( void *iba_context, const void *ms, unsigned int msg_len) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; struct ibv_send_wr send_wr, *failed_send_wr; struct ibv_sge sge; void *msg; struct send_buf *send_buf; send_buf = mcast_send_buf_get (instance); if (send_buf == NULL) { return (-1); } msg = send_buf->buffer; memcpy (msg, ms, msg_len); send_wr.next = NULL; send_wr.sg_list = &sge; send_wr.num_sge = 1; send_wr.opcode = IBV_WR_SEND; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr_id = void2wrid(send_buf); send_wr.imm_data = 0; send_wr.wr.ud.ah = instance->mcast_ah; send_wr.wr.ud.remote_qpn = instance->mcast_qpn; send_wr.wr.ud.remote_qkey = instance->mcast_qkey; sge.length = msg_len; sge.lkey = send_buf->mr->lkey; sge.addr = (uintptr_t)msg; res = ibv_post_send (instance->mcast_cma_id->qp, &send_wr, &failed_send_wr); return (res); } int totemiba_mcast_noflush_send ( void *iba_context, const void *ms, unsigned int msg_len) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; struct ibv_send_wr send_wr, *failed_send_wr; struct ibv_sge sge; void *msg; struct send_buf *send_buf; send_buf = mcast_send_buf_get (instance); if (send_buf == NULL) { return (-1); } msg = send_buf->buffer; memcpy (msg, ms, msg_len); send_wr.next = NULL; send_wr.sg_list = &sge; send_wr.num_sge = 1; send_wr.opcode = IBV_WR_SEND; send_wr.send_flags = IBV_SEND_SIGNALED; send_wr.wr_id = void2wrid(send_buf); send_wr.imm_data = 0; send_wr.wr.ud.ah = instance->mcast_ah; send_wr.wr.ud.remote_qpn = instance->mcast_qpn; send_wr.wr.ud.remote_qkey = instance->mcast_qkey; sge.length = msg_len; sge.lkey = send_buf->mr->lkey; sge.addr = 
(uintptr_t)msg; res = ibv_post_send (instance->mcast_cma_id->qp, &send_wr, &failed_send_wr); return (res); } extern int totemiba_iface_check (void *iba_context) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; instance = NULL; return (res); } extern void totemiba_net_mtu_adjust (void *iba_context, struct totem_config *totem_config) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; instance = NULL; } const char *totemiba_iface_print (void *iba_context) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; const char *ret_char; ret_char = totemip_print (&instance->my_id); return (ret_char); } int totemiba_iface_get ( void *iba_context, struct totem_ip_address *addr) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; memcpy (addr, &instance->my_id, sizeof (struct totem_ip_address)); return (res); } int totemiba_token_target_set ( void *iba_context, const struct totem_ip_address *token_target) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; int addr_len = 16; totemip_totemip_to_sockaddr_convert((struct totem_ip_address *)token_target, instance->totem_interface->ip_port, (struct sockaddr_storage *)&instance->token_addr, &addr_len); res = send_token_unbind (instance); res = send_token_bind (instance); return (res); } extern int totemiba_recv_mcast_empty ( void *iba_context) { struct totemiba_instance *instance = (struct totemiba_instance *)iba_context; int res = 0; instance = NULL; return (res); } diff --git a/exec/totemiba.h b/exec/totemiba.h index 4b6e09db..de19756e 100644 --- a/exec/totemiba.h +++ b/exec/totemiba.h @@ -1,116 +1,117 @@ /* * Copyright (c) 2009-2011 Red Hat, Inc. * * All rights reserved. 
* * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef TOTEMIBA_H_DEFINED #define TOTEMIBA_H_DEFINED #include #include #include #include #include /** * Create an instance */ extern int totemiba_initialize ( qb_loop_t* qb_poll_handle, void **iba_handle, struct totem_config *totem_config, int interface_no, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_address), void (*target_set_completed) ( void *context)); extern void *totemiba_buffer_alloc (void); extern void totemiba_buffer_release (void *ptr); extern int totemiba_processor_count_set ( void *iba_context, int processor_count); extern int totemiba_token_send ( void *iba_context, const void *msg, unsigned int msg_len); extern int totemiba_mcast_flush_send ( void *iba_context, const void *msg, unsigned int msg_len); extern int totemiba_mcast_noflush_send ( void *iba_context, const void *msg, unsigned int msg_len); extern int totemiba_recv_flush (void *iba_context); extern int totemiba_send_flush (void *iba_context); extern int totemiba_iface_check (void *iba_context); extern int totemiba_finalize (void *iba_context); extern void totemiba_net_mtu_adjust (void *iba_context, struct totem_config *totem_config); extern const char *totemiba_iface_print (void *iba_context); extern int totemiba_iface_get ( void *iba_context, struct totem_ip_address *addr); extern int totemiba_token_target_set ( void *iba_context, const struct totem_ip_address *token_target); extern int totemiba_crypto_set ( void *iba_context, - unsigned int type); + const char *cipher_type, + const char *hash_type); extern int totemiba_recv_mcast_empty ( void *iba_context); #endif /* TOTEMIBA_H_DEFINED */ diff --git a/exec/totemmrp.c b/exec/totemmrp.c index df5caaf6..f7763fd8 100644 --- a/exec/totemmrp.c +++ b/exec/totemmrp.c @@ -1,274 +1,276 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2007, 2009 Red Hat, Inc. * * All rights reserved. 
* * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "totemmrp.h" #include "totemsrp.h" void *totemsrp_context; void totemmrp_deliver_fn ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required); void totemmrp_confchg_fn ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id); void (*pg_deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required) = 0; void (*pg_confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id) = 0; void totemmrp_deliver_fn ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required) { pg_deliver_fn (nodeid, msg, msg_len, endian_conversion_required); } void totemmrp_confchg_fn ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id) { pg_confchg_fn (configuration_type, member_list, member_list_entries, left_list, left_list_entries, joined_list, joined_list_entries, ring_id); } /* * Initialize the totem multiple ring protocol */ int totemmrp_initialize ( qb_loop_t *poll_handle, struct totem_config *totem_config, totempg_stats_t *stats, void (*deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required), 
void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)) { int result; pg_deliver_fn = deliver_fn; pg_confchg_fn = confchg_fn; stats->mrp = calloc (sizeof(totemmrp_stats_t), 1); result = totemsrp_initialize ( poll_handle, &totemsrp_context, totem_config, stats->mrp, totemmrp_deliver_fn, totemmrp_confchg_fn); return (result); } void totemmrp_finalize (void) { totemsrp_finalize (totemsrp_context); } /* * Multicast a message */ int totemmrp_mcast ( struct iovec *iovec, unsigned int iov_len, int priority) { return totemsrp_mcast (totemsrp_context, iovec, iov_len, priority); } /* * Return number of available messages that can be queued */ int totemmrp_avail (void) { return (totemsrp_avail (totemsrp_context)); } int totemmrp_callback_token_create ( void **handle_out, enum totem_callback_token_type type, int delete, int (*callback_fn) (enum totem_callback_token_type type, const void *), const void *data) { return totemsrp_callback_token_create (totemsrp_context, handle_out, type, delete, callback_fn, data); } void totemmrp_callback_token_destroy ( void *handle_out) { totemsrp_callback_token_destroy (totemsrp_context, handle_out); } void totemmrp_event_signal (enum totem_event_type type, int value) { totemsrp_event_signal (totemsrp_context, type, value); } int totemmrp_ifaces_get ( unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count) { int res; res = totemsrp_ifaces_get ( totemsrp_context, nodeid, interfaces, status, iface_count); return (res); } int totemmrp_crypto_set ( - unsigned int type) + const char *cipher_type, + const char *hash_type) { return totemsrp_crypto_set (totemsrp_context, - type); + cipher_type, + hash_type); } unsigned int totemmrp_my_nodeid_get (void) { return 
(totemsrp_my_nodeid_get (totemsrp_context)); } int totemmrp_my_family_get (void) { return (totemsrp_my_family_get (totemsrp_context)); } extern int totemmrp_ring_reenable (void) { int res; res = totemsrp_ring_reenable ( totemsrp_context); return (res); } extern void totemmrp_service_ready_register ( void (*totem_service_ready) (void)) { totemsrp_service_ready_register ( totemsrp_context, totem_service_ready); } int totemmrp_member_add ( const struct totem_ip_address *member, int ring_no) { int res; res = totemsrp_member_add (totemsrp_context, member, ring_no); return (res); } int totemmrp_member_remove ( const struct totem_ip_address *member, int ring_no) { int res; res = totemsrp_member_remove (totemsrp_context, member, ring_no); return (res); } void totemmrp_threaded_mode_enable (void) { totemsrp_threaded_mode_enable (totemsrp_context); } diff --git a/exec/totemmrp.h b/exec/totemmrp.h index bb26f2a9..12cc1a8d 100644 --- a/exec/totemmrp.h +++ b/exec/totemmrp.h @@ -1,133 +1,133 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2011 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file * Totem Single Ring Protocol * * depends on poll abstraction, POSIX, IPV4 */ #ifndef TOTEMMRP_H_DEFINED #define TOTEMMRP_H_DEFINED #include /** * Initialize the logger */ extern void totemmrp_log_printf_init ( void (*log_printf) (int , char *, ...), int log_level_security, int log_level_error, int log_level_warning, int log_level_notice, int log_level_debug); /** * Initialize the group messaging interface */ extern int totemmrp_initialize ( qb_loop_t *poll_handle, struct totem_config *totem_config, totempg_stats_t *stats, void (*deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required), void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)); extern void totemmrp_finalize (void); /** * Multicast a message */ extern int totemmrp_mcast ( struct iovec *iovec, unsigned int iov_len, int priority); /** * Return number of available messages that can be queued */ extern int totemmrp_avail (void); extern int 
totemmrp_callback_token_create ( void **handle_out, enum totem_callback_token_type type, int delete, int (*callback_fn) (enum totem_callback_token_type type, const void *), const void *data); extern void totemmrp_callback_token_destroy ( void *handle_out); extern void totemmrp_event_signal (enum totem_event_type type, int value); extern int totemmrp_ifaces_get ( unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count); extern unsigned int totemmrp_my_nodeid_get (void); extern int totemmrp_my_family_get (void); -extern int totemmrp_crypto_set (unsigned int); +extern int totemmrp_crypto_set (const char *cipher_type, const char *hash_type); extern int totemmrp_ring_reenable (void); extern void totemmrp_service_ready_register ( void (*totem_service_ready) (void)); extern int totemmrp_member_add ( const struct totem_ip_address *member, int ring_no); extern int totemmrp_member_remove ( const struct totem_ip_address *member, int ring_no); void totemmrp_threaded_mode_enable (void); #endif /* TOTEMMRP_H_DEFINED */ diff --git a/exec/totemnet.c b/exec/totemnet.c index 4c187886..fd7c76e9 100644 --- a/exec/totemnet.c +++ b/exec/totemnet.c @@ -1,489 +1,492 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2012 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. 
nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #ifdef HAVE_RDMA #include #endif #include #include #include #include #define LOGSYS_UTILS_ONLY 1 #include struct transport { const char *name; int (*initialize) ( qb_loop_t *loop_pt, void **transport_instance, struct totem_config *totem_config, int interface_no, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_address), void (*target_set_completed) ( void *context)); void *(*buffer_alloc) (void); void (*buffer_release) (void *ptr); int (*processor_count_set) ( void *transport_context, int processor_count); int (*token_send) ( void *transport_context, const void *msg, unsigned int msg_len); int (*mcast_flush_send) ( void *transport_context, const void *msg, unsigned int msg_len); int (*mcast_noflush_send) ( void *transport_context, const void *msg, unsigned int msg_len); int (*recv_flush) (void *transport_context); int (*send_flush) (void *transport_context); int (*iface_check) (void *transport_context); int 
(*finalize) (void *transport_context); void (*net_mtu_adjust) (void *transport_context, struct totem_config *totem_config); const char *(*iface_print) (void *transport_context); int (*iface_get) ( void *transport_context, struct totem_ip_address *addr); int (*token_target_set) ( void *transport_context, const struct totem_ip_address *token_target); int (*crypto_set) ( void *transport_context, - unsigned int type); + const char *cipher_type, + const char *hash_type); int (*recv_mcast_empty) ( void *transport_context); int (*member_add) ( void *transport_context, const struct totem_ip_address *member); int (*member_remove) ( void *transport_context, const struct totem_ip_address *member); }; struct transport transport_entries[] = { { .name = "UDP/IP Multicast", .initialize = totemudp_initialize, .buffer_alloc = totemudp_buffer_alloc, .buffer_release = totemudp_buffer_release, .processor_count_set = totemudp_processor_count_set, .token_send = totemudp_token_send, .mcast_flush_send = totemudp_mcast_flush_send, .mcast_noflush_send = totemudp_mcast_noflush_send, .recv_flush = totemudp_recv_flush, .send_flush = totemudp_send_flush, .iface_check = totemudp_iface_check, .finalize = totemudp_finalize, .net_mtu_adjust = totemudp_net_mtu_adjust, .iface_print = totemudp_iface_print, .iface_get = totemudp_iface_get, .token_target_set = totemudp_token_target_set, .crypto_set = totemudp_crypto_set, .recv_mcast_empty = totemudp_recv_mcast_empty }, { .name = "UDP/IP Unicast", .initialize = totemudpu_initialize, .buffer_alloc = totemudpu_buffer_alloc, .buffer_release = totemudpu_buffer_release, .processor_count_set = totemudpu_processor_count_set, .token_send = totemudpu_token_send, .mcast_flush_send = totemudpu_mcast_flush_send, .mcast_noflush_send = totemudpu_mcast_noflush_send, .recv_flush = totemudpu_recv_flush, .send_flush = totemudpu_send_flush, .iface_check = totemudpu_iface_check, .finalize = totemudpu_finalize, .net_mtu_adjust = totemudpu_net_mtu_adjust, .iface_print = 
totemudpu_iface_print, .iface_get = totemudpu_iface_get, .token_target_set = totemudpu_token_target_set, .crypto_set = totemudpu_crypto_set, .recv_mcast_empty = totemudpu_recv_mcast_empty, .member_add = totemudpu_member_add, .member_remove = totemudpu_member_remove }, #ifdef HAVE_RDMA { .name = "Infiniband/IP", .initialize = totemiba_initialize, .buffer_alloc = totemiba_buffer_alloc, .buffer_release = totemiba_buffer_release, .processor_count_set = totemiba_processor_count_set, .token_send = totemiba_token_send, .mcast_flush_send = totemiba_mcast_flush_send, .mcast_noflush_send = totemiba_mcast_noflush_send, .recv_flush = totemiba_recv_flush, .send_flush = totemiba_send_flush, .iface_check = totemiba_iface_check, .finalize = totemiba_finalize, .net_mtu_adjust = totemiba_net_mtu_adjust, .iface_print = totemiba_iface_print, .iface_get = totemiba_iface_get, .token_target_set = totemiba_token_target_set, .crypto_set = totemiba_crypto_set, .recv_mcast_empty = totemiba_recv_mcast_empty } #endif }; struct totemnet_instance { void *transport_context; struct transport *transport; void (*totemnet_log_printf) ( int level, int subsys, const char *function, const char *file, int line, const char *format, ...)__attribute__((format(printf, 6, 7))); int totemnet_subsys_id; }; #define log_printf(level, format, args...) 
\ do { \ instance->totemnet_log_printf ( \ level, \ instance->totemnet_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ (const char *)format, ##args); \ } while (0); static void totemnet_instance_initialize ( struct totemnet_instance *instance, struct totem_config *config) { int transport; instance->totemnet_log_printf = config->totem_logging_configuration.log_printf; instance->totemnet_subsys_id = config->totem_logging_configuration.log_subsys_id; transport = config->transport_number; log_printf (LOGSYS_LEVEL_NOTICE, "Initializing transport (%s).", transport_entries[transport].name); instance->transport = &transport_entries[transport]; } int totemnet_crypto_set ( void *net_context, - unsigned int type) + const char *cipher_type, + const char *hash_type) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; - res = instance->transport->crypto_set (instance->transport_context, type); + res = instance->transport->crypto_set (instance->transport_context, + cipher_type, hash_type); return res; } int totemnet_finalize ( void *net_context) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; res = instance->transport->finalize (instance->transport_context); return (res); } int totemnet_initialize ( qb_loop_t *loop_pt, void **net_context, struct totem_config *totem_config, int interface_no, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_address), void (*target_set_completed) ( void *context)) { struct totemnet_instance *instance; unsigned int res; instance = malloc (sizeof (struct totemnet_instance)); if (instance == NULL) { return (-1); } totemnet_instance_initialize (instance, totem_config); res = instance->transport->initialize (loop_pt, &instance->transport_context, totem_config, interface_no, context, deliver_fn, iface_change_fn, target_set_completed); if (res == -1) { 
goto error_destroy; } *net_context = instance; return (0); error_destroy: free (instance); return (-1); } void *totemnet_buffer_alloc (void *net_context) { struct totemnet_instance *instance = net_context; assert (instance != NULL); assert (instance->transport != NULL); return instance->transport->buffer_alloc(); } void totemnet_buffer_release (void *net_context, void *ptr) { struct totemnet_instance *instance = net_context; assert (instance != NULL); assert (instance->transport != NULL); instance->transport->buffer_release (ptr); } int totemnet_processor_count_set ( void *net_context, int processor_count) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; res = instance->transport->processor_count_set (instance->transport_context, processor_count); return (res); } int totemnet_recv_flush (void *net_context) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; res = instance->transport->recv_flush (instance->transport_context); return (res); } int totemnet_send_flush (void *net_context) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; res = instance->transport->send_flush (instance->transport_context); return (res); } int totemnet_token_send ( void *net_context, const void *msg, unsigned int msg_len) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; res = instance->transport->token_send (instance->transport_context, msg, msg_len); return (res); } int totemnet_mcast_flush_send ( void *net_context, const void *msg, unsigned int msg_len) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; res = instance->transport->mcast_flush_send (instance->transport_context, msg, msg_len); return (res); } int totemnet_mcast_noflush_send ( void *net_context, const void *msg, unsigned int msg_len) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; 
int res = 0; res = instance->transport->mcast_noflush_send (instance->transport_context, msg, msg_len); return (res); } extern int totemnet_iface_check (void *net_context) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; res = instance->transport->iface_check (instance->transport_context); return (res); } extern int totemnet_net_mtu_adjust (void *net_context, struct totem_config *totem_config) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; int res = 0; instance->transport->net_mtu_adjust (instance->transport_context, totem_config); return (res); } const char *totemnet_iface_print (void *net_context) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; const char *ret_char; ret_char = instance->transport->iface_print (instance->transport_context); return (ret_char); } int totemnet_iface_get ( void *net_context, struct totem_ip_address *addr) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; unsigned int res; res = instance->transport->iface_get (instance->transport_context, addr); return (res); } int totemnet_token_target_set ( void *net_context, const struct totem_ip_address *token_target) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; unsigned int res; res = instance->transport->token_target_set (instance->transport_context, token_target); return (res); } extern int totemnet_recv_mcast_empty ( void *net_context) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; unsigned int res; res = instance->transport->recv_mcast_empty (instance->transport_context); return (res); } extern int totemnet_member_add ( void *net_context, const struct totem_ip_address *member) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; unsigned int res = 0; if (instance->transport->member_add) { res = instance->transport->member_add ( instance->transport_context, 
member); } return (res); } extern int totemnet_member_remove ( void *net_context, const struct totem_ip_address *member) { struct totemnet_instance *instance = (struct totemnet_instance *)net_context; unsigned int res = 0; if (instance->transport->member_remove) { res = instance->transport->member_remove ( instance->transport_context, member); } return (res); } diff --git a/exec/totemnet.h b/exec/totemnet.h index 799091ea..232c5cf1 100644 --- a/exec/totemnet.h +++ b/exec/totemnet.h @@ -1,134 +1,135 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2007, 2009 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file * Totem Network interface - also does encryption/decryption * * depends on poll abstraction, POSIX, IPV4 */ #ifndef TOTEMNET_H_DEFINED #define TOTEMNET_H_DEFINED #include #include #include #define TOTEMNET_NOFLUSH 0 #define TOTEMNET_FLUSH 1 /** * Create an instance */ extern int totemnet_initialize ( qb_loop_t *poll_handle, void **net_context, struct totem_config *totem_config, int interface_no, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_address), void (*target_set_completed) ( void *context)); extern void *totemnet_buffer_alloc (void *net_context); extern void totemnet_buffer_release (void *net_context, void *ptr); extern int totemnet_processor_count_set ( void *net_context, int processor_count); extern int totemnet_token_send ( void *net_context, const void *msg, unsigned int msg_len); extern int totemnet_mcast_flush_send ( void *net_context, const void *msg, unsigned int msg_len); extern int totemnet_mcast_noflush_send ( void *net_context, const void *msg, unsigned int msg_len); extern int totemnet_recv_flush (void *net_context); extern int totemnet_send_flush (void *net_context); extern int totemnet_iface_check (void *net_context); extern int totemnet_finalize (void *net_context); extern int totemnet_net_mtu_adjust (void *net_context, struct totem_config *totem_config); extern const char 
*totemnet_iface_print (void *net_context); extern int totemnet_iface_get ( void *net_context, struct totem_ip_address *addr); extern int totemnet_token_target_set ( void *net_context, const struct totem_ip_address *token_target); extern int totemnet_crypto_set ( void *net_context, - unsigned int type); + const char *cipher_type, + const char *hash_type); extern int totemnet_recv_mcast_empty ( void *net_context); extern int totemnet_member_add ( void *net_context, const struct totem_ip_address *member); extern int totemnet_member_remove ( void *net_context, const struct totem_ip_address *member); #endif /* TOTEMNET_H_DEFINED */ diff --git a/exec/totempg.c b/exec/totempg.c index 70dfba54..94f00cd8 100644 --- a/exec/totempg.c +++ b/exec/totempg.c @@ -1,1470 +1,1470 @@ /* * Copyright (c) 2003-2005 MontaVista Software, Inc. * Copyright (c) 2005 OSDL. * Copyright (c) 2006-2012 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * Author: Mark Haverkamp (markh@osdl.org) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * FRAGMENTATION AND PACKING ALGORITHM: * * Assemble the entire message into one buffer * if full fragment * store fragment into lengths list * for each full fragment * multicast fragment * set length and fragment fields of pg message * store remaining multicast into head of fragmentation data and set lens field * * If a message exceeds the maximum packet size allowed by the totem * single ring protocol, the protocol could lose forward progress. * Statically calculating the allowed data amount doesn't work because * the amount of data allowed depends on the number of fragments in * each message. In this implementation, the maximum fragment size * is dynamically calculated for each fragment added to the message. * It is possible for a message to be two bytes short of the maximum * packet size. This occurs when a message or collection of * messages + the mcast header + the lens are two bytes short of the * end of the packet. Since another len field consumes two bytes, the * len field would consume the rest of the packet without room for data. * * One optimization would be to forgo the final len field and determine * it from the size of the udp datagram. Then this condition would no * longer occur. 
*/ /* * ASSEMBLY AND UNPACKING ALGORITHM: * * copy incoming packet into assembly data buffer indexed by current * location of end of fragment * * if not fragmented * deliver all messages in assembly data buffer * else * if msg_count > 1 and fragmented * deliver all messages except last message in assembly data buffer * copy last fragmented section to start of assembly data buffer * else * if msg_count = 1 and fragmented * do nothing * */ #include #ifdef HAVE_ALLOCA_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LOGSYS_UTILS_ONLY 1 #include #include "totemmrp.h" #include "totemsrp.h" #define min(a,b) ((a) < (b)) ? a : b struct totempg_mcast_header { short version; short type; }; #if !(defined(__i386__) || defined(__x86_64__)) /* * Need align on architectures different then i386 or x86_64 */ #define TOTEMPG_NEED_ALIGN 1 #endif /* * totempg_mcast structure * * header: Identify the mcast. * fragmented: Set if this message continues into next message * continuation: Set if this message is a continuation from last message * msg_count Indicates how many packed messages are contained * in the mcast. * Also, the size of each packed message and the messages themselves are * appended to the end of this structure when sent. 
*/ struct totempg_mcast { struct totempg_mcast_header header; unsigned char fragmented; unsigned char continuation; unsigned short msg_count; /* * short msg_len[msg_count]; */ /* * data for messages */ }; /* * Maximum packet size for totem pg messages */ #define TOTEMPG_PACKET_SIZE (totempg_totem_config->net_mtu - \ sizeof (struct totempg_mcast)) /* * Local variables used for packing small messages */ static unsigned short mcast_packed_msg_lens[FRAME_SIZE_MAX]; static int mcast_packed_msg_count = 0; static int totempg_reserved = 1; static unsigned int totempg_size_limit; static totem_queue_level_changed_fn totem_queue_level_changed = NULL; static uint32_t totempg_threaded_mode = 0; /* * Function and data used to log messages */ static int totempg_log_level_security; static int totempg_log_level_error; static int totempg_log_level_warning; static int totempg_log_level_notice; static int totempg_log_level_debug; static int totempg_subsys_id; static void (*totempg_log_printf) ( int level, int subsys, const char *function, const char *file, int line, const char *format, ...) __attribute__((format(printf, 6, 7))); struct totem_config *totempg_totem_config; static totempg_stats_t totempg_stats; enum throw_away_mode { THROW_AWAY_INACTIVE, THROW_AWAY_ACTIVE }; struct assembly { unsigned int nodeid; unsigned char data[MESSAGE_SIZE_MAX]; int index; unsigned char last_frag_num; enum throw_away_mode throw_away_mode; struct list_head list; }; static void assembly_deref (struct assembly *assembly); static int callback_token_received_fn (enum totem_callback_token_type type, const void *data); DECLARE_LIST_INIT(assembly_list_inuse); DECLARE_LIST_INIT(assembly_list_free); DECLARE_LIST_INIT(totempg_groups_list); /* * Staging buffer for packed messages. Messages are staged in this buffer * before sending. Multiple messages may fit which cuts down on the * number of mcasts sent. 
If a message doesn't completely fit, then * the mcast header has a fragment bit set that says that there are more * data to follow. fragment_size is an index into the buffer. It indicates * the size of message data and where to place new message data. * fragment_continuation indicates whether the first packed message in * the buffer is a continuation of a previously packed fragment. */ static unsigned char *fragmentation_data; static int fragment_size = 0; static int fragment_continuation = 0; static struct iovec iov_delv; struct totempg_group_instance { void (*deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required); void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id); struct totempg_group *groups; int groups_cnt; int32_t q_level; struct list_head list; }; static unsigned char next_fragment = 1; static pthread_mutex_t totempg_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t callback_token_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t mcast_msg_mutex = PTHREAD_MUTEX_INITIALIZER; #define log_printf(level, format, args...) 
\ do { \ totempg_log_printf(level, \ totempg_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ format, ##args); \ } while (0); static int msg_count_send_ok (int msg_count); static int byte_count_send_ok (int byte_count); static struct assembly *assembly_ref (unsigned int nodeid) { struct assembly *assembly; struct list_head *list; /* * Search inuse list for node id and return assembly buffer if found */ for (list = assembly_list_inuse.next; list != &assembly_list_inuse; list = list->next) { assembly = list_entry (list, struct assembly, list); if (nodeid == assembly->nodeid) { return (assembly); } } /* * Nothing found in inuse list get one from free list if available */ if (list_empty (&assembly_list_free) == 0) { assembly = list_entry (assembly_list_free.next, struct assembly, list); list_del (&assembly->list); list_add (&assembly->list, &assembly_list_inuse); assembly->nodeid = nodeid; assembly->index = 0; assembly->last_frag_num = 0; assembly->throw_away_mode = THROW_AWAY_INACTIVE; return (assembly); } /* * Nothing available in inuse or free list, so allocate a new one */ assembly = malloc (sizeof (struct assembly)); /* * TODO handle memory allocation failure here */ assert (assembly); assembly->nodeid = nodeid; assembly->data[0] = 0; assembly->index = 0; assembly->last_frag_num = 0; assembly->throw_away_mode = THROW_AWAY_INACTIVE; list_init (&assembly->list); list_add (&assembly->list, &assembly_list_inuse); return (assembly); } static void assembly_deref (struct assembly *assembly) { list_del (&assembly->list); list_add (&assembly->list, &assembly_list_free); } static inline void app_confchg_fn ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id) { int i; struct totempg_group_instance *instance; struct assembly *assembly; struct list_head *list; /* * 
For every leaving processor, add to free list * This also has the side effect of clearing out the dataset * In the leaving processor's assembly buffer. */ for (i = 0; i < left_list_entries; i++) { assembly = assembly_ref (left_list[i]); list_del (&assembly->list); list_add (&assembly->list, &assembly_list_free); } for (list = totempg_groups_list.next; list != &totempg_groups_list; list = list->next) { instance = list_entry (list, struct totempg_group_instance, list); if (instance->confchg_fn) { instance->confchg_fn ( configuration_type, member_list, member_list_entries, left_list, left_list_entries, joined_list, joined_list_entries, ring_id); } } } static inline void group_endian_convert ( void *msg, int msg_len) { unsigned short *group_len; int i; char *aligned_msg; #ifdef TOTEMPG_NEED_ALIGN /* * Align data structure for not i386 or x86_64 */ if ((size_t)msg % 4 != 0) { aligned_msg = alloca(msg_len); memcpy(aligned_msg, msg, msg_len); } else { aligned_msg = msg; } #else aligned_msg = msg; #endif group_len = (unsigned short *)aligned_msg; group_len[0] = swab16(group_len[0]); for (i = 1; i < group_len[0] + 1; i++) { group_len[i] = swab16(group_len[i]); } if (aligned_msg != msg) { memcpy(msg, aligned_msg, msg_len); } } static inline int group_matches ( struct iovec *iovec, unsigned int iov_len, struct totempg_group *groups_b, unsigned int group_b_cnt, unsigned int *adjust_iovec) { unsigned short *group_len; char *group_name; int i; int j; #ifdef TOTEMPG_NEED_ALIGN struct iovec iovec_aligned = { NULL, 0 }; #endif assert (iov_len == 1); #ifdef TOTEMPG_NEED_ALIGN /* * Align data structure for not i386 or x86_64 */ if ((size_t)iovec->iov_base % 4 != 0) { iovec_aligned.iov_base = alloca(iovec->iov_len); memcpy(iovec_aligned.iov_base, iovec->iov_base, iovec->iov_len); iovec_aligned.iov_len = iovec->iov_len; iovec = &iovec_aligned; } #endif group_len = (unsigned short *)iovec->iov_base; group_name = ((char *)iovec->iov_base) + sizeof (unsigned short) * (group_len[0] + 1); 
/* * Calculate amount to adjust the iovec by before delivering to app */ *adjust_iovec = sizeof (unsigned short) * (group_len[0] + 1); for (i = 1; i < group_len[0] + 1; i++) { *adjust_iovec += group_len[i]; } /* * Determine if this message should be delivered to this instance */ for (i = 1; i < group_len[0] + 1; i++) { for (j = 0; j < group_b_cnt; j++) { if ((group_len[i] == groups_b[j].group_len) && (memcmp (groups_b[j].group, group_name, group_len[i]) == 0)) { return (1); } } group_name += group_len[i]; } return (0); } static inline void app_deliver_fn ( unsigned int nodeid, void *msg, unsigned int msg_len, int endian_conversion_required) { struct totempg_group_instance *instance; struct iovec stripped_iovec; unsigned int adjust_iovec; struct iovec *iovec; struct list_head *list; struct iovec aligned_iovec = { NULL, 0 }; if (endian_conversion_required) { group_endian_convert (msg, msg_len); } /* * TODO: segmentation/assembly need to be redesigned to provide aligned access * in all cases to avoid memory copies on non386 archs. 
Probably broke backwars * compatibility */ #ifdef TOTEMPG_NEED_ALIGN /* * Align data structure for not i386 or x86_64 */ aligned_iovec.iov_base = alloca(msg_len); aligned_iovec.iov_len = msg_len; memcpy(aligned_iovec.iov_base, msg, msg_len); #else aligned_iovec.iov_base = msg; aligned_iovec.iov_len = msg_len; #endif iovec = &aligned_iovec; for (list = totempg_groups_list.next; list != &totempg_groups_list; list = list->next) { instance = list_entry (list, struct totempg_group_instance, list); if (group_matches (iovec, 1, instance->groups, instance->groups_cnt, &adjust_iovec)) { stripped_iovec.iov_len = iovec->iov_len - adjust_iovec; stripped_iovec.iov_base = (char *)iovec->iov_base + adjust_iovec; #ifdef TOTEMPG_NEED_ALIGN /* * Align data structure for not i386 or x86_64 */ if ((char *)iovec->iov_base + adjust_iovec % 4 != 0) { /* * Deal with misalignment */ stripped_iovec.iov_base = alloca (stripped_iovec.iov_len); memcpy (stripped_iovec.iov_base, (char *)iovec->iov_base + adjust_iovec, stripped_iovec.iov_len); } #endif instance->deliver_fn ( nodeid, stripped_iovec.iov_base, stripped_iovec.iov_len, endian_conversion_required); } } } static void totempg_confchg_fn ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id) { // TODO optimize this app_confchg_fn (configuration_type, member_list, member_list_entries, left_list, left_list_entries, joined_list, joined_list_entries, ring_id); } static void totempg_deliver_fn ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required) { struct totempg_mcast *mcast; unsigned short *msg_lens; int i; struct assembly *assembly; char header[FRAME_SIZE_MAX]; int msg_count; int continuation; int start; const char *data; int datasize; assembly = assembly_ref (nodeid); assert (assembly); /* * 
Assemble the header into one block of data and * assemble the packet contents into one block of data to simplify delivery */ mcast = (struct totempg_mcast *)msg; if (endian_conversion_required) { mcast->msg_count = swab16 (mcast->msg_count); } msg_count = mcast->msg_count; datasize = sizeof (struct totempg_mcast) + msg_count * sizeof (unsigned short); memcpy (header, msg, datasize); data = msg; msg_lens = (unsigned short *) (header + sizeof (struct totempg_mcast)); if (endian_conversion_required) { for (i = 0; i < mcast->msg_count; i++) { msg_lens[i] = swab16 (msg_lens[i]); } } memcpy (&assembly->data[assembly->index], &data[datasize], msg_len - datasize); /* * If the last message in the buffer is a fragment, then we * can't deliver it. We'll first deliver the full messages * then adjust the assembly buffer so we can add the rest of the * fragment when it arrives. */ msg_count = mcast->fragmented ? mcast->msg_count - 1 : mcast->msg_count; continuation = mcast->continuation; iov_delv.iov_base = (void *)&assembly->data[0]; iov_delv.iov_len = assembly->index + msg_lens[0]; /* * Make sure that if this message is a continuation, that it * matches the sequence number of the previous fragment. * Also, if the first packed message is a continuation * of a previous message, but the assembly buffer * is empty, then we need to discard it since we can't * assemble a complete message. Likewise, if this message isn't a * continuation and the assembly buffer is empty, we have to discard * the continued message. 
*/ start = 0; if (assembly->throw_away_mode == THROW_AWAY_ACTIVE) { /* Throw away the first msg block */ if (mcast->fragmented == 0 || mcast->fragmented == 1) { assembly->throw_away_mode = THROW_AWAY_INACTIVE; assembly->index += msg_lens[0]; iov_delv.iov_base = (void *)&assembly->data[assembly->index]; iov_delv.iov_len = msg_lens[1]; start = 1; } } else if (assembly->throw_away_mode == THROW_AWAY_INACTIVE) { if (continuation == assembly->last_frag_num) { assembly->last_frag_num = mcast->fragmented; for (i = start; i < msg_count; i++) { app_deliver_fn(nodeid, iov_delv.iov_base, iov_delv.iov_len, endian_conversion_required); assembly->index += msg_lens[i]; iov_delv.iov_base = (void *)&assembly->data[assembly->index]; if (i < (msg_count - 1)) { iov_delv.iov_len = msg_lens[i + 1]; } } } else { assembly->throw_away_mode = THROW_AWAY_ACTIVE; } } if (mcast->fragmented == 0) { /* * End of messages, dereference assembly struct */ assembly->last_frag_num = 0; assembly->index = 0; assembly_deref (assembly); } else { /* * Message is fragmented, keep around assembly list */ if (mcast->msg_count > 1) { memmove (&assembly->data[0], &assembly->data[assembly->index], msg_lens[msg_count]); assembly->index = 0; } assembly->index += msg_lens[msg_count]; } } /* * Totem Process Group Abstraction * depends on poll abstraction, POSIX, IPV4 */ void *callback_token_received_handle; int callback_token_received_fn (enum totem_callback_token_type type, const void *data) { struct totempg_mcast mcast; struct iovec iovecs[3]; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&mcast_msg_mutex); } if (mcast_packed_msg_count == 0) { if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&mcast_msg_mutex); } return (0); } if (totemmrp_avail() == 0) { if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&mcast_msg_mutex); } return (0); } mcast.header.version = 0; mcast.header.type = 0; mcast.fragmented = 0; /* * Was the first message in this buffer a continuation of a * fragmented message? 
*/ mcast.continuation = fragment_continuation; fragment_continuation = 0; mcast.msg_count = mcast_packed_msg_count; iovecs[0].iov_base = (void *)&mcast; iovecs[0].iov_len = sizeof (struct totempg_mcast); iovecs[1].iov_base = (void *)mcast_packed_msg_lens; iovecs[1].iov_len = mcast_packed_msg_count * sizeof (unsigned short); iovecs[2].iov_base = (void *)&fragmentation_data[0]; iovecs[2].iov_len = fragment_size; (void)totemmrp_mcast (iovecs, 3, 0); mcast_packed_msg_count = 0; fragment_size = 0; if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&mcast_msg_mutex); } return (0); } /* * Initialize the totem process group abstraction */ int totempg_initialize ( qb_loop_t *poll_handle, struct totem_config *totem_config) { int res; totempg_totem_config = totem_config; totempg_log_level_security = totem_config->totem_logging_configuration.log_level_security; totempg_log_level_error = totem_config->totem_logging_configuration.log_level_error; totempg_log_level_warning = totem_config->totem_logging_configuration.log_level_warning; totempg_log_level_notice = totem_config->totem_logging_configuration.log_level_notice; totempg_log_level_debug = totem_config->totem_logging_configuration.log_level_debug; totempg_log_printf = totem_config->totem_logging_configuration.log_printf; totempg_subsys_id = totem_config->totem_logging_configuration.log_subsys_id; fragmentation_data = malloc (TOTEMPG_PACKET_SIZE); if (fragmentation_data == 0) { return (-1); } totemsrp_net_mtu_adjust (totem_config); res = totemmrp_initialize ( poll_handle, totem_config, &totempg_stats, totempg_deliver_fn, totempg_confchg_fn); totemmrp_callback_token_create ( &callback_token_received_handle, TOTEM_CALLBACK_TOKEN_RECEIVED, 0, callback_token_received_fn, 0); totempg_size_limit = (totemmrp_avail() - 1) * (totempg_totem_config->net_mtu - sizeof (struct totempg_mcast) - 16); list_init (&totempg_groups_list); return (res); } void totempg_finalize (void) { if (totempg_threaded_mode == 1) { pthread_mutex_lock 
(&totempg_mutex); } totemmrp_finalize (); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&totempg_mutex); } } /* * Multicast a message */ static int mcast_msg ( struct iovec *iovec_in, unsigned int iov_len, int guarantee) { int res = 0; struct totempg_mcast mcast; struct iovec iovecs[3]; struct iovec iovec[64]; int i; int dest, src; int max_packet_size = 0; int copy_len = 0; int copy_base = 0; int total_size = 0; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&mcast_msg_mutex); } totemmrp_event_signal (TOTEM_EVENT_NEW_MSG, 1); /* * Remove zero length iovectors from the list */ assert (iov_len < 64); for (dest = 0, src = 0; src < iov_len; src++) { if (iovec_in[src].iov_len) { memcpy (&iovec[dest++], &iovec_in[src], sizeof (struct iovec)); } } iov_len = dest; max_packet_size = TOTEMPG_PACKET_SIZE - (sizeof (unsigned short) * (mcast_packed_msg_count + 1)); mcast_packed_msg_lens[mcast_packed_msg_count] = 0; /* * Check if we would overwrite new message queue */ for (i = 0; i < iov_len; i++) { total_size += iovec[i].iov_len; } if (byte_count_send_ok (total_size + sizeof(unsigned short) * (mcast_packed_msg_count)) == 0) { if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&mcast_msg_mutex); } return(-1); } mcast.header.version = 0; for (i = 0; i < iov_len; ) { mcast.fragmented = 0; mcast.continuation = fragment_continuation; copy_len = iovec[i].iov_len - copy_base; /* * If it all fits with room left over, copy it in. * We need to leave at least sizeof(short) + 1 bytes in the * fragment_buffer on exit so that max_packet_size + fragment_size * doesn't exceed the size of the fragment_buffer on the next call. 
*/ if ((copy_len + fragment_size) < (max_packet_size - sizeof (unsigned short))) { memcpy (&fragmentation_data[fragment_size], (char *)iovec[i].iov_base + copy_base, copy_len); fragment_size += copy_len; mcast_packed_msg_lens[mcast_packed_msg_count] += copy_len; next_fragment = 1; copy_len = 0; copy_base = 0; i++; continue; /* * If it just fits or is too big, then send out what fits. */ } else { unsigned char *data_ptr; copy_len = min(copy_len, max_packet_size - fragment_size); if( copy_len == max_packet_size ) data_ptr = (unsigned char *)iovec[i].iov_base + copy_base; else { data_ptr = fragmentation_data; memcpy (&fragmentation_data[fragment_size], (unsigned char *)iovec[i].iov_base + copy_base, copy_len); } memcpy (&fragmentation_data[fragment_size], (unsigned char *)iovec[i].iov_base + copy_base, copy_len); mcast_packed_msg_lens[mcast_packed_msg_count] += copy_len; /* * if we're not on the last iovec or the iovec is too large to * fit, then indicate a fragment. This also means that the next * message will have the continuation of this one. */ if ((i < (iov_len - 1)) || ((copy_base + copy_len) < iovec[i].iov_len)) { if (!next_fragment) { next_fragment++; } fragment_continuation = next_fragment; mcast.fragmented = next_fragment++; assert(fragment_continuation != 0); assert(mcast.fragmented != 0); } else { fragment_continuation = 0; } /* * assemble the message and send it */ mcast.msg_count = ++mcast_packed_msg_count; iovecs[0].iov_base = (void *)&mcast; iovecs[0].iov_len = sizeof(struct totempg_mcast); iovecs[1].iov_base = (void *)mcast_packed_msg_lens; iovecs[1].iov_len = mcast_packed_msg_count * sizeof(unsigned short); iovecs[2].iov_base = (void *)data_ptr; iovecs[2].iov_len = max_packet_size; assert (totemmrp_avail() > 0); res = totemmrp_mcast (iovecs, 3, guarantee); if (res == -1) { goto error_exit; } /* * Recalculate counts and indexes for the next. 
*/ mcast_packed_msg_lens[0] = 0; mcast_packed_msg_count = 0; fragment_size = 0; max_packet_size = TOTEMPG_PACKET_SIZE - (sizeof(unsigned short)); /* * If the iovec all fit, go to the next iovec */ if ((copy_base + copy_len) == iovec[i].iov_len) { copy_len = 0; copy_base = 0; i++; /* * Continue with the rest of the current iovec. */ } else { copy_base += copy_len; } } } /* * Bump only if we added message data. This may be zero if * the last buffer just fit into the fragmentation_data buffer * and we were at the last iovec. */ if (mcast_packed_msg_lens[mcast_packed_msg_count]) { mcast_packed_msg_count++; } error_exit: if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&mcast_msg_mutex); } return (res); } /* * Determine if a message of msg_size could be queued */ static int msg_count_send_ok ( int msg_count) { int avail = 0; avail = totemmrp_avail (); totempg_stats.msg_queue_avail = avail; return ((avail - totempg_reserved) > msg_count); } static int byte_count_send_ok ( int byte_count) { unsigned int msg_count = 0; int avail = 0; avail = totemmrp_avail (); msg_count = (byte_count / (totempg_totem_config->net_mtu - sizeof (struct totempg_mcast) - 16)) + 1; return (avail >= msg_count); } static int send_reserve ( int msg_size) { unsigned int msg_count = 0; msg_count = (msg_size / (totempg_totem_config->net_mtu - sizeof (struct totempg_mcast) - 16)) + 1; totempg_reserved += msg_count; totempg_stats.msg_reserved = totempg_reserved; return (msg_count); } static void send_release ( int msg_count) { totempg_reserved -= msg_count; totempg_stats.msg_reserved = totempg_reserved; } #ifndef HAVE_SMALL_MEMORY_FOOTPRINT #undef MESSAGE_QUEUE_MAX #define MESSAGE_QUEUE_MAX ((4 * MESSAGE_SIZE_MAX) / totempg_totem_config->net_mtu) #endif /* HAVE_SMALL_MEMORY_FOOTPRINT */ static uint32_t q_level_precent_used(void) { return (100 - (((totemmrp_avail() - totempg_reserved) * 100) / MESSAGE_QUEUE_MAX)); } int totempg_callback_token_create ( void **handle_out, enum 
totem_callback_token_type type, int delete, int (*callback_fn) (enum totem_callback_token_type type, const void *), const void *data) { unsigned int res; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&callback_token_mutex); } res = totemmrp_callback_token_create (handle_out, type, delete, callback_fn, data); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&callback_token_mutex); } return (res); } void totempg_callback_token_destroy ( void *handle_out) { if (totempg_threaded_mode == 1) { pthread_mutex_lock (&callback_token_mutex); } totemmrp_callback_token_destroy (handle_out); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&callback_token_mutex); } } /* * vi: set autoindent tabstop=4 shiftwidth=4 : */ int totempg_groups_initialize ( void **totempg_groups_instance, void (*deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required), void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)) { struct totempg_group_instance *instance; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&totempg_mutex); } instance = malloc (sizeof (struct totempg_group_instance)); if (instance == NULL) { goto error_exit; } instance->deliver_fn = deliver_fn; instance->confchg_fn = confchg_fn; instance->groups = 0; instance->groups_cnt = 0; instance->q_level = QB_LOOP_MED; list_init (&instance->list); list_add (&instance->list, &totempg_groups_list); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&totempg_mutex); } *totempg_groups_instance = instance; return (0); error_exit: if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&totempg_mutex); } return (-1); } int totempg_groups_join ( void *totempg_groups_instance, const struct totempg_group *groups, size_t group_cnt) { struct 
totempg_group_instance *instance = (struct totempg_group_instance *)totempg_groups_instance; struct totempg_group *new_groups; unsigned int res = 0; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&totempg_mutex); } new_groups = realloc (instance->groups, sizeof (struct totempg_group) * (instance->groups_cnt + group_cnt)); if (new_groups == 0) { res = ENOMEM; goto error_exit; } memcpy (&new_groups[instance->groups_cnt], groups, group_cnt * sizeof (struct totempg_group)); instance->groups = new_groups; instance->groups_cnt += group_cnt; error_exit: if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&totempg_mutex); } return (res); } int totempg_groups_leave ( void *totempg_groups_instance, const struct totempg_group *groups, size_t group_cnt) { if (totempg_threaded_mode == 1) { pthread_mutex_lock (&totempg_mutex); } if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&totempg_mutex); } return (0); } #define MAX_IOVECS_FROM_APP 32 #define MAX_GROUPS_PER_MSG 32 int totempg_groups_mcast_joined ( void *totempg_groups_instance, const struct iovec *iovec, unsigned int iov_len, int guarantee) { struct totempg_group_instance *instance = (struct totempg_group_instance *)totempg_groups_instance; unsigned short group_len[MAX_GROUPS_PER_MSG + 1]; struct iovec iovec_mcast[MAX_GROUPS_PER_MSG + 1 + MAX_IOVECS_FROM_APP]; int i; unsigned int res; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&totempg_mutex); } /* * Build group_len structure and the iovec_mcast structure */ group_len[0] = instance->groups_cnt; for (i = 0; i < instance->groups_cnt; i++) { group_len[i + 1] = instance->groups[i].group_len; iovec_mcast[i + 1].iov_len = instance->groups[i].group_len; iovec_mcast[i + 1].iov_base = (void *) instance->groups[i].group; } iovec_mcast[0].iov_len = (instance->groups_cnt + 1) * sizeof (unsigned short); iovec_mcast[0].iov_base = group_len; for (i = 0; i < iov_len; i++) { iovec_mcast[i + instance->groups_cnt + 1].iov_len = iovec[i].iov_len; iovec_mcast[i + 
instance->groups_cnt + 1].iov_base = iovec[i].iov_base; } res = mcast_msg (iovec_mcast, iov_len + instance->groups_cnt + 1, guarantee); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&totempg_mutex); } return (res); } static void check_q_level( void *totempg_groups_instance) { struct totempg_group_instance *instance = (struct totempg_group_instance *)totempg_groups_instance; int32_t old_level = instance->q_level; int32_t percent_used = q_level_precent_used(); if (percent_used >= 75 && instance->q_level != TOTEM_Q_LEVEL_CRITICAL) { instance->q_level = TOTEM_Q_LEVEL_CRITICAL; } else if (percent_used < 30 && instance->q_level != TOTEM_Q_LEVEL_LOW) { instance->q_level = TOTEM_Q_LEVEL_LOW; } else if (percent_used > 40 && percent_used < 50 && instance->q_level != TOTEM_Q_LEVEL_GOOD) { instance->q_level = TOTEM_Q_LEVEL_GOOD; } else if (percent_used > 60 && percent_used < 70 && instance->q_level != TOTEM_Q_LEVEL_HIGH) { instance->q_level = TOTEM_Q_LEVEL_HIGH; } if (totem_queue_level_changed && old_level != instance->q_level) { totem_queue_level_changed(instance->q_level); } } void totempg_check_q_level( void *totempg_groups_instance) { struct totempg_group_instance *instance = (struct totempg_group_instance *)totempg_groups_instance; check_q_level(instance); } int totempg_groups_joined_reserve ( void *totempg_groups_instance, const struct iovec *iovec, unsigned int iov_len) { struct totempg_group_instance *instance = (struct totempg_group_instance *)totempg_groups_instance; unsigned int size = 0; unsigned int i; unsigned int reserved = 0; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&totempg_mutex); pthread_mutex_lock (&mcast_msg_mutex); } for (i = 0; i < instance->groups_cnt; i++) { size += instance->groups[i].group_len; } for (i = 0; i < iov_len; i++) { size += iovec[i].iov_len; } if (size >= totempg_size_limit) { reserved = -1; goto error_exit; } if (byte_count_send_ok (size)) { reserved = send_reserve (size); } else { reserved = 0; } error_exit: 
check_q_level(instance); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&mcast_msg_mutex); pthread_mutex_unlock (&totempg_mutex); } return (reserved); } int totempg_groups_joined_release (int msg_count) { if (totempg_threaded_mode == 1) { pthread_mutex_lock (&totempg_mutex); pthread_mutex_lock (&mcast_msg_mutex); } send_release (msg_count); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&mcast_msg_mutex); pthread_mutex_unlock (&totempg_mutex); } return 0; } int totempg_groups_mcast_groups ( void *totempg_groups_instance, int guarantee, const struct totempg_group *groups, size_t groups_cnt, const struct iovec *iovec, unsigned int iov_len) { unsigned short group_len[MAX_GROUPS_PER_MSG + 1]; struct iovec iovec_mcast[MAX_GROUPS_PER_MSG + 1 + MAX_IOVECS_FROM_APP]; int i; unsigned int res; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&totempg_mutex); } /* * Build group_len structure and the iovec_mcast structure */ group_len[0] = groups_cnt; for (i = 0; i < groups_cnt; i++) { group_len[i + 1] = groups[i].group_len; iovec_mcast[i + 1].iov_len = groups[i].group_len; iovec_mcast[i + 1].iov_base = (void *) groups[i].group; } iovec_mcast[0].iov_len = (groups_cnt + 1) * sizeof (unsigned short); iovec_mcast[0].iov_base = group_len; for (i = 0; i < iov_len; i++) { iovec_mcast[i + groups_cnt + 1].iov_len = iovec[i].iov_len; iovec_mcast[i + groups_cnt + 1].iov_base = iovec[i].iov_base; } res = mcast_msg (iovec_mcast, iov_len + groups_cnt + 1, guarantee); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&totempg_mutex); } return (res); } /* * Returns -1 if error, 0 if can't send, 1 if can send the message */ int totempg_groups_send_ok_groups ( void *totempg_groups_instance, const struct totempg_group *groups, size_t groups_cnt, const struct iovec *iovec, unsigned int iov_len) { unsigned int size = 0; unsigned int i; unsigned int res; if (totempg_threaded_mode == 1) { pthread_mutex_lock (&totempg_mutex); } for (i = 0; i < groups_cnt; i++) { size += 
groups[i].group_len; } for (i = 0; i < iov_len; i++) { size += iovec[i].iov_len; } res = msg_count_send_ok (size); if (totempg_threaded_mode == 1) { pthread_mutex_unlock (&totempg_mutex); } return (res); } int totempg_ifaces_get ( unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count) { int res; res = totemmrp_ifaces_get ( nodeid, interfaces, status, iface_count); return (res); } void totempg_event_signal (enum totem_event_type type, int value) { totemmrp_event_signal (type, value); } void* totempg_get_stats (void) { return &totempg_stats; } int totempg_crypto_set ( - unsigned int type) + const char *cipher_type, + const char *hash_type) { int res; - res = totemmrp_crypto_set ( - type); + res = totemmrp_crypto_set (cipher_type, hash_type); return (res); } int totempg_ring_reenable (void) { int res; res = totemmrp_ring_reenable (); return (res); } const char *totempg_ifaces_print (unsigned int nodeid) { static char iface_string[256 * INTERFACE_MAX]; char one_iface[64]; struct totem_ip_address interfaces[INTERFACE_MAX]; char **status; unsigned int iface_count; unsigned int i; int res; iface_string[0] = '\0'; res = totempg_ifaces_get (nodeid, interfaces, &status, &iface_count); if (res == -1) { return ("no interface found for nodeid"); } for (i = 0; i < iface_count; i++) { sprintf (one_iface, "r(%d) ip(%s) ", i, totemip_print (&interfaces[i])); strcat (iface_string, one_iface); } return (iface_string); } unsigned int totempg_my_nodeid_get (void) { return (totemmrp_my_nodeid_get()); } int totempg_my_family_get (void) { return (totemmrp_my_family_get()); } extern void totempg_service_ready_register ( void (*totem_service_ready) (void)) { totemmrp_service_ready_register (totem_service_ready); } void totempg_queue_level_register_callback (totem_queue_level_changed_fn fn) { totem_queue_level_changed = fn; } extern int totempg_member_add ( const struct totem_ip_address *member, int ring_no) { return totemmrp_member_add (member, 
ring_no); } extern int totempg_member_remove ( const struct totem_ip_address *member, int ring_no) { return totemmrp_member_remove (member, ring_no); } void totempg_threaded_mode_enable (void) { totempg_threaded_mode = 1; totemmrp_threaded_mode_enable (); } diff --git a/exec/totemrrp.c b/exec/totemrrp.c index 1a613410..25ea90b9 100644 --- a/exec/totemrrp.c +++ b/exec/totemrrp.c @@ -1,2155 +1,2156 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2012 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTE(review): the header names of the system #include directives below
 * were destroyed during extraction (likely angle-bracketed names eaten
 * by markup stripping); they are preserved bare and must be restored
 * from the original totemrrp.c before this file can compile.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define LOGSYS_UTILS_ONLY 1
#include
#include "totemnet.h"
#include "totemrrp.h"

void rrp_deliver_fn (
	void *context,
	const void *msg,
	unsigned int msg_len);

void rrp_iface_change_fn (
	void *context,
	const struct totem_ip_address *iface_addr);

struct totemrrp_instance;

/* Per-algorithm state for passive redundant-ring replication. */
struct passive_instance {
	struct totemrrp_instance *rrp_instance;
	unsigned int *faulty;
	unsigned int *token_recv_count;
	unsigned int *mcast_recv_count;
	unsigned char token[15000];
	unsigned int token_len;
	qb_loop_timer_handle timer_expired_token;
	qb_loop_timer_handle timer_problem_decrementer;
	void *totemrrp_context;
	unsigned int token_xmit_iface;
	unsigned int msg_xmit_iface;
};

/* Per-algorithm state for active redundant-ring replication. */
struct active_instance {
	struct totemrrp_instance *rrp_instance;
	unsigned int *faulty;
	unsigned int *last_token_recv;
	unsigned int *counter_problems;
	unsigned char token[15000];
	unsigned int token_len;
	unsigned int last_token_seq;
	qb_loop_timer_handle timer_expired_token;
	qb_loop_timer_handle timer_problem_decrementer;
	void *totemrrp_context;
};

/*
 * Virtual dispatch table for a replication algorithm (none/passive/active):
 * every rrp operation is routed through one of these function pointers.
 */
struct rrp_algo {
	const char *name;

	void * (*initialize) (
		struct totemrrp_instance *rrp_instance,
		int interface_count);

	void (*mcast_recv) (
		struct totemrrp_instance *instance,
		unsigned int iface_no,
		void *context,
		const void *msg,
		unsigned int msg_len);

	void (*mcast_noflush_send) (
		struct totemrrp_instance *instance,
		const void *msg,
		unsigned int msg_len);

	void (*mcast_flush_send) (
		struct totemrrp_instance *instance,
		const void *msg,
		unsigned int msg_len);

	void (*token_recv) (
		struct totemrrp_instance *instance,
		unsigned int iface_no,
		void *context,
		const void *msg,
		unsigned int msg_len,
		unsigned int token_seqid);

	void (*token_send) (
		struct totemrrp_instance *instance,
		const void *msg,
		unsigned int msg_len);

	void (*recv_flush) (
		struct totemrrp_instance *instance);

	void (*send_flush) (
		struct totemrrp_instance *instance);

	void (*iface_check) (
		struct totemrrp_instance *instance);

	void (*processor_count_set) (
		struct totemrrp_instance *instance,
		unsigned int processor_count);

	void (*token_target_set) (
		struct totemrrp_instance *instance,
		struct totem_ip_address *token_target,
		unsigned int iface_no);

	void (*ring_reenable) (
		struct totemrrp_instance *instance,
		unsigned int iface_no);

	int (*mcast_recv_empty) (
		struct totemrrp_instance *instance);

	int (*member_add) (
		struct totemrrp_instance *instance,
		const struct totem_ip_address *member,
		unsigned int iface_no);

	int (*member_remove) (
		struct totemrrp_instance *instance,
		const struct totem_ip_address *member,
		unsigned int iface_no);
};

/* Top-level rrp state: interfaces, chosen algorithm, callbacks, stats. */
struct totemrrp_instance {
	qb_loop_t *poll_handle;

	struct totem_interface *interfaces;

	struct rrp_algo *rrp_algo;

	void *context;

	char *status[INTERFACE_MAX];

	void (*totemrrp_deliver_fn) (
		void *context,
		const void *msg,
		unsigned int msg_len);

	void (*totemrrp_iface_change_fn) (
		void *context,
		const struct totem_ip_address *iface_addr,
		unsigned int iface_no);

	void (*totemrrp_token_seqid_get) (
		const void *msg,
		unsigned int *seqid,
		unsigned int *token_is);

	void (*totemrrp_target_set_completed) (
		void *context);

	unsigned int (*totemrrp_msgs_missing) (void);

	/*
	 * Function and data used to log messages
	 */
	int totemrrp_log_level_security;

	int totemrrp_log_level_error;

	int totemrrp_log_level_warning;

	int totemrrp_log_level_notice;

	int totemrrp_log_level_debug;

	int totemrrp_subsys_id;

	void (*totemrrp_log_printf) (
		int level,
		int subsys,
		const char *function,
		const char *file,
		int line,
		const char *format, ...)__attribute__((format(printf, 6, 7)));

	void **net_handles;

	void *rrp_algo_instance;

	int interface_count;

	int processor_count;

	int my_nodeid;

	struct totem_config *totem_config;

	void *deliver_fn_context[INTERFACE_MAX];

	qb_loop_timer_handle timer_active_test_ring_timeout[INTERFACE_MAX];

	totemrrp_stats_t stats;
};

static void stats_set_interface_faulty(struct totemrrp_instance *rrp_instance,
	unsigned int iface_no, int is_faulty);

/*
 * None Replication Forward Declarations
 */
static void none_mcast_recv (
	struct totemrrp_instance *instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len);

static void none_mcast_noflush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len);

static void none_mcast_flush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len);

static void none_token_recv (
	struct totemrrp_instance *instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len,
	unsigned int token_seqid);

static void none_token_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len);

static void none_recv_flush (
	struct totemrrp_instance *instance);

static void none_send_flush (
	struct totemrrp_instance *instance);

static void none_iface_check (
	struct totemrrp_instance *instance);

static void none_processor_count_set (
	struct totemrrp_instance *instance,
	unsigned int processor_count_set);

static void none_token_target_set (
	struct totemrrp_instance *instance,
	struct totem_ip_address *token_target,
	unsigned int iface_no);

static void none_ring_reenable (
	struct totemrrp_instance *instance,
	unsigned int iface_no);

static int none_mcast_recv_empty (
	struct totemrrp_instance *instance);

static int none_member_add (
	struct totemrrp_instance
*instance, const struct totem_ip_address *member, unsigned int iface_no); static int none_member_remove ( struct totemrrp_instance *instance, const struct totem_ip_address *member, unsigned int iface_no); /* * Passive Replication Forward Declerations */ static void *passive_instance_initialize ( struct totemrrp_instance *rrp_instance, int interface_count); static void passive_mcast_recv ( struct totemrrp_instance *instance, unsigned int iface_no, void *context, const void *msg, unsigned int msg_len); static void passive_mcast_noflush_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len); static void passive_mcast_flush_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len); static void passive_monitor ( struct totemrrp_instance *rrp_instance, unsigned int iface_no, int is_token_recv_count); static void passive_token_recv ( struct totemrrp_instance *instance, unsigned int iface_no, void *context, const void *msg, unsigned int msg_len, unsigned int token_seqid); static void passive_token_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len); static void passive_recv_flush ( struct totemrrp_instance *instance); static void passive_send_flush ( struct totemrrp_instance *instance); static void passive_iface_check ( struct totemrrp_instance *instance); static void passive_processor_count_set ( struct totemrrp_instance *instance, unsigned int processor_count_set); static void passive_token_target_set ( struct totemrrp_instance *instance, struct totem_ip_address *token_target, unsigned int iface_no); static void passive_ring_reenable ( struct totemrrp_instance *instance, unsigned int iface_no); static int passive_mcast_recv_empty ( struct totemrrp_instance *instance); static int passive_member_add ( struct totemrrp_instance *instance, const struct totem_ip_address *member, unsigned int iface_no); static int passive_member_remove ( struct totemrrp_instance *instance, const struct 
totem_ip_address *member, unsigned int iface_no); /* * Active Replication Forward Definitions */ static void *active_instance_initialize ( struct totemrrp_instance *rrp_instance, int interface_count); static void active_mcast_recv ( struct totemrrp_instance *instance, unsigned int iface_no, void *context, const void *msg, unsigned int msg_len); static void active_mcast_noflush_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len); static void active_mcast_flush_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len); static void active_token_recv ( struct totemrrp_instance *instance, unsigned int iface_no, void *context, const void *msg, unsigned int msg_len, unsigned int token_seqid); static void active_token_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len); static void active_recv_flush ( struct totemrrp_instance *instance); static void active_send_flush ( struct totemrrp_instance *instance); static void active_iface_check ( struct totemrrp_instance *instance); static void active_processor_count_set ( struct totemrrp_instance *instance, unsigned int processor_count_set); static void active_token_target_set ( struct totemrrp_instance *instance, struct totem_ip_address *token_target, unsigned int iface_no); static void active_ring_reenable ( struct totemrrp_instance *instance, unsigned int iface_no); static int active_mcast_recv_empty ( struct totemrrp_instance *instance); static int active_member_add ( struct totemrrp_instance *instance, const struct totem_ip_address *member, unsigned int iface_no); static int active_member_remove ( struct totemrrp_instance *instance, const struct totem_ip_address *member, unsigned int iface_no); static void active_timer_expired_token_start ( struct active_instance *active_instance); static void active_timer_expired_token_cancel ( struct active_instance *active_instance); static void active_timer_problem_decrementer_start ( struct 
active_instance *active_instance);
static void active_timer_problem_decrementer_cancel (
	struct active_instance *active_instance);

/*
 * 0-5 reserved for totemsrp.c
 */
#define MESSAGE_TYPE_RING_TEST_ACTIVE	6
#define MESSAGE_TYPE_RING_TEST_ACTIVATE	7

#define ENDIAN_LOCAL			0xff22

/*
 * Rollover handling:
 *
 * ARR_SEQNO_START_TOKEN is the starting sequence number of the last seen
 * sequence for a token for the active redundant ring.  This should remain
 * zero, unless testing overflow, in which case 0x7fffff00 or 0xffffff00 are
 * good starting values.  It should be the same as the one defined in
 * totemsrp.c
 */
#define ARR_SEQNO_START_TOKEN 0x0

/*
 * These can be used to test different rollover points
 * #define ARR_SEQNO_START_MSG 0xfffffe00
 */

/*
 * Threshold value at which recv_count for passive rrp should be adjusted.
 * Set this value to something smaller for testing of the adjustment
 * functionality.  Also keep in mind that this value must be smaller
 * than rrp_problem_count_threshold
 */
#define PASSIVE_RECV_COUNT_THRESHOLD	(INT_MAX / 2)

/*
 * On-wire header used by the ring-test (autorecovery probe) messages.
 */
struct message_header {
	char type;
	char encapsulated;
	unsigned short endian_detector;
	int ring_number;
	int nodeid_activator;
} __attribute__((packed));

/*
 * Per-interface context handed to the network layer so received data can be
 * routed back to the owning rrp instance and interface number.
 */
struct deliver_fn_context {
	struct totemrrp_instance *instance;
	void *context;
	int iface_no;
};

/*
 * "none" mode: a single ring, every operation passes straight through to
 * interface 0.
 */
struct rrp_algo none_algo = {
	.name			= "none",
	.initialize		= NULL,
	.mcast_recv		= none_mcast_recv,
	.mcast_noflush_send	= none_mcast_noflush_send,
	.mcast_flush_send	= none_mcast_flush_send,
	.token_recv		= none_token_recv,
	.token_send		= none_token_send,
	.recv_flush		= none_recv_flush,
	.send_flush		= none_send_flush,
	.iface_check		= none_iface_check,
	.processor_count_set	= none_processor_count_set,
	.token_target_set	= none_token_target_set,
	.ring_reenable		= none_ring_reenable,
	.mcast_recv_empty	= none_mcast_recv_empty,
	.member_add		= none_member_add,
	.member_remove		= none_member_remove
};

/*
 * "passive" mode: traffic rotates across the non-faulty rings.
 */
struct rrp_algo passive_algo = {
	.name			= "passive",
	.initialize		= passive_instance_initialize,
	.mcast_recv		= passive_mcast_recv,
	.mcast_noflush_send
= passive_mcast_noflush_send, .mcast_flush_send = passive_mcast_flush_send, .token_recv = passive_token_recv, .token_send = passive_token_send, .recv_flush = passive_recv_flush, .send_flush = passive_send_flush, .iface_check = passive_iface_check, .processor_count_set = passive_processor_count_set, .token_target_set = passive_token_target_set, .ring_reenable = passive_ring_reenable, .mcast_recv_empty = passive_mcast_recv_empty, .member_add = passive_member_add, .member_remove = passive_member_remove }; struct rrp_algo active_algo = { .name = "active", .initialize = active_instance_initialize, .mcast_recv = active_mcast_recv, .mcast_noflush_send = active_mcast_noflush_send, .mcast_flush_send = active_mcast_flush_send, .token_recv = active_token_recv, .token_send = active_token_send, .recv_flush = active_recv_flush, .send_flush = active_send_flush, .iface_check = active_iface_check, .processor_count_set = active_processor_count_set, .token_target_set = active_token_target_set, .ring_reenable = active_ring_reenable, .mcast_recv_empty = active_mcast_recv_empty, .member_add = active_member_add, .member_remove = active_member_remove }; struct rrp_algo *rrp_algos[] = { &none_algo, &passive_algo, &active_algo }; #define RRP_ALGOS_COUNT 3 #define log_printf(level, format, args...) \ do { \ rrp_instance->totemrrp_log_printf ( \ level, rrp_instance->totemrrp_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ format, ##args); \ } while (0); static void stats_set_interface_faulty(struct totemrrp_instance *rrp_instance, unsigned int iface_no, int is_faulty) { rrp_instance->stats.faulty[iface_no] = (is_faulty ? 
1 : 0);
}

/*
 * Byte-swap a ring-test message header into host/local endianness.
 */
static void test_active_msg_endian_convert(const struct message_header *in, struct message_header *out)
{
	out->type = in->type;
	out->encapsulated = in->encapsulated;
	out->endian_detector = ENDIAN_LOCAL;
	out->ring_number = swab32 (in->ring_number);
	out->nodeid_activator = swab32(in->nodeid_activator);
}

/*
 * Autorecovery probe: while an interface is still marked faulty, send a
 * RING_TEST_ACTIVE message on it and re-arm this timer so the probe repeats
 * every rrp_autorecovery_check_timeout ms.  The faulty array is fetched from
 * the mode-specific algo instance (active or passive only; "none" mode never
 * schedules this timer, hence the assert).
 */
static void timer_function_test_ring_timeout (void *context)
{
	struct deliver_fn_context *deliver_fn_context = (struct deliver_fn_context *)context;
	struct totemrrp_instance *rrp_instance = deliver_fn_context->instance;
	unsigned int *faulty = NULL;
	int iface_no = deliver_fn_context->iface_no;
	struct message_header msg = {
		.type = MESSAGE_TYPE_RING_TEST_ACTIVE,
		.endian_detector = ENDIAN_LOCAL,
	};

	if (strcmp(rrp_instance->totem_config->rrp_mode, "active") == 0)
		faulty = ((struct active_instance *)(rrp_instance->rrp_algo_instance))->faulty;
	if (strcmp(rrp_instance->totem_config->rrp_mode, "passive") == 0)
		faulty = ((struct passive_instance *)(rrp_instance->rrp_algo_instance))->faulty;

	assert (faulty != NULL);

	if (faulty[iface_no] == 1) {
		msg.ring_number = iface_no;
		msg.nodeid_activator = rrp_instance->my_nodeid;
		totemnet_token_send (
			rrp_instance->net_handles[iface_no],
			&msg, sizeof (struct message_header));
		qb_loop_timer_add (rrp_instance->poll_handle,
			QB_LOOP_MED,
			rrp_instance->totem_config->rrp_autorecovery_check_timeout*QB_TIME_NS_IN_MSEC,
			(void *)deliver_fn_context,
			timer_function_test_ring_timeout,
			&rrp_instance->timer_active_test_ring_timeout[iface_no]);
	}
}

/*
 * None Replication Implementation
 */
static void none_mcast_recv (
	struct totemrrp_instance *rrp_instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len)
{
	rrp_instance->totemrrp_deliver_fn (
		context,
		msg,
		msg_len);
}

static void none_mcast_flush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	totemnet_mcast_flush_send (instance->net_handles[0], msg, msg_len);
}

static void none_mcast_noflush_send (
	struct totemrrp_instance
*instance,
	const void *msg,
	unsigned int msg_len)
{
	totemnet_mcast_noflush_send (instance->net_handles[0], msg, msg_len);
}

/*
 * Single-ring token receive: no sequencing needed, deliver immediately.
 */
static void none_token_recv (
	struct totemrrp_instance *rrp_instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len,
	unsigned int token_seq)
{
	rrp_instance->totemrrp_deliver_fn (
		context,
		msg,
		msg_len);
}

static void none_token_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	totemnet_token_send (
		instance->net_handles[0],
		msg, msg_len);
}

static void none_recv_flush (struct totemrrp_instance *instance)
{
	totemnet_recv_flush (instance->net_handles[0]);
}

static void none_send_flush (struct totemrrp_instance *instance)
{
	totemnet_send_flush (instance->net_handles[0]);
}

static void none_iface_check (struct totemrrp_instance *instance)
{
	totemnet_iface_check (instance->net_handles[0]);
}

static void none_processor_count_set (
	struct totemrrp_instance *instance,
	unsigned int processor_count)
{
	totemnet_processor_count_set (instance->net_handles[0],
		processor_count);
}

static void none_token_target_set (
	struct totemrrp_instance *instance,
	struct totem_ip_address *token_target,
	unsigned int iface_no)
{
	totemnet_token_target_set (instance->net_handles[0], token_target);
}

static void none_ring_reenable (
	struct totemrrp_instance *instance,
	unsigned int iface_no)
{
	/*
	 * No operation
	 */
}

static int none_mcast_recv_empty (
	struct totemrrp_instance *instance)
{
	int res;

	res = totemnet_recv_mcast_empty (instance->net_handles[0]);

	return (res);
}

static int none_member_add (
	struct totemrrp_instance *instance,
	const struct totem_ip_address *member,
	unsigned int iface_no)
{
	int res;
	res = totemnet_member_add (instance->net_handles[0], member);
	return (res);
}

static int none_member_remove (
	struct totemrrp_instance *instance,
	const struct totem_ip_address *member,
	unsigned int iface_no)
{
	int res;
	res = totemnet_member_remove (instance->net_handles[0], member);
	return (res);
}

/*
 * Passive
Replication Implementation */ void *passive_instance_initialize ( struct totemrrp_instance *rrp_instance, int interface_count) { struct passive_instance *instance; int i; instance = malloc (sizeof (struct passive_instance)); if (instance == 0) { goto error_exit; } memset (instance, 0, sizeof (struct passive_instance)); instance->faulty = malloc (sizeof (int) * interface_count); if (instance->faulty == 0) { free (instance); instance = 0; goto error_exit; } memset (instance->faulty, 0, sizeof (int) * interface_count); for (i = 0; i < interface_count; i++) { stats_set_interface_faulty (rrp_instance, i, 0); } instance->token_recv_count = malloc (sizeof (int) * interface_count); if (instance->token_recv_count == 0) { free (instance->faulty); free (instance); instance = 0; goto error_exit; } memset (instance->token_recv_count, 0, sizeof (int) * interface_count); instance->mcast_recv_count = malloc (sizeof (int) * interface_count); if (instance->mcast_recv_count == 0) { free (instance->token_recv_count); free (instance->faulty); free (instance); instance = 0; goto error_exit; } memset (instance->mcast_recv_count, 0, sizeof (int) * interface_count); error_exit: return ((void *)instance); } static void timer_function_passive_token_expired (void *context) { struct passive_instance *passive_instance = (struct passive_instance *)context; struct totemrrp_instance *rrp_instance = passive_instance->rrp_instance; rrp_instance->totemrrp_deliver_fn ( passive_instance->totemrrp_context, passive_instance->token, passive_instance->token_len); } /* TODO static void timer_function_passive_problem_decrementer (void *context) { // struct passive_instance *passive_instance = (struct passive_instance *)context; // struct totemrrp_instance *rrp_instance = passive_instance->rrp_instance; } */ static void passive_timer_expired_token_start ( struct passive_instance *passive_instance) { qb_loop_timer_add ( passive_instance->rrp_instance->poll_handle, QB_LOOP_MED, 
passive_instance->rrp_instance->totem_config->rrp_token_expired_timeout*QB_TIME_NS_IN_MSEC, (void *)passive_instance, timer_function_passive_token_expired, &passive_instance->timer_expired_token); } static void passive_timer_expired_token_cancel ( struct passive_instance *passive_instance) { qb_loop_timer_del ( passive_instance->rrp_instance->poll_handle, passive_instance->timer_expired_token); } /* static void passive_timer_problem_decrementer_start ( struct passive_instance *passive_instance) { qb_loop_timer_add ( QB_LOOP_MED, passive_instance->rrp_instance->poll_handle, passive_instance->rrp_instance->totem_config->rrp_problem_count_timeout*QB_TIME_NS_IN_MSEC, (void *)passive_instance, timer_function_passive_problem_decrementer, &passive_instance->timer_problem_decrementer); } static void passive_timer_problem_decrementer_cancel ( struct passive_instance *passive_instance) { qb_loop_timer_del ( passive_instance->rrp_instance->poll_handle, passive_instance->timer_problem_decrementer); } */ /* * Monitor function implementation from rrp paper. 
 * rrp_instance is the passive rrp instance, iface_no is the interface on
 * which a message/token was received, and is_token_recv_count denotes
 * whether the received item was a token (>= 1) or a regular message (== 0).
 */
static void passive_monitor (
	struct totemrrp_instance *rrp_instance,
	unsigned int iface_no,
	int is_token_recv_count)
{
	struct passive_instance *passive_instance =
		(struct passive_instance *)rrp_instance->rrp_algo_instance;
	unsigned int *recv_count;
	unsigned int max;
	unsigned int i;
	unsigned int min_all, min_active;
	unsigned int threshold;

	/*
	 * Monitor for failures: tokens and mcast messages are counted
	 * separately and compared against separate thresholds.
	 */
	if (is_token_recv_count) {
		recv_count = passive_instance->token_recv_count;
		threshold = rrp_instance->totem_config->rrp_problem_count_threshold;
	} else {
		recv_count = passive_instance->mcast_recv_count;
		threshold = rrp_instance->totem_config->rrp_problem_count_mcast_threshold;
	}

	recv_count[iface_no] += 1;

	max = 0;
	for (i = 0; i < rrp_instance->interface_count; i++) {
		if (max < recv_count[i]) {
			max = recv_count[i];
		}
	}

	/*
	 * Max is larger than PASSIVE_RECV_COUNT_THRESHOLD -> rebase the
	 * counters downward so they never overflow, preserving differences.
	 */
	if (max > PASSIVE_RECV_COUNT_THRESHOLD) {
		min_all = min_active = recv_count[iface_no];

		for (i = 0; i < rrp_instance->interface_count; i++) {
			if (recv_count[i] < min_all) {
				min_all = recv_count[i];
			}

			if (passive_instance->faulty[i] == 0 &&
			    recv_count[i] < min_active) {
				min_active = recv_count[i];
			}
		}

		if (min_all > 0) {
			/*
			 * There is one or more faulty device with recv_count > 0,
			 * so the global minimum can be subtracted everywhere.
			 */
			for (i = 0; i < rrp_instance->interface_count; i++) {
				recv_count[i] -= min_all;
			}
		} else {
			/*
			 * No faulty device with recv_count > 0, adjust only active
			 * devices
			 */
			for (i = 0; i < rrp_instance->interface_count; i++) {
				if (passive_instance->faulty[i] == 0) {
					recv_count[i] -= min_active;
				}
			}
		}

		/*
		 * Find again max
		 */
		max = 0;
		for (i = 0; i < rrp_instance->interface_count; i++) {
			if (max < recv_count[i]) {
				max = recv_count[i];
			}
		}
	}

	/*
	 * Any interface trailing the leader by more than the threshold is
	 * marked faulty (continues on the following source line).
	 */
	for (i = 0; i < rrp_instance->interface_count; i++) {
		if ((passive_instance->faulty[i] == 0) &&
		    (max - recv_count[i] >
threshold)) { passive_instance->faulty[i] = 1; qb_loop_timer_add (rrp_instance->poll_handle, QB_LOOP_MED, rrp_instance->totem_config->rrp_autorecovery_check_timeout*QB_TIME_NS_IN_MSEC, rrp_instance->deliver_fn_context[i], timer_function_test_ring_timeout, &rrp_instance->timer_active_test_ring_timeout[i]); stats_set_interface_faulty (rrp_instance, i, passive_instance->faulty[i]); sprintf (rrp_instance->status[i], "Marking ringid %u interface %s FAULTY", i, totemnet_iface_print (rrp_instance->net_handles[i])); log_printf ( rrp_instance->totemrrp_log_level_error, "%s", rrp_instance->status[i]); } } } static void passive_mcast_recv ( struct totemrrp_instance *rrp_instance, unsigned int iface_no, void *context, const void *msg, unsigned int msg_len) { struct passive_instance *passive_instance = (struct passive_instance *)rrp_instance->rrp_algo_instance; rrp_instance->totemrrp_deliver_fn ( context, msg, msg_len); if (rrp_instance->totemrrp_msgs_missing() == 0 && passive_instance->timer_expired_token) { /* * Delivers the last token */ rrp_instance->totemrrp_deliver_fn ( passive_instance->totemrrp_context, passive_instance->token, passive_instance->token_len); passive_timer_expired_token_cancel (passive_instance); } passive_monitor (rrp_instance, iface_no, 0); } static void passive_mcast_flush_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len) { struct passive_instance *passive_instance = (struct passive_instance *)instance->rrp_algo_instance; int i = 0; do { passive_instance->msg_xmit_iface = (passive_instance->msg_xmit_iface + 1) % instance->interface_count; i++; } while ((i <= instance->interface_count) && (passive_instance->faulty[passive_instance->msg_xmit_iface] == 1)); if (i <= instance->interface_count) { totemnet_mcast_flush_send (instance->net_handles[passive_instance->msg_xmit_iface], msg, msg_len); } } static void passive_mcast_noflush_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len) { struct 
passive_instance *passive_instance = (struct passive_instance *)instance->rrp_algo_instance; int i = 0; do { passive_instance->msg_xmit_iface = (passive_instance->msg_xmit_iface + 1) % instance->interface_count; i++; } while ((i <= instance->interface_count) && (passive_instance->faulty[passive_instance->msg_xmit_iface] == 1)); if (i <= instance->interface_count) { totemnet_mcast_noflush_send (instance->net_handles[passive_instance->msg_xmit_iface], msg, msg_len); } } static void passive_token_recv ( struct totemrrp_instance *rrp_instance, unsigned int iface_no, void *context, const void *msg, unsigned int msg_len, unsigned int token_seq) { struct passive_instance *passive_instance = (struct passive_instance *)rrp_instance->rrp_algo_instance; passive_instance->totemrrp_context = context; // this should be in totemrrp_instance ? TODO if (rrp_instance->totemrrp_msgs_missing() == 0) { rrp_instance->totemrrp_deliver_fn ( context, msg, msg_len); } else { memcpy (passive_instance->token, msg, msg_len); passive_timer_expired_token_start (passive_instance); } passive_monitor (rrp_instance, iface_no, 1); } static void passive_token_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len) { struct passive_instance *passive_instance = (struct passive_instance *)instance->rrp_algo_instance; int i = 0; do { passive_instance->token_xmit_iface = (passive_instance->token_xmit_iface + 1) % instance->interface_count; i++; } while ((i <= instance->interface_count) && (passive_instance->faulty[passive_instance->token_xmit_iface] == 1)); if (i <= instance->interface_count) { totemnet_token_send ( instance->net_handles[passive_instance->token_xmit_iface], msg, msg_len); } } static void passive_recv_flush (struct totemrrp_instance *instance) { struct passive_instance *rrp_algo_instance = (struct passive_instance *)instance->rrp_algo_instance; unsigned int i; for (i = 0; i < instance->interface_count; i++) { if (rrp_algo_instance->faulty[i] == 0) { 
			totemnet_recv_flush (instance->net_handles[i]);
		}
	}
}

/*
 * Flush pending sends on every non-faulty ring.
 */
static void passive_send_flush (struct totemrrp_instance *instance)
{
	struct passive_instance *rrp_algo_instance = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int i;

	for (i = 0; i < instance->interface_count; i++) {
		if (rrp_algo_instance->faulty[i] == 0) {
			totemnet_send_flush (instance->net_handles[i]);
		}
	}
}

static void passive_iface_check (struct totemrrp_instance *instance)
{
	struct passive_instance *rrp_algo_instance = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int i;

	for (i = 0; i < instance->interface_count; i++) {
		if (rrp_algo_instance->faulty[i] == 0) {
			totemnet_iface_check (instance->net_handles[i]);
		}
	}
}

static void passive_processor_count_set (
	struct totemrrp_instance *instance,
	unsigned int processor_count)
{
	struct passive_instance *rrp_algo_instance = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int i;

	for (i = 0; i < instance->interface_count; i++) {
		if (rrp_algo_instance->faulty[i] == 0) {
			totemnet_processor_count_set (instance->net_handles[i],
				processor_count);
		}
	}
}

static void passive_token_target_set (
	struct totemrrp_instance *instance,
	struct totem_ip_address *token_target,
	unsigned int iface_no)
{
	totemnet_token_target_set (instance->net_handles[iface_no], token_target);
}

/*
 * Aggregate totemnet_recv_mcast_empty over all rings: returns -1 on the
 * first error, otherwise 1 if any ring returned 1, else 0.
 */
static int passive_mcast_recv_empty (
	struct totemrrp_instance *instance)
{
	int res;
	int msgs_emptied = 0;
	int i;

	for (i = 0; i < instance->interface_count; i++) {
		res = totemnet_recv_mcast_empty (instance->net_handles[i]);
		if (res == -1) {
			return (-1);
		}
		if (res == 1) {
			msgs_emptied = 1;
		}
	}

	return (msgs_emptied);
}

static int passive_member_add (
	struct totemrrp_instance *instance,
	const struct totem_ip_address *member,
	unsigned int iface_no)
{
	int res;
	res = totemnet_member_add (instance->net_handles[iface_no], member);
	return (res);
}

static int passive_member_remove (
	struct totemrrp_instance *instance,
	const struct totem_ip_address *member,
	unsigned
int iface_no) { int res; res = totemnet_member_remove (instance->net_handles[iface_no], member); return (res); } static void passive_ring_reenable ( struct totemrrp_instance *instance, unsigned int iface_no) { struct passive_instance *rrp_algo_instance = (struct passive_instance *)instance->rrp_algo_instance; int i; memset (rrp_algo_instance->mcast_recv_count, 0, sizeof (unsigned int) * instance->interface_count); memset (rrp_algo_instance->token_recv_count, 0, sizeof (unsigned int) * instance->interface_count); if (iface_no == instance->interface_count) { memset (rrp_algo_instance->faulty, 0, sizeof (unsigned int) * instance->interface_count); for (i = 0; i < instance->interface_count; i++) { stats_set_interface_faulty (instance, i, 0); } } else { rrp_algo_instance->faulty[iface_no] = 0; stats_set_interface_faulty (instance, iface_no, 0); } } /* * Active Replication Implementation */ void *active_instance_initialize ( struct totemrrp_instance *rrp_instance, int interface_count) { struct active_instance *instance; int i; instance = malloc (sizeof (struct active_instance)); if (instance == 0) { goto error_exit; } memset (instance, 0, sizeof (struct active_instance)); instance->faulty = malloc (sizeof (int) * interface_count); if (instance->faulty == 0) { free (instance); instance = 0; goto error_exit; } memset (instance->faulty, 0, sizeof (unsigned int) * interface_count); for (i = 0; i < interface_count; i++) { stats_set_interface_faulty (rrp_instance, i, 0); } instance->last_token_recv = malloc (sizeof (int) * interface_count); if (instance->last_token_recv == 0) { free (instance->faulty); free (instance); instance = 0; goto error_exit; } memset (instance->last_token_recv, 0, sizeof (unsigned int) * interface_count); instance->counter_problems = malloc (sizeof (int) * interface_count); if (instance->counter_problems == 0) { free (instance->last_token_recv); free (instance->faulty); free (instance); instance = 0; goto error_exit; } memset 
(instance->counter_problems, 0, sizeof (unsigned int) * interface_count); instance->timer_expired_token = 0; instance->timer_problem_decrementer = 0; instance->rrp_instance = rrp_instance; instance->last_token_seq = ARR_SEQNO_START_TOKEN - 1; error_exit: return ((void *)instance); } static void timer_function_active_problem_decrementer (void *context) { struct active_instance *active_instance = (struct active_instance *)context; struct totemrrp_instance *rrp_instance = active_instance->rrp_instance; unsigned int problem_found = 0; unsigned int i; for (i = 0; i < rrp_instance->interface_count; i++) { if (active_instance->counter_problems[i] > 0) { problem_found = 1; active_instance->counter_problems[i] -= 1; if (active_instance->counter_problems[i] == 0) { sprintf (rrp_instance->status[i], "ring %d active with no faults", i); } else { sprintf (rrp_instance->status[i], "Decrementing problem counter for iface %s to [%d of %d]", totemnet_iface_print (rrp_instance->net_handles[i]), active_instance->counter_problems[i], rrp_instance->totem_config->rrp_problem_count_threshold); } log_printf ( rrp_instance->totemrrp_log_level_warning, "%s", rrp_instance->status[i]); } } if (problem_found) { active_timer_problem_decrementer_start (active_instance); } else { active_instance->timer_problem_decrementer = 0; } } static void timer_function_active_token_expired (void *context) { struct active_instance *active_instance = (struct active_instance *)context; struct totemrrp_instance *rrp_instance = active_instance->rrp_instance; unsigned int i; for (i = 0; i < rrp_instance->interface_count; i++) { if (active_instance->last_token_recv[i] == 0) { active_instance->counter_problems[i] += 1; if (active_instance->timer_problem_decrementer == 0) { active_timer_problem_decrementer_start (active_instance); } sprintf (rrp_instance->status[i], "Incrementing problem counter for seqid %d iface %s to [%d of %d]", active_instance->last_token_seq, totemnet_iface_print (rrp_instance->net_handles[i]), 
active_instance->counter_problems[i], rrp_instance->totem_config->rrp_problem_count_threshold); log_printf ( rrp_instance->totemrrp_log_level_warning, "%s", rrp_instance->status[i]); } } for (i = 0; i < rrp_instance->interface_count; i++) { if (active_instance->counter_problems[i] >= rrp_instance->totem_config->rrp_problem_count_threshold && active_instance->faulty[i] == 0) { active_instance->faulty[i] = 1; qb_loop_timer_add (rrp_instance->poll_handle, QB_LOOP_MED, rrp_instance->totem_config->rrp_autorecovery_check_timeout*QB_TIME_NS_IN_MSEC, rrp_instance->deliver_fn_context[i], timer_function_test_ring_timeout, &rrp_instance->timer_active_test_ring_timeout[i]); stats_set_interface_faulty (rrp_instance, i, active_instance->faulty[i]); sprintf (rrp_instance->status[i], "Marking seqid %d ringid %u interface %s FAULTY", active_instance->last_token_seq, i, totemnet_iface_print (rrp_instance->net_handles[i])); log_printf ( rrp_instance->totemrrp_log_level_error, "%s", rrp_instance->status[i]); active_timer_problem_decrementer_cancel (active_instance); } } rrp_instance->totemrrp_deliver_fn ( active_instance->totemrrp_context, active_instance->token, active_instance->token_len); } static void active_timer_expired_token_start ( struct active_instance *active_instance) { qb_loop_timer_add ( active_instance->rrp_instance->poll_handle, QB_LOOP_MED, active_instance->rrp_instance->totem_config->rrp_token_expired_timeout*QB_TIME_NS_IN_MSEC, (void *)active_instance, timer_function_active_token_expired, &active_instance->timer_expired_token); } static void active_timer_expired_token_cancel ( struct active_instance *active_instance) { qb_loop_timer_del ( active_instance->rrp_instance->poll_handle, active_instance->timer_expired_token); } static void active_timer_problem_decrementer_start ( struct active_instance *active_instance) { qb_loop_timer_add ( active_instance->rrp_instance->poll_handle, QB_LOOP_MED, 
active_instance->rrp_instance->totem_config->rrp_problem_count_timeout*QB_TIME_NS_IN_MSEC, (void *)active_instance, timer_function_active_problem_decrementer, &active_instance->timer_problem_decrementer); } static void active_timer_problem_decrementer_cancel ( struct active_instance *active_instance) { qb_loop_timer_del ( active_instance->rrp_instance->poll_handle, active_instance->timer_problem_decrementer); } /* * active replication */ static void active_mcast_recv ( struct totemrrp_instance *instance, unsigned int iface_no, void *context, const void *msg, unsigned int msg_len) { instance->totemrrp_deliver_fn ( context, msg, msg_len); } static void active_mcast_flush_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len) { int i; struct active_instance *rrp_algo_instance = (struct active_instance *)instance->rrp_algo_instance; for (i = 0; i < instance->interface_count; i++) { if (rrp_algo_instance->faulty[i] == 0) { totemnet_mcast_flush_send (instance->net_handles[i], msg, msg_len); } } } static void active_mcast_noflush_send ( struct totemrrp_instance *instance, const void *msg, unsigned int msg_len) { int i; struct active_instance *rrp_algo_instance = (struct active_instance *)instance->rrp_algo_instance; for (i = 0; i < instance->interface_count; i++) { if (rrp_algo_instance->faulty[i] == 0) { totemnet_mcast_noflush_send (instance->net_handles[i], msg, msg_len); } } } static void active_token_recv ( struct totemrrp_instance *rrp_instance, unsigned int iface_no, void *context, const void *msg, unsigned int msg_len, unsigned int token_seq) { int i; struct active_instance *active_instance = (struct active_instance *)rrp_instance->rrp_algo_instance; active_instance->totemrrp_context = context; if (sq_lt_compare (active_instance->last_token_seq, token_seq)) { memcpy (active_instance->token, msg, msg_len); active_instance->token_len = msg_len; for (i = 0; i < rrp_instance->interface_count; i++) { active_instance->last_token_recv[i] = 0; } 
		active_instance->last_token_recv[iface_no] = 1;
		active_timer_expired_token_start (active_instance);
	}

	/*
	 * This doesn't follow spec because the spec assumes we will know
	 * when token resets occur.
	 */
	active_instance->last_token_seq = token_seq;

	/*
	 * NOTE(review): last_token_seq was assigned token_seq just above,
	 * so this comparison is always true as written - the branch body
	 * always runs. Presumably deliberate given the comment above, but
	 * worth confirming upstream.
	 */
	if (token_seq == active_instance->last_token_seq) {
		active_instance->last_token_recv[iface_no] = 1;

		for (i = 0; i < rrp_instance->interface_count; i++) {
			if ((active_instance->last_token_recv[i] == 0) &&
			    active_instance->faulty[i] == 0) {
				return; /* don't deliver token */
			}
		}
		active_timer_expired_token_cancel (active_instance);

		rrp_instance->totemrrp_deliver_fn (
			context,
			msg,
			msg_len);
	}
}

/*
 * Send the token on every interface that is not marked faulty.
 */
static void active_token_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	struct active_instance *rrp_algo_instance = (struct active_instance *)instance->rrp_algo_instance;
	int i;

	for (i = 0; i < instance->interface_count; i++) {
		if (rrp_algo_instance->faulty[i] == 0) {
			totemnet_token_send (
				instance->net_handles[i],
				msg, msg_len);
		}
	}
}

/*
 * Flush pending receives on every non-faulty interface.
 */
static void active_recv_flush (struct totemrrp_instance *instance)
{
	struct active_instance *rrp_algo_instance = (struct active_instance *)instance->rrp_algo_instance;
	unsigned int i;

	for (i = 0; i < instance->interface_count; i++) {
		if (rrp_algo_instance->faulty[i] == 0) {
			totemnet_recv_flush (instance->net_handles[i]);
		}
	}
}

/*
 * Flush pending sends on every non-faulty interface.
 */
static void active_send_flush (struct totemrrp_instance *instance)
{
	struct active_instance *rrp_algo_instance = (struct active_instance *)instance->rrp_algo_instance;
	unsigned int i;

	for (i = 0; i < instance->interface_count; i++) {
		if (rrp_algo_instance->faulty[i] == 0) {
			totemnet_send_flush (instance->net_handles[i]);
		}
	}
}

/*
 * Add a member address on one specific interface; returns the totemnet
 * result code unchanged.
 */
static int active_member_add (
	struct totemrrp_instance *instance,
	const struct totem_ip_address *member,
	unsigned int iface_no)
{
	int res;
	res = totemnet_member_add (instance->net_handles[iface_no], member);
	return (res);
}

/* Remove a member address on one interface (continues on next line). */
static int active_member_remove (
	struct totemrrp_instance *instance,
	const struct totem_ip_address
	*member,
	unsigned int iface_no)
{
	int res;
	res = totemnet_member_remove (instance->net_handles[iface_no], member);
	return (res);
}

/*
 * Run the interface liveness check on every non-faulty interface.
 */
static void active_iface_check (struct totemrrp_instance *instance)
{
	struct active_instance *rrp_algo_instance = (struct active_instance *)instance->rrp_algo_instance;
	unsigned int i;

	for (i = 0; i < instance->interface_count; i++) {
		if (rrp_algo_instance->faulty[i] == 0) {
			totemnet_iface_check (instance->net_handles[i]);
		}
	}
}

/*
 * Propagate the processor count to every non-faulty interface.
 */
static void active_processor_count_set (
	struct totemrrp_instance *instance,
	unsigned int processor_count)
{
	struct active_instance *rrp_algo_instance = (struct active_instance *)instance->rrp_algo_instance;
	unsigned int i;

	for (i = 0; i < instance->interface_count; i++) {
		if (rrp_algo_instance->faulty[i] == 0) {
			totemnet_processor_count_set (instance->net_handles[i],
				processor_count);
		}
	}
}

/*
 * Set the token target address on one interface (faulty or not).
 */
static void active_token_target_set (
	struct totemrrp_instance *instance,
	struct totem_ip_address *token_target,
	unsigned int iface_no)
{
	totemnet_token_target_set (instance->net_handles[iface_no], token_target);
}

/*
 * Drain queued multicasts on all interfaces.
 * Returns -1 on the first interface error, 1 if any interface had
 * messages to drain, otherwise 0.
 * NOTE(review): return semantics inferred from how
 * totemnet_recv_mcast_empty is used here - confirm against totemnet.
 */
static int active_mcast_recv_empty (
	struct totemrrp_instance *instance)
{
	int res;
	int msgs_emptied = 0;
	int i;

	for (i = 0; i < instance->interface_count; i++) {
		res = totemnet_recv_mcast_empty (instance->net_handles[i]);
		if (res == -1) {
			return (-1);
		}
		if (res == 1) {
			msgs_emptied = 1;
		}
	}
	return (msgs_emptied);
}

/*
 * Re-enable one ring (iface_no), or every ring when iface_no equals
 * interface_count: clear token-received flags, faulty flags and problem
 * counters, and reset the faulty statistic (continues on next line).
 */
static void active_ring_reenable (
	struct totemrrp_instance *instance,
	unsigned int iface_no)
{
	struct active_instance *rrp_algo_instance = (struct active_instance *)instance->rrp_algo_instance;
	int i;

	if (iface_no == instance->interface_count) {
		memset (rrp_algo_instance->last_token_recv, 0,
			sizeof (unsigned int) * instance->interface_count);
		memset (rrp_algo_instance->faulty, 0,
			sizeof (unsigned int) * instance->interface_count);
		memset (rrp_algo_instance->counter_problems, 0,
			sizeof (unsigned int) * instance->interface_count);

		for (i = 0; i < instance->interface_count; i++) {
			stats_set_interface_faulty (instance, i, 0);
		}
	} else {
		rrp_algo_instance->last_token_recv[iface_no] = 0;
		rrp_algo_instance->faulty[iface_no] = 0;
		rrp_algo_instance->counter_problems[iface_no] = 0;

		stats_set_interface_faulty (instance, iface_no, 0);
	}
}

/*
 * Zero the whole instance before use.
 */
static void totemrrp_instance_initialize (struct totemrrp_instance *instance)
{
	memset (instance, 0, sizeof (struct totemrrp_instance));
}

/*
 * Select the rrp algorithm named by totem_config->rrp_mode, run its
 * initializer, and allocate the per-ring status strings.
 * Returns 0 when a matching algorithm was found, (unsigned)-1 otherwise.
 * NOTE(review): each malloc(1024) below is unchecked - a failure would
 * make the following sprintf write through a NULL pointer. The status
 * strings are allocated even when no algorithm matched.
 */
static int totemrrp_algorithm_set (
	struct totem_config *totem_config,
	struct totemrrp_instance *instance)
{
	unsigned int res = -1;
	unsigned int i;

	for (i = 0; i < RRP_ALGOS_COUNT; i++) {
		if (strcmp (totem_config->rrp_mode, rrp_algos[i]->name) == 0) {
			instance->rrp_algo = rrp_algos[i];
			if (rrp_algos[i]->initialize) {
				instance->rrp_algo_instance = rrp_algos[i]->initialize (
					instance,
					totem_config->interface_count);
			}
			res = 0;
			break;
		}
	}
	for (i = 0; i < totem_config->interface_count; i++) {
		instance->status[i] = malloc (1024);
		sprintf (instance->status[i], "ring %d active with no faults", i);
	}

	return (res);
}

/*
 * Central deliver callback handed to totemnet: handles ring-test
 * control messages itself and routes tokens / multicasts to the
 * configured rrp algorithm (function continues on the following lines).
 */
void rrp_deliver_fn (
	void *context,
	const void *msg,
	unsigned int msg_len)
{
	unsigned int token_seqid;
	unsigned int token_is;

	struct deliver_fn_context *deliver_fn_context = (struct deliver_fn_context *)context;
	struct totemrrp_instance *rrp_instance = deliver_fn_context->instance;
	const struct message_header *hdr = msg;
	struct message_header tmp_msg, activate_msg;

	memset(&tmp_msg, 0, sizeof(struct message_header));
	memset(&activate_msg, 0, sizeof(struct message_header));

	rrp_instance->totemrrp_token_seqid_get (
		msg,
		&token_seqid,
		&token_is);

	if (hdr->type == MESSAGE_TYPE_RING_TEST_ACTIVE) {
		log_printf (
			rrp_instance->totemrrp_log_level_debug,
			"received message requesting test of ring now active");

		/* convert foreign-endian headers before inspecting fields */
		if (hdr->endian_detector != ENDIAN_LOCAL) {
			test_active_msg_endian_convert(hdr, &tmp_msg);
			hdr = &tmp_msg;
		}

		if (hdr->nodeid_activator == rrp_instance->my_nodeid) {
			/*
			 * Send an activate message
			 */
			activate_msg.type = MESSAGE_TYPE_RING_TEST_ACTIVATE;
			activate_msg.endian_detector =
			    ENDIAN_LOCAL;
			activate_msg.ring_number = hdr->ring_number;
			activate_msg.nodeid_activator = rrp_instance->my_nodeid;
			totemnet_token_send (
				rrp_instance->net_handles[deliver_fn_context->iface_no],
				&activate_msg, sizeof (struct message_header));
		} else {
			/*
			 * Send a ring test message
			 */
			totemnet_token_send (
				rrp_instance->net_handles[deliver_fn_context->iface_no],
				msg, msg_len);
		}
	} else
	if (hdr->type == MESSAGE_TYPE_RING_TEST_ACTIVATE) {
		log_printf (
			rrp_instance->totemrrp_log_level_notice,
			"Automatically recovered ring %d", hdr->ring_number);

		if (hdr->endian_detector != ENDIAN_LOCAL) {
			test_active_msg_endian_convert(hdr, &tmp_msg);
			hdr = &tmp_msg;
		}

		totemrrp_ring_reenable (rrp_instance, deliver_fn_context->iface_no);

		/* forward the activate message unless we originated it */
		if (hdr->nodeid_activator != rrp_instance->my_nodeid) {
			totemnet_token_send (
				rrp_instance->net_handles[deliver_fn_context->iface_no],
				msg, msg_len);
		}
	} else
	if (token_is) {
		/*
		 * Deliver to the token receiver for this rrp algorithm
		 */
		rrp_instance->rrp_algo->token_recv (
			rrp_instance,
			deliver_fn_context->iface_no,
			deliver_fn_context->context,
			msg,
			msg_len,
			token_seqid);
	} else {
		/*
		 * Deliver to the mcast receiver for this rrp algorithm
		 */
		rrp_instance->rrp_algo->mcast_recv (
			rrp_instance,
			deliver_fn_context->iface_no,
			deliver_fn_context->context,
			msg,
			msg_len);
	}
}

/*
 * Interface-change callback from totemnet: record our nodeid and
 * forward the event (with the interface index) to the upper layer.
 */
void rrp_iface_change_fn (
	void *context,
	const struct totem_ip_address *iface_addr)
{
	struct deliver_fn_context *deliver_fn_context = (struct deliver_fn_context *)context;

	deliver_fn_context->instance->my_nodeid = iface_addr->nodeid;
	deliver_fn_context->instance->totemrrp_iface_change_fn (
		deliver_fn_context->context,
		iface_addr,
		deliver_fn_context->iface_no);
}

/*
 * Finalize every underlying totemnet handle, then free the handle
 * array and the instance itself.
 * NOTE(review): the per-interface deliver_fn_context allocations made
 * in totemrrp_initialize are not freed here - confirm whether that
 * shutdown-time leak is intentional.
 */
int totemrrp_finalize (
	void *rrp_context)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;
	int i;

	for (i = 0; i < instance->interface_count; i++) {
		totemnet_finalize (instance->net_handles[i]);
	}
	free (instance->net_handles);
	free (instance);

	return (0);
}

/* Target-set-completed trampoline (continues on the next line). */
static void rrp_target_set_completed (void
	*context)
{
	struct deliver_fn_context *deliver_fn_context = (struct deliver_fn_context *)context;

	deliver_fn_context->instance->totemrrp_target_set_completed (deliver_fn_context->context);
}

/*
 * Totem Redundant Ring interface
 * depends on poll abstraction, POSIX, IPV4
 */

/*
 * Create an instance
 */
int totemrrp_initialize (
	qb_loop_t *poll_handle,
	void **rrp_context,
	struct totem_config *totem_config,
	totemsrp_stats_t *stats,
	void *context,

	void (*deliver_fn) (
		void *context,
		const void *msg,
		unsigned int msg_len),

	void (*iface_change_fn) (
		void *context,
		const struct totem_ip_address *iface_addr,
		unsigned int iface_no),

	void (*token_seqid_get) (
		const void *msg,
		unsigned int *seqid,
		unsigned int *token_is),

	unsigned int (*msgs_missing) (void),

	void (*target_set_completed) (void *context))
{
	struct totemrrp_instance *instance;
	unsigned int res;
	int i;

	instance = malloc (sizeof (struct totemrrp_instance));
	if (instance == 0) {
		return (-1);
	}

	totemrrp_instance_initialize (instance);

	instance->totem_config = totem_config;
	stats->rrp = &instance->stats;
	/*
	 * NOTE(review): interface stats are only allocated for redundant
	 * configurations (interface_count > 1); the calloc result is
	 * unchecked here.
	 */
	if (totem_config->interface_count > 1) {
		instance->stats.interface_count = totem_config->interface_count;
		instance->stats.faulty = calloc(instance->stats.interface_count, sizeof(uint8_t));
	}

	/* res is unsigned; (unsigned)-1 from totemrrp_algorithm_set still
	 * matches the == -1 comparison below via the usual conversion. */
	res = totemrrp_algorithm_set (
		instance->totem_config,
		instance);
	if (res == -1) {
		goto error_destroy;
	}

	/*
	 * Configure logging
	 */
	instance->totemrrp_log_level_security = totem_config->totem_logging_configuration.log_level_security;
	instance->totemrrp_log_level_error = totem_config->totem_logging_configuration.log_level_error;
	instance->totemrrp_log_level_warning = totem_config->totem_logging_configuration.log_level_warning;
	instance->totemrrp_log_level_notice = totem_config->totem_logging_configuration.log_level_notice;
	instance->totemrrp_log_level_debug = totem_config->totem_logging_configuration.log_level_debug;
	instance->totemrrp_subsys_id = totem_config->totem_logging_configuration.log_subsys_id;
	instance->totemrrp_log_printf =
totem_config->totem_logging_configuration.log_printf; instance->interfaces = totem_config->interfaces; instance->poll_handle = poll_handle; instance->totemrrp_deliver_fn = deliver_fn; instance->totemrrp_iface_change_fn = iface_change_fn; instance->totemrrp_token_seqid_get = token_seqid_get; instance->totemrrp_target_set_completed = target_set_completed; instance->totemrrp_msgs_missing = msgs_missing; instance->interface_count = totem_config->interface_count; instance->net_handles = malloc (sizeof (void *) * totem_config->interface_count); instance->context = context; instance->poll_handle = poll_handle; for (i = 0; i < totem_config->interface_count; i++) { struct deliver_fn_context *deliver_fn_context; deliver_fn_context = malloc (sizeof (struct deliver_fn_context)); assert (deliver_fn_context); deliver_fn_context->instance = instance; deliver_fn_context->context = context; deliver_fn_context->iface_no = i; instance->deliver_fn_context[i] = (void *)deliver_fn_context; totemnet_initialize ( poll_handle, &instance->net_handles[i], totem_config, i, (void *)deliver_fn_context, rrp_deliver_fn, rrp_iface_change_fn, rrp_target_set_completed); totemnet_net_mtu_adjust (instance->net_handles[i], totem_config); } *rrp_context = instance; return (0); error_destroy: free (instance); return (res); } void *totemrrp_buffer_alloc (void *rrp_context) { struct totemrrp_instance *instance = rrp_context; assert (instance != NULL); return totemnet_buffer_alloc (instance->net_handles[0]); } void totemrrp_buffer_release (void *rrp_context, void *ptr) { struct totemrrp_instance *instance = rrp_context; assert (instance != NULL); totemnet_buffer_release (instance->net_handles[0], ptr); } int totemrrp_processor_count_set ( void *rrp_context, unsigned int processor_count) { struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context; instance->rrp_algo->processor_count_set (instance, processor_count); instance->processor_count = processor_count; return (0); } int 
/*
 * Set the token target address for one ring via the configured
 * rrp algorithm. Always returns 0.
 */
totemrrp_token_target_set (
	void *rrp_context,
	struct totem_ip_address *addr,
	unsigned int iface_no)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;

	instance->rrp_algo->token_target_set (instance, addr, iface_no);

	return (0);
}

/*
 * Flush receive queues on all non-faulty rings.
 */
int totemrrp_recv_flush (void *rrp_context)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;

	instance->rrp_algo->recv_flush (instance);

	return (0);
}

/*
 * Flush send queues on all non-faulty rings.
 */
int totemrrp_send_flush (void *rrp_context)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;

	instance->rrp_algo->send_flush (instance);

	return (0);
}

/*
 * Send the token through the configured rrp algorithm.
 */
int totemrrp_token_send (
	void *rrp_context,
	const void *msg,
	unsigned int msg_len)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;

	instance->rrp_algo->token_send (instance, msg, msg_len);

	return (0);
}

/*
 * Multicast a message with flushing; currently always returns 0
 * regardless of the underlying send result (see TODO).
 */
int totemrrp_mcast_flush_send (
	void *rrp_context,
	const void *msg,
	unsigned int msg_len)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;
	int res = 0;

	// TODO this needs to return the result
	instance->rrp_algo->mcast_flush_send (instance, msg, msg_len);

	return (res);
}

/*
 * Multicast a message without flushing; sending is skipped entirely on
 * a single-processor ring (function continues on the next line).
 */
int totemrrp_mcast_noflush_send (
	void *rrp_context,
	const void *msg,
	unsigned int msg_len)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;

	/*
	 * merge detects go out through mcast_flush_send so it is safe to
	 * flush these messages if we are only one processor.
	 * This avoids
	 * an encryption/hmac and decryption/hmac
	 */
	if (instance->processor_count > 1) {
		// TODO this needs to return the result
		instance->rrp_algo->mcast_noflush_send (instance, msg, msg_len);
	}

	return (0);
}

/*
 * Run the interface liveness checks via the configured rrp algorithm.
 */
int totemrrp_iface_check (void *rrp_context)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;

	instance->rrp_algo->iface_check (instance);

	return (0);
}

/*
 * Expose the per-ring status strings and (optionally) the ring count.
 */
int totemrrp_ifaces_get (
	void *rrp_context,
	char ***status,
	unsigned int *iface_count)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;

	*status = instance->status;

	if (iface_count) {
		*iface_count = instance->interface_count;
	}

	return (0);
}

/*
 * NOTE(review): the span below is an embedded unified-diff hunk
 * ('-' = removed lines, '+' = added lines) switching totemrrp_crypto_set
 * from a single numeric crypto type to separate cipher/hash name
 * strings; the hunk is reproduced verbatim.
 */
int totemrrp_crypto_set (
	void *rrp_context,
-	unsigned int type)
+	const char *cipher_type,
+	const char *hash_type)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;
	int res;

-	res = totemnet_crypto_set(instance->net_handles[0], type);
+	res = totemnet_crypto_set(instance->net_handles[0], cipher_type, hash_type);

	return (res);
}

/*
 * iface_no indicates the interface number [0, ..., interface_count-1] of the
 * specific ring which will be reenabled. We specify iface_no == interface_count
 * means reenabling all the rings.
 */
int totemrrp_ring_reenable (
	void *rrp_context,
	unsigned int iface_no)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;
	int res = 0;
	unsigned int i;

	instance->rrp_algo->ring_reenable (instance, iface_no);

	/* reset the human-readable status line(s) for the reenabled ring(s) */
	if (iface_no == instance->interface_count) {
		for (i = 0; i < instance->interface_count; i++) {
			sprintf (instance->status[i], "ring %d active with no faults", i);
		}
	} else {
		sprintf (instance->status[iface_no], "ring %d active with no faults", iface_no);
	}

	return (res);
}

/*
 * Drain pending multicasts; the result comes from the algorithm's
 * mcast_recv_empty handler.
 */
extern int totemrrp_mcast_recv_empty (
	void *rrp_context)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;
	int res;

	res = instance->rrp_algo->mcast_recv_empty (instance);

	return (res);
}

/*
 * Add a member on one ring via the algorithm's member_add handler.
 */
int totemrrp_member_add (
	void *rrp_context,
	const struct totem_ip_address *member,
	int iface_no)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;
	int res;

	res = instance->rrp_algo->member_add (instance, member, iface_no);

	return (res);
}

/*
 * Remove a member on one ring via the algorithm's member_remove handler.
 */
int totemrrp_member_remove (
	void *rrp_context,
	const struct totem_ip_address *member,
	int iface_no)
{
	struct totemrrp_instance *instance = (struct totemrrp_instance *)rrp_context;
	int res;

	res = instance->rrp_algo->member_remove (instance, member, iface_no);

	return (res);
}
diff --git a/exec/totemrrp.h b/exec/totemrrp.h
index 4416eab5..db32cdaf 100644
--- a/exec/totemrrp.h
+++ b/exec/totemrrp.h
@@ -1,152 +1,153 @@
/*
 * Copyright (c) 2005 MontaVista Software, Inc.
 * Copyright (c) 2006-2007, 2009 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** * @file * Totem Network interface - also does encryption/decryption * * depends on poll abstraction, POSIX, IPV4 */ #ifndef TOTEMRRP_H_DEFINED #define TOTEMRRP_H_DEFINED #include #include #include #include #define TOTEMRRP_NOFLUSH 0 #define TOTEMRRP_FLUSH 1 /** * Create an instance */ extern int totemrrp_initialize ( qb_loop_t *poll_handle, void **rrp_context, struct totem_config *totem_config, totemsrp_stats_t *stats, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_addr, unsigned int iface_no), void (*token_seqid_get) ( const void *msg, unsigned int *seqid, unsigned int *token_is), unsigned int (*msgs_missing) (void), void (*target_set_completed) ( void *context) ); extern void *totemrrp_buffer_alloc ( void *rrp_context); extern void totemrrp_buffer_release ( void *rrp_context, void *ptr); extern int totemrrp_processor_count_set ( void *rrp_context, unsigned int processor_count); extern int totemrrp_token_send ( void *rrp_context, const void *msg, unsigned int msg_len); extern int totemrrp_mcast_noflush_send ( void *rrp_context, const void *msg, unsigned int msg_len); extern int totemrrp_mcast_flush_send ( void *rrp_context, const void *msg, unsigned int msg_len); extern int totemrrp_recv_flush ( void *rrp_context); extern int totemrrp_send_flush ( void *rrp_context); extern int totemrrp_token_target_set ( void *rrp_context, struct totem_ip_address *target, unsigned int iface_no); extern int totemrrp_iface_check (void *rrp_context); extern int totemrrp_finalize (void *rrp_context); extern int totemrrp_ifaces_get ( void *rrp_context, char ***status, unsigned int *iface_count); extern int totemrrp_crypto_set ( void *rrp_context, - unsigned int type); + const char *cipher_type, + const char *hash_type); extern int totemrrp_ring_reenable ( void *rrp_context, unsigned int iface_no); extern int totemrrp_mcast_recv_empty ( void *rrp_context); 
extern int totemrrp_member_add ( void *net_context, const struct totem_ip_address *member, int iface_no); extern int totemrrp_member_remove ( void *net_context, const struct totem_ip_address *member, int iface_no); #endif /* TOTEMRRP_H_DEFINED */ diff --git a/exec/totemsrp.c b/exec/totemsrp.c index 6a16db8e..c262d981 100644 --- a/exec/totemsrp.c +++ b/exec/totemsrp.c @@ -1,4539 +1,4540 @@ /* * Copyright (c) 2003-2006 MontaVista Software, Inc. * Copyright (c) 2006-2009 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * The first version of this code was based upon Yair Amir's PhD thesis: * http://www.cs.jhu.edu/~yairamir/phd.ps) (ch4,5). * * The current version of totemsrp implements the Totem protocol specified in: * http://citeseer.ist.psu.edu/amir95totem.html * * The deviations from the above published protocols are: * - encryption of message contents with nss * - authentication of meessage contents with SHA1/HMAC * - token hold mode where token doesn't rotate on unused ring - reduces cpu * usage on 1.6ghz xeon from 35% to less then .1 % as measured by top */ #include #include #ifdef HAVE_ALLOCA_H #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LOGSYS_UTILS_ONLY 1 #include #include "totemsrp.h" #include "totemrrp.h" #include "totemnet.h" #include "cs_queue.h" #define LOCALHOST_IP inet_addr("127.0.0.1") #define QUEUE_RTR_ITEMS_SIZE_MAX 16384 /* allow 16384 retransmit items */ #define RETRANS_MESSAGE_QUEUE_SIZE_MAX 16384 /* allow 500 messages to be queued */ #define RECEIVED_MESSAGE_QUEUE_SIZE_MAX 500 /* allow 500 messages to be queued */ #define MAXIOVS 5 #define RETRANSMIT_ENTRIES_MAX 30 #define TOKEN_SIZE_MAX 64000 /* bytes */ #define LEAVE_DUMMY_NODEID 0 /* * Rollover handling: * SEQNO_START_MSG is the starting sequence number after a new 
configuration * This should remain zero, unless testing overflow in which case * 0x7ffff000 and 0xfffff000 are good starting values. * * SEQNO_START_TOKEN is the starting sequence number after a new configuration * for a token. This should remain zero, unless testing overflow in which * case 07fffff00 or 0xffffff00 are good starting values. */ #define SEQNO_START_MSG 0x0 #define SEQNO_START_TOKEN 0x0 /* * These can be used ot test different rollover points * #define SEQNO_START_MSG 0xfffffe00 * #define SEQNO_START_TOKEN 0xfffffe00 */ /* * These can be used to test the error recovery algorithms * #define TEST_DROP_ORF_TOKEN_PERCENTAGE 30 * #define TEST_DROP_COMMIT_TOKEN_PERCENTAGE 30 * #define TEST_DROP_MCAST_PERCENTAGE 50 * #define TEST_RECOVERY_MSG_COUNT 300 */ /* * we compare incoming messages to determine if their endian is * different - if so convert them * * do not change */ #define ENDIAN_LOCAL 0xff22 enum message_type { MESSAGE_TYPE_ORF_TOKEN = 0, /* Ordering, Reliability, Flow (ORF) control Token */ MESSAGE_TYPE_MCAST = 1, /* ring ordered multicast message */ MESSAGE_TYPE_MEMB_MERGE_DETECT = 2, /* merge rings if there are available rings */ MESSAGE_TYPE_MEMB_JOIN = 3, /* membership join message */ MESSAGE_TYPE_MEMB_COMMIT_TOKEN = 4, /* membership commit token */ MESSAGE_TYPE_TOKEN_HOLD_CANCEL = 5, /* cancel the holding of the token */ }; enum encapsulation_type { MESSAGE_ENCAPSULATED = 1, MESSAGE_NOT_ENCAPSULATED = 2 }; /* * New membership algorithm local variables */ struct srp_addr { struct totem_ip_address addr[INTERFACE_MAX]; }; struct consensus_list_item { struct srp_addr addr; int set; }; struct token_callback_instance { struct list_head list; int (*callback_fn) (enum totem_callback_token_type type, const void *); enum totem_callback_token_type callback_type; int delete; void *data; }; struct totemsrp_socket { int mcast; int token; }; struct message_header { char type; char encapsulated; unsigned short endian_detector; unsigned int nodeid; } 
__attribute__((packed)); struct mcast { struct message_header header; struct srp_addr system_from; unsigned int seq; int this_seqno; struct memb_ring_id ring_id; unsigned int node_id; int guarantee; } __attribute__((packed)); struct rtr_item { struct memb_ring_id ring_id; unsigned int seq; }__attribute__((packed)); struct orf_token { struct message_header header; unsigned int seq; unsigned int token_seq; unsigned int aru; unsigned int aru_addr; struct memb_ring_id ring_id; unsigned int backlog; unsigned int fcc; int retrans_flg; int rtr_list_entries; struct rtr_item rtr_list[0]; }__attribute__((packed)); struct memb_join { struct message_header header; struct srp_addr system_from; unsigned int proc_list_entries; unsigned int failed_list_entries; unsigned long long ring_seq; unsigned char end_of_memb_join[0]; /* * These parts of the data structure are dynamic: * struct srp_addr proc_list[]; * struct srp_addr failed_list[]; */ } __attribute__((packed)); struct memb_merge_detect { struct message_header header; struct srp_addr system_from; struct memb_ring_id ring_id; } __attribute__((packed)); struct token_hold_cancel { struct message_header header; struct memb_ring_id ring_id; } __attribute__((packed)); struct memb_commit_token_memb_entry { struct memb_ring_id ring_id; unsigned int aru; unsigned int high_delivered; unsigned int received_flg; }__attribute__((packed)); struct memb_commit_token { struct message_header header; unsigned int token_seq; struct memb_ring_id ring_id; unsigned int retrans_flg; int memb_index; int addr_entries; unsigned char end_of_commit_token[0]; /* * These parts of the data structure are dynamic: * * struct srp_addr addr[PROCESSOR_COUNT_MAX]; * struct memb_commit_token_memb_entry memb_list[PROCESSOR_COUNT_MAX]; */ }__attribute__((packed)); struct message_item { struct mcast *mcast; unsigned int msg_len; }; struct sort_queue_item { struct mcast *mcast; unsigned int msg_len; }; struct orf_token_mcast_thread_state { char iobuf[9000]; }; enum 
memb_state { MEMB_STATE_OPERATIONAL = 1, MEMB_STATE_GATHER = 2, MEMB_STATE_COMMIT = 3, MEMB_STATE_RECOVERY = 4 }; struct totemsrp_instance { int iface_changes; int failed_to_recv; /* * Flow control mcasts and remcasts on last and current orf_token */ int fcc_remcast_last; int fcc_mcast_last; int fcc_remcast_current; struct consensus_list_item consensus_list[PROCESSOR_COUNT_MAX]; int consensus_list_entries; struct srp_addr my_id; struct srp_addr my_proc_list[PROCESSOR_COUNT_MAX]; struct srp_addr my_failed_list[PROCESSOR_COUNT_MAX]; struct srp_addr my_new_memb_list[PROCESSOR_COUNT_MAX]; struct srp_addr my_trans_memb_list[PROCESSOR_COUNT_MAX]; struct srp_addr my_memb_list[PROCESSOR_COUNT_MAX]; struct srp_addr my_deliver_memb_list[PROCESSOR_COUNT_MAX]; struct srp_addr my_left_memb_list[PROCESSOR_COUNT_MAX]; int my_proc_list_entries; int my_failed_list_entries; int my_new_memb_entries; int my_trans_memb_entries; int my_memb_entries; int my_deliver_memb_entries; int my_left_memb_entries; struct memb_ring_id my_ring_id; struct memb_ring_id my_old_ring_id; int my_aru_count; int my_merge_detect_timeout_outstanding; unsigned int my_last_aru; int my_seq_unchanged; int my_received_flg; unsigned int my_high_seq_received; unsigned int my_install_seq; int my_rotation_counter; int my_set_retrans_flg; int my_retrans_flg_count; unsigned int my_high_ring_delivered; int heartbeat_timeout; /* * Queues used to order, deliver, and recover messages */ struct cs_queue new_message_queue; struct cs_queue retrans_message_queue; struct sq regular_sort_queue; struct sq recovery_sort_queue; /* * Received up to and including */ unsigned int my_aru; unsigned int my_high_delivered; struct list_head token_callback_received_listhead; struct list_head token_callback_sent_listhead; char orf_token_retransmit[TOKEN_SIZE_MAX]; int orf_token_retransmit_size; unsigned int my_token_seq; /* * Timers */ qb_loop_timer_handle timer_pause_timeout; qb_loop_timer_handle timer_orf_token_timeout; qb_loop_timer_handle 
timer_orf_token_retransmit_timeout; qb_loop_timer_handle timer_orf_token_hold_retransmit_timeout; qb_loop_timer_handle timer_merge_detect_timeout; qb_loop_timer_handle memb_timer_state_gather_join_timeout; qb_loop_timer_handle memb_timer_state_gather_consensus_timeout; qb_loop_timer_handle memb_timer_state_commit_timeout; qb_loop_timer_handle timer_heartbeat_timeout; /* * Function and data used to log messages */ int totemsrp_log_level_security; int totemsrp_log_level_error; int totemsrp_log_level_warning; int totemsrp_log_level_notice; int totemsrp_log_level_debug; int totemsrp_subsys_id; void (*totemsrp_log_printf) ( int level, int sybsys, const char *function, const char *file, int line, const char *format, ...)__attribute__((format(printf, 6, 7)));; enum memb_state memb_state; //TODO struct srp_addr next_memb; qb_loop_t *totemsrp_poll_handle; struct totem_ip_address mcast_address; void (*totemsrp_deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required); void (*totemsrp_confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id); void (*totemsrp_service_ready_fn) (void); int global_seqno; int my_token_held; unsigned long long token_ring_id_seq; unsigned int last_released; unsigned int set_aru; int old_ring_state_saved; int old_ring_state_aru; unsigned int old_ring_state_high_seq_received; unsigned int my_last_seq; struct timeval tv_old; void *totemrrp_context; struct totem_config *totem_config; unsigned int use_heartbeat; unsigned int my_trc; unsigned int my_pbl; unsigned int my_cbl; uint64_t pause_timestamp; struct memb_commit_token *commit_token; totemsrp_stats_t stats; uint32_t orf_token_discard; uint32_t threaded_mode_enabled; void * token_recv_event_handle; void * token_sent_event_handle; char 
commit_token_storage[40000]; }; struct message_handlers { int count; int (*handler_functions[6]) ( struct totemsrp_instance *instance, const void *msg, size_t msg_len, int endian_conversion_needed); }; /* * forward decls */ static int message_handler_orf_token ( struct totemsrp_instance *instance, const void *msg, size_t msg_len, int endian_conversion_needed); static int message_handler_mcast ( struct totemsrp_instance *instance, const void *msg, size_t msg_len, int endian_conversion_needed); static int message_handler_memb_merge_detect ( struct totemsrp_instance *instance, const void *msg, size_t msg_len, int endian_conversion_needed); static int message_handler_memb_join ( struct totemsrp_instance *instance, const void *msg, size_t msg_len, int endian_conversion_needed); static int message_handler_memb_commit_token ( struct totemsrp_instance *instance, const void *msg, size_t msg_len, int endian_conversion_needed); static int message_handler_token_hold_cancel ( struct totemsrp_instance *instance, const void *msg, size_t msg_len, int endian_conversion_needed); static void totemsrp_instance_initialize (struct totemsrp_instance *instance); static unsigned int main_msgs_missing (void); static void main_token_seqid_get ( const void *msg, unsigned int *seqid, unsigned int *token_is); static void srp_addr_copy (struct srp_addr *dest, const struct srp_addr *src); static void srp_addr_to_nodeid ( unsigned int *nodeid_out, struct srp_addr *srp_addr_in, unsigned int entries); static int srp_addr_equal (const struct srp_addr *a, const struct srp_addr *b); static void memb_leave_message_send (struct totemsrp_instance *instance); static void memb_ring_id_create_or_load (struct totemsrp_instance *, struct memb_ring_id *); static void token_callbacks_execute (struct totemsrp_instance *instance, enum totem_callback_token_type type); static void memb_state_gather_enter (struct totemsrp_instance *instance, int gather_from); static void messages_deliver_to_app (struct 
totemsrp_instance *instance, int skip, unsigned int end_point); static int orf_token_mcast (struct totemsrp_instance *instance, struct orf_token *oken, int fcc_mcasts_allowed); static void messages_free (struct totemsrp_instance *instance, unsigned int token_aru); static void memb_ring_id_set_and_store (struct totemsrp_instance *instance, const struct memb_ring_id *ring_id); static void target_set_completed (void *context); static void memb_state_commit_token_update (struct totemsrp_instance *instance); static void memb_state_commit_token_target_set (struct totemsrp_instance *instance); static int memb_state_commit_token_send (struct totemsrp_instance *instance); static int memb_state_commit_token_send_recovery (struct totemsrp_instance *instance, struct memb_commit_token *memb_commit_token); static void memb_state_commit_token_create (struct totemsrp_instance *instance); static int token_hold_cancel_send (struct totemsrp_instance *instance); static void orf_token_endian_convert (const struct orf_token *in, struct orf_token *out); static void memb_commit_token_endian_convert (const struct memb_commit_token *in, struct memb_commit_token *out); static void memb_join_endian_convert (const struct memb_join *in, struct memb_join *out); static void mcast_endian_convert (const struct mcast *in, struct mcast *out); static void memb_merge_detect_endian_convert ( const struct memb_merge_detect *in, struct memb_merge_detect *out); static void srp_addr_copy_endian_convert (struct srp_addr *out, const struct srp_addr *in); static void timer_function_orf_token_timeout (void *data); static void timer_function_pause_timeout (void *data); static void timer_function_heartbeat_timeout (void *data); static void timer_function_token_retransmit_timeout (void *data); static void timer_function_token_hold_retransmit_timeout (void *data); static void timer_function_merge_detect_timeout (void *data); static void *totemsrp_buffer_alloc (struct totemsrp_instance *instance); static void 
totemsrp_buffer_release (struct totemsrp_instance *instance, void *ptr); void main_deliver_fn ( void *context, const void *msg, unsigned int msg_len); void main_iface_change_fn ( void *context, const struct totem_ip_address *iface_address, unsigned int iface_no); struct message_handlers totemsrp_message_handlers = { 6, { message_handler_orf_token, /* MESSAGE_TYPE_ORF_TOKEN */ message_handler_mcast, /* MESSAGE_TYPE_MCAST */ message_handler_memb_merge_detect, /* MESSAGE_TYPE_MEMB_MERGE_DETECT */ message_handler_memb_join, /* MESSAGE_TYPE_MEMB_JOIN */ message_handler_memb_commit_token, /* MESSAGE_TYPE_MEMB_COMMIT_TOKEN */ message_handler_token_hold_cancel /* MESSAGE_TYPE_TOKEN_HOLD_CANCEL */ } }; static const char *rundir = NULL; #define log_printf(level, format, args...) \ do { \ instance->totemsrp_log_printf ( \ level, instance->totemsrp_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ format, ##args); \ } while (0); #define LOGSYS_PERROR(err_num, level, fmt, args...) \ do { \ char _error_str[LOGSYS_MAX_PERROR_MSG_LEN]; \ const char *_error_ptr = qb_strerror_r(err_num, _error_str, sizeof(_error_str)); \ instance->totemsrp_log_printf ( \ level, instance->totemsrp_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ fmt ": %s (%d)\n", ##args, _error_ptr, err_num); \ } while(0) static void totemsrp_instance_initialize (struct totemsrp_instance *instance) { memset (instance, 0, sizeof (struct totemsrp_instance)); list_init (&instance->token_callback_received_listhead); list_init (&instance->token_callback_sent_listhead); instance->my_received_flg = 1; instance->my_token_seq = SEQNO_START_TOKEN - 1; instance->memb_state = MEMB_STATE_OPERATIONAL; instance->set_aru = -1; instance->my_aru = SEQNO_START_MSG; instance->my_high_seq_received = SEQNO_START_MSG; instance->my_high_delivered = SEQNO_START_MSG; instance->orf_token_discard = 0; instance->commit_token = (struct memb_commit_token *)instance->commit_token_storage; } static void main_token_seqid_get ( const void *msg, 
	unsigned int *seqid,
	unsigned int *token_is)
{
	/* Report the token sequence number of an incoming message and whether
	 * the message is an ORF token at all (callback used by totemrrp). */
	const struct orf_token *token = msg;

	*seqid = 0;
	*token_is = 0;
	if (token->header.type == MESSAGE_TYPE_ORF_TOKEN) {
		*seqid = token->token_seq;
		*token_is = 1;
	}
}

static unsigned int main_msgs_missing (void)
{
// TODO always reports zero missing messages; real accounting not implemented
	return (0);
}

/*
 * Detect a scheduling pause longer than half the token timeout (comparing
 * now against the timestamp maintained by timer_function_pause_timeout)
 * and, if detected, drain all queued multicast traffic so stale membership
 * messages are not processed after the pause.
 * Returns the last totemrrp_mcast_recv_empty() result (0 or positive).
 */
static int pause_flush (struct totemsrp_instance *instance)
{
	uint64_t now_msec;
	uint64_t timestamp_msec;
	int res = 0;

	now_msec = (qb_util_nano_current_get () / QB_TIME_NS_IN_MSEC);
	timestamp_msec = instance->pause_timestamp / QB_TIME_NS_IN_MSEC;

	if ((now_msec - timestamp_msec) > (instance->totem_config->token_timeout / 2)) {
		log_printf (instance->totemsrp_log_level_notice,
			"Process pause detected for %d ms, flushing membership messages.", (unsigned int)(now_msec - timestamp_msec));

		/*
		 * -1 indicates an error from recvmsg
		 */
		do {
			res = totemrrp_mcast_recv_empty (instance->totemrrp_context);
		} while (res == -1);
	}

	return (res);
}

/*
 * Token callback that records token receive/send timestamps (in ms) into
 * the circular stats.token[] ring of TOTEM_TOKEN_STATS_MAX entries;
 * latest_token/earliest_token are the ring's head/tail indices.
 */
static int token_event_stats_collector (enum totem_callback_token_type type, const void *void_instance)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)void_instance;
	uint32_t time_now;
	unsigned long long nano_secs = qb_util_nano_current_get ();
	time_now = (nano_secs / QB_TIME_NS_IN_MSEC);

	if (type == TOTEM_CALLBACK_TOKEN_RECEIVED) {
		/* incr latest token the index */
		if (instance->stats.latest_token == (TOTEM_TOKEN_STATS_MAX - 1))
			instance->stats.latest_token = 0;
		else
			instance->stats.latest_token++;

		if (instance->stats.earliest_token == instance->stats.latest_token) {
			/* we have filled up the array, start overwriting */
			if (instance->stats.earliest_token == (TOTEM_TOKEN_STATS_MAX - 1))
				instance->stats.earliest_token = 0;
			else
				instance->stats.earliest_token++;

			instance->stats.token[instance->stats.earliest_token].rx = 0;
			instance->stats.token[instance->stats.earliest_token].tx = 0;
			instance->stats.token[instance->stats.earliest_token].backlog_calc = 0;
		}

		instance->stats.token[instance->stats.latest_token].rx = time_now;
instance->stats.token[instance->stats.latest_token].tx = 0; /* in case we drop the token */ } else { instance->stats.token[instance->stats.latest_token].tx = time_now; } return 0; } /* * Exported interfaces */ int totemsrp_initialize ( qb_loop_t *poll_handle, void **srp_context, struct totem_config *totem_config, totemmrp_stats_t *stats, void (*deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required), void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)) { struct totemsrp_instance *instance; unsigned int res; instance = malloc (sizeof (struct totemsrp_instance)); if (instance == NULL) { goto error_exit; } rundir = getenv ("COROSYNC_RUN_DIR"); if (rundir == NULL) { rundir = LOCALSTATEDIR "/lib/corosync"; } res = mkdir (rundir, 0700); if (res == -1 && errno != EEXIST) { goto error_destroy; } res = chdir (rundir); if (res == -1) { goto error_destroy; } totemsrp_instance_initialize (instance); stats->srp = &instance->stats; instance->stats.latest_token = 0; instance->stats.earliest_token = 0; instance->totem_config = totem_config; /* * Configure logging */ instance->totemsrp_log_level_security = totem_config->totem_logging_configuration.log_level_security; instance->totemsrp_log_level_error = totem_config->totem_logging_configuration.log_level_error; instance->totemsrp_log_level_warning = totem_config->totem_logging_configuration.log_level_warning; instance->totemsrp_log_level_notice = totem_config->totem_logging_configuration.log_level_notice; instance->totemsrp_log_level_debug = totem_config->totem_logging_configuration.log_level_debug; instance->totemsrp_subsys_id = totem_config->totem_logging_configuration.log_subsys_id; instance->totemsrp_log_printf = 
totem_config->totem_logging_configuration.log_printf; /* * Initialize local variables for totemsrp */ totemip_copy (&instance->mcast_address, &totem_config->interfaces[0].mcast_addr); /* * Display totem configuration */ log_printf (instance->totemsrp_log_level_debug, "Token Timeout (%d ms) retransmit timeout (%d ms)", totem_config->token_timeout, totem_config->token_retransmit_timeout); log_printf (instance->totemsrp_log_level_debug, "token hold (%d ms) retransmits before loss (%d retrans)", totem_config->token_hold_timeout, totem_config->token_retransmits_before_loss_const); log_printf (instance->totemsrp_log_level_debug, "join (%d ms) send_join (%d ms) consensus (%d ms) merge (%d ms)", totem_config->join_timeout, totem_config->send_join_timeout, totem_config->consensus_timeout, totem_config->merge_timeout); log_printf (instance->totemsrp_log_level_debug, "downcheck (%d ms) fail to recv const (%d msgs)", totem_config->downcheck_timeout, totem_config->fail_to_recv_const); log_printf (instance->totemsrp_log_level_debug, "seqno unchanged const (%d rotations) Maximum network MTU %d", totem_config->seqno_unchanged_const, totem_config->net_mtu); log_printf (instance->totemsrp_log_level_debug, "window size per rotation (%d messages) maximum messages per rotation (%d messages)", totem_config->window_size, totem_config->max_messages); log_printf (instance->totemsrp_log_level_debug, "missed count const (%d messages)", totem_config->miss_count_const); log_printf (instance->totemsrp_log_level_debug, "send threads (%d threads)", totem_config->threads); log_printf (instance->totemsrp_log_level_debug, "RRP token expired timeout (%d ms)", totem_config->rrp_token_expired_timeout); log_printf (instance->totemsrp_log_level_debug, "RRP token problem counter (%d ms)", totem_config->rrp_problem_count_timeout); log_printf (instance->totemsrp_log_level_debug, "RRP threshold (%d problem count)", totem_config->rrp_problem_count_threshold); log_printf (instance->totemsrp_log_level_debug, 
"RRP multicast threshold (%d problem count)", totem_config->rrp_problem_count_mcast_threshold); log_printf (instance->totemsrp_log_level_debug, "RRP automatic recovery check timeout (%d ms)", totem_config->rrp_autorecovery_check_timeout); log_printf (instance->totemsrp_log_level_debug, "RRP mode set to %s.", instance->totem_config->rrp_mode); log_printf (instance->totemsrp_log_level_debug, "heartbeat_failures_allowed (%d)", totem_config->heartbeat_failures_allowed); log_printf (instance->totemsrp_log_level_debug, "max_network_delay (%d ms)", totem_config->max_network_delay); cs_queue_init (&instance->retrans_message_queue, RETRANS_MESSAGE_QUEUE_SIZE_MAX, sizeof (struct message_item), instance->threaded_mode_enabled); sq_init (&instance->regular_sort_queue, QUEUE_RTR_ITEMS_SIZE_MAX, sizeof (struct sort_queue_item), 0); sq_init (&instance->recovery_sort_queue, QUEUE_RTR_ITEMS_SIZE_MAX, sizeof (struct sort_queue_item), 0); instance->totemsrp_poll_handle = poll_handle; instance->totemsrp_deliver_fn = deliver_fn; instance->totemsrp_confchg_fn = confchg_fn; instance->use_heartbeat = 1; timer_function_pause_timeout (instance); if ( totem_config->heartbeat_failures_allowed == 0 ) { log_printf (instance->totemsrp_log_level_debug, "HeartBeat is Disabled. 
To enable set heartbeat_failures_allowed > 0"); instance->use_heartbeat = 0; } if (instance->use_heartbeat) { instance->heartbeat_timeout = (totem_config->heartbeat_failures_allowed) * totem_config->token_retransmit_timeout + totem_config->max_network_delay; if (instance->heartbeat_timeout >= totem_config->token_timeout) { log_printf (instance->totemsrp_log_level_debug, "total heartbeat_timeout (%d ms) is not less than token timeout (%d ms)", instance->heartbeat_timeout, totem_config->token_timeout); log_printf (instance->totemsrp_log_level_debug, "heartbeat_timeout = heartbeat_failures_allowed * token_retransmit_timeout + max_network_delay"); log_printf (instance->totemsrp_log_level_debug, "heartbeat timeout should be less than the token timeout. HeartBeat is Diabled !!"); instance->use_heartbeat = 0; } else { log_printf (instance->totemsrp_log_level_debug, "total heartbeat_timeout (%d ms)", instance->heartbeat_timeout); } } totemrrp_initialize ( poll_handle, &instance->totemrrp_context, totem_config, stats->srp, instance, main_deliver_fn, main_iface_change_fn, main_token_seqid_get, main_msgs_missing, target_set_completed); /* * Must have net_mtu adjusted by totemrrp_initialize first */ cs_queue_init (&instance->new_message_queue, MESSAGE_QUEUE_MAX, sizeof (struct message_item), instance->threaded_mode_enabled); totemsrp_callback_token_create (instance, &instance->token_recv_event_handle, TOTEM_CALLBACK_TOKEN_RECEIVED, 0, token_event_stats_collector, instance); totemsrp_callback_token_create (instance, &instance->token_sent_event_handle, TOTEM_CALLBACK_TOKEN_SENT, 0, token_event_stats_collector, instance); *srp_context = instance; return (0); error_destroy: free (instance); error_exit: return (-1); } void totemsrp_finalize ( void *srp_context) { struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context; memb_leave_message_send (instance); totemrrp_finalize (instance->totemrrp_context); cs_queue_free (&instance->new_message_queue); cs_queue_free 
(&instance->retrans_message_queue); sq_free (&instance->regular_sort_queue); sq_free (&instance->recovery_sort_queue); free (instance); } int totemsrp_ifaces_get ( void *srp_context, unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count) { struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context; int res = 0; unsigned int found = 0; unsigned int i; for (i = 0; i < instance->my_memb_entries; i++) { if (instance->my_memb_list[i].addr[0].nodeid == nodeid) { found = 1; break; } } if (found) { memcpy (interfaces, &instance->my_memb_list[i], sizeof (struct srp_addr)); *iface_count = instance->totem_config->interface_count; goto finish; } for (i = 0; i < instance->my_left_memb_entries; i++) { if (instance->my_left_memb_list[i].addr[0].nodeid == nodeid) { found = 1; break; } } if (found) { memcpy (interfaces, &instance->my_left_memb_list[i], sizeof (struct srp_addr)); *iface_count = instance->totem_config->interface_count; } else { res = -1; } finish: totemrrp_ifaces_get (instance->totemrrp_context, status, NULL); return (res); } int totemsrp_crypto_set ( void *srp_context, - unsigned int type) + const char *cipher_type, + const char *hash_type) { struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context; int res; - res = totemrrp_crypto_set(instance->totemrrp_context, type); + res = totemrrp_crypto_set(instance->totemrrp_context, cipher_type, hash_type); return (res); } unsigned int totemsrp_my_nodeid_get ( void *srp_context) { struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context; unsigned int res; res = instance->totem_config->interfaces[0].boundto.nodeid; return (res); } int totemsrp_my_family_get ( void *srp_context) { struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context; int res; res = instance->totem_config->interfaces[0].boundto.family; return (res); } int totemsrp_ring_reenable ( void *srp_context) { struct totemsrp_instance *instance 
		= (struct totemsrp_instance *)srp_context;

	totemrrp_ring_reenable (instance->totemrrp_context,
		instance->totem_config->interface_count);

	return (0);
}

/*
 * Set operations for use by the membership algorithm
 */

/* Compare two processor addresses; returns 1 if equal, 0 otherwise.
 * NOTE(review): only the first interface address is compared (i < 1) --
 * presumably interface 0 alone establishes processor identity; confirm
 * against callers before widening the loop. */
static int srp_addr_equal (const struct srp_addr *a, const struct srp_addr *b)
{
	unsigned int i;
	unsigned int res;

	for (i = 0; i < 1; i++) {
		res = totemip_equal (&a->addr[i], &b->addr[i]);
		if (res == 0) {
			return (0);
		}
	}
	return (1);
}

/* Copy all INTERFACE_MAX per-interface addresses from src to dest. */
static void srp_addr_copy (struct srp_addr *dest, const struct srp_addr *src)
{
	unsigned int i;

	for (i = 0; i < INTERFACE_MAX; i++) {
		totemip_copy (&dest->addr[i], &src->addr[i]);
	}
}

/* Extract the nodeid (from interface 0) of each srp_addr into nodeid_out. */
static void srp_addr_to_nodeid (
	unsigned int *nodeid_out,
	struct srp_addr *srp_addr_in,
	unsigned int entries)
{
	unsigned int i;

	for (i = 0; i < entries; i++) {
		nodeid_out[i] = srp_addr_in[i].addr[0].nodeid;
	}
}

/* Byte-swap every interface address while copying from in to out. */
static void srp_addr_copy_endian_convert (struct srp_addr *out, const struct srp_addr *in)
{
	int i;

	for (i = 0; i < INTERFACE_MAX; i++) {
		totemip_copy_endian_convert (&out->addr[i], &in->addr[i]);
	}
}

/* Forget all recorded per-processor consensus entries. */
static void memb_consensus_reset (struct totemsrp_instance *instance)
{
	instance->consensus_list_entries = 0;
}

/* out_list = one_list \ two_list (set difference, order preserved). */
static void memb_set_subtract (
	struct srp_addr *out_list, int *out_list_entries,
	struct srp_addr *one_list, int one_list_entries,
	struct srp_addr *two_list, int two_list_entries)
{
	int found = 0;
	int i;
	int j;

	*out_list_entries = 0;

	for (i = 0; i < one_list_entries; i++) {
		for (j = 0; j < two_list_entries; j++) {
			if (srp_addr_equal (&one_list[i], &two_list[j])) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			srp_addr_copy (&out_list[*out_list_entries], &one_list[i]);
			*out_list_entries = *out_list_entries + 1;
		}
		found = 0;
	}
}

/*
 * Set consensus for a specific processor
 */
static void memb_consensus_set (
	struct totemsrp_instance *instance,
	const struct srp_addr *addr)
{
	int found = 0;
	int i;

	/* dummy entries generated by leave messages carry no real processor */
	if (addr->addr[0].nodeid == LEAVE_DUMMY_NODEID)
		return;

	for (i = 0; i < instance->consensus_list_entries; i++) {
		if (srp_addr_equal(addr,
&instance->consensus_list[i].addr)) { found = 1; break; /* found entry */ } } srp_addr_copy (&instance->consensus_list[i].addr, addr); instance->consensus_list[i].set = 1; if (found == 0) { instance->consensus_list_entries++; } return; } /* * Is consensus set for a specific processor */ static int memb_consensus_isset ( struct totemsrp_instance *instance, const struct srp_addr *addr) { int i; for (i = 0; i < instance->consensus_list_entries; i++) { if (srp_addr_equal (addr, &instance->consensus_list[i].addr)) { return (instance->consensus_list[i].set); } } return (0); } /* * Is consensus agreed upon based upon consensus database */ static int memb_consensus_agreed ( struct totemsrp_instance *instance) { struct srp_addr token_memb[PROCESSOR_COUNT_MAX]; int token_memb_entries = 0; int agreed = 1; int i; memb_set_subtract (token_memb, &token_memb_entries, instance->my_proc_list, instance->my_proc_list_entries, instance->my_failed_list, instance->my_failed_list_entries); for (i = 0; i < token_memb_entries; i++) { if (memb_consensus_isset (instance, &token_memb[i]) == 0) { agreed = 0; break; } } assert (token_memb_entries >= 1); return (agreed); } static void memb_consensus_notset ( struct totemsrp_instance *instance, struct srp_addr *no_consensus_list, int *no_consensus_list_entries, struct srp_addr *comparison_list, int comparison_list_entries) { int i; *no_consensus_list_entries = 0; for (i = 0; i < instance->my_proc_list_entries; i++) { if (memb_consensus_isset (instance, &instance->my_proc_list[i]) == 0) { srp_addr_copy (&no_consensus_list[*no_consensus_list_entries], &instance->my_proc_list[i]); *no_consensus_list_entries = *no_consensus_list_entries + 1; } } } /* * Is set1 equal to set2 Entries can be in different orders */ static int memb_set_equal ( struct srp_addr *set1, int set1_entries, struct srp_addr *set2, int set2_entries) { int i; int j; int found = 0; if (set1_entries != set2_entries) { return (0); } for (i = 0; i < set2_entries; i++) { for (j = 0; j < 
set1_entries; j++) { if (srp_addr_equal (&set1[j], &set2[i])) { found = 1; break; } } if (found == 0) { return (0); } found = 0; } return (1); } /* * Is subset fully contained in fullset */ static int memb_set_subset ( const struct srp_addr *subset, int subset_entries, const struct srp_addr *fullset, int fullset_entries) { int i; int j; int found = 0; if (subset_entries > fullset_entries) { return (0); } for (i = 0; i < subset_entries; i++) { for (j = 0; j < fullset_entries; j++) { if (srp_addr_equal (&subset[i], &fullset[j])) { found = 1; } } if (found == 0) { return (0); } found = 0; } return (1); } /* * merge subset into fullset taking care not to add duplicates */ static void memb_set_merge ( const struct srp_addr *subset, int subset_entries, struct srp_addr *fullset, int *fullset_entries) { int found = 0; int i; int j; for (i = 0; i < subset_entries; i++) { for (j = 0; j < *fullset_entries; j++) { if (srp_addr_equal (&fullset[j], &subset[i])) { found = 1; break; } } if (found == 0) { srp_addr_copy (&fullset[*fullset_entries], &subset[i]); *fullset_entries = *fullset_entries + 1; } found = 0; } return; } static void memb_set_and_with_ring_id ( struct srp_addr *set1, struct memb_ring_id *set1_ring_ids, int set1_entries, struct srp_addr *set2, int set2_entries, struct memb_ring_id *old_ring_id, struct srp_addr *and, int *and_entries) { int i; int j; int found = 0; *and_entries = 0; for (i = 0; i < set2_entries; i++) { for (j = 0; j < set1_entries; j++) { if (srp_addr_equal (&set1[j], &set2[i])) { if (memcmp (&set1_ring_ids[j], old_ring_id, sizeof (struct memb_ring_id)) == 0) { found = 1; } break; } } if (found) { srp_addr_copy (&and[*and_entries], &set1[j]); *and_entries = *and_entries + 1; } found = 0; } return; } #ifdef CODE_COVERAGE static void memb_set_print ( char *string, struct srp_addr *list, int list_entries) { int i; int j; printf ("List '%s' contains %d entries:\n", string, list_entries); for (i = 0; i < list_entries; i++) { for (j = 0; j < 
INTERFACE_MAX; j++) { printf ("Address %d\n", i); printf ("\tiface %d %s\n", j, totemip_print (&list[i].addr[j])); printf ("family %d\n", list[i].addr[j].family); } } } #endif static void *totemsrp_buffer_alloc (struct totemsrp_instance *instance) { assert (instance != NULL); return totemrrp_buffer_alloc (instance->totemrrp_context); } static void totemsrp_buffer_release (struct totemsrp_instance *instance, void *ptr) { assert (instance != NULL); totemrrp_buffer_release (instance->totemrrp_context, ptr); } static void reset_token_retransmit_timeout (struct totemsrp_instance *instance) { qb_loop_timer_del (instance->totemsrp_poll_handle, instance->timer_orf_token_retransmit_timeout); qb_loop_timer_add (instance->totemsrp_poll_handle, QB_LOOP_MED, instance->totem_config->token_retransmit_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_token_retransmit_timeout, &instance->timer_orf_token_retransmit_timeout); } static void start_merge_detect_timeout (struct totemsrp_instance *instance) { if (instance->my_merge_detect_timeout_outstanding == 0) { qb_loop_timer_add (instance->totemsrp_poll_handle, QB_LOOP_MED, instance->totem_config->merge_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_merge_detect_timeout, &instance->timer_merge_detect_timeout); instance->my_merge_detect_timeout_outstanding = 1; } } static void cancel_merge_detect_timeout (struct totemsrp_instance *instance) { qb_loop_timer_del (instance->totemsrp_poll_handle, instance->timer_merge_detect_timeout); instance->my_merge_detect_timeout_outstanding = 0; } /* * ring_state_* is used to save and restore the sort queue * state when a recovery operation fails (and enters gather) */ static void old_ring_state_save (struct totemsrp_instance *instance) { if (instance->old_ring_state_saved == 0) { instance->old_ring_state_saved = 1; memcpy (&instance->my_old_ring_id, &instance->my_ring_id, sizeof (struct memb_ring_id)); instance->old_ring_state_aru = instance->my_aru; 
instance->old_ring_state_high_seq_received = instance->my_high_seq_received; log_printf (instance->totemsrp_log_level_debug, "Saving state aru %x high seq received %x", instance->my_aru, instance->my_high_seq_received); } } static void old_ring_state_restore (struct totemsrp_instance *instance) { instance->my_aru = instance->old_ring_state_aru; instance->my_high_seq_received = instance->old_ring_state_high_seq_received; log_printf (instance->totemsrp_log_level_debug, "Restoring instance->my_aru %x my high seq received %x", instance->my_aru, instance->my_high_seq_received); } static void old_ring_state_reset (struct totemsrp_instance *instance) { log_printf (instance->totemsrp_log_level_debug, "Resetting old ring state"); instance->old_ring_state_saved = 0; } static void reset_pause_timeout (struct totemsrp_instance *instance) { qb_loop_timer_del (instance->totemsrp_poll_handle, instance->timer_pause_timeout); qb_loop_timer_add (instance->totemsrp_poll_handle, QB_LOOP_MED, instance->totem_config->token_timeout * QB_TIME_NS_IN_MSEC / 5, (void *)instance, timer_function_pause_timeout, &instance->timer_pause_timeout); } static void reset_token_timeout (struct totemsrp_instance *instance) { qb_loop_timer_del (instance->totemsrp_poll_handle, instance->timer_orf_token_timeout); qb_loop_timer_add (instance->totemsrp_poll_handle, QB_LOOP_MED, instance->totem_config->token_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_orf_token_timeout, &instance->timer_orf_token_timeout); } static void reset_heartbeat_timeout (struct totemsrp_instance *instance) { qb_loop_timer_del (instance->totemsrp_poll_handle, instance->timer_heartbeat_timeout); qb_loop_timer_add (instance->totemsrp_poll_handle, QB_LOOP_MED, instance->heartbeat_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_heartbeat_timeout, &instance->timer_heartbeat_timeout); } static void cancel_token_timeout (struct totemsrp_instance *instance) { qb_loop_timer_del (instance->totemsrp_poll_handle, 
		instance->timer_orf_token_timeout);
}

/* Stop the heartbeat-loss timer. */
static void cancel_heartbeat_timeout (struct totemsrp_instance *instance)
{
	qb_loop_timer_del (instance->totemsrp_poll_handle,
		instance->timer_heartbeat_timeout);
}

/* Stop the token retransmit timer. */
static void cancel_token_retransmit_timeout (struct totemsrp_instance *instance)
{
	qb_loop_timer_del (instance->totemsrp_poll_handle,
		instance->timer_orf_token_retransmit_timeout);
}

/* Arm the token-hold timer: fires after token_hold_timeout ms while this
 * node is holding the token. */
static void start_token_hold_retransmit_timeout (struct totemsrp_instance *instance)
{
	qb_loop_timer_add (instance->totemsrp_poll_handle,
		QB_LOOP_MED,
		instance->totem_config->token_hold_timeout*QB_TIME_NS_IN_MSEC,
		(void *)instance,
		timer_function_token_hold_retransmit_timeout,
		&instance->timer_orf_token_hold_retransmit_timeout);
}

/* Stop the token-hold timer. */
static void cancel_token_hold_retransmit_timeout (struct totemsrp_instance *instance)
{
	qb_loop_timer_del (instance->totemsrp_poll_handle,
		instance->timer_orf_token_hold_retransmit_timeout);
}

/*
 * Consensus timer expired: if consensus has been reached on the membership,
 * restart from our own entry and keep waiting for the token; otherwise add
 * all processors without consensus to the failed list and re-enter the
 * GATHER state.
 */
static void memb_state_consensus_timeout_expired (
		struct totemsrp_instance *instance)
{
	struct srp_addr no_consensus_list[PROCESSOR_COUNT_MAX];
	int no_consensus_list_entries;

	instance->stats.consensus_timeouts++;
	if (memb_consensus_agreed (instance)) {
		memb_consensus_reset (instance);

		memb_consensus_set (instance, &instance->my_id);

		reset_token_timeout (instance); // REVIEWED
	} else {
		memb_consensus_notset (
			instance,
			no_consensus_list,
			&no_consensus_list_entries,
			instance->my_proc_list,
			instance->my_proc_list_entries);

		memb_set_merge (no_consensus_list, no_consensus_list_entries,
			instance->my_failed_list, &instance->my_failed_list_entries);
		memb_state_gather_enter (instance, 0);
	}
}

static void memb_join_message_send (struct totemsrp_instance *instance);

static void memb_merge_detect_transmit (struct totemsrp_instance *instance);

/*
 * Timers used for various states of the membership algorithm
 */

/* Periodic tick: refresh pause_timestamp so pause_flush() can measure how
 * long the process was descheduled. */
static void timer_function_pause_timeout (void *data)
{
	struct totemsrp_instance *instance = data;

	instance->pause_timestamp = qb_util_nano_current_get ();
	reset_pause_timeout
(instance); } static void memb_recovery_state_token_loss (struct totemsrp_instance *instance) { old_ring_state_restore (instance); memb_state_gather_enter (instance, 5); instance->stats.recovery_token_lost++; } static void timer_function_orf_token_timeout (void *data) { struct totemsrp_instance *instance = data; switch (instance->memb_state) { case MEMB_STATE_OPERATIONAL: log_printf (instance->totemsrp_log_level_debug, "The token was lost in the OPERATIONAL state."); log_printf (instance->totemsrp_log_level_notice, "A processor failed, forming new configuration."); totemrrp_iface_check (instance->totemrrp_context); memb_state_gather_enter (instance, 2); instance->stats.operational_token_lost++; break; case MEMB_STATE_GATHER: log_printf (instance->totemsrp_log_level_debug, "The consensus timeout expired."); memb_state_consensus_timeout_expired (instance); memb_state_gather_enter (instance, 3); instance->stats.gather_token_lost++; break; case MEMB_STATE_COMMIT: log_printf (instance->totemsrp_log_level_debug, "The token was lost in the COMMIT state."); memb_state_gather_enter (instance, 4); instance->stats.commit_token_lost++; break; case MEMB_STATE_RECOVERY: log_printf (instance->totemsrp_log_level_debug, "The token was lost in the RECOVERY state."); memb_recovery_state_token_loss (instance); instance->orf_token_discard = 1; break; } } static void timer_function_heartbeat_timeout (void *data) { struct totemsrp_instance *instance = data; log_printf (instance->totemsrp_log_level_debug, "HeartBeat Timer expired Invoking token loss mechanism in state %d ", instance->memb_state); timer_function_orf_token_timeout(data); } static void memb_timer_function_state_gather (void *data) { struct totemsrp_instance *instance = data; switch (instance->memb_state) { case MEMB_STATE_OPERATIONAL: case MEMB_STATE_RECOVERY: assert (0); /* this should never happen */ break; case MEMB_STATE_GATHER: case MEMB_STATE_COMMIT: memb_join_message_send (instance); /* * Restart the join timeout `*/ 
qb_loop_timer_del (instance->totemsrp_poll_handle, instance->memb_timer_state_gather_join_timeout); qb_loop_timer_add (instance->totemsrp_poll_handle, QB_LOOP_MED, instance->totem_config->join_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, memb_timer_function_state_gather, &instance->memb_timer_state_gather_join_timeout); break; } } static void memb_timer_function_gather_consensus_timeout (void *data) { struct totemsrp_instance *instance = data; memb_state_consensus_timeout_expired (instance); } static void deliver_messages_from_recovery_to_regular (struct totemsrp_instance *instance) { unsigned int i; struct sort_queue_item *recovery_message_item; struct sort_queue_item regular_message_item; unsigned int range = 0; int res; void *ptr; struct mcast *mcast; log_printf (instance->totemsrp_log_level_debug, "recovery to regular %x-%x", SEQNO_START_MSG + 1, instance->my_aru); range = instance->my_aru - SEQNO_START_MSG; /* * Move messages from recovery to regular sort queue */ // todo should i be initialized to 0 or 1 ? 
for (i = 1; i <= range; i++) { res = sq_item_get (&instance->recovery_sort_queue, i + SEQNO_START_MSG, &ptr); if (res != 0) { continue; } recovery_message_item = ptr; /* * Convert recovery message into regular message */ mcast = recovery_message_item->mcast; if (mcast->header.encapsulated == MESSAGE_ENCAPSULATED) { /* * Message is a recovery message encapsulated * in a new ring message */ regular_message_item.mcast = (struct mcast *)(((char *)recovery_message_item->mcast) + sizeof (struct mcast)); regular_message_item.msg_len = recovery_message_item->msg_len - sizeof (struct mcast); mcast = regular_message_item.mcast; } else { /* * TODO this case shouldn't happen */ continue; } log_printf (instance->totemsrp_log_level_debug, "comparing if ring id is for this processors old ring seqno %d", mcast->seq); /* * Only add this message to the regular sort * queue if it was originated with the same ring * id as the previous ring */ if (memcmp (&instance->my_old_ring_id, &mcast->ring_id, sizeof (struct memb_ring_id)) == 0) { res = sq_item_inuse (&instance->regular_sort_queue, mcast->seq); if (res == 0) { sq_item_add (&instance->regular_sort_queue, ®ular_message_item, mcast->seq); if (sq_lt_compare (instance->old_ring_state_high_seq_received, mcast->seq)) { instance->old_ring_state_high_seq_received = mcast->seq; } } } else { log_printf (instance->totemsrp_log_level_debug, "-not adding msg with seq no %x", mcast->seq); } } } /* * Change states in the state machine of the membership algorithm */ static void memb_state_operational_enter (struct totemsrp_instance *instance) { struct srp_addr joined_list[PROCESSOR_COUNT_MAX]; int joined_list_entries = 0; unsigned int aru_save; unsigned int joined_list_totemip[PROCESSOR_COUNT_MAX]; unsigned int trans_memb_list_totemip[PROCESSOR_COUNT_MAX]; unsigned int new_memb_list_totemip[PROCESSOR_COUNT_MAX]; unsigned int left_list[PROCESSOR_COUNT_MAX]; unsigned int i; unsigned int res; memb_consensus_reset (instance); old_ring_state_reset 
(instance);

	deliver_messages_from_recovery_to_regular (instance);

	log_printf (instance->totemsrp_log_level_debug,
		"Delivering to app %x to %x",
		instance->my_high_delivered + 1, instance->old_ring_state_high_seq_received);

	/*
	 * Deliver the old ring's remaining messages under the old ring's
	 * aru; my_aru is restored after the transitional delivery below.
	 */
	aru_save = instance->my_aru;
	instance->my_aru = instance->old_ring_state_aru;

	messages_deliver_to_app (instance, 0, instance->old_ring_state_high_seq_received);

	/*
	 * Calculate joined and left list
	 */
	memb_set_subtract (instance->my_left_memb_list,
		&instance->my_left_memb_entries,
		instance->my_memb_list, instance->my_memb_entries,
		instance->my_trans_memb_list, instance->my_trans_memb_entries);

	memb_set_subtract (joined_list, &joined_list_entries,
		instance->my_new_memb_list, instance->my_new_memb_entries,
		instance->my_trans_memb_list, instance->my_trans_memb_entries);

	/*
	 * Install new membership
	 */
	instance->my_memb_entries = instance->my_new_memb_entries;
	memcpy (&instance->my_memb_list, instance->my_new_memb_list,
		sizeof (struct srp_addr) * instance->my_memb_entries);
	instance->last_released = 0;
	instance->my_set_retrans_flg = 0;

	/*
	 * Deliver transitional configuration to application
	 */
	srp_addr_to_nodeid (left_list, instance->my_left_memb_list,
		instance->my_left_memb_entries);
	srp_addr_to_nodeid (trans_memb_list_totemip,
		instance->my_trans_memb_list, instance->my_trans_memb_entries);
	instance->totemsrp_confchg_fn (TOTEM_CONFIGURATION_TRANSITIONAL,
		trans_memb_list_totemip, instance->my_trans_memb_entries,
		left_list, instance->my_left_memb_entries,
		0, 0, &instance->my_ring_id);

	// TODO we need to filter to ensure we only deliver those
	// messages which are part of instance->my_deliver_memb
	messages_deliver_to_app (instance, 1, instance->old_ring_state_high_seq_received);

	instance->my_aru = aru_save;

	/*
	 * Deliver regular configuration to application
	 */
	srp_addr_to_nodeid (new_memb_list_totemip,
		instance->my_new_memb_list, instance->my_new_memb_entries);
	srp_addr_to_nodeid (joined_list_totemip,
		joined_list, joined_list_entries);
	instance->totemsrp_confchg_fn (TOTEM_CONFIGURATION_REGULAR,
		new_memb_list_totemip, instance->my_new_memb_entries,
		0, 0,
		joined_list_totemip, joined_list_entries, &instance->my_ring_id);

	/*
	 * The recovery sort queue now becomes the regular
	 * sort queue.  It is necessary to copy the state
	 * into the regular sort queue.
	 */
	sq_copy (&instance->regular_sort_queue, &instance->recovery_sort_queue);
	instance->my_last_aru = SEQNO_START_MSG;

	/* When making my_proc_list smaller, ensure that the
	 * now non-used entries are zero-ed out.  There are some suspect
	 * assert's that assume that there is always 2 entries in the list.
	 * These fail when my_proc_list is reduced to 1 entry (and the
	 * valid [0] entry is the same as the 'unused' [1] entry).
	 */
	memset(instance->my_proc_list, 0,
		sizeof (struct srp_addr) * instance->my_proc_list_entries);

	instance->my_proc_list_entries = instance->my_new_memb_entries;
	/*
	 * NOTE(review): the copy size uses my_memb_entries; it equals
	 * my_new_memb_entries here because of the assignment during
	 * "Install new membership" above, but using my_new_memb_entries
	 * would be clearer — confirm intent.
	 */
	memcpy (instance->my_proc_list, instance->my_new_memb_list,
		sizeof (struct srp_addr) * instance->my_memb_entries);

	instance->my_failed_list_entries = 0;
	/*
	 * TODO Not exactly to spec
	 *
	 * At the entry to this function all messages without a gap are
	 * delivered.
	 *
	 * This code throws away messages from the last gap in the sort queue
	 * to my_high_seq_received
	 *
	 * What should really happen is we should deliver all messages up to
	 * a gap, then deliver the transitional configuration, then deliver
	 * the messages between the first gap and my_high_seq_received, then
	 * deliver a regular configuration, then deliver the regular
	 * configuration
	 *
	 * Unfortunately totempg doesn't appear to like this operating mode
	 * which needs more inspection
	 */

	/*
	 * Walk backwards from my_high_seq_received until the first hole
	 * in the sort queue (or zero); i ends at the last gap-free seq.
	 */
	i = instance->my_high_seq_received + 1;
	do {
		void *ptr;

		i -= 1;
		res = sq_item_get (&instance->regular_sort_queue, i, &ptr);
		if (i == 0) {
			break;
		}
	} while (res);

	instance->my_high_delivered = i;

	/*
	 * Free the mcast buffers of everything at or below the new
	 * delivery horizon before releasing the queue slots.
	 */
	for (i = 0; i <= instance->my_high_delivered; i++) {
		void *ptr;

		res = sq_item_get (&instance->regular_sort_queue, i, &ptr);
		if (res == 0) {
			struct sort_queue_item *regular_message;

			regular_message = ptr;
			free (regular_message->mcast);
		}
	}
	sq_items_release (&instance->regular_sort_queue, instance->my_high_delivered);
	instance->last_released = instance->my_high_delivered;

	log_printf (instance->totemsrp_log_level_debug,
		"entering OPERATIONAL state.");
	log_printf (instance->totemsrp_log_level_notice,
		"A processor joined or left the membership and a new membership was formed.");
	instance->memb_state = MEMB_STATE_OPERATIONAL;

	instance->stats.operational_entered++;
	instance->stats.continuous_gather = 0;

	instance->my_received_flg = 1;

	reset_pause_timeout (instance);

	/*
	 * Save ring id information from this configuration to determine
	 * which processors are transitioning from old regular configuration
	 * in to new regular configuration on the next configuration change
	 */
	memcpy (&instance->my_old_ring_id, &instance->my_ring_id,
		sizeof (struct memb_ring_id));

	return;
}

/*
 * Enter the GATHER state: stop delivering the orf token, merge this
 * node into its own proc list, and announce membership via a join
 * message.
 */
static void memb_state_gather_enter (
	struct totemsrp_instance *instance,
	int gather_from)
{
	instance->orf_token_discard = 1;

	memb_set_merge (
		&instance->my_id, 1,
		instance->my_proc_list, &instance->my_proc_list_entries);

	memb_join_message_send
(instance);

	/*
	 * Restart the join timeout
	 */
	qb_loop_timer_del (instance->totemsrp_poll_handle,
		instance->memb_timer_state_gather_join_timeout);

	qb_loop_timer_add (instance->totemsrp_poll_handle,
		QB_LOOP_MED,
		instance->totem_config->join_timeout*QB_TIME_NS_IN_MSEC,
		(void *)instance,
		memb_timer_function_state_gather,
		&instance->memb_timer_state_gather_join_timeout);

	/*
	 * Restart the consensus timeout
	 */
	qb_loop_timer_del (instance->totemsrp_poll_handle,
		instance->memb_timer_state_gather_consensus_timeout);

	qb_loop_timer_add (instance->totemsrp_poll_handle,
		QB_LOOP_MED,
		instance->totem_config->consensus_timeout*QB_TIME_NS_IN_MSEC,
		(void *)instance,
		memb_timer_function_gather_consensus_timeout,
		&instance->memb_timer_state_gather_consensus_timeout);

	/*
	 * Cancel the token loss and token retransmission timeouts
	 */
	cancel_token_retransmit_timeout (instance); // REVIEWED
	cancel_token_timeout (instance); // REVIEWED
	cancel_merge_detect_timeout (instance);

	/*
	 * Reset consensus tracking and record agreement with ourselves.
	 */
	memb_consensus_reset (instance);

	memb_consensus_set (instance, &instance->my_id);

	log_printf (instance->totemsrp_log_level_debug,
		"entering GATHER state from %d.", gather_from);

	instance->memb_state = MEMB_STATE_GATHER;
	instance->stats.gather_entered++;

	if (gather_from == 3) {
		/*
		 * State 3 means gather, so we are continuously gathering.
		 */
		instance->stats.continuous_gather++;
	}

	if (instance->stats.continuous_gather > MAX_NO_CONT_GATHER) {
		log_printf (instance->totemsrp_log_level_warning,
			"Totem is unable to form a cluster because of an "
			"operating system or network fault. The most common "
			"cause of this message is that the local firewall is "
			"configured improperly.");
	}

	return;
}

/* forward declaration — defined later in this file */
static void timer_function_token_retransmit_timeout (void *data);

/*
 * Callback invoked once the token target address has been set in
 * totemrrp: send the commit token.
 */
static void target_set_completed (
	void *context)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)context;

	memb_state_commit_token_send (instance);
}

/*
 * Enter the COMMIT state: save the old ring state, fill in our slot
 * of the commit token, select the token target, adopt and store the
 * new ring id, and stop the GATHER-state timers.  The commit token
 * itself is sent from target_set_completed().
 */
static void memb_state_commit_enter (
	struct totemsrp_instance *instance)
{
	old_ring_state_save (instance);

	memb_state_commit_token_update (instance);

	memb_state_commit_token_target_set (instance);

	qb_loop_timer_del (instance->totemsrp_poll_handle, instance->memb_timer_state_gather_join_timeout);

	instance->memb_timer_state_gather_join_timeout = 0;

	qb_loop_timer_del (instance->totemsrp_poll_handle, instance->memb_timer_state_gather_consensus_timeout);

	instance->memb_timer_state_gather_consensus_timeout = 0;

	memb_ring_id_set_and_store (instance, &instance->commit_token->ring_id);

	instance->token_ring_id_seq = instance->my_ring_id.seq;

	log_printf (instance->totemsrp_log_level_debug,
		"entering COMMIT state.");

	instance->memb_state = MEMB_STATE_COMMIT;
	reset_token_retransmit_timeout (instance); // REVIEWED
	reset_token_timeout (instance); // REVIEWED

	instance->stats.commit_entered++;
	instance->stats.continuous_gather = 0;

	/*
	 * reset all flow control variables since we are starting a new ring
	 */
	instance->my_trc = 0;
	instance->my_pbl = 0;
	instance->my_cbl = 0;

	/*
	 * commit token sent after callback that token target has been set
	 */
}

/*
 * Enter the RECOVERY state: reinitialize the recovery sort queue and
 * retransmit queue and, if any transitional member is missing old
 * ring messages, re-originate those messages encapsulated on the new
 * ring.
 */
static void memb_state_recovery_enter (
	struct totemsrp_instance *instance,
	struct memb_commit_token *commit_token)
{
	int i;
	int local_received_flg = 1;
	unsigned int low_ring_aru;
	unsigned int range = 0;
	unsigned int messages_originated = 0;
	const struct srp_addr *addr;
	struct memb_commit_token_memb_entry *memb_list;
	struct memb_ring_id my_new_memb_ring_id_list[PROCESSOR_COUNT_MAX];

	/*
	 * The member address array and the per-member state array follow
	 * the fixed part of the commit token.
	 */
	addr = (const struct srp_addr *)commit_token->end_of_commit_token;
	memb_list = (struct memb_commit_token_memb_entry
	*)(addr + commit_token->addr_entries);

	log_printf (instance->totemsrp_log_level_debug,
		"entering RECOVERY state.");

	instance->orf_token_discard = 0;

	instance->my_high_ring_delivered = 0;

	sq_reinit (&instance->recovery_sort_queue, SEQNO_START_MSG);
	cs_queue_reinit (&instance->retrans_message_queue);

	low_ring_aru = instance->old_ring_state_high_seq_received;

	memb_state_commit_token_send_recovery (instance, commit_token);

	instance->my_token_seq = SEQNO_START_TOKEN - 1;

	/*
	 * Build regular configuration
	 */
	totemrrp_processor_count_set (
		instance->totemrrp_context,
		commit_token->addr_entries);

	/*
	 * Build transitional configuration
	 */
	for (i = 0; i < instance->my_new_memb_entries; i++) {
		memcpy (&my_new_memb_ring_id_list[i],
			&memb_list[i].ring_id,
			sizeof (struct memb_ring_id));
	}
	memb_set_and_with_ring_id (
		instance->my_new_memb_list,
		my_new_memb_ring_id_list,
		instance->my_new_memb_entries,
		instance->my_memb_list,
		instance->my_memb_entries,
		&instance->my_old_ring_id,
		instance->my_trans_memb_list,
		&instance->my_trans_memb_entries);

	for (i = 0; i < instance->my_trans_memb_entries; i++) {
		log_printf (instance->totemsrp_log_level_debug,
			"TRANS [%d] member %s:", i,
			totemip_print (&instance->my_trans_memb_list[i].addr[0]));
	}
	for (i = 0; i < instance->my_new_memb_entries; i++) {
		log_printf (instance->totemsrp_log_level_debug,
			"position [%d] member %s:", i,
			totemip_print (&addr[i].addr[0]));
		log_printf (instance->totemsrp_log_level_debug,
			"previous ring seq %llx rep %s",
			memb_list[i].ring_id.seq,
			totemip_print (&memb_list[i].ring_id.rep));

		log_printf (instance->totemsrp_log_level_debug,
			"aru %x high delivered %x received flag %d",
			memb_list[i].aru,
			memb_list[i].high_delivered,
			memb_list[i].received_flg);

		// assert (totemip_print (&memb_list[i].ring_id.rep) != 0);
	}

	/*
	 * Determine if any received flag is false
	 */
	for (i = 0; i < commit_token->addr_entries; i++) {
		if (memb_set_subset (&instance->my_new_memb_list[i], 1,
			instance->my_trans_memb_list,
			instance->my_trans_memb_entries) &&

			memb_list[i].received_flg == 0) {
			instance->my_deliver_memb_entries = instance->my_trans_memb_entries;
			memcpy (instance->my_deliver_memb_list, instance->my_trans_memb_list,
				sizeof (struct srp_addr) * instance->my_trans_memb_entries);
			local_received_flg = 0;
			break;
		}
	}
	if (local_received_flg == 1) {
		goto no_originate;
	} /* Else originate messages if we should */

	/*
	 * Calculate my_low_ring_aru, instance->my_high_ring_delivered for the transitional membership
	 */
	for (i = 0; i < commit_token->addr_entries; i++) {
		if (memb_set_subset (&instance->my_new_memb_list[i], 1,
			instance->my_deliver_memb_list,
			instance->my_deliver_memb_entries) &&

			memcmp (&instance->my_old_ring_id,
				&memb_list[i].ring_id,
				sizeof (struct memb_ring_id)) == 0) {

			if (sq_lt_compare (memb_list[i].aru, low_ring_aru)) {
				low_ring_aru = memb_list[i].aru;
			}
			if (sq_lt_compare (instance->my_high_ring_delivered, memb_list[i].high_delivered)) {
				instance->my_high_ring_delivered = memb_list[i].high_delivered;
			}
		}
	}

	/*
	 * Copy all old ring messages to instance->retrans_message_queue
	 */
	range = instance->old_ring_state_high_seq_received - low_ring_aru;
	if (range == 0) {
		/*
		 * No messages to copy
		 */
		goto no_originate;
	}
	assert (range < QUEUE_RTR_ITEMS_SIZE_MAX);

	log_printf (instance->totemsrp_log_level_debug,
		"copying all old ring messages from %x-%x.",
		low_ring_aru + 1, instance->old_ring_state_high_seq_received);

	for (i = 1; i <= range; i++) {
		struct sort_queue_item *sort_queue_item;
		struct message_item message_item;
		void *ptr;
		int res;

		res = sq_item_get (&instance->regular_sort_queue,
			low_ring_aru + i, &ptr);
		if (res != 0) {
			continue;
		}
		sort_queue_item = ptr;
		messages_originated++;
		memset (&message_item, 0, sizeof (struct message_item));
		// TODO LEAK
		message_item.mcast = totemsrp_buffer_alloc (instance);
		assert (message_item.mcast);
		message_item.mcast->header.type = MESSAGE_TYPE_MCAST;
		srp_addr_copy (&message_item.mcast->system_from, &instance->my_id);
		/* the old ring message travels encapsulated on the new ring */
		message_item.mcast->header.encapsulated = MESSAGE_ENCAPSULATED;

		message_item.mcast->header.nodeid = instance->my_id.addr[0].nodeid;
		assert (message_item.mcast->header.nodeid);
		message_item.mcast->header.endian_detector = ENDIAN_LOCAL;
		memcpy (&message_item.mcast->ring_id, &instance->my_ring_id,
			sizeof (struct memb_ring_id));
		message_item.msg_len = sort_queue_item->msg_len + sizeof (struct mcast);
		memcpy (((char *)message_item.mcast) + sizeof (struct mcast),
			sort_queue_item->mcast,
			sort_queue_item->msg_len);
		cs_queue_item_add (&instance->retrans_message_queue, &message_item);
	}
	log_printf (instance->totemsrp_log_level_debug,
		"Originated %d messages in RECOVERY.", messages_originated);
	goto originated;

no_originate:
	log_printf (instance->totemsrp_log_level_debug,
		"Did not need to originate any messages in recovery.");

originated:
	instance->my_aru = SEQNO_START_MSG;
	instance->my_aru_count = 0;
	instance->my_seq_unchanged = 0;
	instance->my_high_seq_received = SEQNO_START_MSG;
	instance->my_install_seq = SEQNO_START_MSG;
	instance->last_released = SEQNO_START_MSG;

	reset_token_timeout (instance); // REVIEWED
	reset_token_retransmit_timeout (instance); // REVIEWED

	instance->memb_state = MEMB_STATE_RECOVERY;
	instance->stats.recovery_entered++;
	instance->stats.continuous_gather = 0;

	return;
}

/*
 * External event notification.  The type and value arguments are
 * currently unused: any event simply cancels a held token so pending
 * work can proceed.
 */
void totemsrp_event_signal (void *srp_context, enum totem_event_type type, int value)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context;

	token_hold_cancel_send (instance);

	return;
}

/*
 * Queue a multicast message built from iovec for transmission on a
 * future token visit.  Returns 0 on success, -1 when the new-message
 * queue is full or no message buffer could be allocated.
 */
int totemsrp_mcast (
	void *srp_context,
	struct iovec *iovec,
	unsigned int iov_len,
	int guarantee)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context;
	int i;
	struct message_item message_item;
	char *addr;
	unsigned int addr_idx;

	if (cs_queue_is_full (&instance->new_message_queue)) {
		log_printf (instance->totemsrp_log_level_debug, "queue full");
		return (-1);
	}

	memset (&message_item, 0, sizeof (struct message_item));

	/*
	 * Allocate pending item
	 */
	message_item.mcast = totemsrp_buffer_alloc (instance);
	if (message_item.mcast == 0) {
		goto error_mcast;
	}

	/*
	 * Set mcast header
	 */
	memset(message_item.mcast, 0, sizeof (struct mcast));
	message_item.mcast->header.type = MESSAGE_TYPE_MCAST;
	message_item.mcast->header.endian_detector = ENDIAN_LOCAL;
	message_item.mcast->header.encapsulated = MESSAGE_NOT_ENCAPSULATED;
	message_item.mcast->header.nodeid = instance->my_id.addr[0].nodeid;
	assert (message_item.mcast->header.nodeid);

	message_item.mcast->guarantee = guarantee;
	srp_addr_copy (&message_item.mcast->system_from, &instance->my_id);

	/*
	 * Copy the caller's scatter/gather payload directly behind the
	 * mcast header.  NOTE(review): the total iovec length is assumed
	 * to fit the allocated buffer — no bound is checked here; verify
	 * against the caller's maximum message size.
	 */
	addr = (char *)message_item.mcast;
	addr_idx = sizeof (struct mcast);
	for (i = 0; i < iov_len; i++) {
		memcpy (&addr[addr_idx], iovec[i].iov_base, iovec[i].iov_len);
		addr_idx += iovec[i].iov_len;
	}
	message_item.msg_len = addr_idx;

	log_printf (instance->totemsrp_log_level_debug, "mcasted message added to pending queue");
	instance->stats.mcast_tx++;
	cs_queue_item_add (&instance->new_message_queue, &message_item);

	return (0);

error_mcast:
	return (-1);
}

/*
 * Determine if there is room to queue a new message
 */
int totemsrp_avail (void *srp_context)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context;
	int avail;

	cs_queue_avail (&instance->new_message_queue, &avail);

	return (avail);
}

/*
 * ORF Token Management
 */
/*
 * Recast message to mcast group if it is available
 */
static int orf_token_remcast (
	struct totemsrp_instance *instance,
	int seq)
{
	struct sort_queue_item *sort_queue_item;
	int res;
	void *ptr;

	struct sq *sort_queue;

	/* during RECOVERY retransmits come from the recovery sort queue */
	if (instance->memb_state == MEMB_STATE_RECOVERY) {
		sort_queue = &instance->recovery_sort_queue;
	} else {
		sort_queue = &instance->regular_sort_queue;
	}

	res = sq_in_range (sort_queue, seq);
	if (res == 0) {
		log_printf (instance->totemsrp_log_level_debug,
			"sq not in range");
		return (-1);
	}

	/*
	 * Get RTR item at seq, if not available, return
	 */
	res = sq_item_get (sort_queue, seq, &ptr);
	if (res != 0) {
		return -1;
	}

	sort_queue_item = ptr;

	totemrrp_mcast_noflush_send (
		instance->totemrrp_context,
		sort_queue_item->mcast,
		sort_queue_item->msg_len);

	return (0);
}

/*
 * Free all freeable messages from ring
 */
static void messages_free (
	struct totemsrp_instance *instance,
	unsigned int token_aru)
{
	struct sort_queue_item *regular_message;
	unsigned int i;
	int res;
	int log_release = 0;
	unsigned int release_to;
	unsigned int range = 0;

	/*
	 * Release up to the lowest of: the ring-wide aru carried on the
	 * token, our last aru, and what we have already delivered.
	 */
	release_to = token_aru;
	if (sq_lt_compare (instance->my_last_aru, release_to)) {
		release_to = instance->my_last_aru;
	}
	if (sq_lt_compare (instance->my_high_delivered, release_to)) {
		release_to = instance->my_high_delivered;
	}

	/*
	 * Ensure we dont try release before an already released point
	 */
	if (sq_lt_compare (release_to, instance->last_released)) {
		return;
	}

	range = release_to - instance->last_released;
	assert (range < QUEUE_RTR_ITEMS_SIZE_MAX);

	/*
	 * Release retransmit list items if group aru indicates they are transmitted
	 */
	for (i = 1; i <= range; i++) {
		void *ptr;

		res = sq_item_get (&instance->regular_sort_queue,
			instance->last_released + i, &ptr);
		if (res == 0) {
			regular_message = ptr;
			totemsrp_buffer_release (instance, regular_message->mcast);
		}
		sq_items_release (&instance->regular_sort_queue,
			instance->last_released + i);

		log_release = 1;
	}
	instance->last_released += range;

	if (log_release) {
		log_printf (instance->totemsrp_log_level_debug,
			"releasing messages up to and including %x", release_to);
	}
}

/*
 * Advance my_aru past every consecutively received message, stopping
 * at the first hole in the active sort queue.
 */
static void update_aru (
	struct totemsrp_instance *instance)
{
	unsigned int i;
	int res;
	struct sq *sort_queue;
	unsigned int range;
	unsigned int my_aru_saved = 0;

	if (instance->memb_state == MEMB_STATE_RECOVERY) {
		sort_queue = &instance->recovery_sort_queue;
	} else {
		sort_queue = &instance->regular_sort_queue;
	}

	range = instance->my_high_seq_received - instance->my_aru;

	my_aru_saved = instance->my_aru;

	for (i = 1; i <= range; i++) {
		void *ptr;

		res = sq_item_get (sort_queue, my_aru_saved + i, &ptr);
		/*
		 * If hole, stop updating aru
		 */
		if (res != 0) {
			break;
		}
	}
	/*
	 * i - 1 consecutive items were present (the loop leaves i == 1
	 * when range is 0, so this is then a no-op).
	 */
	instance->my_aru += i - 1;
}

/*
 * Multicasts pending messages onto the
ring (requires orf_token possession) */
static int orf_token_mcast (
	struct totemsrp_instance *instance,
	struct orf_token *token,
	int fcc_mcasts_allowed)
{
	struct message_item *message_item = 0;
	struct cs_queue *mcast_queue;
	struct sq *sort_queue;
	struct sort_queue_item sort_queue_item;
	struct mcast *mcast;
	unsigned int fcc_mcast_current;

	/*
	 * In RECOVERY the retransmit queue is drained instead of new
	 * application messages.
	 */
	if (instance->memb_state == MEMB_STATE_RECOVERY) {
		mcast_queue = &instance->retrans_message_queue;
		sort_queue = &instance->recovery_sort_queue;
		reset_token_retransmit_timeout (instance); // REVIEWED
	} else {
		mcast_queue = &instance->new_message_queue;
		sort_queue = &instance->regular_sort_queue;
	}

	for (fcc_mcast_current = 0; fcc_mcast_current < fcc_mcasts_allowed; fcc_mcast_current++) {
		if (cs_queue_is_empty (mcast_queue)) {
			break;
		}
		message_item = (struct message_item *)cs_queue_item_get (mcast_queue);

		/* assign the next sequence number from the token */
		message_item->mcast->seq = ++token->seq;
		message_item->mcast->this_seqno = instance->global_seqno++;

		/*
		 * Build IO vector
		 */
		memset (&sort_queue_item, 0, sizeof (struct sort_queue_item));
		sort_queue_item.mcast = message_item->mcast;
		sort_queue_item.msg_len = message_item->msg_len;

		mcast = sort_queue_item.mcast;

		memcpy (&mcast->ring_id, &instance->my_ring_id,
			sizeof (struct memb_ring_id));

		/*
		 * Add message to retransmit queue
		 */
		sq_item_add (sort_queue,
			&sort_queue_item, message_item->mcast->seq);

		totemrrp_mcast_noflush_send (
			instance->totemrrp_context,
			message_item->mcast,
			message_item->msg_len);

		/*
		 * Delete item from pending queue
		 */
		cs_queue_item_remove (mcast_queue);

		/*
		 * If messages mcasted, deliver any new messages to totempg
		 */
		instance->my_high_seq_received = token->seq;
	}

	update_aru (instance);

	/*
	 * Return 1 if more messages are available for single node clusters
	 */
	return (fcc_mcast_current);
}

/*
 * Remulticasts messages in orf_token's retransmit list (requires orf_token)
 * Modify's orf_token's rtr to include retransmits required by this process
 */
static int orf_token_rtr (
	struct totemsrp_instance *instance,
	struct orf_token *orf_token,
	unsigned int *fcc_allowed)
{
	unsigned int res;
	unsigned int i, j;
	unsigned int found;
	struct sq *sort_queue;
	struct rtr_item *rtr_list;
	unsigned int range = 0;
	char retransmit_msg[1024];
	char value[64];

	if (instance->memb_state == MEMB_STATE_RECOVERY) {
		sort_queue = &instance->recovery_sort_queue;
	} else {
		sort_queue = &instance->regular_sort_queue;
	}

	rtr_list = &orf_token->rtr_list[0];

	/*
	 * NOTE(review): retransmit_msg is built with strcpy/strcat and
	 * stays within its 1024 bytes only while rtr_list_entries is
	 * bounded (RETRANSMIT_ENTRIES_MAX) — confirm the bound keeps
	 * this safe.
	 */
	strcpy (retransmit_msg, "Retransmit List: ");
	if (orf_token->rtr_list_entries) {
		log_printf (instance->totemsrp_log_level_debug,
			"Retransmit List %d", orf_token->rtr_list_entries);
		for (i = 0; i < orf_token->rtr_list_entries; i++) {
			sprintf (value, "%x ", rtr_list[i].seq);
			strcat (retransmit_msg, value);
		}
		strcat (retransmit_msg, "");
		log_printf (instance->totemsrp_log_level_notice,
			"%s", retransmit_msg);
	}

	/*
	 * Retransmit messages on orf_token's RTR list from RTR queue
	 */
	for (instance->fcc_remcast_current = 0, i = 0;
		instance->fcc_remcast_current < *fcc_allowed && i < orf_token->rtr_list_entries;) {

		/*
		 * If this retransmit request isn't from this configuration,
		 * try next rtr entry
		 */
		if (memcmp (&rtr_list[i].ring_id, &instance->my_ring_id,
			sizeof (struct memb_ring_id)) != 0) {

			i += 1;
			continue;
		}

		res = orf_token_remcast (instance, rtr_list[i].seq);
		if (res == 0) {
			/*
			 * Multicasted message, so no need to copy to new retransmit list
			 */
			orf_token->rtr_list_entries -= 1;
			assert (orf_token->rtr_list_entries >= 0);
			memmove (&rtr_list[i], &rtr_list[i + 1],
				sizeof (struct rtr_item) * (orf_token->rtr_list_entries - i));

			instance->stats.mcast_retx++;
			instance->fcc_remcast_current++;
		} else {
			i += 1;
		}
	}
	*fcc_allowed = *fcc_allowed - instance->fcc_remcast_current;

	/*
	 * Add messages to retransmit to RTR list
	 * but only retry if there is room in the retransmit list
	 */
	range = orf_token->seq - instance->my_aru;
	assert (range < QUEUE_RTR_ITEMS_SIZE_MAX);

	for (i = 1; (orf_token->rtr_list_entries < RETRANSMIT_ENTRIES_MAX) &&
		(i <= range); i++) {

		/*
		 * Ensure message is within the sort queue range
		 */
		res = sq_in_range (sort_queue, instance->my_aru + i);
		if (res == 0) {
			break;
		}

		/*
		 * Find if a message is missing from this processor
		 */
		res = sq_item_inuse (sort_queue, instance->my_aru + i);
		if (res == 0) {
			/*
			 * Determine how many times we have missed receiving
			 * this sequence number.  sq_item_miss_count increments
			 * a counter for the sequence number.  The miss count
			 * will be returned and compared.  This allows time for
			 * delayed multicast messages to be received before
			 * declaring the message is missing and requesting a
			 * retransmit.
			 */
			res = sq_item_miss_count (sort_queue, instance->my_aru + i);
			if (res < instance->totem_config->miss_count_const) {
				continue;
			}

			/*
			 * Determine if missing message is already in retransmit list
			 */
			found = 0;
			for (j = 0; j < orf_token->rtr_list_entries; j++) {
				if (instance->my_aru + i == rtr_list[j].seq) {
					found = 1;
				}
			}
			if (found == 0) {
				/*
				 * Missing message not found in current retransmit list so add it
				 */
				memcpy (&rtr_list[orf_token->rtr_list_entries].ring_id,
					&instance->my_ring_id, sizeof (struct memb_ring_id));
				rtr_list[orf_token->rtr_list_entries].seq = instance->my_aru + i;
				orf_token->rtr_list_entries++;
			}
		}
	}
	return (instance->fcc_remcast_current);
}

/*
 * Resend the last token (or commit token) copy that was kept for
 * retransmission.
 */
static void token_retransmit (struct totemsrp_instance *instance)
{
	totemrrp_token_send (instance->totemrrp_context,
		instance->orf_token_retransmit,
		instance->orf_token_retransmit_size);
}

/*
 * Retransmit the regular token if no mcast or token has
 * been received in retransmit token period retransmit
 * the token to the next processor
 */
static void timer_function_token_retransmit_timeout (void *data)
{
	struct totemsrp_instance *instance = data;

	switch (instance->memb_state) {
	case MEMB_STATE_GATHER:
		break;
	case MEMB_STATE_COMMIT:
	case MEMB_STATE_OPERATIONAL:
	case MEMB_STATE_RECOVERY:
		token_retransmit (instance);
		reset_token_retransmit_timeout (instance); // REVIEWED
		break;
	}
}

/*
 * Timer: re-send a held token once the hold period expires (not while
 * gathering or committing).
 */
static void timer_function_token_hold_retransmit_timeout (void *data)
{
	struct
totemsrp_instance *instance = data; switch (instance->memb_state) { case MEMB_STATE_GATHER: break; case MEMB_STATE_COMMIT: break; case MEMB_STATE_OPERATIONAL: case MEMB_STATE_RECOVERY: token_retransmit (instance); break; } } static void timer_function_merge_detect_timeout(void *data) { struct totemsrp_instance *instance = data; instance->my_merge_detect_timeout_outstanding = 0; switch (instance->memb_state) { case MEMB_STATE_OPERATIONAL: if (totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0])) { memb_merge_detect_transmit (instance); } break; case MEMB_STATE_GATHER: case MEMB_STATE_COMMIT: case MEMB_STATE_RECOVERY: break; } } /* * Send orf_token to next member (requires orf_token) */ static int token_send ( struct totemsrp_instance *instance, struct orf_token *orf_token, int forward_token) { int res = 0; unsigned int orf_token_size; orf_token_size = sizeof (struct orf_token) + (orf_token->rtr_list_entries * sizeof (struct rtr_item)); orf_token->header.nodeid = instance->my_id.addr[0].nodeid; memcpy (instance->orf_token_retransmit, orf_token, orf_token_size); instance->orf_token_retransmit_size = orf_token_size; assert (orf_token->header.nodeid); if (forward_token == 0) { return (0); } totemrrp_token_send (instance->totemrrp_context, orf_token, orf_token_size); return (res); } static int token_hold_cancel_send (struct totemsrp_instance *instance) { struct token_hold_cancel token_hold_cancel; /* * Only cancel if the token is currently held */ if (instance->my_token_held == 0) { return (0); } instance->my_token_held = 0; /* * Build message */ token_hold_cancel.header.type = MESSAGE_TYPE_TOKEN_HOLD_CANCEL; token_hold_cancel.header.endian_detector = ENDIAN_LOCAL; token_hold_cancel.header.encapsulated = 0; token_hold_cancel.header.nodeid = instance->my_id.addr[0].nodeid; memcpy (&token_hold_cancel.ring_id, &instance->my_ring_id, sizeof (struct memb_ring_id)); assert (token_hold_cancel.header.nodeid); instance->stats.token_hold_cancel_tx++; 
totemrrp_mcast_flush_send (instance->totemrrp_context, &token_hold_cancel, sizeof (struct token_hold_cancel)); return (0); } static int orf_token_send_initial (struct totemsrp_instance *instance) { struct orf_token orf_token; int res; orf_token.header.type = MESSAGE_TYPE_ORF_TOKEN; orf_token.header.endian_detector = ENDIAN_LOCAL; orf_token.header.encapsulated = 0; orf_token.header.nodeid = instance->my_id.addr[0].nodeid; assert (orf_token.header.nodeid); orf_token.seq = SEQNO_START_MSG; orf_token.token_seq = SEQNO_START_TOKEN; orf_token.retrans_flg = 1; instance->my_set_retrans_flg = 1; instance->stats.orf_token_tx++; if (cs_queue_is_empty (&instance->retrans_message_queue) == 1) { orf_token.retrans_flg = 0; instance->my_set_retrans_flg = 0; } else { orf_token.retrans_flg = 1; instance->my_set_retrans_flg = 1; } orf_token.aru = 0; orf_token.aru = SEQNO_START_MSG - 1; orf_token.aru_addr = instance->my_id.addr[0].nodeid; memcpy (&orf_token.ring_id, &instance->my_ring_id, sizeof (struct memb_ring_id)); orf_token.fcc = 0; orf_token.backlog = 0; orf_token.rtr_list_entries = 0; res = token_send (instance, &orf_token, 1); return (res); } static void memb_state_commit_token_update ( struct totemsrp_instance *instance) { struct srp_addr *addr; struct memb_commit_token_memb_entry *memb_list; unsigned int high_aru; unsigned int i; addr = (struct srp_addr *)instance->commit_token->end_of_commit_token; memb_list = (struct memb_commit_token_memb_entry *)(addr + instance->commit_token->addr_entries); memcpy (instance->my_new_memb_list, addr, sizeof (struct srp_addr) * instance->commit_token->addr_entries); instance->my_new_memb_entries = instance->commit_token->addr_entries; memcpy (&memb_list[instance->commit_token->memb_index].ring_id, &instance->my_old_ring_id, sizeof (struct memb_ring_id)); memb_list[instance->commit_token->memb_index].aru = instance->old_ring_state_aru; /* * TODO high delivered is really instance->my_aru, but with safe this * could change? 
	 */
	instance->my_received_flg =
		(instance->my_aru == instance->my_high_seq_received);

	memb_list[instance->commit_token->memb_index].received_flg = instance->my_received_flg;

	memb_list[instance->commit_token->memb_index].high_delivered = instance->my_high_delivered;
	/*
	 * find high aru up to current memb_index for all matching ring ids
	 * if any ring id matching memb_index has aru less then high aru set
	 * received flag for that entry to false
	 */
	high_aru = memb_list[instance->commit_token->memb_index].aru;
	for (i = 0; i <= instance->commit_token->memb_index; i++) {
		if (memcmp (&memb_list[instance->commit_token->memb_index].ring_id,
			&memb_list[i].ring_id,
			sizeof (struct memb_ring_id)) == 0) {

			if (sq_lt_compare (high_aru, memb_list[i].aru)) {
				high_aru = memb_list[i].aru;
			}
		}
	}

	for (i = 0; i <= instance->commit_token->memb_index; i++) {
		if (memcmp (&memb_list[instance->commit_token->memb_index].ring_id,
			&memb_list[i].ring_id,
			sizeof (struct memb_ring_id)) == 0) {

			if (sq_lt_compare (memb_list[i].aru, high_aru)) {
				memb_list[i].received_flg = 0;
				if (i == instance->commit_token->memb_index) {
					instance->my_received_flg = 0;
				}
			}
		}
	}

	instance->commit_token->header.nodeid = instance->my_id.addr[0].nodeid;
	instance->commit_token->memb_index += 1;
	assert (instance->commit_token->memb_index <= instance->commit_token->addr_entries);
	assert (instance->commit_token->header.nodeid);
}

/*
 * Point totemrrp at the next processor on the forming ring so the
 * commit token can be forwarded on every configured interface.
 */
static void memb_state_commit_token_target_set (
	struct totemsrp_instance *instance)
{
	struct srp_addr *addr;
	unsigned int i;

	addr = (struct srp_addr *)instance->commit_token->end_of_commit_token;

	for (i = 0; i < instance->totem_config->interface_count; i++) {
		totemrrp_token_target_set (
			instance->totemrrp_context,
			/* memb_index wraps so the last member targets the first */
			&addr[instance->commit_token->memb_index %
				instance->commit_token->addr_entries].addr[i],
			i);
	}
}

/*
 * Forward a received commit token while in RECOVERY, keeping a copy
 * so it can be retransmitted if lost.  Always returns 0.
 */
static int memb_state_commit_token_send_recovery (
	struct totemsrp_instance *instance,
	struct memb_commit_token *commit_token)
{
	unsigned int commit_token_size;

	commit_token->token_seq++;
	commit_token->header.nodeid = instance->my_id.addr[0].nodeid;
	commit_token_size = sizeof (struct memb_commit_token) +
		((sizeof (struct srp_addr) +
			sizeof (struct memb_commit_token_memb_entry)) * commit_token->addr_entries);
	/*
	 * Make a copy for retransmission if necessary
	 */
	memcpy (instance->orf_token_retransmit, commit_token, commit_token_size);
	instance->orf_token_retransmit_size = commit_token_size;

	instance->stats.memb_commit_token_tx++;

	totemrrp_token_send (instance->totemrrp_context,
		commit_token,
		commit_token_size);

	/*
	 * Request retransmission of the commit token in case it is lost
	 */
	reset_token_retransmit_timeout (instance);
	return (0);
}

/*
 * Send this instance's own commit token (the recovery variant above
 * forwards a received token instead).  Always returns 0.
 */
static int memb_state_commit_token_send (
	struct totemsrp_instance *instance)
{
	unsigned int commit_token_size;

	instance->commit_token->token_seq++;
	instance->commit_token->header.nodeid = instance->my_id.addr[0].nodeid;
	commit_token_size = sizeof (struct memb_commit_token) +
		((sizeof (struct srp_addr) +
			sizeof (struct memb_commit_token_memb_entry)) * instance->commit_token->addr_entries);
	/*
	 * Make a copy for retransmission if necessary
	 */
	memcpy (instance->orf_token_retransmit, instance->commit_token, commit_token_size);
	instance->orf_token_retransmit_size = commit_token_size;

	instance->stats.memb_commit_token_tx++;

	totemrrp_token_send (instance->totemrrp_context,
		instance->commit_token,
		commit_token_size);

	/*
	 * Request retransmission of the commit token in case it is lost
	 */
	reset_token_retransmit_timeout (instance);
	return (0);
}

/*
 * Return 1 when this processor has the smallest address among the
 * live members (proc list minus failed list), i.e. it should act as
 * the representative.
 */
static int memb_lowest_in_config (struct totemsrp_instance *instance)
{
	struct srp_addr token_memb[PROCESSOR_COUNT_MAX];
	int token_memb_entries = 0;
	int i;
	struct totem_ip_address *lowest_addr;

	memb_set_subtract (token_memb, &token_memb_entries,
		instance->my_proc_list, instance->my_proc_list_entries,
		instance->my_failed_list, instance->my_failed_list_entries);

	/*
	 * find representative by searching for smallest identifier
	 */
	lowest_addr = &token_memb[0].addr[0];
	for (i = 1; i <
token_memb_entries; i++) {
		if (totemip_compare(lowest_addr, &token_memb[i].addr[0]) > 0) {
			/* NOTE(review): copies the smaller address into
			 * token_memb[0].addr[0] in place (lowest_addr aliases it)
			 * rather than just retargeting the pointer - token_memb is
			 * a local scratch copy so this is harmless here. */
			totemip_copy (lowest_addr, &token_memb[i].addr[0]);
		}
	}
	return (totemip_compare (lowest_addr, &instance->my_id.addr[0]) == 0);
}

/*
 * qsort() comparator ordering srp_addr entries by their first
 * interface address.
 */
static int srp_addr_compare (const void *a, const void *b)
{
	const struct srp_addr *srp_a = (const struct srp_addr *)a;
	const struct srp_addr *srp_b = (const struct srp_addr *)b;

	return (totemip_compare (&srp_a->addr[0], &srp_b->addr[0]));
}

/*
 * Build a fresh commit token in instance->commit_token for the
 * (proc - failed) membership set; only the ring representative does this.
 */
static void memb_state_commit_token_create (
	struct totemsrp_instance *instance)
{
	struct srp_addr token_memb[PROCESSOR_COUNT_MAX];
	struct srp_addr *addr;
	struct memb_commit_token_memb_entry *memb_list;
	int token_memb_entries = 0;

	log_printf (instance->totemsrp_log_level_debug,
		"Creating commit token because I am the rep.");

	memb_set_subtract (token_memb, &token_memb_entries,
		instance->my_proc_list, instance->my_proc_list_entries,
		instance->my_failed_list, instance->my_failed_list_entries);

	memset (instance->commit_token, 0, sizeof (struct memb_commit_token));
	instance->commit_token->header.type = MESSAGE_TYPE_MEMB_COMMIT_TOKEN;
	instance->commit_token->header.endian_detector = ENDIAN_LOCAL;
	instance->commit_token->header.encapsulated = 0;
	instance->commit_token->header.nodeid = instance->my_id.addr[0].nodeid;
	assert (instance->commit_token->header.nodeid);

	totemip_copy(&instance->commit_token->ring_id.rep, &instance->my_id.addr[0]);

	/* New ring sequence number; increments of 4 per the Totem protocol */
	instance->commit_token->ring_id.seq = instance->token_ring_id_seq + 4;

	/*
	 * This qsort is necessary to ensure the commit token traverses
	 * the ring in the proper order
	 */
	qsort (token_memb, token_memb_entries, sizeof (struct srp_addr),
		srp_addr_compare);

	instance->commit_token->memb_index = 0;
	instance->commit_token->addr_entries = token_memb_entries;

	/* Variable-length trailer: srp_addr array, then memb entry array */
	addr = (struct srp_addr *)instance->commit_token->end_of_commit_token;
	memb_list = (struct memb_commit_token_memb_entry *)(addr + instance->commit_token->addr_entries);

	memcpy (addr, token_memb,
		token_memb_entries * sizeof (struct srp_addr));
	memset (memb_list, 0,
		sizeof (struct memb_commit_token_memb_entry) * token_memb_entries);
}

/*
 * Multicast a JOIN message carrying this processor's current proc and
 * failed lists.
 */
static void memb_join_message_send (struct totemsrp_instance *instance)
{
	char memb_join_data[40000];
	struct memb_join *memb_join = (struct memb_join *)memb_join_data;
	char *addr;
	unsigned int addr_idx;

	memb_join->header.type = MESSAGE_TYPE_MEMB_JOIN;
	memb_join->header.endian_detector = ENDIAN_LOCAL;
	memb_join->header.encapsulated = 0;
	memb_join->header.nodeid = instance->my_id.addr[0].nodeid;
	assert (memb_join->header.nodeid);

	memb_join->ring_seq = instance->my_ring_id.seq;
	memb_join->proc_list_entries = instance->my_proc_list_entries;
	memb_join->failed_list_entries = instance->my_failed_list_entries;
	srp_addr_copy (&memb_join->system_from, &instance->my_id);

	/*
	 * This mess adds the joined and failed processor lists into the join
	 * message
	 */
	addr = (char *)memb_join;
	addr_idx = sizeof (struct memb_join);
	memcpy (&addr[addr_idx],
		instance->my_proc_list,
		instance->my_proc_list_entries * sizeof (struct srp_addr));
	addr_idx += instance->my_proc_list_entries * sizeof (struct srp_addr);
	memcpy (&addr[addr_idx],
		instance->my_failed_list,
		instance->my_failed_list_entries * sizeof (struct srp_addr));
	addr_idx += instance->my_failed_list_entries * sizeof (struct srp_addr);

	/* Random backoff to de-synchronize simultaneous joiners */
	if (instance->totem_config->send_join_timeout) {
		usleep (random() % (instance->totem_config->send_join_timeout * 1000));
	}

	instance->stats.memb_join_tx++;
	totemrrp_mcast_flush_send (
		instance->totemrrp_context,
		memb_join,
		addr_idx);
}

/*
 * Multicast a leave notification: a JOIN message with this processor
 * moved to the failed list and nodeid set to LEAVE_DUMMY_NODEID.
 */
static void memb_leave_message_send (struct totemsrp_instance *instance)
{
	char memb_join_data[40000];
	struct memb_join *memb_join = (struct memb_join *)memb_join_data;
	char *addr;
	unsigned int addr_idx;
	int active_memb_entries;
	struct srp_addr active_memb[PROCESSOR_COUNT_MAX];

	log_printf (instance->totemsrp_log_level_debug,
		"sending join/leave message");

	/*
	 * add us to the failed list, and remove us from
	 * the members list
	 */
	memb_set_merge(
		&instance->my_id, 1,
		instance->my_failed_list, &instance->my_failed_list_entries);
	memb_set_subtract (active_memb, &active_memb_entries,
		instance->my_proc_list, instance->my_proc_list_entries,
		&instance->my_id, 1);

	memb_join->header.type = MESSAGE_TYPE_MEMB_JOIN;
	memb_join->header.endian_detector = ENDIAN_LOCAL;
	memb_join->header.encapsulated = 0;
	/* Dummy nodeid marks this join message as a leave notification */
	memb_join->header.nodeid = LEAVE_DUMMY_NODEID;

	memb_join->ring_seq = instance->my_ring_id.seq;
	memb_join->proc_list_entries = active_memb_entries;
	memb_join->failed_list_entries = instance->my_failed_list_entries;
	srp_addr_copy (&memb_join->system_from, &instance->my_id);
	memb_join->system_from.addr[0].nodeid = LEAVE_DUMMY_NODEID;

	// TODO: CC Maybe use the actual join send routine.
	/*
	 * This mess adds the joined and failed processor lists into the join
	 * message
	 */
	addr = (char *)memb_join;
	addr_idx = sizeof (struct memb_join);
	memcpy (&addr[addr_idx],
		active_memb,
		active_memb_entries * sizeof (struct srp_addr));
	addr_idx += active_memb_entries * sizeof (struct srp_addr);
	memcpy (&addr[addr_idx],
		instance->my_failed_list,
		instance->my_failed_list_entries * sizeof (struct srp_addr));
	addr_idx += instance->my_failed_list_entries * sizeof (struct srp_addr);

	/* Random backoff, same as memb_join_message_send() */
	if (instance->totem_config->send_join_timeout) {
		usleep (random() % (instance->totem_config->send_join_timeout * 1000));
	}

	instance->stats.memb_join_tx++;
	totemrrp_mcast_flush_send (
		instance->totemrrp_context,
		memb_join,
		addr_idx);
}

/*
 * Multicast a merge-detect message carrying our ring id so processors
 * in other ring partitions can discover us.
 */
static void memb_merge_detect_transmit (struct totemsrp_instance *instance)
{
	struct memb_merge_detect memb_merge_detect;

	memb_merge_detect.header.type = MESSAGE_TYPE_MEMB_MERGE_DETECT;
	memb_merge_detect.header.endian_detector = ENDIAN_LOCAL;
	memb_merge_detect.header.encapsulated = 0;
	memb_merge_detect.header.nodeid = instance->my_id.addr[0].nodeid;
	srp_addr_copy (&memb_merge_detect.system_from, &instance->my_id);
	memcpy (&memb_merge_detect.ring_id, &instance->my_ring_id,
		sizeof (struct memb_ring_id));
	assert (memb_merge_detect.header.nodeid);

	instance->stats.memb_merge_detect_tx++;
	totemrrp_mcast_flush_send (instance->totemrrp_context,
		&memb_merge_detect,
		sizeof (struct memb_merge_detect));
}

/*
 * Load the persisted ring sequence number from <rundir>/ringid_<addr>,
 * or create the file with seq 0 if it is missing/unreadable.  Also
 * initializes memb_ring_id->rep and instance->token_ring_id_seq.
 */
static void memb_ring_id_create_or_load (
	struct totemsrp_instance *instance,
	struct memb_ring_id *memb_ring_id)
{
	int fd;
	int res = 0;
	char filename[PATH_MAX];

	snprintf (filename, sizeof(filename), "%s/ringid_%s",
		rundir, totemip_print (&instance->my_id.addr[0]));
	fd = open (filename, O_RDONLY, 0700);
	/*
	 * If file can be opened and read, read the ring id
	 */
	if (fd != -1) {
		res = read (fd, &memb_ring_id->seq, sizeof (uint64_t));
		close (fd);
	}
	/*
	 * If file could not be opened or read, create a new ring id
	 */
	if ((fd == -1) || (res != sizeof (uint64_t))) {
		memb_ring_id->seq = 0;
		/* NOTE(review): umask(0) changes process-wide umask and is
		 * never restored - confirm this is intentional */
		umask(0);
		fd = open (filename, O_CREAT|O_RDWR, 0700);
		if (fd != -1) {
			res = write (fd, &memb_ring_id->seq, sizeof (uint64_t));
			close (fd);
			if (res == -1) {
				LOGSYS_PERROR (errno, instance->totemsrp_log_level_warning,
					"Couldn't write ringid file '%s'", filename);
			}
		} else {
			LOGSYS_PERROR (errno, instance->totemsrp_log_level_warning,
				"Couldn't create ringid file '%s'", filename);
		}
	}

	totemip_copy(&memb_ring_id->rep, &instance->my_id.addr[0]);
	assert (!totemip_zero_check(&memb_ring_id->rep));
	instance->token_ring_id_seq = memb_ring_id->seq;
}

/*
 * Adopt ring_id as the current ring id and persist its sequence number
 * to stable storage.
 * NOTE(review): uses filename[256] and mode 0777 while
 * memb_ring_id_create_or_load() uses PATH_MAX and 0700 - the
 * inconsistency looks unintentional; confirm before changing.
 */
static void memb_ring_id_set_and_store (
	struct totemsrp_instance *instance,
	const struct memb_ring_id *ring_id)
{
	char filename[256];
	int fd;
	int res;

	memcpy (&instance->my_ring_id, ring_id, sizeof (struct memb_ring_id));

	snprintf (filename, sizeof(filename), "%s/ringid_%s",
		rundir, totemip_print (&instance->my_id.addr[0]));

	fd = open (filename, O_WRONLY, 0777);
	if (fd == -1) {
		fd = open (filename, O_CREAT|O_RDWR, 0777);
	}
	if (fd == -1) {
		LOGSYS_PERROR(errno, instance->totemsrp_log_level_warning,
			"Couldn't store new ring id %llx to stable storage",
			instance->my_ring_id.seq);

		assert (0);
		return;
	}
	log_printf (instance->totemsrp_log_level_debug,
		"Storing new sequence id for ring %llx",
		instance->my_ring_id.seq);
	//assert (fd > 0);
	res = write (fd, &instance->my_ring_id.seq, sizeof (unsigned long long));
	assert (res == sizeof (unsigned long long));
	close (fd);
}

/*
 * Public API: register a callback to run when the token is received or
 * sent.  Returns 0 on success, -1 on allocation failure; the opaque
 * handle written to *handle_out is released via
 * totemsrp_callback_token_destroy().
 */
int totemsrp_callback_token_create (
	void *srp_context,
	void **handle_out,
	enum totem_callback_token_type type,
	int delete,
	int (*callback_fn) (enum totem_callback_token_type type, const void *),
	const void *data)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)srp_context;
	struct token_callback_instance *callback_handle;

	token_hold_cancel_send (instance);

	callback_handle = malloc (sizeof (struct token_callback_instance));
	if (callback_handle == 0) {
		return (-1);
	}
	*handle_out = (void *)callback_handle;
	list_init (&callback_handle->list);
	callback_handle->callback_fn = callback_fn;
	callback_handle->data = (void *) data;
	callback_handle->callback_type = type;
	/* delete == 1 means one-shot: the callback is removed (and freed)
	 * after a successful execution */
	callback_handle->delete = delete;
	switch (type) {
	case TOTEM_CALLBACK_TOKEN_RECEIVED:
		list_add (&callback_handle->list, &instance->token_callback_received_listhead);
		break;
	case TOTEM_CALLBACK_TOKEN_SENT:
		list_add (&callback_handle->list, &instance->token_callback_sent_listhead);
		break;
	}

	return (0);
}

/*
 * Public API: unregister and free a token callback created above.
 * Safe to call with *handle_out == NULL; clears *handle_out.
 */
void totemsrp_callback_token_destroy (void *srp_context, void **handle_out)
{
	struct token_callback_instance *h;

	if (*handle_out) {
		h = (struct token_callback_instance *)*handle_out;
		list_del (&h->list);
		free (h);
		h = NULL;
		*handle_out = 0;
	}
}

/*
 * Run all registered callbacks of the given type.  One-shot callbacks
 * (delete == 1) are unlinked first; if the callback returns -1 they are
 * re-queued to retry on the next token, otherwise freed.
 */
static void token_callbacks_execute (
	struct totemsrp_instance *instance,
	enum totem_callback_token_type type)
{
	struct list_head *list;
	struct list_head *list_next;
	struct list_head *callback_listhead = 0;
	struct token_callback_instance *token_callback_instance;
	int res;
	int del;

	switch (type) {
	case TOTEM_CALLBACK_TOKEN_RECEIVED:
		callback_listhead = &instance->token_callback_received_listhead;
		break;
	case TOTEM_CALLBACK_TOKEN_SENT:
		callback_listhead = &instance->token_callback_sent_listhead;
		break;
	default:
		assert (0);
	}

	/* list_next is captured before the callback may unlink/free list */
	for (list = callback_listhead->next; list != callback_listhead;
		list = list_next) {

		token_callback_instance = list_entry (list, struct token_callback_instance, list);
		list_next = list->next;
		del = token_callback_instance->delete;
		if (del == 1) {
			list_del (list);
		}

		res = token_callback_instance->callback_fn (
			token_callback_instance->callback_type,
			token_callback_instance->data);
		/*
		 * This callback failed to execute, try it again on the next token
		 */
		if (res == -1 && del == 1) {
			list_add (list, callback_listhead);
		} else if (del) {
			free (token_callback_instance);
		}
	}
}

/*
 * Flow control functions
 */

/*
 * Number of queued-but-unsent messages for the current state's queue;
 * also recorded in the per-token statistics.
 */
static unsigned int backlog_get (struct totemsrp_instance *instance)
{
	unsigned int backlog = 0;

	if (instance->memb_state == MEMB_STATE_OPERATIONAL) {
		backlog = cs_queue_used (&instance->new_message_queue);
	} else
	if (instance->memb_state == MEMB_STATE_RECOVERY) {
		backlog = cs_queue_used (&instance->retrans_message_queue);
	}

	instance->stats.token[instance->stats.latest_token].backlog_calc = backlog;
	return (backlog);
}

/*
 * Flow-control: compute how many multicasts this processor may send on
 * this token rotation, bounded by max_messages, the remaining window
 * (window_size - token->fcc) and a fair share of the ring-wide backlog.
 */
static int fcc_calculate (
	struct totemsrp_instance *instance,
	struct orf_token *token)
{
	unsigned int transmits_allowed;
	unsigned int backlog_calc;

	transmits_allowed = instance->totem_config->max_messages;

	/* NOTE(review): window_size - token->fcc is unsigned arithmetic;
	 * if token->fcc ever exceeds window_size this wraps to a huge
	 * value and the clamp is skipped - confirm fcc is bounded. */
	if (transmits_allowed > instance->totem_config->window_size - token->fcc) {
		transmits_allowed = instance->totem_config->window_size - token->fcc;
	}

	instance->my_cbl = backlog_get (instance);

	/*
	 * Only do backlog calculation if there is a backlog otherwise
	 * we would result in div by zero
	 */
	if (token->backlog + instance->my_cbl - instance->my_pbl) {
		backlog_calc = (instance->totem_config->window_size * instance->my_pbl) /
			(token->backlog + instance->my_cbl - instance->my_pbl);
		if (backlog_calc > 0 && transmits_allowed > backlog_calc) {
			transmits_allowed = backlog_calc;
		}
	}

	return (transmits_allowed);
}

/*
 * don't overflow the RTR sort queue
 */
static void fcc_rtr_limit (
	struct totemsrp_instance *instance,
	struct orf_token *token,
	unsigned int *transmits_allowed)
{
	int check =
	QUEUE_RTR_ITEMS_SIZE_MAX;

	check -= (*transmits_allowed + instance->totem_config->window_size);
	assert (check >= 0);
	/* Zero out the allowance if sending would push unreleased messages
	 * past the capacity of the retransmit sort queue */
	if (sq_lt_compare (instance->last_released +
		QUEUE_RTR_ITEMS_SIZE_MAX - *transmits_allowed -
		instance->totem_config->window_size,

		token->seq)) {

		*transmits_allowed = 0;
	}
}

/*
 * Fold this rotation's transmissions and backlog delta into the token
 * before forwarding it, and remember them for the next rotation.
 */
static void fcc_token_update (
	struct totemsrp_instance *instance,
	struct orf_token *token,
	unsigned int msgs_transmitted)
{
	token->fcc += msgs_transmitted - instance->my_trc;
	token->backlog += instance->my_cbl - instance->my_pbl;
	instance->my_trc = msgs_transmitted;
	instance->my_pbl = instance->my_cbl;
}

/*
 * Message Handlers
 */

/* Timestamp of the previous token arrival, used only by the GIVEINFO
 * diagnostic build */
unsigned long long int tv_old;

/*
 * message handler called when TOKEN message type received
 */
static int message_handler_orf_token (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	char token_storage[1500];
	char token_convert[1500];
	struct orf_token *token = NULL;
	int forward_token;
	unsigned int transmits_allowed;
	unsigned int mcasted_retransmit;
	unsigned int mcasted_regular;
	unsigned int last_aru;

#ifdef GIVEINFO
	unsigned long long tv_current;
	unsigned long long tv_diff;

	tv_current = qb_util_nano_current_get ();
	tv_diff = tv_current - tv_old;
	tv_old = tv_current;
	log_printf (instance->totemsrp_log_level_debug,
		"Time since last token %0.4f ms", ((float)tv_diff) / 1000000.0);
#endif

	if (instance->orf_token_discard) {
		return (0);
	}
#ifdef TEST_DROP_ORF_TOKEN_PERCENTAGE
	if (random()%100 < TEST_DROP_ORF_TOKEN_PERCENTAGE) {
		return (0);
	}
#endif

	if (endian_conversion_needed) {
		orf_token_endian_convert ((struct orf_token *)msg,
			(struct orf_token *)token_convert);
		msg = (struct orf_token *)token_convert;
	}

	/*
	 * Make copy of token and retransmit list in case we have
	 * to flush incoming messages from the kernel queue
	 */
	token = (struct orf_token *)token_storage;
	memcpy (token, msg, sizeof (struct orf_token));
	memcpy (&token->rtr_list[0], (char *)msg + sizeof (struct orf_token),
		sizeof (struct rtr_item) * RETRANSMIT_ENTRIES_MAX);

	/*
	 * Handle merge detection timeout
	 */
	if (token->seq == instance->my_last_seq) {
		start_merge_detect_timeout (instance);
		instance->my_seq_unchanged += 1;
	} else {
		cancel_merge_detect_timeout (instance);
		cancel_token_hold_retransmit_timeout (instance);
		instance->my_seq_unchanged = 0;
	}

	instance->my_last_seq = token->seq;

#ifdef TEST_RECOVERY_MSG_COUNT
	if (instance->memb_state == MEMB_STATE_OPERATIONAL && token->seq > TEST_RECOVERY_MSG_COUNT) {
		return (0);
	}
#endif
	totemrrp_recv_flush (instance->totemrrp_context);

	/*
	 * Determine if we should hold (in reality drop) the token
	 */
	instance->my_token_held = 0;
	/* The rep holds the token one rotation longer (> vs >=) than
	 * non-rep processors */
	if (totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0]) &&
		instance->my_seq_unchanged > instance->totem_config->seqno_unchanged_const) {
		instance->my_token_held = 1;
	} else
		if (!totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0]) &&
		instance->my_seq_unchanged >= instance->totem_config->seqno_unchanged_const) {
		instance->my_token_held = 1;
	}

	/*
	 * Hold onto token when there is no activity on ring and
	 * this processor is the ring rep
	 */
	forward_token = 1;
	if (totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0])) {
		if (instance->my_token_held) {
			forward_token = 0;
		}
	}

	token_callbacks_execute (instance, TOTEM_CALLBACK_TOKEN_RECEIVED);

	switch (instance->memb_state) {
	case MEMB_STATE_COMMIT:
		/* Discard token */
		break;

	case MEMB_STATE_OPERATIONAL:
		messages_free (instance, token->aru);
		/*
		 * Do NOT add break, this case should also execute code in gather case.
		 */

	case MEMB_STATE_GATHER:
		/*
		 * DO NOT add break, we use different free mechanism in recovery state
		 */

	case MEMB_STATE_RECOVERY:
		/*
		 * Discard tokens from another configuration
		 */
		if (memcmp (&token->ring_id, &instance->my_ring_id,
			sizeof (struct memb_ring_id)) != 0) {

			if ((forward_token)
				&& instance->use_heartbeat) {
				reset_heartbeat_timeout(instance);
			}
			else {
				cancel_heartbeat_timeout(instance);
			}

			return (0); /* discard token */
		}

		/*
		 * Discard retransmitted tokens
		 */
		if (sq_lte_compare (token->token_seq, instance->my_token_seq)) {
			return (0); /* discard token */
		}
		last_aru = instance->my_last_aru;
		instance->my_last_aru = token->aru;

		/* Flow-controlled transmit: retransmit requested messages
		 * first, then new multicasts within the remaining allowance */
		transmits_allowed = fcc_calculate (instance, token);
		mcasted_retransmit = orf_token_rtr (instance, token, &transmits_allowed);
		fcc_rtr_limit (instance, token, &transmits_allowed);
		mcasted_regular = orf_token_mcast (instance, token, transmits_allowed);
/*
if (mcasted_regular) {
printf ("mcasted regular %d\n", mcasted_regular);
printf ("token seq %d\n", token->seq);
}
*/
		fcc_token_update (instance, token,
			mcasted_retransmit + mcasted_regular);

		/* Update the token's all-received-up-to (aru) field if we are
		 * behind it, we set it last rotation, or it is unset */
		if (sq_lt_compare (instance->my_aru, token->aru) ||
			instance->my_id.addr[0].nodeid == token->aru_addr ||
			token->aru_addr == 0) {

			token->aru = instance->my_aru;
			if (token->aru == token->seq) {
				token->aru_addr = 0;
			} else {
				token->aru_addr = instance->my_id.addr[0].nodeid;
			}
		}
		if (token->aru == last_aru && token->aru_addr != 0) {
			instance->my_aru_count += 1;
		} else {
			instance->my_aru_count = 0;
		}

		/* If the aru has been stuck on us for too many rotations we
		 * have failed to receive - give up and re-gather */
		if (instance->my_aru_count > instance->totem_config->fail_to_recv_const &&
			token->aru_addr == instance->my_id.addr[0].nodeid) {

			log_printf (instance->totemsrp_log_level_error,
				"FAILED TO RECEIVE");

			instance->failed_to_recv = 1;

			memb_set_merge (&instance->my_id, 1,
				instance->my_failed_list,
				&instance->my_failed_list_entries);

			memb_state_gather_enter (instance, 6);
		} else {
			instance->my_token_seq = token->token_seq;
			token->token_seq += 1;

			if (instance->memb_state == MEMB_STATE_RECOVERY) {
				/*
				 * instance->my_aru == instance->my_high_seq_received means this processor
				 * has recovered all messages it can recover
				 * (ie: its retrans queue is empty)
				 */
				if (cs_queue_is_empty (&instance->retrans_message_queue) == 0) {

					if (token->retrans_flg == 0) {
						token->retrans_flg = 1;
						instance->my_set_retrans_flg = 1;
					}
				} else
				if (token->retrans_flg == 1 && instance->my_set_retrans_flg) {
					token->retrans_flg = 0;
					instance->my_set_retrans_flg = 0;
				}
				log_printf (instance->totemsrp_log_level_debug,
					"token retrans flag is %d my set retrans flag%d retrans queue empty %d count %d, aru %x",
					token->retrans_flg, instance->my_set_retrans_flg,
					cs_queue_is_empty (&instance->retrans_message_queue),
					instance->my_retrans_flg_count, token->aru);
				if (token->retrans_flg == 0) {
					instance->my_retrans_flg_count += 1;
				} else {
					instance->my_retrans_flg_count = 0;
				}
				/* Two clean rotations: everyone has recovered;
				 * latch the installation sequence number */
				if (instance->my_retrans_flg_count == 2) {
					instance->my_install_seq = token->seq;
				}
				log_printf (instance->totemsrp_log_level_debug,
					"install seq %x aru %x high seq received %x",
					instance->my_install_seq, instance->my_aru, instance->my_high_seq_received);
				if (instance->my_retrans_flg_count >= 2 &&
					instance->my_received_flg == 0 &&
					sq_lte_compare (instance->my_install_seq, instance->my_aru)) {
					instance->my_received_flg = 1;
					instance->my_deliver_memb_entries = instance->my_trans_memb_entries;
					memcpy (instance->my_deliver_memb_list, instance->my_trans_memb_list,
						sizeof (struct totem_ip_address) * instance->my_trans_memb_entries);
				}
				if (instance->my_retrans_flg_count >= 3 &&
					sq_lte_compare (instance->my_install_seq, token->aru)) {
					instance->my_rotation_counter += 1;
				} else {
					instance->my_rotation_counter = 0;
				}
				/* Two full rotations past install seq: recovery is
				 * complete, install the new ring */
				if (instance->my_rotation_counter == 2) {
					log_printf (instance->totemsrp_log_level_debug,
						"retrans flag count %x token aru %x install seq %x aru %x %x",
						instance->my_retrans_flg_count, token->aru, instance->my_install_seq,
						instance->my_aru, token->seq);

					memb_state_operational_enter (instance);
					instance->my_rotation_counter = 0;
					instance->my_retrans_flg_count = 0;
				}
			}

			totemrrp_send_flush (instance->totemrrp_context);
			token_send (instance, token, forward_token);

#ifdef GIVEINFO
			tv_current = qb_util_nano_current_get ();
			tv_diff = tv_current - tv_old;
			tv_old = tv_current;
			log_printf (instance->totemsrp_log_level_debug,
				"I held %0.4f ms",
				((float)tv_diff) / 1000000.0);
#endif
			if (instance->memb_state == MEMB_STATE_OPERATIONAL) {
				messages_deliver_to_app (instance, 0,
					instance->my_high_seq_received);
			}

			/*
			 * Deliver messages after token has been transmitted
			 * to improve performance
			 */
			reset_token_timeout (instance); // REVIEWED
			reset_token_retransmit_timeout (instance); // REVIEWED

			if (totemip_equal(&instance->my_id.addr[0], &instance->my_ring_id.rep) &&
				instance->my_token_held == 1) {

				start_token_hold_retransmit_timeout (instance);
			}

			token_callbacks_execute (instance, TOTEM_CALLBACK_TOKEN_SENT);
		}
		break;
	}

	if ((forward_token)
		&& instance->use_heartbeat) {
		reset_heartbeat_timeout(instance);
	}
	else {
		cancel_heartbeat_timeout(instance);
	}

	return (0);
}

/*
 * Deliver in-order messages from the regular sort queue to the
 * application, from my_high_delivered+1 up to end_point.  With skip
 * set, holes and messages from outside my_deliver_memb are skipped
 * over instead of stopping delivery.
 */
static void messages_deliver_to_app (
	struct totemsrp_instance *instance,
	int skip,
	unsigned int end_point)
{
	struct sort_queue_item *sort_queue_item_p;
	unsigned int i;
	int res;
	struct mcast *mcast_in;
	struct mcast mcast_header;
	unsigned int range = 0;
	int endian_conversion_required;
	unsigned int my_high_delivered_stored = 0;

	/* Sequence numbers wrap; range relies on unsigned arithmetic */
	range = end_point - instance->my_high_delivered;

	if (range) {
		log_printf (instance->totemsrp_log_level_debug,
			"Delivering %x to %x", instance->my_high_delivered,
			end_point);
	}
	assert (range < QUEUE_RTR_ITEMS_SIZE_MAX);
	my_high_delivered_stored = instance->my_high_delivered;

	/*
	 * Deliver messages in order from rtr queue to pending delivery queue
	 */
	for (i = 1; i <= range; i++) {

		void *ptr = 0;

		/*
		 * If out of range of sort queue, stop assembly
		 */
		res = sq_in_range (&instance->regular_sort_queue,
			my_high_delivered_stored + i);
		if (res == 0) {
			break;
		}

		res = sq_item_get (&instance->regular_sort_queue,
			my_high_delivered_stored + i,
			&ptr);
		/*
		 * If hole, stop assembly
		 */
		if (res != 0 && skip == 0) {
			break;
		}

		instance->my_high_delivered = my_high_delivered_stored + i;

		if (res != 0) {
			continue;
		}

		sort_queue_item_p = ptr;

		mcast_in = sort_queue_item_p->mcast;
		/* Poison value guards against delivering a freed message */
		assert (mcast_in != (struct mcast *)0xdeadbeef);

		endian_conversion_required = 0;
		if (mcast_in->header.endian_detector != ENDIAN_LOCAL) {
			endian_conversion_required = 1;
			mcast_endian_convert (mcast_in, &mcast_header);
		} else {
			memcpy (&mcast_header, mcast_in, sizeof (struct mcast));
		}

		/*
		 * Skip messages not originated in instance->my_deliver_memb
		 */
		if (skip &&
			memb_set_subset (&mcast_header.system_from,
				1,
				instance->my_deliver_memb_list,
				instance->my_deliver_memb_entries) == 0) {

			instance->my_high_delivered = my_high_delivered_stored + i;

			continue;
		}

		/*
		 * Message found
		 */
		log_printf (instance->totemsrp_log_level_debug,
			"Delivering MCAST message with seq %x to pending delivery queue",
			mcast_header.seq);

		/*
		 * Message is locally originated multicast
		 */
		instance->totemsrp_deliver_fn (
			mcast_header.header.nodeid,
			((char *)sort_queue_item_p->mcast) + sizeof (struct mcast),
			sort_queue_item_p->msg_len - sizeof (struct mcast),
			endian_conversion_required);
	}
}

/*
 * recv message handler called when MCAST message type received
 */
static int message_handler_mcast (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	struct sort_queue_item sort_queue_item;
	struct sq *sort_queue;
	struct mcast mcast_header;

	if (endian_conversion_needed) {
		mcast_endian_convert (msg, &mcast_header);
	} else {
		memcpy (&mcast_header, msg, sizeof (struct mcast));
	}

	/* Encapsulated messages are retransmissions from the old ring,
	 * sorted separately during recovery */
	if (mcast_header.header.encapsulated == MESSAGE_ENCAPSULATED) {
		sort_queue = &instance->recovery_sort_queue;
	} else {
		sort_queue = &instance->regular_sort_queue;
	}

	assert (msg_len <= FRAME_SIZE_MAX);

#ifdef TEST_DROP_MCAST_PERCENTAGE
	if (random()%100 < TEST_DROP_MCAST_PERCENTAGE) {
		return (0);
	}
#endif

	/*
	 * If the message is foreign execute the switch below
	 */
	if (memcmp (&instance->my_ring_id, &mcast_header.ring_id,
		sizeof (struct memb_ring_id)) != 0) {

		switch (instance->memb_state) {
		case MEMB_STATE_OPERATIONAL:
			memb_set_merge (
				&mcast_header.system_from, 1,
				instance->my_proc_list, &instance->my_proc_list_entries);
			memb_state_gather_enter (instance, 7);
			break;

		case MEMB_STATE_GATHER:
			if (!memb_set_subset (
				&mcast_header.system_from,
				1,
				instance->my_proc_list,
				instance->my_proc_list_entries)) {

				memb_set_merge (&mcast_header.system_from, 1,
					instance->my_proc_list, &instance->my_proc_list_entries);
				memb_state_gather_enter (instance, 8);
				return (0);
			}
			break;

		case MEMB_STATE_COMMIT:
			/* discard message */
			instance->stats.rx_msg_dropped++;
			break;

		case MEMB_STATE_RECOVERY:
			/* discard message */
			instance->stats.rx_msg_dropped++;
			break;
		}
		return (0);
	}

	log_printf (instance->totemsrp_log_level_debug,
		"Received ringid(%s:%lld) seq %x",
		totemip_print (&mcast_header.ring_id.rep),
		mcast_header.ring_id.seq, mcast_header.seq);

	/*
	 * Add mcast message to rtr queue if not already in rtr queue
	 * otherwise free io vectors
	 */
	if (msg_len > 0 && msg_len <= FRAME_SIZE_MAX &&
		sq_in_range (sort_queue, mcast_header.seq) &&
		sq_item_inuse (sort_queue, mcast_header.seq) == 0) {

		/*
		 * Allocate new multicast memory block
		 */
// TODO LEAK
		sort_queue_item.mcast = totemsrp_buffer_alloc (instance);
		if (sort_queue_item.mcast == NULL) {
			return (-1); /* error here is corrected by the algorithm */
		}
		memcpy (sort_queue_item.mcast, msg, msg_len);
		sort_queue_item.msg_len = msg_len;

		if (sq_lt_compare (instance->my_high_seq_received,
			mcast_header.seq)) {
			instance->my_high_seq_received = mcast_header.seq;
		}

		sq_item_add (sort_queue, &sort_queue_item, mcast_header.seq);
	}

	update_aru (instance);
	if (instance->memb_state == MEMB_STATE_OPERATIONAL) {
		messages_deliver_to_app (instance, 0, instance->my_high_seq_received);
	}

/* TODO remove from retrans message queue for old ring in recovery state */
	return (0);
}

static int message_handler_memb_merge_detect (
	struct totemsrp_instance
	*instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	struct memb_merge_detect memb_merge_detect;

	if (endian_conversion_needed) {
		memb_merge_detect_endian_convert (msg, &memb_merge_detect);
	} else {
		memcpy (&memb_merge_detect, msg,
			sizeof (struct memb_merge_detect));
	}

	/*
	 * do nothing if this is a merge detect from this configuration
	 */
	if (memcmp (&instance->my_ring_id, &memb_merge_detect.ring_id,
		sizeof (struct memb_ring_id)) == 0) {

		return (0);
	}

	/*
	 * Execute merge operation
	 */
	switch (instance->memb_state) {
	case MEMB_STATE_OPERATIONAL:
		memb_set_merge (&memb_merge_detect.system_from, 1,
			instance->my_proc_list, &instance->my_proc_list_entries);
		memb_state_gather_enter (instance, 9);
		break;

	case MEMB_STATE_GATHER:
		if (!memb_set_subset (
			&memb_merge_detect.system_from,
			1,
			instance->my_proc_list,
			instance->my_proc_list_entries)) {

			memb_set_merge (&memb_merge_detect.system_from, 1,
				instance->my_proc_list, &instance->my_proc_list_entries);
			memb_state_gather_enter (instance, 10);
			return (0);
		}
		break;

	case MEMB_STATE_COMMIT:
		/* do nothing in commit */
		break;

	case MEMB_STATE_RECOVERY:
		/* do nothing in recovery */
		break;
	}
	return (0);
}

/*
 * Core JOIN processing: merge the sender's proc/failed lists into our
 * own, track consensus, and move to COMMIT (if consensus is reached and
 * we are the representative) or back to GATHER as required.
 */
static void memb_join_process (
	struct totemsrp_instance *instance,
	const struct memb_join *memb_join)
{
	struct srp_addr *proc_list;
	struct srp_addr *failed_list;
	int gather_entered = 0;
	int fail_minus_memb_entries = 0;
	struct srp_addr fail_minus_memb[PROCESSOR_COUNT_MAX];

	/* proc list then failed list trail the fixed join header */
	proc_list = (struct srp_addr *)memb_join->end_of_memb_join;
	failed_list = proc_list + memb_join->proc_list_entries;

/*
	memb_set_print ("proclist", proc_list, memb_join->proc_list_entries);
	memb_set_print ("faillist", failed_list, memb_join->failed_list_entries);
	memb_set_print ("my_proclist", instance->my_proc_list, instance->my_proc_list_entries);
	memb_set_print ("my_faillist", instance->my_failed_list, instance->my_failed_list_entries);
-*/

	/* Case 1: sender's lists match ours exactly - count consensus */
	if (memb_set_equal (proc_list,
		memb_join->proc_list_entries,
		instance->my_proc_list,
		instance->my_proc_list_entries) &&

	memb_set_equal (failed_list,
		memb_join->failed_list_entries,
		instance->my_failed_list,
		instance->my_failed_list_entries)) {

		memb_consensus_set (instance, &memb_join->system_from);

		if (memb_consensus_agreed (instance) && instance->failed_to_recv == 1) {
			/* Fail-to-recv recovery: form a singleton ring */
			instance->failed_to_recv = 0;
			srp_addr_copy (&instance->my_proc_list[0],
				&instance->my_id);
			instance->my_proc_list_entries = 1;
			instance->my_failed_list_entries = 0;

			memb_state_commit_token_create (instance);

			memb_state_commit_enter (instance);
			return;
		}
		if (memb_consensus_agreed (instance) &&
			memb_lowest_in_config (instance)) {

			memb_state_commit_token_create (instance);

			memb_state_commit_enter (instance);
		} else {
			goto out;
		}
	} else
	/* Case 2: sender's lists are subsets of ours - nothing new */
	if (memb_set_subset (proc_list,
		memb_join->proc_list_entries,
		instance->my_proc_list,
		instance->my_proc_list_entries) &&

		memb_set_subset (failed_list,
		memb_join->failed_list_entries,
		instance->my_failed_list,
		instance->my_failed_list_entries)) {

		goto out;
	} else
	/* Case 3: ignore joins from processors we consider failed */
	if (memb_set_subset (&memb_join->system_from, 1,
		instance->my_failed_list, instance->my_failed_list_entries)) {

		goto out;
	} else {
		/* Case 4: new information - merge and re-gather */
		memb_set_merge (proc_list,
			memb_join->proc_list_entries,
			instance->my_proc_list, &instance->my_proc_list_entries);

		if (memb_set_subset (
			&instance->my_id, 1,
			failed_list, memb_join->failed_list_entries)) {

			/* The sender considers US failed - consider it failed */
			memb_set_merge (
				&memb_join->system_from, 1,
				instance->my_failed_list, &instance->my_failed_list_entries);
		} else {
			if (memb_set_subset (
				&memb_join->system_from, 1,
				instance->my_memb_list,
				instance->my_memb_entries)) {

				if (memb_set_subset (
					&memb_join->system_from, 1,
					instance->my_failed_list,
					instance->my_failed_list_entries) == 0) {

					memb_set_merge (failed_list,
						memb_join->failed_list_entries,
						instance->my_failed_list, &instance->my_failed_list_entries);
				} else {
					memb_set_subtract (fail_minus_memb,
						&fail_minus_memb_entries,
						failed_list,
						memb_join->failed_list_entries,
						instance->my_memb_list,
						instance->my_memb_entries);

					memb_set_merge (fail_minus_memb,
						fail_minus_memb_entries,
						instance->my_failed_list,
						&instance->my_failed_list_entries);
				}
			}
		}
		memb_state_gather_enter (instance, 11);
		gather_entered = 1;
	}

out:
	if (gather_entered == 0 &&
		instance->memb_state == MEMB_STATE_OPERATIONAL) {

		memb_state_gather_enter (instance, 12);
	}
}

/*
 * Byte-swap a JOIN message (header + trailing proc/failed lists) into
 * host byte order.
 * NOTE(review): header.encapsulated is never copied to *out - confirm
 * whether callers rely on it for JOIN messages.
 */
static void memb_join_endian_convert (const struct memb_join *in, struct memb_join *out)
{
	int i;
	struct srp_addr *in_proc_list;
	struct srp_addr *in_failed_list;
	struct srp_addr *out_proc_list;
	struct srp_addr *out_failed_list;

	out->header.type = in->header.type;
	out->header.endian_detector = ENDIAN_LOCAL;
	out->header.nodeid = swab32 (in->header.nodeid);
	srp_addr_copy_endian_convert (&out->system_from, &in->system_from);
	/* Counts must be converted before they are used to locate lists */
	out->proc_list_entries = swab32 (in->proc_list_entries);
	out->failed_list_entries = swab32 (in->failed_list_entries);
	out->ring_seq = swab64 (in->ring_seq);

	in_proc_list = (struct srp_addr *)in->end_of_memb_join;
	in_failed_list = in_proc_list + out->proc_list_entries;
	out_proc_list = (struct srp_addr *)out->end_of_memb_join;
	out_failed_list = out_proc_list + out->proc_list_entries;

	for (i = 0; i < out->proc_list_entries; i++) {
		srp_addr_copy_endian_convert (&out_proc_list[i], &in_proc_list[i]);
	}
	for (i = 0; i < out->failed_list_entries; i++) {
		srp_addr_copy_endian_convert (&out_failed_list[i], &in_failed_list[i]);
	}
}

/*
 * Byte-swap a commit token (header + trailing address and memb entry
 * arrays) into host byte order.
 */
static void memb_commit_token_endian_convert (const struct memb_commit_token *in, struct memb_commit_token *out)
{
	int i;
	struct srp_addr *in_addr = (struct srp_addr *)in->end_of_commit_token;
	struct srp_addr *out_addr = (struct srp_addr *)out->end_of_commit_token;
	struct memb_commit_token_memb_entry *in_memb_list;
	struct memb_commit_token_memb_entry *out_memb_list;

	out->header.type = in->header.type;
	out->header.endian_detector = ENDIAN_LOCAL;
	out->header.nodeid = swab32 (in->header.nodeid);
	out->token_seq = swab32 (in->token_seq);
	totemip_copy_endian_convert(&out->ring_id.rep, &in->ring_id.rep);
	out->ring_id.seq = swab64 (in->ring_id.seq);
	out->retrans_flg
= swab32 (in->retrans_flg); out->memb_index = swab32 (in->memb_index); out->addr_entries = swab32 (in->addr_entries); in_memb_list = (struct memb_commit_token_memb_entry *)(in_addr + out->addr_entries); out_memb_list = (struct memb_commit_token_memb_entry *)(out_addr + out->addr_entries); for (i = 0; i < out->addr_entries; i++) { srp_addr_copy_endian_convert (&out_addr[i], &in_addr[i]); /* * Only convert the memb entry if it has been set */ if (in_memb_list[i].ring_id.rep.family != 0) { totemip_copy_endian_convert (&out_memb_list[i].ring_id.rep, &in_memb_list[i].ring_id.rep); out_memb_list[i].ring_id.seq = swab64 (in_memb_list[i].ring_id.seq); out_memb_list[i].aru = swab32 (in_memb_list[i].aru); out_memb_list[i].high_delivered = swab32 (in_memb_list[i].high_delivered); out_memb_list[i].received_flg = swab32 (in_memb_list[i].received_flg); } } } static void orf_token_endian_convert (const struct orf_token *in, struct orf_token *out) { int i; out->header.type = in->header.type; out->header.endian_detector = ENDIAN_LOCAL; out->header.nodeid = swab32 (in->header.nodeid); out->seq = swab32 (in->seq); out->token_seq = swab32 (in->token_seq); out->aru = swab32 (in->aru); totemip_copy_endian_convert(&out->ring_id.rep, &in->ring_id.rep); out->aru_addr = swab32(in->aru_addr); out->ring_id.seq = swab64 (in->ring_id.seq); out->fcc = swab32 (in->fcc); out->backlog = swab32 (in->backlog); out->retrans_flg = swab32 (in->retrans_flg); out->rtr_list_entries = swab32 (in->rtr_list_entries); for (i = 0; i < out->rtr_list_entries; i++) { totemip_copy_endian_convert(&out->rtr_list[i].ring_id.rep, &in->rtr_list[i].ring_id.rep); out->rtr_list[i].ring_id.seq = swab64 (in->rtr_list[i].ring_id.seq); out->rtr_list[i].seq = swab32 (in->rtr_list[i].seq); } } static void mcast_endian_convert (const struct mcast *in, struct mcast *out) { out->header.type = in->header.type; out->header.endian_detector = ENDIAN_LOCAL; out->header.nodeid = swab32 (in->header.nodeid); out->header.encapsulated = 
in->header.encapsulated;
	out->seq = swab32 (in->seq);
	out->this_seqno = swab32 (in->this_seqno);
	totemip_copy_endian_convert(&out->ring_id.rep, &in->ring_id.rep);
	out->ring_id.seq = swab64 (in->ring_id.seq);
	out->node_id = swab32 (in->node_id);
	out->guarantee = swab32 (in->guarantee);
	srp_addr_copy_endian_convert (&out->system_from, &in->system_from);
}

/*
 * Convert a received memb_merge_detect message to local endianness.
 */
static void memb_merge_detect_endian_convert (
	const struct memb_merge_detect *in,
	struct memb_merge_detect *out)
{
	out->header.type = in->header.type;
	out->header.endian_detector = ENDIAN_LOCAL;
	out->header.nodeid = swab32 (in->header.nodeid);
	totemip_copy_endian_convert(&out->ring_id.rep, &in->ring_id.rep);
	out->ring_id.seq = swab64 (in->ring_id.seq);
	srp_addr_copy_endian_convert (&out->system_from, &in->system_from);
}

/*
 * Handle a received JOIN message, dispatching on the current membership
 * state.  Returns 0 in every path.
 *
 * NOTE(review): the converted copy lives in alloca(msg_len) stack storage;
 * msg_len originates from the transport -- an upper bound is assumed to be
 * enforced before this point (confirm).
 */
static int message_handler_memb_join (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	const struct memb_join *memb_join;
	struct memb_join *memb_join_convert = alloca (msg_len);

	if (endian_conversion_needed) {
		memb_join = memb_join_convert;
		memb_join_endian_convert (msg, memb_join_convert);
	} else {
		memb_join = msg;
	}

	/*
	 * If the process paused because it wasn't scheduled in a timely
	 * fashion, flush the join messages because they may be queued
	 * entries
	 */
	if (pause_flush (instance)) {
		return (0);
	}

	/* Track the highest ring sequence number seen in any join. */
	if (instance->token_ring_id_seq < memb_join->ring_seq) {
		instance->token_ring_id_seq = memb_join->ring_seq;
	}
	switch (instance->memb_state) {
	case MEMB_STATE_OPERATIONAL:
		memb_join_process (instance, memb_join);
		break;

	case MEMB_STATE_GATHER:
		memb_join_process (instance, memb_join);
		break;

	case MEMB_STATE_COMMIT:
		/*
		 * Only accept the join if the sender belongs to the new
		 * membership and the join is not from a stale ring.
		 */
		if (memb_set_subset (&memb_join->system_from,
			1,
			instance->my_new_memb_list,
			instance->my_new_memb_entries) &&
			memb_join->ring_seq >= instance->my_ring_id.seq) {

			memb_join_process (instance, memb_join);
			memb_state_gather_enter (instance, 13);
		}
		break;

	case MEMB_STATE_RECOVERY:
		if (memb_set_subset (&memb_join->system_from,
			1,
instance->my_new_memb_list,
			instance->my_new_memb_entries) &&
			memb_join->ring_seq >= instance->my_ring_id.seq) {

			memb_join_process (instance, memb_join);
			/* Abandon recovery and restart the gather protocol. */
			memb_recovery_state_token_loss (instance);
			memb_state_gather_enter (instance, 14);
		}
		break;
	}
	return (0);
}

/*
 * Handle a received commit token, dispatching on the current membership
 * state.  A local copy of the token is always made (converted or memcpy'd)
 * so the stack copy can be modified/stored safely.  Returns 0 in every path.
 *
 * NOTE(review): as with the join handler, the copy is alloca(msg_len) stack
 * storage sized by the transport-supplied msg_len -- confirm the bound.
 */
static int message_handler_memb_commit_token (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	struct memb_commit_token *memb_commit_token_convert = alloca (msg_len);
	struct memb_commit_token *memb_commit_token;
	struct srp_addr sub[PROCESSOR_COUNT_MAX];
	int sub_entries;

	struct srp_addr *addr;

	log_printf (instance->totemsrp_log_level_debug,
		"got commit token");

	if (endian_conversion_needed) {
		memb_commit_token_endian_convert (msg, memb_commit_token_convert);
	} else {
		memcpy (memb_commit_token_convert, msg, msg_len);
	}
	memb_commit_token = memb_commit_token_convert;
	addr = (struct srp_addr *)memb_commit_token->end_of_commit_token;

#ifdef TEST_DROP_COMMIT_TOKEN_PERCENTAGE
	/* Fault-injection hook: randomly drop a percentage of commit tokens. */
	if (random()%100 < TEST_DROP_COMMIT_TOKEN_PERCENTAGE) {
		return (0);
	}
#endif
	switch (instance->memb_state) {
	case MEMB_STATE_OPERATIONAL:
		/* discard token */
		break;

	case MEMB_STATE_GATHER:
		/*
		 * Accept the token only if its member set equals our
		 * (proc - failed) set and it is for a newer ring.
		 */
		memb_set_subtract (sub, &sub_entries,
			instance->my_proc_list, instance->my_proc_list_entries,
			instance->my_failed_list, instance->my_failed_list_entries);

		if (memb_set_equal (addr,
			memb_commit_token->addr_entries,
			sub,
			sub_entries) &&
			memb_commit_token->ring_id.seq > instance->my_ring_id.seq) {
			memcpy (instance->commit_token, memb_commit_token, msg_len);
			memb_state_commit_enter (instance);
		}
		break;

	case MEMB_STATE_COMMIT:
		/*
		 * If retransmitted commit tokens are sent on this ring
		 * filter them out and only enter recovery once the
		 * commit token has traversed the array.
This is
		 * determined by :
		 * memb_commit_token->memb_index == memb_commit_token->addr_entries) {
		 */
		if (memb_commit_token->ring_id.seq == instance->my_ring_id.seq &&
			memb_commit_token->memb_index == memb_commit_token->addr_entries) {
			memb_state_recovery_enter (instance, memb_commit_token);
		}
		break;

	case MEMB_STATE_RECOVERY:
		/*
		 * Only the ring representative (my_id == ring_id.rep) reacts:
		 * a commit token returning during recovery means the ring has
		 * formed, so it originates the first ORF token.
		 */
		if (totemip_equal (&instance->my_id.addr[0], &instance->my_ring_id.rep)) {
			log_printf (instance->totemsrp_log_level_debug,
				"Sending initial ORF token");

			// TODO convert instead of initiate
			orf_token_send_initial (instance);
			reset_token_timeout (instance); // REVIEWED
			reset_token_retransmit_timeout (instance); // REVIEWED
		}
		break;
	}
	return (0);
}

/*
 * Handle a token-hold-cancel request: if it is for our current ring, clear
 * my_seq_unchanged, and if we are the ring representative, retransmit the
 * token immediately via the retransmit-timeout handler.  Returns 0.
 */
static int message_handler_token_hold_cancel (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	const struct token_hold_cancel *token_hold_cancel = msg;

	if (memcmp (&token_hold_cancel->ring_id, &instance->my_ring_id,
		sizeof (struct memb_ring_id)) == 0) {

		instance->my_seq_unchanged = 0;
		if (totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0])) {
			timer_function_token_retransmit_timeout (instance);
		}
	}
	return (0);
}

/*
 * Transport delivery entry point: validates the header length, updates the
 * per-message-type receive statistics, then dispatches to the handler table.
 * (Continues on the next source line.)
 */
void main_deliver_fn (
	void *context,
	const void *msg,
	unsigned int msg_len)
{
	struct totemsrp_instance *instance = context;
	const struct message_header *message_header = msg;

	if (msg_len < sizeof (struct message_header)) {
		log_printf (instance->totemsrp_log_level_security,
			"Received message is too short...
ignoring %u.", (unsigned int)msg_len); return; } switch (message_header->type) { case MESSAGE_TYPE_ORF_TOKEN: instance->stats.orf_token_rx++; break; case MESSAGE_TYPE_MCAST: instance->stats.mcast_rx++; break; case MESSAGE_TYPE_MEMB_MERGE_DETECT: instance->stats.memb_merge_detect_rx++; break; case MESSAGE_TYPE_MEMB_JOIN: instance->stats.memb_join_rx++; break; case MESSAGE_TYPE_MEMB_COMMIT_TOKEN: instance->stats.memb_commit_token_rx++; break; case MESSAGE_TYPE_TOKEN_HOLD_CANCEL: instance->stats.token_hold_cancel_rx++; break; default: log_printf (instance->totemsrp_log_level_security, "Type of received message is wrong... ignoring %d.\n", (int)message_header->type); printf ("wrong message type\n"); instance->stats.rx_msg_dropped++; return; } /* * Handle incoming message */ totemsrp_message_handlers.handler_functions[(int)message_header->type] ( instance, msg, msg_len, message_header->endian_detector != ENDIAN_LOCAL); } void main_iface_change_fn ( void *context, const struct totem_ip_address *iface_addr, unsigned int iface_no) { struct totemsrp_instance *instance = context; int i; totemip_copy (&instance->my_id.addr[iface_no], iface_addr); assert (instance->my_id.addr[iface_no].nodeid); totemip_copy (&instance->my_memb_list[0].addr[iface_no], iface_addr); if (instance->iface_changes++ == 0) { memb_ring_id_create_or_load (instance, &instance->my_ring_id); log_printf ( instance->totemsrp_log_level_debug, "Created or loaded sequence id %llx.%s for this ring.", instance->my_ring_id.seq, totemip_print (&instance->my_ring_id.rep)); if (instance->totemsrp_service_ready_fn) { instance->totemsrp_service_ready_fn (); } } for (i = 0; i < instance->totem_config->interfaces[iface_no].member_count; i++) { totemsrp_member_add (instance, &instance->totem_config->interfaces[iface_no].member_list[i], iface_no); } if (instance->iface_changes >= instance->totem_config->interface_count) { memb_state_gather_enter (instance, 15); } } void totemsrp_net_mtu_adjust (struct totem_config 
*totem_config) {
	/* Reserve room for the per-packet mcast header in the usable MTU. */
	totem_config->net_mtu -= sizeof (struct mcast);
}

/*
 * Register a callback fired once the first interface change has completed
 * (see main_iface_change_fn).
 */
void totemsrp_service_ready_register (
	void *context,
	void (*totem_service_ready) (void))
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)context;

	instance->totemsrp_service_ready_fn = totem_service_ready;
}

/*
 * Add a static member address on the given ring; thin wrapper over
 * totemrrp_member_add.  Returns the transport's result code.
 */
int totemsrp_member_add (
	void *context,
	const struct totem_ip_address *member,
	int ring_no)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)context;
	int res;

	res = totemrrp_member_add (instance->totemrrp_context, member, ring_no);

	return (res);
}

/*
 * Remove a static member address on the given ring; thin wrapper over
 * totemrrp_member_remove.  Returns the transport's result code.
 */
int totemsrp_member_remove (
	void *context,
	const struct totem_ip_address *member,
	int ring_no)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)context;
	int res;

	res = totemrrp_member_remove (instance->totemrrp_context, member, ring_no);

	return (res);
}

/*
 * Flag that the caller runs totemsrp from multiple threads.
 */
void totemsrp_threaded_mode_enable (void *context)
{
	struct totemsrp_instance *instance = (struct totemsrp_instance *)context;

	instance->threaded_mode_enabled = 1;
}
diff --git a/exec/totemsrp.h b/exec/totemsrp.h
index 71627210..29fa127f 100644
--- a/exec/totemsrp.h
+++ b/exec/totemsrp.h
@@ -1,139 +1,140 @@
/*
 * Copyright (c) 2003-2005 MontaVista Software, Inc.
 * Copyright (c) 2006-2011 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc.
nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file * Totem Single Ring Protocol * * depends on poll abstraction, POSIX, IPV4 */ #ifndef TOTEMSRP_H_DEFINED #define TOTEMSRP_H_DEFINED #include #include /** * Create a protocol instance */ int totemsrp_initialize ( qb_loop_t *poll_handle, void **srp_context, struct totem_config *totem_config, totemmrp_stats_t *stats, void (*deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required), void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)); void totemsrp_finalize (void *srp_context); /** * Multicast a message */ int totemsrp_mcast ( void *srp_context, struct iovec *iovec, unsigned int iov_len, int priority); /** * Return number of available messages that can be queued */ int totemsrp_avail (void *srp_context); int totemsrp_callback_token_create ( void *srp_context, void 
**handle_out, enum totem_callback_token_type type, int delete, int (*callback_fn) (enum totem_callback_token_type type, const void *), const void *data); void totemsrp_callback_token_destroy ( void *srp_context, void **handle_out); void totemsrp_event_signal (void *srp_context, enum totem_event_type type, int value); extern void totemsrp_net_mtu_adjust (struct totem_config *totem_config); extern int totemsrp_ifaces_get ( void *srp_context, unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count); extern unsigned int totemsrp_my_nodeid_get ( void *srp_context); extern int totemsrp_my_family_get ( void *srp_context); extern int totemsrp_crypto_set ( void *srp_context, - unsigned int type); + const char *cipher_type, + const char *hash_type); extern int totemsrp_ring_reenable ( void *srp_context); void totemsrp_service_ready_register ( void *srp_context, void (*totem_service_ready) (void)); extern int totemsrp_member_add ( void *srp_context, const struct totem_ip_address *member, int ring_no); extern int totemsrp_member_remove ( void *srp_context, const struct totem_ip_address *member, int ring_no); void totemsrp_threaded_mode_enable ( void *srp_context); #endif /* TOTEMSRP_H_DEFINED */ diff --git a/exec/totemudp.c b/exec/totemudp.c index e2657324..e40300a5 100644 --- a/exec/totemudp.c +++ b/exec/totemudp.c @@ -1,1259 +1,1260 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2012 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 
* - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LOGSYS_UTILS_ONLY 1 #include #include "totemudp.h" #include "util.h" #include "totemcrypto.h" #include #include #include #include #ifndef MSG_NOSIGNAL #define MSG_NOSIGNAL 0 #endif #define MCAST_SOCKET_BUFFER_SIZE (TRANSMITS_ALLOWED * FRAME_SIZE_MAX) #define NETIF_STATE_REPORT_UP 1 #define NETIF_STATE_REPORT_DOWN 2 #define BIND_STATE_UNBOUND 0 #define BIND_STATE_REGULAR 1 #define BIND_STATE_LOOPBACK 2 #define MESSAGE_TYPE_MCAST 1 struct totemudp_socket { int mcast_recv; int mcast_send; int token; }; struct totemudp_instance { struct crypto_instance *crypto_inst; qb_loop_t *totemudp_poll_handle; struct totem_interface *totem_interface; int netif_state_report; int netif_bind_state; void *context; void (*totemudp_deliver_fn) ( void *context, const void *msg, unsigned int msg_len); void (*totemudp_iface_change_fn) ( void *context, const struct totem_ip_address *iface_address); void (*totemudp_target_set_completed) (void *context); /* * Function and data used to log messages */ int totemudp_log_level_security; int totemudp_log_level_error; int totemudp_log_level_warning; int totemudp_log_level_notice; int totemudp_log_level_debug; int totemudp_subsys_id; void (*totemudp_log_printf) ( int level, int subsys, const char *function, const char *file, int line, const char *format, ...)__attribute__((format(printf, 6, 7))); void *udp_context; char iov_buffer[FRAME_SIZE_MAX]; char iov_buffer_flush[FRAME_SIZE_MAX]; struct iovec totemudp_iov_recv; struct iovec totemudp_iov_recv_flush; struct totemudp_socket totemudp_sockets; struct totem_ip_address mcast_address; int stats_sent; int stats_recv; int stats_delv; int stats_remcasts; int stats_orf_token; struct timeval stats_tv_start; struct totem_ip_address my_id; int 
firstrun; qb_loop_timer_handle timer_netif_check_timeout; unsigned int my_memb_entries; int flushing; struct totem_config *totem_config; struct totem_ip_address token_target; }; struct work_item { const void *msg; unsigned int msg_len; struct totemudp_instance *instance; }; static int totemudp_build_sockets ( struct totemudp_instance *instance, struct totem_ip_address *bindnet_address, struct totem_ip_address *mcastaddress, struct totemudp_socket *sockets, struct totem_ip_address *bound_to); static struct totem_ip_address localhost; static void totemudp_instance_initialize (struct totemudp_instance *instance) { memset (instance, 0, sizeof (struct totemudp_instance)); instance->netif_state_report = NETIF_STATE_REPORT_UP | NETIF_STATE_REPORT_DOWN; instance->totemudp_iov_recv.iov_base = instance->iov_buffer; instance->totemudp_iov_recv.iov_len = FRAME_SIZE_MAX; //sizeof (instance->iov_buffer); instance->totemudp_iov_recv_flush.iov_base = instance->iov_buffer_flush; instance->totemudp_iov_recv_flush.iov_len = FRAME_SIZE_MAX; //sizeof (instance->iov_buffer); /* * There is always atleast 1 processor */ instance->my_memb_entries = 1; } #define log_printf(level, format, args...) \ do { \ instance->totemudp_log_printf ( \ level, instance->totemudp_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ (const char *)format, ##args); \ } while (0); #define LOGSYS_PERROR(err_num, level, fmt, args...) 
\ do { \ char _error_str[LOGSYS_MAX_PERROR_MSG_LEN]; \ const char *_error_ptr = qb_strerror_r(err_num, _error_str, sizeof(_error_str)); \ instance->totemudp_log_printf ( \ level, instance->totemudp_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ fmt ": %s (%d)\n", ##args, _error_ptr, err_num); \ } while(0) int totemudp_crypto_set ( void *udp_context, - unsigned int type) + const char *cipher_type, + const char *hash_type) { return (0); } static inline void ucast_sendmsg ( struct totemudp_instance *instance, struct totem_ip_address *system_to, const void *msg, unsigned int msg_len) { struct msghdr msg_ucast; int res = 0; size_t buf_out_len; unsigned char buf_out[FRAME_SIZE_MAX]; struct sockaddr_storage sockaddr; struct iovec iovec; int addrlen; /* * Encrypt and digest the message */ if (crypto_encrypt_and_sign ( instance->crypto_inst, (const unsigned char *)msg, msg_len, buf_out, &buf_out_len) != 0) { log_printf(LOGSYS_LEVEL_CRIT, "Error encrypting/signing packet (non-critical)"); return; } iovec.iov_base = (void *)buf_out; iovec.iov_len = buf_out_len; /* * Build unicast message */ totemip_totemip_to_sockaddr_convert(system_to, instance->totem_interface->ip_port, &sockaddr, &addrlen); msg_ucast.msg_name = &sockaddr; msg_ucast.msg_namelen = addrlen; msg_ucast.msg_iov = (void *)&iovec; msg_ucast.msg_iovlen = 1; #if !defined(COROSYNC_SOLARIS) msg_ucast.msg_control = 0; msg_ucast.msg_controllen = 0; msg_ucast.msg_flags = 0; #else msg_ucast.msg_accrights = NULL; msg_ucast.msg_accrightslen = 0; #endif /* * Transmit unicast message * An error here is recovered by totemsrp */ res = sendmsg (instance->totemudp_sockets.mcast_send, &msg_ucast, MSG_NOSIGNAL); if (res < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_debug, "sendmsg(ucast) failed (non-critical)"); } } static inline void mcast_sendmsg ( struct totemudp_instance *instance, const void *msg, unsigned int msg_len) { struct msghdr msg_mcast; int res = 0; size_t buf_out_len; unsigned char 
buf_out[FRAME_SIZE_MAX]; struct iovec iovec; struct sockaddr_storage sockaddr; int addrlen; /* * Encrypt and digest the message */ if (crypto_encrypt_and_sign ( instance->crypto_inst, (const unsigned char *)msg, msg_len, buf_out, &buf_out_len) != 0) { log_printf(LOGSYS_LEVEL_CRIT, "Error encrypting/signing packet (non-critical)"); return; } iovec.iov_base = (void *)&buf_out; iovec.iov_len = buf_out_len; /* * Build multicast message */ totemip_totemip_to_sockaddr_convert(&instance->mcast_address, instance->totem_interface->ip_port, &sockaddr, &addrlen); msg_mcast.msg_name = &sockaddr; msg_mcast.msg_namelen = addrlen; msg_mcast.msg_iov = (void *)&iovec; msg_mcast.msg_iovlen = 1; #if !defined(COROSYNC_SOLARIS) msg_mcast.msg_control = 0; msg_mcast.msg_controllen = 0; msg_mcast.msg_flags = 0; #else msg_mcast.msg_accrights = NULL; msg_mcast.msg_accrightslen = 0; #endif /* * Transmit multicast message * An error here is recovered by totemsrp */ res = sendmsg (instance->totemudp_sockets.mcast_send, &msg_mcast, MSG_NOSIGNAL); if (res < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_debug, "sendmsg(mcast) failed (non-critical)"); } } int totemudp_finalize ( void *udp_context) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; int res = 0; if (instance->totemudp_sockets.mcast_recv > 0) { close (instance->totemudp_sockets.mcast_recv); qb_loop_poll_del (instance->totemudp_poll_handle, instance->totemudp_sockets.mcast_recv); } if (instance->totemudp_sockets.mcast_send > 0) { close (instance->totemudp_sockets.mcast_send); } if (instance->totemudp_sockets.token > 0) { close (instance->totemudp_sockets.token); qb_loop_poll_del (instance->totemudp_poll_handle, instance->totemudp_sockets.token); } return (res); } /* * Only designed to work with a message with one iov */ static int net_deliver_fn ( int fd, int revents, void *data) { struct totemudp_instance *instance = (struct totemudp_instance *)data; struct msghdr msg_recv; struct iovec *iovec; 
struct sockaddr_storage system_from; int bytes_received; int res = 0; char *message_type; if (instance->flushing == 1) { iovec = &instance->totemudp_iov_recv_flush; } else { iovec = &instance->totemudp_iov_recv; } /* * Receive datagram */ msg_recv.msg_name = &system_from; msg_recv.msg_namelen = sizeof (struct sockaddr_storage); msg_recv.msg_iov = iovec; msg_recv.msg_iovlen = 1; #if !defined(COROSYNC_SOLARIS) msg_recv.msg_control = 0; msg_recv.msg_controllen = 0; msg_recv.msg_flags = 0; #else msg_recv.msg_accrights = NULL; msg_recv.msg_accrightslen = 0; #endif bytes_received = recvmsg (fd, &msg_recv, MSG_NOSIGNAL | MSG_DONTWAIT); if (bytes_received == -1) { return (0); } else { instance->stats_recv += bytes_received; } /* * Authenticate and if authenticated, decrypt datagram */ res = crypto_authenticate_and_decrypt (instance->crypto_inst, iovec->iov_base, &bytes_received); if (res == -1) { log_printf (instance->totemudp_log_level_security, "Received message has invalid digest... ignoring."); log_printf (instance->totemudp_log_level_security, "Invalid packet data"); iovec->iov_len = FRAME_SIZE_MAX; return 0; } iovec->iov_len = bytes_received; /* * Drop all non-mcast messages (more specifically join * messages should be dropped) */ message_type = (char *)iovec->iov_base; if (instance->flushing == 1 && *message_type != MESSAGE_TYPE_MCAST) { iovec->iov_len = FRAME_SIZE_MAX; return (0); } /* * Handle incoming message */ instance->totemudp_deliver_fn ( instance->context, iovec->iov_base, iovec->iov_len); iovec->iov_len = FRAME_SIZE_MAX; return (0); } static int netif_determine ( struct totemudp_instance *instance, struct totem_ip_address *bindnet, struct totem_ip_address *bound_to, int *interface_up, int *interface_num) { int res; res = totemip_iface_check (bindnet, bound_to, interface_up, interface_num, instance->totem_config->clear_node_high_bit); return (res); } /* * If the interface is up, the sockets for totem are built. 
If the interface is down * this function is requeued in the timer list to retry building the sockets later. */ static void timer_function_netif_check_timeout ( void *data) { struct totemudp_instance *instance = (struct totemudp_instance *)data; int interface_up; int interface_num; struct totem_ip_address *bind_address; /* * Build sockets for every interface */ netif_determine (instance, &instance->totem_interface->bindnet, &instance->totem_interface->boundto, &interface_up, &interface_num); /* * If the network interface isn't back up and we are already * in loopback mode, add timer to check again and return */ if ((instance->netif_bind_state == BIND_STATE_LOOPBACK && interface_up == 0) || (instance->my_memb_entries == 1 && instance->netif_bind_state == BIND_STATE_REGULAR && interface_up == 1)) { qb_loop_timer_add (instance->totemudp_poll_handle, QB_LOOP_MED, instance->totem_config->downcheck_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_netif_check_timeout, &instance->timer_netif_check_timeout); /* * Add a timer to check for a downed regular interface */ return; } if (instance->totemudp_sockets.mcast_recv > 0) { close (instance->totemudp_sockets.mcast_recv); qb_loop_poll_del (instance->totemudp_poll_handle, instance->totemudp_sockets.mcast_recv); } if (instance->totemudp_sockets.mcast_send > 0) { close (instance->totemudp_sockets.mcast_send); } if (instance->totemudp_sockets.token > 0) { close (instance->totemudp_sockets.token); qb_loop_poll_del (instance->totemudp_poll_handle, instance->totemudp_sockets.token); } if (interface_up == 0) { /* * Interface is not up */ instance->netif_bind_state = BIND_STATE_LOOPBACK; bind_address = &localhost; /* * Add a timer to retry building interfaces and request memb_gather_enter */ qb_loop_timer_add (instance->totemudp_poll_handle, QB_LOOP_MED, instance->totem_config->downcheck_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_netif_check_timeout, &instance->timer_netif_check_timeout); } else { /* * 
Interface is up */ instance->netif_bind_state = BIND_STATE_REGULAR; bind_address = &instance->totem_interface->bindnet; } /* * Create and bind the multicast and unicast sockets */ (void)totemudp_build_sockets (instance, &instance->mcast_address, bind_address, &instance->totemudp_sockets, &instance->totem_interface->boundto); qb_loop_poll_add ( instance->totemudp_poll_handle, QB_LOOP_MED, instance->totemudp_sockets.mcast_recv, POLLIN, instance, net_deliver_fn); qb_loop_poll_add ( instance->totemudp_poll_handle, QB_LOOP_MED, instance->totemudp_sockets.token, POLLIN, instance, net_deliver_fn); totemip_copy (&instance->my_id, &instance->totem_interface->boundto); /* * This reports changes in the interface to the user and totemsrp */ if (instance->netif_bind_state == BIND_STATE_REGULAR) { if (instance->netif_state_report & NETIF_STATE_REPORT_UP) { log_printf (instance->totemudp_log_level_notice, "The network interface [%s] is now up.", totemip_print (&instance->totem_interface->boundto)); instance->netif_state_report = NETIF_STATE_REPORT_DOWN; instance->totemudp_iface_change_fn (instance->context, &instance->my_id); } /* * Add a timer to check for interface going down in single membership */ if (instance->my_memb_entries == 1) { qb_loop_timer_add (instance->totemudp_poll_handle, QB_LOOP_MED, instance->totem_config->downcheck_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_netif_check_timeout, &instance->timer_netif_check_timeout); } } else { if (instance->netif_state_report & NETIF_STATE_REPORT_DOWN) { log_printf (instance->totemudp_log_level_notice, "The network interface is down."); instance->totemudp_iface_change_fn (instance->context, &instance->my_id); } instance->netif_state_report = NETIF_STATE_REPORT_UP; } } /* Set the socket priority to INTERACTIVE to ensure that our messages don't get queued behind anything else */ static void totemudp_traffic_control_set(struct totemudp_instance *instance, int sock) { #ifdef SO_PRIORITY int prio = 6; /* 
TC_PRIO_INTERACTIVE */ if (setsockopt(sock, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(int))) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "Could not set traffic priority"); } #endif } static int totemudp_build_sockets_ip ( struct totemudp_instance *instance, struct totem_ip_address *mcast_address, struct totem_ip_address *bindnet_address, struct totemudp_socket *sockets, struct totem_ip_address *bound_to, int interface_num) { struct sockaddr_storage sockaddr; struct ipv6_mreq mreq6; struct ip_mreq mreq; struct sockaddr_storage mcast_ss, boundto_ss; struct sockaddr_in6 *mcast_sin6 = (struct sockaddr_in6 *)&mcast_ss; struct sockaddr_in *mcast_sin = (struct sockaddr_in *)&mcast_ss; struct sockaddr_in *boundto_sin = (struct sockaddr_in *)&boundto_ss; unsigned int sendbuf_size; unsigned int recvbuf_size; unsigned int optlen = sizeof (sendbuf_size); int addrlen; int res; int flag; /* * Create multicast recv socket */ sockets->mcast_recv = socket (bindnet_address->family, SOCK_DGRAM, 0); if (sockets->mcast_recv == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "socket() failed"); return (-1); } totemip_nosigpipe (sockets->mcast_recv); res = fcntl (sockets->mcast_recv, F_SETFL, O_NONBLOCK); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "Could not set non-blocking operation on multicast socket"); return (-1); } /* * Force reuse */ flag = 1; if ( setsockopt(sockets->mcast_recv, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof (flag)) < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "setsockopt(SO_REUSEADDR) failed"); return (-1); } /* * Bind to multicast socket used for multicast receives */ totemip_totemip_to_sockaddr_convert(mcast_address, instance->totem_interface->ip_port, &sockaddr, &addrlen); res = bind (sockets->mcast_recv, (struct sockaddr *)&sockaddr, addrlen); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "Unable to bind the socket to receive multicast 
packets"); return (-1); } /* * Setup mcast send socket */ sockets->mcast_send = socket (bindnet_address->family, SOCK_DGRAM, 0); if (sockets->mcast_send == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "socket() failed"); return (-1); } totemip_nosigpipe (sockets->mcast_send); res = fcntl (sockets->mcast_send, F_SETFL, O_NONBLOCK); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "Could not set non-blocking operation on multicast socket"); return (-1); } /* * Force reuse */ flag = 1; if ( setsockopt(sockets->mcast_send, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof (flag)) < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "setsockopt(SO_REUSEADDR) failed"); return (-1); } totemip_totemip_to_sockaddr_convert(bound_to, instance->totem_interface->ip_port - 1, &sockaddr, &addrlen); res = bind (sockets->mcast_send, (struct sockaddr *)&sockaddr, addrlen); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "Unable to bind the socket to send multicast packets"); return (-1); } /* * Setup unicast socket */ sockets->token = socket (bindnet_address->family, SOCK_DGRAM, 0); if (sockets->token == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "socket() failed"); return (-1); } totemip_nosigpipe (sockets->token); res = fcntl (sockets->token, F_SETFL, O_NONBLOCK); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "Could not set non-blocking operation on token socket"); return (-1); } /* * Force reuse */ flag = 1; if ( setsockopt(sockets->token, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof (flag)) < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "setsockopt(SO_REUSEADDR) failed"); return (-1); } /* * Bind to unicast socket used for token send/receives * This has the side effect of binding to the correct interface */ totemip_totemip_to_sockaddr_convert(bound_to, instance->totem_interface->ip_port, &sockaddr, &addrlen); res = 
bind (sockets->token, (struct sockaddr *)&sockaddr, addrlen); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "Unable to bind UDP unicast socket"); return (-1); } recvbuf_size = MCAST_SOCKET_BUFFER_SIZE; sendbuf_size = MCAST_SOCKET_BUFFER_SIZE; /* * Set buffer sizes to avoid overruns */ res = setsockopt (sockets->mcast_recv, SOL_SOCKET, SO_RCVBUF, &recvbuf_size, optlen); res = setsockopt (sockets->mcast_send, SOL_SOCKET, SO_SNDBUF, &sendbuf_size, optlen); res = getsockopt (sockets->mcast_recv, SOL_SOCKET, SO_RCVBUF, &recvbuf_size, &optlen); if (res == 0) { log_printf (instance->totemudp_log_level_debug, "Receive multicast socket recv buffer size (%d bytes).", recvbuf_size); } res = getsockopt (sockets->mcast_send, SOL_SOCKET, SO_SNDBUF, &sendbuf_size, &optlen); if (res == 0) { log_printf (instance->totemudp_log_level_debug, "Transmit multicast socket send buffer size (%d bytes).", sendbuf_size); } /* * Join group membership on socket */ totemip_totemip_to_sockaddr_convert(mcast_address, instance->totem_interface->ip_port, &mcast_ss, &addrlen); totemip_totemip_to_sockaddr_convert(bound_to, instance->totem_interface->ip_port, &boundto_ss, &addrlen); if (instance->totem_config->broadcast_use == 1) { unsigned int broadcast = 1; if ((setsockopt(sockets->mcast_recv, SOL_SOCKET, SO_BROADCAST, &broadcast, sizeof (broadcast))) == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "setting broadcast option failed"); return (-1); } if ((setsockopt(sockets->mcast_send, SOL_SOCKET, SO_BROADCAST, &broadcast, sizeof (broadcast))) == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "setting broadcast option failed"); return (-1); } } else { switch (bindnet_address->family) { case AF_INET: memset(&mreq, 0, sizeof(mreq)); mreq.imr_multiaddr.s_addr = mcast_sin->sin_addr.s_addr; mreq.imr_interface.s_addr = boundto_sin->sin_addr.s_addr; res = setsockopt (sockets->mcast_recv, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof 
(mreq)); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "join ipv4 multicast group failed"); return (-1); } break; case AF_INET6: memset(&mreq6, 0, sizeof(mreq6)); memcpy(&mreq6.ipv6mr_multiaddr, &mcast_sin6->sin6_addr, sizeof(struct in6_addr)); mreq6.ipv6mr_interface = interface_num; res = setsockopt (sockets->mcast_recv, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq6, sizeof (mreq6)); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "join ipv6 multicast group failed"); return (-1); } break; } } /* * Turn on multicast loopback */ flag = 1; switch ( bindnet_address->family ) { case AF_INET: res = setsockopt (sockets->mcast_send, IPPROTO_IP, IP_MULTICAST_LOOP, &flag, sizeof (flag)); break; case AF_INET6: res = setsockopt (sockets->mcast_send, IPPROTO_IPV6, IPV6_MULTICAST_LOOP, &flag, sizeof (flag)); } if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "Unable to turn on multicast loopback"); return (-1); } /* * Set multicast packets TTL */ flag = instance->totem_interface->ttl; if (bindnet_address->family == AF_INET6) { res = setsockopt (sockets->mcast_send, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &flag, sizeof (flag)); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "set mcast v6 TTL failed"); return (-1); } } else { res = setsockopt(sockets->mcast_send, IPPROTO_IP, IP_MULTICAST_TTL, &flag, sizeof(flag)); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "set mcast v4 TTL failed"); return (-1); } } /* * Bind to a specific interface for multicast send and receive */ switch ( bindnet_address->family ) { case AF_INET: if (setsockopt (sockets->mcast_send, IPPROTO_IP, IP_MULTICAST_IF, &boundto_sin->sin_addr, sizeof (boundto_sin->sin_addr)) < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "cannot select interface for multicast packets (send)"); return (-1); } if (setsockopt (sockets->mcast_recv, IPPROTO_IP, IP_MULTICAST_IF, 
&boundto_sin->sin_addr, sizeof (boundto_sin->sin_addr)) < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "cannot select interface for multicast packets (recv)"); return (-1); } break; case AF_INET6: if (setsockopt (sockets->mcast_send, IPPROTO_IPV6, IPV6_MULTICAST_IF, &interface_num, sizeof (interface_num)) < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "cannot select interface for multicast packets (send v6)"); return (-1); } if (setsockopt (sockets->mcast_recv, IPPROTO_IPV6, IPV6_MULTICAST_IF, &interface_num, sizeof (interface_num)) < 0) { LOGSYS_PERROR (errno, instance->totemudp_log_level_warning, "cannot select interface for multicast packets (recv v6)"); return (-1); } break; } return 0; } static int totemudp_build_sockets ( struct totemudp_instance *instance, struct totem_ip_address *mcast_address, struct totem_ip_address *bindnet_address, struct totemudp_socket *sockets, struct totem_ip_address *bound_to) { int interface_num; int interface_up; int res; /* * Determine the ip address bound to and the interface name */ res = netif_determine (instance, bindnet_address, bound_to, &interface_up, &interface_num); if (res == -1) { return (-1); } totemip_copy(&instance->my_id, bound_to); res = totemudp_build_sockets_ip (instance, mcast_address, bindnet_address, sockets, bound_to, interface_num); /* We only send out of the token socket */ totemudp_traffic_control_set(instance, sockets->token); return res; } /* * Totem Network interface - also does encryption/decryption * depends on poll abstraction, POSIX, IPV4 */ /* * Create an instance */ int totemudp_initialize ( qb_loop_t *poll_handle, void **udp_context, struct totem_config *totem_config, int interface_no, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_address), void (*target_set_completed) ( void *context)) { struct totemudp_instance *instance; instance = 
malloc (sizeof (struct totemudp_instance)); if (instance == NULL) { return (-1); } totemudp_instance_initialize (instance); instance->totem_config = totem_config; /* * Configure logging */ instance->totemudp_log_level_security = 1; //totem_config->totem_logging_configuration.log_level_security; instance->totemudp_log_level_error = totem_config->totem_logging_configuration.log_level_error; instance->totemudp_log_level_warning = totem_config->totem_logging_configuration.log_level_warning; instance->totemudp_log_level_notice = totem_config->totem_logging_configuration.log_level_notice; instance->totemudp_log_level_debug = totem_config->totem_logging_configuration.log_level_debug; instance->totemudp_subsys_id = totem_config->totem_logging_configuration.log_subsys_id; instance->totemudp_log_printf = totem_config->totem_logging_configuration.log_printf; /* * Initialize random number generator for later use to generate salt */ instance->crypto_inst = crypto_init (totem_config->private_key, totem_config->private_key_len, totem_config->crypto_cipher_type, totem_config->crypto_hash_type, instance->totemudp_log_printf, instance->totemudp_log_level_security, instance->totemudp_log_level_notice, instance->totemudp_log_level_error, instance->totemudp_subsys_id); if (instance->crypto_inst == NULL) { return (-1); } /* * Initialize local variables for totemudp */ instance->totem_interface = &totem_config->interfaces[interface_no]; totemip_copy (&instance->mcast_address, &instance->totem_interface->mcast_addr); memset (instance->iov_buffer, 0, FRAME_SIZE_MAX); instance->totemudp_poll_handle = poll_handle; instance->totem_interface->bindnet.nodeid = instance->totem_config->node_id; instance->context = context; instance->totemudp_deliver_fn = deliver_fn; instance->totemudp_iface_change_fn = iface_change_fn; instance->totemudp_target_set_completed = target_set_completed; totemip_localhost (instance->mcast_address.family, &localhost); localhost.nodeid = instance->totem_config->node_id; 
/* * RRP layer isn't ready to receive message because it hasn't * initialized yet. Add short timer to check the interfaces. */ qb_loop_timer_add (instance->totemudp_poll_handle, QB_LOOP_MED, 100*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_netif_check_timeout, &instance->timer_netif_check_timeout); *udp_context = instance; return (0); } void *totemudp_buffer_alloc (void) { return malloc (FRAME_SIZE_MAX); } void totemudp_buffer_release (void *ptr) { return free (ptr); } int totemudp_processor_count_set ( void *udp_context, int processor_count) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; int res = 0; instance->my_memb_entries = processor_count; qb_loop_timer_del (instance->totemudp_poll_handle, instance->timer_netif_check_timeout); if (processor_count == 1) { qb_loop_timer_add (instance->totemudp_poll_handle, QB_LOOP_MED, instance->totem_config->downcheck_timeout*QB_TIME_NS_IN_MSEC, (void *)instance, timer_function_netif_check_timeout, &instance->timer_netif_check_timeout); } return (res); } int totemudp_recv_flush (void *udp_context) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; struct pollfd ufd; int nfds; int res = 0; instance->flushing = 1; do { ufd.fd = instance->totemudp_sockets.mcast_recv; ufd.events = POLLIN; nfds = poll (&ufd, 1, 0); if (nfds == 1 && ufd.revents & POLLIN) { net_deliver_fn (instance->totemudp_sockets.mcast_recv, ufd.revents, instance); } } while (nfds == 1); instance->flushing = 0; return (res); } int totemudp_send_flush (void *udp_context) { return 0; } int totemudp_token_send ( void *udp_context, const void *msg, unsigned int msg_len) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; int res = 0; ucast_sendmsg (instance, &instance->token_target, msg, msg_len); return (res); } int totemudp_mcast_flush_send ( void *udp_context, const void *msg, unsigned int msg_len) { struct totemudp_instance *instance = (struct totemudp_instance 
*)udp_context; int res = 0; mcast_sendmsg (instance, msg, msg_len); return (res); } int totemudp_mcast_noflush_send ( void *udp_context, const void *msg, unsigned int msg_len) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; int res = 0; mcast_sendmsg (instance, msg, msg_len); return (res); } extern int totemudp_iface_check (void *udp_context) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; int res = 0; timer_function_netif_check_timeout (instance); return (res); } extern void totemudp_net_mtu_adjust (void *udp_context, struct totem_config *totem_config) { #define UDPIP_HEADER_SIZE (20 + 8) /* 20 bytes for ip 8 bytes for udp */ totem_config->net_mtu -= crypto_sec_header_size(totem_config->crypto_cipher_type, totem_config->crypto_hash_type) + UDPIP_HEADER_SIZE; } const char *totemudp_iface_print (void *udp_context) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; const char *ret_char; ret_char = totemip_print (&instance->my_id); return (ret_char); } int totemudp_iface_get ( void *udp_context, struct totem_ip_address *addr) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; int res = 0; memcpy (addr, &instance->my_id, sizeof (struct totem_ip_address)); return (res); } int totemudp_token_target_set ( void *udp_context, const struct totem_ip_address *token_target) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; int res = 0; memcpy (&instance->token_target, token_target, sizeof (struct totem_ip_address)); instance->totemudp_target_set_completed (instance->context); return (res); } extern int totemudp_recv_mcast_empty ( void *udp_context) { struct totemudp_instance *instance = (struct totemudp_instance *)udp_context; unsigned int res; struct sockaddr_storage system_from; struct msghdr msg_recv; struct pollfd ufd; int nfds; int msg_processed = 0; /* * Receive datagram */ msg_recv.msg_name = &system_from; 
msg_recv.msg_namelen = sizeof (struct sockaddr_storage); msg_recv.msg_iov = &instance->totemudp_iov_recv_flush; msg_recv.msg_iovlen = 1; #if !defined(COROSYNC_SOLARIS) msg_recv.msg_control = 0; msg_recv.msg_controllen = 0; msg_recv.msg_flags = 0; #else msg_recv.msg_accrights = NULL; msg_recv.msg_accrightslen = 0; #endif do { ufd.fd = instance->totemudp_sockets.mcast_recv; ufd.events = POLLIN; nfds = poll (&ufd, 1, 0); if (nfds == 1 && ufd.revents & POLLIN) { res = recvmsg (instance->totemudp_sockets.mcast_recv, &msg_recv, MSG_NOSIGNAL | MSG_DONTWAIT); if (res != -1) { msg_processed = 1; } else { msg_processed = -1; } } } while (nfds == 1); return (msg_processed); } diff --git a/exec/totemudp.h b/exec/totemudp.h index 8acd853c..ba22b4b3 100644 --- a/exec/totemudp.h +++ b/exec/totemudp.h @@ -1,116 +1,117 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2011 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TOTEMUDP_H_DEFINED #define TOTEMUDP_H_DEFINED #include #include #include #include /** * Create an instance */ extern int totemudp_initialize ( qb_loop_t* poll_handle, void **udp_context, struct totem_config *totem_config, int interface_no, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_address), void (*target_set_completed) ( void *context)); extern void *totemudp_buffer_alloc (void); extern void totemudp_buffer_release (void *ptr); extern int totemudp_processor_count_set ( void *udp_context, int processor_count); extern int totemudp_token_send ( void *udp_context, const void *msg, unsigned int msg_len); extern int totemudp_mcast_flush_send ( void *udp_context, const void *msg, unsigned int msg_len); extern int totemudp_mcast_noflush_send ( void *udp_context, const void *msg, unsigned int msg_len); extern int totemudp_recv_flush (void *udp_context); extern int totemudp_send_flush (void *udp_context); extern int totemudp_iface_check (void *udp_context); extern int totemudp_finalize (void *udp_context); extern void totemudp_net_mtu_adjust (void *udp_context, struct totem_config *totem_config); extern const char *totemudp_iface_print (void *udp_context); extern int totemudp_iface_get ( void *udp_context, struct totem_ip_address *addr); extern int totemudp_token_target_set ( void *udp_context, const struct 
totem_ip_address *token_target); extern int totemudp_crypto_set ( void *udp_context, - unsigned int type); + const char *cipher_type, + const char *hash_type); extern int totemudp_recv_mcast_empty ( void *udp_context); #endif /* TOTEMUDP_H_DEFINED */ diff --git a/exec/totemudpu.c b/exec/totemudpu.c index 4a2f291a..7b236cdc 100644 --- a/exec/totemudpu.c +++ b/exec/totemudpu.c @@ -1,1060 +1,1061 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2012 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define LOGSYS_UTILS_ONLY 1 #include #include "totemudpu.h" #include "util.h" #include "totemcrypto.h" #include #include #include #include #ifndef MSG_NOSIGNAL #define MSG_NOSIGNAL 0 #endif #define MCAST_SOCKET_BUFFER_SIZE (TRANSMITS_ALLOWED * FRAME_SIZE_MAX) #define NETIF_STATE_REPORT_UP 1 #define NETIF_STATE_REPORT_DOWN 2 #define BIND_STATE_UNBOUND 0 #define BIND_STATE_REGULAR 1 #define BIND_STATE_LOOPBACK 2 struct totemudpu_member { struct list_head list; struct totem_ip_address member; int fd; }; struct totemudpu_instance { struct crypto_instance *crypto_inst; qb_loop_t *totemudpu_poll_handle; struct totem_interface *totem_interface; int netif_state_report; int netif_bind_state; void *context; void (*totemudpu_deliver_fn) ( void *context, const void *msg, unsigned int msg_len); void (*totemudpu_iface_change_fn) ( void *context, const struct totem_ip_address *iface_address); void (*totemudpu_target_set_completed) (void *context); /* * Function and data used to log messages */ int totemudpu_log_level_security; int totemudpu_log_level_error; int totemudpu_log_level_warning; int totemudpu_log_level_notice; int totemudpu_log_level_debug; int totemudpu_subsys_id; void (*totemudpu_log_printf) ( int level, int 
subsys, const char *function, const char *file, int line, const char *format, ...)__attribute__((format(printf, 6, 7))); void *udpu_context; char iov_buffer[FRAME_SIZE_MAX]; struct iovec totemudpu_iov_recv; struct list_head member_list; int stats_sent; int stats_recv; int stats_delv; int stats_remcasts; int stats_orf_token; struct timeval stats_tv_start; struct totem_ip_address my_id; int firstrun; qb_loop_timer_handle timer_netif_check_timeout; unsigned int my_memb_entries; struct totem_config *totem_config; struct totem_ip_address token_target; int token_socket; }; struct work_item { const void *msg; unsigned int msg_len; struct totemudpu_instance *instance; }; static int totemudpu_build_sockets ( struct totemudpu_instance *instance, struct totem_ip_address *bindnet_address, struct totem_ip_address *bound_to); static struct totem_ip_address localhost; static void totemudpu_instance_initialize (struct totemudpu_instance *instance) { memset (instance, 0, sizeof (struct totemudpu_instance)); instance->netif_state_report = NETIF_STATE_REPORT_UP | NETIF_STATE_REPORT_DOWN; instance->totemudpu_iov_recv.iov_base = instance->iov_buffer; instance->totemudpu_iov_recv.iov_len = FRAME_SIZE_MAX; //sizeof (instance->iov_buffer); /* * There is always atleast 1 processor */ instance->my_memb_entries = 1; list_init (&instance->member_list); } #define log_printf(level, format, args...) \ do { \ instance->totemudpu_log_printf ( \ level, instance->totemudpu_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ (const char *)format, ##args); \ } while (0); #define LOGSYS_PERROR(err_num, level, fmt, args...) 
\ do { \ char _error_str[LOGSYS_MAX_PERROR_MSG_LEN]; \ const char *_error_ptr = qb_strerror_r(err_num, _error_str, sizeof(_error_str)); \ instance->totemudpu_log_printf ( \ level, instance->totemudpu_subsys_id, \ __FUNCTION__, __FILE__, __LINE__, \ fmt ": %s (%d)", ##args, _error_ptr, err_num); \ } while(0) int totemudpu_crypto_set ( void *udpu_context, - unsigned int type) + const char *cipher_type, + const char *hash_type) { return (0); } static inline void ucast_sendmsg ( struct totemudpu_instance *instance, struct totem_ip_address *system_to, const void *msg, unsigned int msg_len) { struct msghdr msg_ucast; int res = 0; size_t buf_out_len; unsigned char buf_out[FRAME_SIZE_MAX]; struct sockaddr_storage sockaddr; struct iovec iovec; int addrlen; /* * Encrypt and digest the message */ if (crypto_encrypt_and_sign ( instance->crypto_inst, (const unsigned char *)msg, msg_len, buf_out, &buf_out_len) != 0) { log_printf(LOGSYS_LEVEL_CRIT, "Error encrypting/signing packet (non-critical)"); return; } iovec.iov_base = (void *)buf_out; iovec.iov_len = buf_out_len; /* * Build unicast message */ totemip_totemip_to_sockaddr_convert(system_to, instance->totem_interface->ip_port, &sockaddr, &addrlen); msg_ucast.msg_name = &sockaddr; msg_ucast.msg_namelen = addrlen; msg_ucast.msg_iov = (void *)&iovec; msg_ucast.msg_iovlen = 1; #if !defined(COROSYNC_SOLARIS) msg_ucast.msg_control = 0; msg_ucast.msg_controllen = 0; msg_ucast.msg_flags = 0; #else msg_ucast.msg_accrights = NULL; msg_ucast.msg_accrightslen = 0; #endif /* * Transmit unicast message * An error here is recovered by totemsrp */ res = sendmsg (instance->token_socket, &msg_ucast, MSG_NOSIGNAL); if (res < 0) { LOGSYS_PERROR (errno, instance->totemudpu_log_level_debug, "sendmsg(ucast) failed (non-critical)"); } } static inline void mcast_sendmsg ( struct totemudpu_instance *instance, const void *msg, unsigned int msg_len) { struct msghdr msg_mcast; int res = 0; size_t buf_out_len; unsigned char buf_out[FRAME_SIZE_MAX]; struct 
iovec iovec; struct sockaddr_storage sockaddr; int addrlen; struct list_head *list; struct totemudpu_member *member; /* * Encrypt and digest the message */ if (crypto_encrypt_and_sign ( instance->crypto_inst, (const unsigned char *)msg, msg_len, buf_out, &buf_out_len) != 0) { log_printf(LOGSYS_LEVEL_CRIT, "Error encrypting/signing packet (non-critical)"); return; } iovec.iov_base = (void *)buf_out; iovec.iov_len = buf_out_len; /* * Build multicast message */ for (list = instance->member_list.next; list != &instance->member_list; list = list->next) { member = list_entry (list, struct totemudpu_member, list); totemip_totemip_to_sockaddr_convert(&member->member, instance->totem_interface->ip_port, &sockaddr, &addrlen); msg_mcast.msg_name = &sockaddr; msg_mcast.msg_namelen = addrlen; msg_mcast.msg_iov = (void *)&iovec; msg_mcast.msg_iovlen = 1; #if !defined(COROSYNC_SOLARIS) msg_mcast.msg_control = 0; msg_mcast.msg_controllen = 0; msg_mcast.msg_flags = 0; #else msg_mcast.msg_accrights = NULL; msg_mcast.msg_accrightslen = 0; #endif /* * Transmit multicast message * An error here is recovered by totemsrp */ res = sendmsg (member->fd, &msg_mcast, MSG_NOSIGNAL); if (res < 0) { LOGSYS_PERROR (errno, instance->totemudpu_log_level_debug, "sendmsg(mcast) failed (non-critical)"); } } } int totemudpu_finalize ( void *udpu_context) { struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; int res = 0; if (instance->token_socket > 0) { close (instance->token_socket); qb_loop_poll_del (instance->totemudpu_poll_handle, instance->token_socket); } return (res); } static int net_deliver_fn ( int fd, int revents, void *data) { struct totemudpu_instance *instance = (struct totemudpu_instance *)data; struct msghdr msg_recv; struct iovec *iovec; struct sockaddr_storage system_from; int bytes_received; int res = 0; iovec = &instance->totemudpu_iov_recv; /* * Receive datagram */ msg_recv.msg_name = &system_from; msg_recv.msg_namelen = sizeof (struct 
sockaddr_storage); msg_recv.msg_iov = iovec; msg_recv.msg_iovlen = 1; #if !defined(COROSYNC_SOLARIS) msg_recv.msg_control = 0; msg_recv.msg_controllen = 0; msg_recv.msg_flags = 0; #else msg_recv.msg_accrights = NULL; msg_recv.msg_accrightslen = 0; #endif bytes_received = recvmsg (fd, &msg_recv, MSG_NOSIGNAL | MSG_DONTWAIT); if (bytes_received == -1) { return (0); } else { instance->stats_recv += bytes_received; } /* * Authenticate and if authenticated, decrypt datagram */ res = crypto_authenticate_and_decrypt (instance->crypto_inst, iovec->iov_base, &bytes_received); if (res == -1) { log_printf (instance->totemudpu_log_level_security, "Received message has invalid digest... ignoring."); log_printf (instance->totemudpu_log_level_security, "Invalid packet data"); iovec->iov_len = FRAME_SIZE_MAX; return 0; } iovec->iov_len = bytes_received; /* * Handle incoming message */ instance->totemudpu_deliver_fn ( instance->context, iovec->iov_base, iovec->iov_len); iovec->iov_len = FRAME_SIZE_MAX; return (0); } static int netif_determine ( struct totemudpu_instance *instance, struct totem_ip_address *bindnet, struct totem_ip_address *bound_to, int *interface_up, int *interface_num) { int res; res = totemip_iface_check (bindnet, bound_to, interface_up, interface_num, instance->totem_config->clear_node_high_bit); return (res); } /* * If the interface is up, the sockets for totem are built. If the interface is down * this function is requeued in the timer list to retry building the sockets later. 
 */
static void timer_function_netif_check_timeout (
	void *data)
{
	struct totemudpu_instance *instance = (struct totemudpu_instance *)data;
	int interface_up;
	int interface_num;
	struct totem_ip_address *bind_address;

	/*
	 * Determine the current state (up/down) and address of the
	 * interface we want to bind to.
	 */
	netif_determine (instance,
		&instance->totem_interface->bindnet,
		&instance->totem_interface->boundto,
		&interface_up,
		&interface_num);

	/*
	 * Nothing to do right now if either (a) we are in loopback mode
	 * and the real interface is still down, or (b) we are a single
	 * member bound normally and the interface is still up.  In both
	 * cases just re-arm the timer to check again later.
	 */
	if ((instance->netif_bind_state == BIND_STATE_LOOPBACK
		&& interface_up == 0)
		|| (instance->my_memb_entries == 1
		&& instance->netif_bind_state == BIND_STATE_REGULAR
		&& interface_up == 1)) {

		qb_loop_timer_add (instance->totemudpu_poll_handle,
			QB_LOOP_MED,
			instance->totem_config->downcheck_timeout*QB_TIME_NS_IN_MSEC,
			(void *)instance,
			timer_function_netif_check_timeout,
			&instance->timer_netif_check_timeout);

		return;
	}

	/*
	 * Tear down the old token socket before rebuilding.
	 * NOTE(review): the fd is close()d before it is removed from the
	 * poll loop — if libqb or another thread can reuse the fd number
	 * in between, this could race; confirm whether qb_loop_poll_del
	 * should run before close().  Also "> 0" treats fd 0 as invalid;
	 * presumably intentional since 0 means "never opened" here —
	 * verify.
	 */
	if (instance->token_socket > 0) {
		close (instance->token_socket);
		qb_loop_poll_del (instance->totemudpu_poll_handle,
			instance->token_socket);
	}

	if (interface_up == 0) {
		/*
		 * Interface is not up: fall back to loopback and re-arm
		 * the timer to retry building interfaces later.
		 */
		instance->netif_bind_state = BIND_STATE_LOOPBACK;
		bind_address = &localhost;

		qb_loop_timer_add (instance->totemudpu_poll_handle,
			QB_LOOP_MED,
			instance->totem_config->downcheck_timeout*QB_TIME_NS_IN_MSEC,
			(void *)instance,
			timer_function_netif_check_timeout,
			&instance->timer_netif_check_timeout);
	} else {
		/*
		 * Interface is up: bind normally.
		 */
		instance->netif_bind_state = BIND_STATE_REGULAR;
		bind_address = &instance->totem_interface->bindnet;
	}

	/*
	 * Create and bind the unicast (token) socket, then register it
	 * with the poll loop for delivery.
	 */
	totemudpu_build_sockets (instance, bind_address,
		&instance->totem_interface->boundto);

	qb_loop_poll_add (instance->totemudpu_poll_handle,
		QB_LOOP_MED,
		instance->token_socket,
		POLLIN, instance, net_deliver_fn);

	totemip_copy (&instance->my_id, &instance->totem_interface->boundto);

	/*
	 * Report interface state changes to the user and totemsrp.
	 * netif_state_report holds which transitions still need to be
	 * announced, so each up/down edge is logged only once.
	 */
	if (instance->netif_bind_state == BIND_STATE_REGULAR) {
		if (instance->netif_state_report & NETIF_STATE_REPORT_UP) {
			log_printf (instance->totemudpu_log_level_notice,
				"The network interface [%s] is now up.",
				totemip_print (&instance->totem_interface->boundto));
			instance->netif_state_report = NETIF_STATE_REPORT_DOWN;
			instance->totemudpu_iface_change_fn (instance->context, &instance->my_id);
		}
		/*
		 * While we are a single membership, keep a timer running
		 * to detect the interface going down.
		 */
		if (instance->my_memb_entries == 1) {
			qb_loop_timer_add (instance->totemudpu_poll_handle,
				QB_LOOP_MED,
				instance->totem_config->downcheck_timeout*QB_TIME_NS_IN_MSEC,
				(void *)instance,
				timer_function_netif_check_timeout,
				&instance->timer_netif_check_timeout);
		}
	} else {
		if (instance->netif_state_report & NETIF_STATE_REPORT_DOWN) {
			log_printf (instance->totemudpu_log_level_notice,
				"The network interface is down.");
			instance->totemudpu_iface_change_fn (instance->context, &instance->my_id);
		}
		instance->netif_state_report = NETIF_STATE_REPORT_UP;
	}
}

/*
 * Set the socket priority to INTERACTIVE to ensure
 * that our messages don't get queued behind anything else.
 * Best-effort: a failure is logged but not fatal.
 */
static void totemudpu_traffic_control_set(struct totemudpu_instance *instance, int sock)
{
#ifdef SO_PRIORITY
	int prio = 6; /* TC_PRIO_INTERACTIVE */

	if (setsockopt(sock, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(int))) {
		LOGSYS_PERROR (errno, instance->totemudpu_log_level_warning,
			"Could not set traffic priority");
	}
#endif
}

/*
 * Create, configure, and bind the unicast token socket for this
 * instance on the interface described by bound_to.
 */
static int totemudpu_build_sockets_ip (
	struct totemudpu_instance *instance,
	struct totem_ip_address *bindnet_address,
	struct totem_ip_address *bound_to,
	int interface_num)
{
	struct sockaddr_storage sockaddr;
	int addrlen;
	int res;
	unsigned int recvbuf_size;
	unsigned int optlen = sizeof (recvbuf_size);

	/*
	 * Setup unicast socket
	 */
	instance->token_socket = socket (bindnet_address->family, SOCK_DGRAM, 0);
	if (instance->token_socket
== -1) {
		LOGSYS_PERROR (errno, instance->totemudpu_log_level_warning,
			"socket() failed");
		return (-1);
	}

	totemip_nosigpipe (instance->token_socket);
	res = fcntl (instance->token_socket, F_SETFL, O_NONBLOCK);
	if (res == -1) {
		LOGSYS_PERROR (errno, instance->totemudpu_log_level_warning,
			"Could not set non-blocking operation on token socket");
		return (-1);
	}

	/*
	 * Bind to unicast socket used for token send/receives
	 * This has the side effect of binding to the correct interface
	 */
	totemip_totemip_to_sockaddr_convert(bound_to, instance->totem_interface->ip_port, &sockaddr, &addrlen);
	res = bind (instance->token_socket, (struct sockaddr *)&sockaddr, addrlen);
	if (res == -1) {
		LOGSYS_PERROR (errno, instance->totemudpu_log_level_warning,
			"bind token socket failed");
		return (-1);
	}

	/*
	 * the token_socket can receive many messages. Allow a large number
	 * of receive messages on this socket
	 */
	recvbuf_size = MCAST_SOCKET_BUFFER_SIZE;
	res = setsockopt (instance->token_socket, SOL_SOCKET, SO_RCVBUF,
		&recvbuf_size, optlen);
	if (res == -1) {
		/* Non-fatal: fall back to the system default receive buffer. */
		LOGSYS_PERROR (errno, instance->totemudpu_log_level_notice,
			"Could not set recvbuf size");
	}

	return 0;
}

/*
 * Determine the address this node is bound to, record it as the
 * instance identity (my_id), and build the token socket for it.
 * Returns -1 if the interface cannot be determined.
 */
static int totemudpu_build_sockets (
	struct totemudpu_instance *instance,
	struct totem_ip_address *bindnet_address,
	struct totem_ip_address *bound_to)
{
	int interface_num;
	int interface_up;
	int res;

	/*
	 * Determine the ip address bound to and the interface name
	 */
	res = netif_determine (instance,
		bindnet_address,
		bound_to,
		&interface_up,
		&interface_num);

	if (res == -1) {
		return (-1);
	}

	totemip_copy(&instance->my_id, bound_to);

	res = totemudpu_build_sockets_ip (instance,
		bindnet_address, bound_to, interface_num);

	/* We only send out of the token socket */
	totemudpu_traffic_control_set(instance, instance->token_socket);
	return res;
}

/*
 * Totem Network interface - also does encryption/decryption
 * depends on poll abstraction, POSIX, IPV4
 */

/*
 * Create an instance
 *
 * Allocates and initializes a totemudpu instance, wires in the caller's
 * delivery/interface-change/target-set callbacks, initializes the crypto
 * context from the configured key and cipher/hash types, and arms a short
 * timer that performs the first interface check.  On success the opaque
 * instance pointer is returned through *udpu_context and 0 is returned;
 * -1 on allocation or crypto initialization failure.
 */
int totemudpu_initialize (
	qb_loop_t *poll_handle,
	void **udpu_context,
	struct totem_config *totem_config,
	int interface_no,
	void *context,

	void (*deliver_fn) (
		void *context,
		const void *msg,
		unsigned int msg_len),

	void (*iface_change_fn) (
		void *context,
		const struct totem_ip_address *iface_address),

	void (*target_set_completed) (
		void *context))
{
	struct totemudpu_instance *instance;

	instance = malloc (sizeof (struct totemudpu_instance));
	if (instance == NULL) {
		return (-1);
	}

	totemudpu_instance_initialize (instance);

	instance->totem_config = totem_config;
	/*
	 * Configure logging
	 */
	instance->totemudpu_log_level_security = 1; //totem_config->totem_logging_configuration.log_level_security;
	instance->totemudpu_log_level_error = totem_config->totem_logging_configuration.log_level_error;
	instance->totemudpu_log_level_warning = totem_config->totem_logging_configuration.log_level_warning;
	instance->totemudpu_log_level_notice = totem_config->totem_logging_configuration.log_level_notice;
	instance->totemudpu_log_level_debug = totem_config->totem_logging_configuration.log_level_debug;
	instance->totemudpu_subsys_id = totem_config->totem_logging_configuration.log_subsys_id;
	instance->totemudpu_log_printf = totem_config->totem_logging_configuration.log_printf;

	/*
	 * Initialize random number generator for later use to generate salt
	 */
	instance->crypto_inst = crypto_init (totem_config->private_key,
		totem_config->private_key_len,
		totem_config->crypto_cipher_type,
		totem_config->crypto_hash_type,
		instance->totemudpu_log_printf,
		instance->totemudpu_log_level_security,
		instance->totemudpu_log_level_notice,
		instance->totemudpu_log_level_error,
		instance->totemudpu_subsys_id);
	if (instance->crypto_inst == NULL) {
		/* NOTE(review): instance is leaked on this error path —
		 * consider free(instance) before returning. */
		return (-1);
	}
	/*
	 * Initialize local variables for totemudpu
	 */
	instance->totem_interface = &totem_config->interfaces[interface_no];
	memset (instance->iov_buffer, 0, FRAME_SIZE_MAX);

	instance->totemudpu_poll_handle = poll_handle;

	instance->totem_interface->bindnet.nodeid = instance->totem_config->node_id;

	instance->context = context;
	instance->totemudpu_deliver_fn = deliver_fn;

	instance->totemudpu_iface_change_fn = iface_change_fn;

	instance->totemudpu_target_set_completed = target_set_completed;

	totemip_localhost (AF_INET, &localhost);
	localhost.nodeid = instance->totem_config->node_id;

	/*
	 * RRP layer isn't ready to receive message because it hasn't
	 * initialized yet. Add short timer to check the interfaces.
	 */
	qb_loop_timer_add (instance->totemudpu_poll_handle,
		QB_LOOP_MED,
		100*QB_TIME_NS_IN_MSEC,
		(void *)instance,
		timer_function_netif_check_timeout,
		&instance->timer_netif_check_timeout);

	*udpu_context = instance;
	return (0);
}

/* Allocate a frame-sized message buffer (caller frees via
 * totemudpu_buffer_release). */
void *totemudpu_buffer_alloc (void)
{
	return malloc (FRAME_SIZE_MAX);
}

/* Release a buffer obtained from totemudpu_buffer_alloc.
 * NOTE(review): "return" of a void expression in a void function is
 * non-standard (C99 6.8.6.4); most compilers accept it with a warning. */
void totemudpu_buffer_release (void *ptr)
{
	return free (ptr);
}

/*
 * Record the processor count; with a single processor, re-arm the
 * periodic interface check using the configured downcheck timeout.
 */
int totemudpu_processor_count_set (
	void *udpu_context,
	int processor_count)
{
	struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context;
	int res = 0;

	instance->my_memb_entries = processor_count;
	qb_loop_timer_del (instance->totemudpu_poll_handle,
		instance->timer_netif_check_timeout);
	if (processor_count == 1) {
		qb_loop_timer_add (instance->totemudpu_poll_handle,
			QB_LOOP_MED,
			instance->totem_config->downcheck_timeout*QB_TIME_NS_IN_MSEC,
			(void *)instance,
			timer_function_netif_check_timeout,
			&instance->timer_netif_check_timeout);
	}

	return (res);
}

/* No-op for UDPU transport; kept to satisfy the totem transport interface. */
int totemudpu_recv_flush (void *udpu_context)
{
	int res = 0;

	return (res);
}

/* No-op for UDPU transport; kept to satisfy the totem transport interface. */
int totemudpu_send_flush (void *udpu_context)
{
	int res = 0;

	return (res);
}

/* Send a token message to the currently configured token target. */
int totemudpu_token_send (
	void *udpu_context,
	const void *msg,
	unsigned int msg_len)
{
	struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context;
	int res = 0;

	ucast_sendmsg (instance, &instance->token_target, msg, msg_len);

	return (res);
}

/* "Multicast" a message: for UDPU this unicasts to every known member. */
int totemudpu_mcast_flush_send (
	void *udpu_context,
	const void *msg,
	unsigned int msg_len)
{
	struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context;
	int res = 0;

	mcast_sendmsg (instance, msg, msg_len);

	return (res);
}

int
totemudpu_mcast_noflush_send ( void *udpu_context, const void *msg, unsigned int msg_len) { struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; int res = 0; mcast_sendmsg (instance, msg, msg_len); return (res); } extern int totemudpu_iface_check (void *udpu_context) { struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; int res = 0; timer_function_netif_check_timeout (instance); return (res); } extern void totemudpu_net_mtu_adjust (void *udpu_context, struct totem_config *totem_config) { #define UDPIP_HEADER_SIZE (20 + 8) /* 20 bytes for ip 8 bytes for udp */ totem_config->net_mtu -= crypto_sec_header_size(totem_config->crypto_cipher_type, totem_config->crypto_hash_type) + UDPIP_HEADER_SIZE; } const char *totemudpu_iface_print (void *udpu_context) { struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; const char *ret_char; ret_char = totemip_print (&instance->my_id); return (ret_char); } int totemudpu_iface_get ( void *udpu_context, struct totem_ip_address *addr) { struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; int res = 0; memcpy (addr, &instance->my_id, sizeof (struct totem_ip_address)); return (res); } int totemudpu_token_target_set ( void *udpu_context, const struct totem_ip_address *token_target) { struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; int res = 0; memcpy (&instance->token_target, token_target, sizeof (struct totem_ip_address)); instance->totemudpu_target_set_completed (instance->context); return (res); } extern int totemudpu_recv_mcast_empty ( void *udpu_context) { struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; unsigned int res; struct sockaddr_storage system_from; struct msghdr msg_recv; struct pollfd ufd; int nfds; int msg_processed = 0; /* * Receive datagram */ msg_recv.msg_name = &system_from; msg_recv.msg_namelen = sizeof (struct sockaddr_storage); 
msg_recv.msg_iov = &instance->totemudpu_iov_recv; msg_recv.msg_iovlen = 1; #if !defined(COROSYNC_SOLARIS) msg_recv.msg_control = 0; msg_recv.msg_controllen = 0; msg_recv.msg_flags = 0; #else msg_recv.msg_accrights = NULL; msg_recv.msg_accrightslen = 0; #endif do { ufd.fd = instance->token_socket; ufd.events = POLLIN; nfds = poll (&ufd, 1, 0); if (nfds == 1 && ufd.revents & POLLIN) { res = recvmsg (instance->token_socket, &msg_recv, MSG_NOSIGNAL | MSG_DONTWAIT); if (res != -1) { msg_processed = 1; } else { msg_processed = -1; } } } while (nfds == 1); return (msg_processed); } int totemudpu_member_add ( void *udpu_context, const struct totem_ip_address *member) { struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; struct totemudpu_member *new_member; int res; unsigned int sendbuf_size; unsigned int optlen = sizeof (sendbuf_size); new_member = malloc (sizeof (struct totemudpu_member)); if (new_member == NULL) { return (-1); } log_printf (LOGSYS_LEVEL_NOTICE, "adding new UDPU member {%s}", totemip_print(member)); list_init (&new_member->list); list_add_tail (&new_member->list, &instance->member_list); memcpy (&new_member->member, member, sizeof (struct totem_ip_address)); new_member->fd = socket (member->family, SOCK_DGRAM, 0); if (new_member->fd == -1) { LOGSYS_PERROR (errno, instance->totemudpu_log_level_warning, "Could not create socket for new member"); return (-1); } totemip_nosigpipe (new_member->fd); res = fcntl (new_member->fd, F_SETFL, O_NONBLOCK); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudpu_log_level_warning, "Could not set non-blocking operation on token socket"); return (-1); } /* * These sockets are used to send multicast messages, so their buffers * should be large */ sendbuf_size = MCAST_SOCKET_BUFFER_SIZE; res = setsockopt (new_member->fd, SOL_SOCKET, SO_SNDBUF, &sendbuf_size, optlen); if (res == -1) { LOGSYS_PERROR (errno, instance->totemudpu_log_level_notice, "Could not set sendbuf size"); } return (0); } 
int totemudpu_member_remove ( void *udpu_context, const struct totem_ip_address *token_target) { int found = 0; struct list_head *list; struct totemudpu_member *member; struct totemudpu_instance *instance = (struct totemudpu_instance *)udpu_context; /* * Find the member to remove and close its socket */ for (list = instance->member_list.next; list != &instance->member_list; list = list->next) { member = list_entry (list, struct totemudpu_member, list); if (totemip_compare (token_target, &member->member)==0) { log_printf(LOGSYS_LEVEL_NOTICE, "removing UDPU member {%s}", totemip_print(&member->member)); if (member->fd > 0) { log_printf(LOGSYS_LEVEL_DEBUG, "Closing socket to: {%s}", totemip_print(&member->member)); qb_loop_poll_del (instance->totemudpu_poll_handle, member->fd); close (member->fd); } found = 1; break; } } /* * Delete the member from the list */ if (found) { list_del (list); } instance = NULL; return (0); } diff --git a/exec/totemudpu.h b/exec/totemudpu.h index 1d1409a8..136960cf 100644 --- a/exec/totemudpu.h +++ b/exec/totemudpu.h @@ -1,124 +1,125 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2011 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. 
nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TOTEMUDPU_H_DEFINED #define TOTEMUDPU_H_DEFINED #include #include #include #include /** * Create an instance */ extern int totemudpu_initialize ( qb_loop_t *poll_handle, void **udpu_context, struct totem_config *totem_config, int interface_no, void *context, void (*deliver_fn) ( void *context, const void *msg, unsigned int msg_len), void (*iface_change_fn) ( void *context, const struct totem_ip_address *iface_address), void (*target_set_completed) ( void *context)); extern void *totemudpu_buffer_alloc (void); extern void totemudpu_buffer_release (void *ptr); extern int totemudpu_processor_count_set ( void *udpu_context, int processor_count); extern int totemudpu_token_send ( void *udpu_context, const void *msg, unsigned int msg_len); extern int totemudpu_mcast_flush_send ( void *udpu_context, const void *msg, unsigned int msg_len); extern int totemudpu_mcast_noflush_send ( void *udpu_context, const void *msg, unsigned int msg_len); extern int totemudpu_recv_flush (void *udpu_context); extern int totemudpu_send_flush (void *udpu_context); 
extern int totemudpu_iface_check (void *udpu_context); extern int totemudpu_finalize (void *udpu_context); extern void totemudpu_net_mtu_adjust (void *udpu_context, struct totem_config *totem_config); extern const char *totemudpu_iface_print (void *udpu_context); extern int totemudpu_iface_get ( void *udpu_context, struct totem_ip_address *addr); extern int totemudpu_token_target_set ( void *udpu_context, const struct totem_ip_address *token_target); extern int totemudpu_crypto_set ( void *udpu_context, - unsigned int type); + const char *cipher_type, + const char *hash_type); extern int totemudpu_recv_mcast_empty ( void *udpu_context); extern int totemudpu_member_add ( void *udpu_context, const struct totem_ip_address *member); extern int totemudpu_member_remove ( void *udpu_context, const struct totem_ip_address *member); #endif /* TOTEMUDPU_H_DEFINED */ diff --git a/include/corosync/cfg.h b/include/corosync/cfg.h index 273c214a..d26d5d19 100644 --- a/include/corosync/cfg.h +++ b/include/corosync/cfg.h @@ -1,169 +1,164 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2006-2012 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef COROSYNC_CFG_H_DEFINED #define COROSYNC_CFG_H_DEFINED #include #include typedef uint64_t corosync_cfg_handle_t; /** * Shutdown types. */ typedef enum { /** * REQUEST is the normal shutdown. * Other daemons will be consulted. */ COROSYNC_CFG_SHUTDOWN_FLAG_REQUEST = 0, /** * REGARDLESS will tell other daemons but ignore their opinions. */ COROSYNC_CFG_SHUTDOWN_FLAG_REGARDLESS = 1, /** * IMMEDIATE will shut down straight away * (but still tell other nodes). */ COROSYNC_CFG_SHUTDOWN_FLAG_IMMEDIATE = 2, } corosync_cfg_shutdown_flags_t; typedef enum { COROSYNC_CFG_SHUTDOWN_FLAG_NO = 0, COROSYNC_CFG_SHUTDOWN_FLAG_YES = 1, } corosync_cfg_shutdown_reply_flags_t; typedef void (*corosync_cfg_shutdown_callback_t) ( corosync_cfg_handle_t cfg_handle, corosync_cfg_shutdown_flags_t flags); typedef struct { corosync_cfg_shutdown_callback_t corosync_cfg_shutdown_callback; } corosync_cfg_callbacks_t; /** * A node address. This is a complete sockaddr_in[6] * * To explain: * If you cast cna_address to a 'struct sockaddr', the sa_family field * will be AF_INET or AF_INET6. Armed with that knowledge you can then * cast it to a sockaddr_in or sockaddr_in6 and pull out the address. 
* No other sockaddr fields are valid. * Also, you must ignore any part of the sockaddr beyond the length supplied */ typedef struct { int address_length; /**< @todo FIXME: set but never used */ char address[sizeof(struct sockaddr_in6)]; } corosync_cfg_node_address_t; /* * Interfaces */ #ifdef __cplusplus extern "C" { #endif cs_error_t corosync_cfg_initialize ( corosync_cfg_handle_t *cfg_handle, const corosync_cfg_callbacks_t *cfg_callbacks); cs_error_t corosync_cfg_fd_get ( corosync_cfg_handle_t cfg_handle, int32_t *selection_fd); cs_error_t corosync_cfg_dispatch ( corosync_cfg_handle_t cfg_handle, cs_dispatch_flags_t dispatch_flags); cs_error_t corosync_cfg_finalize ( corosync_cfg_handle_t cfg_handle); cs_error_t corosync_cfg_ring_status_get ( corosync_cfg_handle_t cfg_handle, char ***interface_names, char ***status, unsigned int *interface_count); cs_error_t corosync_cfg_ring_reenable ( corosync_cfg_handle_t cfg_handle); cs_error_t corosync_cfg_kill_node ( corosync_cfg_handle_t cfg_handle, unsigned int nodeid, const char *reason); cs_error_t corosync_cfg_try_shutdown ( corosync_cfg_handle_t cfg_handle, corosync_cfg_shutdown_flags_t flags); cs_error_t corosync_cfg_replyto_shutdown ( corosync_cfg_handle_t cfg_handle, corosync_cfg_shutdown_reply_flags_t flags); cs_error_t corosync_cfg_get_node_addrs ( corosync_cfg_handle_t cfg_handle, int nodeid, size_t max_addrs, int *num_addrs, corosync_cfg_node_address_t *addrs); cs_error_t corosync_cfg_local_get ( corosync_cfg_handle_t handle, unsigned int *local_nodeid); -cs_error_t -corosync_cfg_crypto_set ( - corosync_cfg_handle_t handle, - unsigned int type); - #ifdef __cplusplus } #endif #endif /* COROSYNC_CFG_H_DEFINED */ diff --git a/include/corosync/coroapi.h b/include/corosync/coroapi.h index cbadeeb2..ef22bd36 100644 --- a/include/corosync/coroapi.h +++ b/include/corosync/coroapi.h @@ -1,462 +1,462 @@ /* * Copyright (c) 2008-2012 Red Hat, Inc. * * All rights reserved. 
* * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef COROAPI_H_DEFINED #define COROAPI_H_DEFINED #include #ifdef COROSYNC_BSD #include #endif #include #include #include typedef struct { uint32_t nodeid __attribute__((aligned(8))); void *conn __attribute__((aligned(8))); } mar_message_source_t __attribute__((aligned(8))); static inline void swab_mar_message_source_t (mar_message_source_t *to_swab) { swab32 (to_swab->nodeid); /* * if it is from a byteswapped machine, then we can safely * ignore its conn info data structure since this is only * local to the machine */ to_swab->conn = NULL; } #ifndef TIMER_HANDLE_T typedef qb_loop_timer_handle corosync_timer_handle_t; #define TIMER_HANDLE_T 1 #endif struct corosync_tpg_group { const void *group; size_t group_len; }; #define TOTEMIP_ADDRLEN (sizeof(struct in6_addr)) #define INTERFACE_MAX 2 #ifndef MESSAGE_QUEUE_MAX #ifdef HAVE_SMALL_MEMORY_FOOTPRINT #define PROCESSOR_COUNT_MAX 16 #define MESSAGE_SIZE_MAX 1024*64 #define MESSAGE_QUEUE_MAX 512 #else #define PROCESSOR_COUNT_MAX 384 #define MESSAGE_SIZE_MAX 1024*1024 #define MESSAGE_QUEUE_MAX ((4 * MESSAGE_SIZE_MAX) / totem_config->net_mtu) #endif /* HAVE_SMALL_MEMORY_FOOTPRINT */ #endif /* MESSAGE_QUEUE_MAX */ #define TOTEM_AGREED 0 #define TOTEM_SAFE 1 #define MILLI_2_NANO_SECONDS 1000000ULL #if !defined(TOTEM_IP_ADDRESS) struct totem_ip_address { unsigned int nodeid; unsigned short family; unsigned char addr[TOTEMIP_ADDRLEN]; } __attribute__((packed)); #endif #if !defined(MEMB_RING_ID) struct memb_ring_id { struct totem_ip_address rep; unsigned long long seq; } __attribute__((packed)); #endif #if !defined(TOTEM_CONFIGURATION_TYPE) enum totem_configuration_type { TOTEM_CONFIGURATION_REGULAR, TOTEM_CONFIGURATION_TRANSITIONAL }; #endif #if !defined(TOTEM_CALLBACK_TOKEN_TYPE) enum totem_callback_token_type { TOTEM_CALLBACK_TOKEN_RECEIVED = 1, TOTEM_CALLBACK_TOKEN_SENT = 2 }; #endif enum cs_lib_flow_control { CS_LIB_FLOW_CONTROL_REQUIRED = 1, CS_LIB_FLOW_CONTROL_NOT_REQUIRED = 2 }; #define corosync_lib_flow_control 
cs_lib_flow_control #define COROSYNC_LIB_FLOW_CONTROL_REQUIRED CS_LIB_FLOW_CONTROL_REQUIRED #define COROSYNC_LIB_FLOW_CONTROL_NOT_REQUIRED CS_LIB_FLOW_CONTROL_NOT_REQUIRED enum cs_lib_allow_inquorate { CS_LIB_DISALLOW_INQUORATE = 0, /* default */ CS_LIB_ALLOW_INQUORATE = 1 }; #if !defined (COROSYNC_FLOW_CONTROL_STATE) enum cs_flow_control_state { CS_FLOW_CONTROL_STATE_DISABLED, CS_FLOW_CONTROL_STATE_ENABLED }; #define corosync_flow_control_state cs_flow_control_state #define CS_FLOW_CONTROL_STATE_DISABLED CS_FLOW_CONTROL_STATE_DISABLED #define CS_FLOW_CONTROL_STATE_ENABLED CS_FLOW_CONTROL_STATE_ENABLED #endif /* COROSYNC_FLOW_CONTROL_STATE */ typedef enum { COROSYNC_FATAL_ERROR_EXIT = -1, COROSYNC_LIBAIS_SOCKET = -6, COROSYNC_LIBAIS_BIND = -7, COROSYNC_READKEY = -8, COROSYNC_INVALID_CONFIG = -9, COROSYNC_DYNAMICLOAD = -12, COROSYNC_OUT_OF_MEMORY = -15, COROSYNC_FATAL_ERR = -16 } cs_fatal_error_t; #define corosync_fatal_error_t cs_fatal_error_t; #ifndef QUORUM_H_DEFINED typedef void (*quorum_callback_fn_t) (int quorate, void *context); struct quorum_callin_functions { int (*quorate) (void); int (*register_callback) (quorum_callback_fn_t callback_fn, void *contexxt); int (*unregister_callback) (quorum_callback_fn_t callback_fn, void *context); }; typedef void (*sync_callback_fn_t) ( const unsigned int *view_list, size_t view_list_entries, int primary_designated, struct memb_ring_id *ring_id); #endif /* QUORUM_H_DEFINED */ struct corosync_api_v1 { /* * Time and timer APIs */ int (*timer_add_duration) ( unsigned long long nanoseconds_in_future, void *data, void (*timer_nf) (void *data), corosync_timer_handle_t *handle); int (*timer_add_absolute) ( unsigned long long nanoseconds_from_epoch, void *data, void (*timer_fn) (void *data), corosync_timer_handle_t *handle); void (*timer_delete) ( corosync_timer_handle_t timer_handle); unsigned long long (*timer_time_get) (void); unsigned long long (*timer_expire_time_get) ( corosync_timer_handle_t timer_handle); /* * IPC APIs 
*/ void (*ipc_source_set) (mar_message_source_t *source, void *conn); int (*ipc_source_is_local) (const mar_message_source_t *source); void *(*ipc_private_data_get) (void *conn); int (*ipc_response_send) (void *conn, const void *msg, size_t mlen); int (*ipc_response_iov_send) (void *conn, const struct iovec *iov, unsigned int iov_len); int (*ipc_dispatch_send) (void *conn, const void *msg, size_t mlen); int (*ipc_dispatch_iov_send) (void *conn, const struct iovec *iov, unsigned int iov_len); void (*ipc_refcnt_inc) (void *conn); void (*ipc_refcnt_dec) (void *conn); /* * Totem APIs */ unsigned int (*totem_nodeid_get) (void); int (*totem_family_get) (void); int (*totem_ring_reenable) (void); int (*totem_mcast) (const struct iovec *iovec, unsigned int iov_len, unsigned int guarantee); int (*totem_ifaces_get) ( unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count); const char *(*totem_ifaces_print) (unsigned int nodeid); const char *(*totem_ip_print) (const struct totem_ip_address *addr); - int (*totem_crypto_set) (unsigned int type); + int (*totem_crypto_set) (const char *cipher_type, const char *hash_type); int (*totem_callback_token_create) ( void **handle_out, enum totem_callback_token_type type, int delete, int (*callback_fn) (enum totem_callback_token_type type, const void *), const void *data); /* * Totem open process groups API for those service engines * wanting their own groups */ int (*tpg_init) ( void **instance, void (*deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required), void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)); int (*tpg_exit) ( void *instance); int (*tpg_join) ( void *instance, const struct corosync_tpg_group 
*groups, size_t group_cnt); int (*tpg_leave) ( void *instance, const struct corosync_tpg_group *groups, size_t group_cnt); int (*tpg_joined_mcast) ( void *totempg_groups_instance, const struct iovec *iovec, unsigned int iov_len, int guarantee); int (*tpg_joined_reserve) ( void *totempg_groups_instance, const struct iovec *iovec, unsigned int iov_len); int (*tpg_joined_release) ( int reserved_msgs); int (*tpg_groups_mcast) ( void *instance, int guarantee, const struct corosync_tpg_group *groups, size_t groups_cnt, const struct iovec *iovec, unsigned int iov_len); int (*tpg_groups_reserve) ( void *instance, const struct corosync_tpg_group *groups, size_t groups_cnt, const struct iovec *iovec, unsigned int iov_len); int (*tpg_groups_release) ( int reserved_msgs); int (*schedwrk_create) ( hdb_handle_t *handle, int (schedwrk_fn) (const void *), const void *context); void (*schedwrk_destroy) (hdb_handle_t handle); int (*sync_request) ( const char *service_name); /* * User plugin-callable functions for quorum */ int (*quorum_is_quorate) (void); int (*quorum_register_callback) (quorum_callback_fn_t callback_fn, void *context); int (*quorum_unregister_callback) (quorum_callback_fn_t callback_fn, void *context); /* * This one is for the quorum management plugin's use */ int (*quorum_initialize)(struct quorum_callin_functions *fns); /* * Plugin loading and unloading */ int (*plugin_interface_reference) ( hdb_handle_t *handle, const char *iface_name, int version, void **interface, void *context); int (*plugin_interface_release) (hdb_handle_t handle); /* * Service loading and unloading APIs */ unsigned int (*service_link_and_init) ( struct corosync_api_v1 *corosync_api_v1, const char *service_name, unsigned int service_ver); unsigned int (*service_unlink_and_exit) ( struct corosync_api_v1 *corosync_api_v1, const char *service_name, unsigned int service_ver); /* * Error handling APIs */ void (*error_memory_failure) (void) __attribute__ ((noreturn)); #define 
corosync_fatal_error(err) api->fatal_error ((err), __FILE__, __LINE__) void (*fatal_error) (cs_fatal_error_t err, const char *file, unsigned int line) __attribute__ ((noreturn)); void (*shutdown_request) (void); void (*state_dump) (void); qb_loop_t *(*poll_handle_get) (void); void *(*totem_get_stats)(void); int (*schedwrk_create_nolock) ( hdb_handle_t *handle, int (schedwrk_fn) (const void *), const void *context); int (*poll_dispatch_add) (qb_loop_t * handle, int fd, int events, void *data, int (*dispatch_fn) (int fd, int revents, void *data)); int (*poll_dispatch_delete) ( qb_loop_t * handle, int fd); }; #define SERVICE_ID_MAKE(a,b) ( ((a)<<16) | (b) ) #define SERVICE_HANDLER_MAXIMUM_COUNT 64 struct corosync_lib_handler { void (*lib_handler_fn) (void *conn, const void *msg); enum cs_lib_flow_control flow_control; }; struct corosync_exec_handler { void (*exec_handler_fn) (const void *msg, unsigned int nodeid); void (*exec_endian_convert_fn) (void *msg); }; struct corosync_service_engine_iface_ver0 { struct corosync_service_engine *(*corosync_get_service_engine_ver0) (void); }; struct corosync_service_engine { const char *name; unsigned short id; unsigned short priority; /* Lower priority are loaded first, unloaded last. 
* 0 is a special case which always loaded _and_ unloaded last */ unsigned int private_data_size; enum cs_lib_flow_control flow_control; enum cs_lib_allow_inquorate allow_inquorate; char *(*exec_init_fn) (struct corosync_api_v1 *); int (*exec_exit_fn) (void); void (*exec_dump_fn) (void); int (*lib_init_fn) (void *conn); int (*lib_exit_fn) (void *conn); struct corosync_lib_handler *lib_engine; int lib_engine_count; struct corosync_exec_handler *exec_engine; int exec_engine_count; int (*config_init_fn) (struct corosync_api_v1 *); void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id); void (*sync_init) ( const unsigned int *trans_list, size_t trans_list_entries, const unsigned int *member_list, size_t member_list_entries, const struct memb_ring_id *ring_id); int (*sync_process) (void); void (*sync_activate) (void); void (*sync_abort) (void); }; #endif /* COROAPI_H_DEFINED */ diff --git a/include/corosync/ipc_cfg.h b/include/corosync/ipc_cfg.h index 4ddc0396..1ff7e2f9 100644 --- a/include/corosync/ipc_cfg.h +++ b/include/corosync/ipc_cfg.h @@ -1,173 +1,164 @@ /* * Copyright (c) 2005 MontaVista Software, Inc. * Copyright (c) 2009-2012 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 
* - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef IPC_CFG_H_DEFINED #define IPC_CFG_H_DEFINED #include #include #include enum req_lib_cfg_types { MESSAGE_REQ_CFG_RINGSTATUSGET = 0, MESSAGE_REQ_CFG_RINGREENABLE = 1, MESSAGE_REQ_CFG_KILLNODE = 2, MESSAGE_REQ_CFG_TRYSHUTDOWN = 3, MESSAGE_REQ_CFG_REPLYTOSHUTDOWN = 4, MESSAGE_REQ_CFG_GET_NODE_ADDRS = 5, MESSAGE_REQ_CFG_LOCAL_GET = 6, MESSAGE_REQ_CFG_CRYPTO_SET = 7 }; enum res_lib_cfg_types { MESSAGE_RES_CFG_RINGSTATUSGET = 0, MESSAGE_RES_CFG_RINGREENABLE = 1, MESSAGE_RES_CFG_STATETRACKSTART = 2, MESSAGE_RES_CFG_STATETRACKSTOP = 3, MESSAGE_RES_CFG_ADMINISTRATIVESTATESET = 4, MESSAGE_RES_CFG_ADMINISTRATIVESTATEGET = 5, MESSAGE_RES_CFG_SERVICELOAD = 6, MESSAGE_RES_CFG_SERVICEUNLOAD = 7, MESSAGE_RES_CFG_KILLNODE = 8, MESSAGE_RES_CFG_TRYSHUTDOWN = 9, MESSAGE_RES_CFG_TESTSHUTDOWN = 10, MESSAGE_RES_CFG_GET_NODE_ADDRS = 11, MESSAGE_RES_CFG_LOCAL_GET = 12, MESSAGE_RES_CFG_REPLYTOSHUTDOWN = 13, MESSAGE_RES_CFG_CRYPTO_SET = 14, }; struct req_lib_cfg_ringstatusget { struct qb_ipc_request_header header __attribute__((aligned(8))); }; struct res_lib_cfg_ringstatusget { struct qb_ipc_response_header header __attribute__((aligned(8))); mar_uint32_t interface_count __attribute__((aligned(8))); char interface_name[16][128] __attribute__((aligned(8))); char interface_status[16][512] __attribute__((aligned(8))); }; struct req_lib_cfg_ringreenable { struct qb_ipc_request_header header __attribute__((aligned(8))); }; struct res_lib_cfg_ringreenable { struct qb_ipc_response_header header __attribute__((aligned(8))); }; struct req_lib_cfg_killnode { struct qb_ipc_request_header header __attribute__((aligned(8))); unsigned int nodeid __attribute__((aligned(8))); cs_name_t reason __attribute__((aligned(8))); }; struct res_lib_cfg_killnode { struct qb_ipc_response_header header __attribute__((aligned(8))); }; struct req_lib_cfg_tryshutdown { struct qb_ipc_request_header header __attribute__((aligned(8))); unsigned int flags; }; struct res_lib_cfg_tryshutdown { struct 
qb_ipc_response_header header __attribute__((aligned(8))); }; struct req_lib_cfg_replytoshutdown { struct qb_ipc_request_header header __attribute__((aligned(8))); unsigned int response; }; struct res_lib_cfg_replytoshutdown { struct qb_ipc_response_header header __attribute__((aligned(8))); }; struct res_lib_cfg_testshutdown { struct qb_ipc_response_header header __attribute__((aligned(8))); unsigned int flags; }; struct req_lib_cfg_get_node_addrs { struct qb_ipc_request_header header __attribute__((aligned(8))); unsigned int nodeid; }; struct res_lib_cfg_get_node_addrs { struct qb_ipc_response_header header __attribute__((aligned(8))); unsigned int family; unsigned int num_addrs; char addrs[TOTEMIP_ADDRLEN][0]; }; struct req_lib_cfg_local_get { struct qb_ipc_request_header header __attribute__((aligned(8))); }; struct res_lib_cfg_local_get { struct qb_ipc_response_header header __attribute__((aligned(8))); mar_uint32_t local_nodeid __attribute__((aligned(8))); }; -struct req_lib_cfg_crypto_set { - struct qb_ipc_response_header header __attribute__((aligned(8))); - mar_uint32_t type __attribute__((aligned(8))); -}; - -struct res_lib_cfg_crypto_set { - struct qb_ipc_response_header header __attribute__((aligned(8))); -}; - typedef enum { AIS_AMF_ADMINISTRATIVETARGET_SERVICEUNIT = 0, AIS_AMF_ADMINISTRATIVETARGET_SERVICEGROUP = 1, AIS_AMF_ADMINISTRATIVETARGET_COMPONENTSERVICEINSTANCE = 2, AIS_AMF_ADMINISTRATIVETARGET_NODE = 3 } corosync_administrative_target_t; typedef enum { AIS_AMF_ADMINISTRATIVESTATE_UNLOCKED = 0, AIS_AMF_ADMINISTRATIVESTATE_LOCKED = 1, AIS_AMF_ADMINISTRATIVESTATE_STOPPING = 2 } corosync_administrative_state_t; typedef enum { CFG_SHUTDOWN_FLAG_REQUEST = 0, CFG_SHUTDOWN_FLAG_REGARDLESS = 1, CFG_SHUTDOWN_FLAG_IMMEDIATE = 2, } corosync_shutdown_flags_t; #endif /* IPC_CFG_H_DEFINED */ diff --git a/include/corosync/totem/totempg.h b/include/corosync/totem/totempg.h index 98fed8f6..8a94aec2 100644 --- a/include/corosync/totem/totempg.h +++ 
b/include/corosync/totem/totempg.h @@ -1,189 +1,189 @@ /* * Copyright (c) 2003-2005 MontaVista Software, Inc. * Copyright (c) 2006-2011 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ /** * @file * Totem Single Ring Protocol * * depends on poll abstraction, POSIX, IPV4 */ #ifndef TOTEMPG_H_DEFINED #define TOTEMPG_H_DEFINED #ifdef __cplusplus extern "C" { #endif #include <netinet/in.h> #include "totem.h" #include <qb/qbloop.h> struct totempg_group { const void *group; size_t group_len; }; #define TOTEMPG_AGREED 0 #define TOTEMPG_SAFE 1 /** * Initialize the totem process groups abstraction */ extern int totempg_initialize ( qb_loop_t* poll_handle, struct totem_config *totem_config ); extern void totempg_finalize (void); extern int totempg_callback_token_create (void **handle_out, enum totem_callback_token_type type, int delete, int (*callback_fn) (enum totem_callback_token_type type, const void *), const void *data); extern void totempg_callback_token_destroy (void *handle); /** * Initialize a groups instance */ extern int totempg_groups_initialize ( void **instance, void (*deliver_fn) ( unsigned int nodeid, const void *msg, unsigned int msg_len, int endian_conversion_required), void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)); extern int totempg_groups_finalize (void *instance); extern int totempg_groups_join ( void *instance, const struct totempg_group *groups, size_t group_cnt); extern int totempg_groups_leave ( void *instance, const struct totempg_group *groups, size_t group_cnt); extern int totempg_groups_mcast_joined ( void *instance, const struct iovec *iovec, unsigned int iov_len, int guarantee); extern int totempg_groups_joined_reserve ( void *instance, const struct iovec *iovec, unsigned int iov_len); extern int totempg_groups_joined_release ( int msg_count); extern int totempg_groups_mcast_groups ( void *instance, int guarantee, const struct totempg_group *groups, size_t groups_cnt, const struct iovec *iovec, unsigned int
iov_len); extern int totempg_groups_send_ok_groups ( void *instance, const struct totempg_group *groups, size_t groups_cnt, const struct iovec *iovec, unsigned int iov_len); extern int totempg_ifaces_get ( unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count); extern void* totempg_get_stats (void); void totempg_event_signal (enum totem_event_type type, int value); extern const char *totempg_ifaces_print (unsigned int nodeid); extern unsigned int totempg_my_nodeid_get (void); extern int totempg_my_family_get (void); -extern int totempg_crypto_set (unsigned int type); +extern int totempg_crypto_set (const char *cipher_type, const char *hash_type); extern int totempg_ring_reenable (void); extern void totempg_service_ready_register ( void (*totem_service_ready) (void)); extern int totempg_member_add ( const struct totem_ip_address *member, int ring_no); extern int totempg_member_remove ( const struct totem_ip_address *member, int ring_no); enum totem_q_level { TOTEM_Q_LEVEL_LOW, TOTEM_Q_LEVEL_GOOD, TOTEM_Q_LEVEL_HIGH, TOTEM_Q_LEVEL_CRITICAL }; void totempg_check_q_level(void *instance); typedef void (*totem_queue_level_changed_fn) (enum totem_q_level level); extern void totempg_queue_level_register_callback (totem_queue_level_changed_fn); extern void totempg_threaded_mode_enable (void); #ifdef __cplusplus } #endif #endif /* TOTEMPG_H_DEFINED */