Page MenuHomeClusterLabs Projects

No OneTemporary

This file is larger than 256 KB, so syntax highlighting was skipped.
diff --git a/exec/logsys.c b/exec/logsys.c
index 4f97dfc4..77e8bd72 100644
--- a/exec/logsys.c
+++ b/exec/logsys.c
@@ -1,1628 +1,1622 @@
/*
* Copyright (c) 2002-2004 MontaVista Software, Inc.
* Copyright (c) 2006-2009 Red Hat, Inc.
*
* Author: Steven Dake (sdake@redhat.com)
* Author: Lon Hohberger (lhh@redhat.com)
* Author: Fabio M. Di Nitto (fdinitto@redhat.com)
*
* All rights reserved.
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <config.h>
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <stdarg.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#if defined(COROSYNC_LINUX)
#include <linux/un.h>
#endif
#if defined(COROSYNC_BSD) || defined(COROSYNC_DARWIN)
#include <sys/un.h>
#endif
#include <syslog.h>
#include <stdlib.h>
#include <pthread.h>
#include <limits.h>
#include <corosync/engine/logsys.h>
#define YIELD_AFTER_LOG_OPS 10
#define MIN(x,y) ((x) < (y) ? (x) : (y))
/*
* syslog prioritynames, facility names to value mapping
* Some C libraries build this in to their headers, but it is non-portable
* so logsys supplies its own version.
*/
struct syslog_names {
const char *c_name;
int c_val;
};
struct syslog_names prioritynames[] =
{
{ "alert", LOG_ALERT },
{ "crit", LOG_CRIT },
{ "debug", LOG_DEBUG },
{ "emerg", LOG_EMERG },
{ "err", LOG_ERR },
{ "error", LOG_ERR },
{ "info", LOG_INFO },
{ "notice", LOG_NOTICE },
{ "warning", LOG_WARNING },
{ NULL, -1 }
};
struct syslog_names facilitynames[] =
{
{ "auth", LOG_AUTH },
{ "cron", LOG_CRON },
{ "daemon", LOG_DAEMON },
{ "kern", LOG_KERN },
{ "lpr", LOG_LPR },
{ "mail", LOG_MAIL },
{ "news", LOG_NEWS },
{ "syslog", LOG_SYSLOG },
{ "user", LOG_USER },
{ "uucp", LOG_UUCP },
{ "local0", LOG_LOCAL0 },
{ "local1", LOG_LOCAL1 },
{ "local2", LOG_LOCAL2 },
{ "local3", LOG_LOCAL3 },
{ "local4", LOG_LOCAL4 },
{ "local5", LOG_LOCAL5 },
{ "local6", LOG_LOCAL6 },
{ "local7", LOG_LOCAL7 },
{ NULL, -1 }
};
/*
* These are not static so they can be read from the core file
*/
int *flt_data;
int flt_data_size;
#define COMBINE_BUFFER_SIZE 2048
/* values for logsys_logger init_status */
#define LOGSYS_LOGGER_INIT_DONE 0
#define LOGSYS_LOGGER_NEEDS_INIT 1
static int logsys_system_needs_init = LOGSYS_LOGGER_NEEDS_INIT;
static int logsys_after_log_ops_yield = 10;
/*
* need unlogical order to preserve 64bit alignment
*/
struct logsys_logger {
char subsys[LOGSYS_MAX_SUBSYS_NAMELEN]; /* subsystem name */
char *logfile; /* log to file */
FILE *logfile_fp; /* track file descriptor */
unsigned int mode; /* subsystem mode */
unsigned int debug; /* debug on|off */
int syslog_facility; /* facility */
int syslog_priority; /* priority */
int logfile_priority; /* priority to file */
int init_status; /* internal field to handle init queues
for subsystems */
};
/*
* operating global variables
*/
static struct logsys_logger logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT + 1];
static int wthread_active = 0;
static int wthread_should_exit = 0;
static pthread_mutex_t logsys_config_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int records_written = 1;
static pthread_t logsys_thread_id;
static pthread_cond_t logsys_cond;
static pthread_mutex_t logsys_cond_mutex;
#if defined(HAVE_PTHREAD_SPIN_LOCK)
static pthread_spinlock_t logsys_idx_spinlock;
#else
static pthread_mutex_t logsys_idx_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static unsigned int log_rec_idx;
static int logsys_buffer_full = 0;
static char *format_buffer=NULL;
static int log_requests_pending = 0;
static int log_requests_lost = 0;
void *logsys_rec_end;
#define FDHEAD_INDEX (flt_data_size)
#define FDTAIL_INDEX (flt_data_size + 1)
/* forward declarations */
static void logsys_close_logfile(int subsysid);
#ifdef LOGSYS_DEBUG
static char *decode_mode(int subsysid, char *buf, size_t buflen)
{
	/*
	 * Render a subsystem's mode bitmask into buf as a comma
	 * separated list ("FILE,STDERR,...") and return buf.
	 * FORK/THREADED are only shown for the main system slot.
	 */
	size_t used;

	memset(buf, 0, buflen);

	/*
	 * Pass the *remaining* space to snprintf: the original passed
	 * the full buflen at an offset, which could overflow buf.
	 */
	if (logsys_loggers[subsysid].mode & LOGSYS_MODE_OUTPUT_FILE)
		snprintf(buf+strlen(buf), buflen-strlen(buf), "FILE,");
	if (logsys_loggers[subsysid].mode & LOGSYS_MODE_OUTPUT_STDERR)
		snprintf(buf+strlen(buf), buflen-strlen(buf), "STDERR,");
	if (logsys_loggers[subsysid].mode & LOGSYS_MODE_OUTPUT_SYSLOG)
		snprintf(buf+strlen(buf), buflen-strlen(buf), "SYSLOG,");

	if (subsysid == LOGSYS_MAX_SUBSYS_COUNT) {
		if (logsys_loggers[subsysid].mode & LOGSYS_MODE_FORK)
			snprintf(buf+strlen(buf), buflen-strlen(buf), "FORK,");
		if (logsys_loggers[subsysid].mode & LOGSYS_MODE_THREADED)
			snprintf(buf+strlen(buf), buflen-strlen(buf), "THREADED,");
	}

	/*
	 * Strip the trailing comma, guarding the empty case (the
	 * original wrote to buf[-1] when no mode bit was set).
	 */
	used = strlen(buf);
	if (used > 0)
		buf[used - 1] = '\0';

	return buf;
}
static const char *decode_debug(int subsysid)
{
	/* "on"/"off" rendering of the per-subsystem debug flag. */
	return logsys_loggers[subsysid].debug ? "on" : "off";
}
static const char *decode_status(int subsysid)
{
	/* Human-readable init_status for the config dump. */
	return logsys_loggers[subsysid].init_status ?
		"NEEDS_INIT" : "INIT_DONE";
}
static void dump_subsys_config(int subsysid)
{
	/*
	 * Print one subsystem's full logging configuration to stderr
	 * (debug aid, compiled under LOGSYS_DEBUG only).
	 */
	char modebuf[1024];

	fprintf(stderr,
		"ID: %d\n"
		"subsys: %s\n"
		"logfile: %s\n"
		"logfile_fp: %p\n"
		"mode: %s\n"
		"debug: %s\n"
		"syslog_fac: %s\n"
		"syslog_pri: %s\n"
		"logfile_pri: %s\n"
		"init_status: %s\n",
		subsysid,
		logsys_loggers[subsysid].subsys,
		/* logfile is NULL when no file output is set; %s on NULL is UB */
		logsys_loggers[subsysid].logfile ?
			logsys_loggers[subsysid].logfile : "(none)",
		(void *)logsys_loggers[subsysid].logfile_fp,
		decode_mode(subsysid, modebuf, sizeof(modebuf)),
		decode_debug(subsysid),
		logsys_facility_name_get(logsys_loggers[subsysid].syslog_facility),
		logsys_priority_name_get(logsys_loggers[subsysid].syslog_priority),
		logsys_priority_name_get(logsys_loggers[subsysid].logfile_priority),
		decode_status(subsysid));
}
static void dump_full_config(void)
{
	/* Dump the configuration of every registered subsystem slot. */
	int slot;

	for (slot = 0; slot <= LOGSYS_MAX_SUBSYS_COUNT; slot++) {
		if (logsys_loggers[slot].subsys[0] != '\0') {
			dump_subsys_config(slot);
		}
	}
}
#endif
/*
* Helpers for _logsys_log_rec functionality
*/
static inline void my_memcpy_32bit (int *dest, const int *src, unsigned int words)
{
	/* Word-at-a-time copy between int-aligned buffers. */
	while (words-- > 0) {
		*dest++ = *src++;
	}
}
static inline void my_memcpy_8bit (char *dest, const char *src,
	unsigned int bytes)
{
	/* Byte-at-a-time copy for the unaligned tail of a record. */
	unsigned int pos = 0;

	while (pos < bytes) {
		dest[pos] = src[pos];
		pos++;
	}
}
#if defined(HAVE_PTHREAD_SPIN_LOCK)
/*
 * Serialize access to the flight-data head/tail indexes and the
 * pending-request counters.  A spinlock is preferred when available
 * since the critical sections are only a few instructions long.
 */
static void logsys_lock (void)
{
	pthread_spin_lock (&logsys_idx_spinlock);
}
static void logsys_unlock (void)
{
	pthread_spin_unlock (&logsys_idx_spinlock);
}
#else
/*
 * Mutex fallback for platforms without pthread spinlocks.
 */
static void logsys_lock (void)
{
	pthread_mutex_lock (&logsys_idx_mutex);
}
static void logsys_unlock (void)
{
	pthread_mutex_unlock (&logsys_idx_mutex);
}
#endif
/*
* Before any write operation, a reclaim on the buffer area must be executed
*/
static inline void records_reclaim (unsigned int idx, unsigned int words)
{
	/*
	 * Make room for a `words`-sized record about to be written at
	 * head position `idx`: once the ring has wrapped
	 * (logsys_buffer_full), drop whole records from the tail until
	 * the new record fits.  Caller does NOT hold the index lock.
	 */
	unsigned int should_reclaim;
	should_reclaim = 0;

	/* the first time the head would run past the end, the ring is full */
	if ((idx + words) >= flt_data_size) {
		logsys_buffer_full = 1;
	}
	if (logsys_buffer_full == 0) {
		return;
	}

	logsys_lock();
	/* would this write overrun the tail, directly or after wrap-around? */
	if (flt_data[FDTAIL_INDEX] > flt_data[FDHEAD_INDEX]) {
		if (idx + words >= flt_data[FDTAIL_INDEX]) {
			should_reclaim = 1;
		}
	} else {
		if ((idx + words) >= (flt_data[FDTAIL_INDEX] + flt_data_size)) {
			should_reclaim = 1;
		}
	}

	if (should_reclaim) {
		int words_needed = 0;

		words_needed = words + 1;
		do {
			unsigned int old_tail;

			/* flt_data[tail] holds the size word of the record at the tail */
			words_needed -= flt_data[flt_data[FDTAIL_INDEX]];
			old_tail = flt_data[FDTAIL_INDEX];
			flt_data[FDTAIL_INDEX] =
				(flt_data[FDTAIL_INDEX] +
				flt_data[flt_data[FDTAIL_INDEX]]) % (flt_data_size);
			/*
			 * the reader index pointed at a record we just
			 * dropped: count it lost and resync the reader
			 */
			if (log_rec_idx == old_tail) {
				log_requests_lost += 1;
				log_rec_idx = flt_data[FDTAIL_INDEX];
			}
		} while (words_needed > 0);
	}
	logsys_unlock();
}
/*
 * Advance helpers for the flight-data ring.  idx_word_step wraps an
 * index that moved one word past the end back to zero;
 * idx_buffer_step wraps an index that moved an arbitrary number of
 * words.  The trailing semicolon after `while (0)` is dropped so the
 * do/while(0) idiom works: call sites supply the semicolon, and the
 * macros expand to a single statement inside unbraced if/else.
 */
#define idx_word_step(idx) \
do { \
	if (idx > (flt_data_size - 1)) { \
		idx = 0; \
	} \
} while (0)

#define idx_buffer_step(idx) \
do { \
	if (idx > (flt_data_size - 1)) { \
		idx = ((idx) % (flt_data_size)); \
	} \
} while (0)
/*
* Internal threaded logging implementation
*/
static inline int strcpy_cutoff (char *dest, const char *src, size_t cutoff,
	size_t buf_len)
{
	/*
	 * Copy src into dest, padding with spaces or chopping so that
	 * exactly `cutoff` characters are produced (cutoff == 0 means
	 * "natural length").  Never writes more than buf_len bytes
	 * including the NUL terminator.  Returns the number of
	 * characters written, excluding the NUL.
	 */
	size_t len = strlen (src);

	if (buf_len <= 1) {
		/*
		 * No room for payload.  Terminate only when one byte is
		 * available: the original wrote dest[0] when buf_len was
		 * 0 (overflowing a zero-sized buffer) and left dest
		 * unterminated when buf_len was 1.
		 */
		if (buf_len == 1)
			dest[0] = '\0';
		return (0);
	}

	if (cutoff == 0) {
		cutoff = len;
	}
	if (cutoff > buf_len - 1) {
		cutoff = buf_len - 1;
	}
	if (len > cutoff) {
		len = cutoff;
	}

	memcpy (dest, src, len);
	memset (dest + len, ' ', cutoff - len);
	dest[cutoff] = '\0';

	return (cutoff);
}
/*
* %s SUBSYSTEM
* %n FUNCTION NAME
* %f FILENAME
* %l FILELINE
* %p PRIORITY
* %t TIMESTAMP
* %b BUFFER
*
* any number between % and character specify field length to pad or chop
*/
static void log_printf_to_logs (
- const char *subsys,
+ unsigned int rec_ident,
const char *file_name,
const char *function_name,
int file_line,
- unsigned int level,
- unsigned int rec_ident,
const char *buffer)
{
char normal_output_buffer[COMBINE_BUFFER_SIZE];
char syslog_output_buffer[COMBINE_BUFFER_SIZE];
char char_time[128];
char line_no[30];
unsigned int format_buffer_idx = 0;
unsigned int normal_output_buffer_idx = 0;
unsigned int syslog_output_buffer_idx = 0;
struct timeval tv;
size_t cutoff;
unsigned int normal_len, syslog_len;
int subsysid;
+ unsigned int level;
int c;
- if (rec_ident != LOGSYS_RECID_LOG) {
+ if (LOGSYS_DECODE_RECID(rec_ident) != LOGSYS_RECID_LOG) {
return;
}
- subsysid = _logsys_config_subsys_get(subsys);
- if (subsysid <= - 1) {
- return;
- }
+ subsysid = LOGSYS_DECODE_SUBSYSID(rec_ident);
+ level = LOGSYS_DECODE_LEVEL(rec_ident);
while ((c = format_buffer[format_buffer_idx])) {
cutoff = 0;
if (c != '%') {
normal_output_buffer[normal_output_buffer_idx++] = c;
syslog_output_buffer[syslog_output_buffer_idx++] = c;
format_buffer_idx++;
} else {
const char *normal_p, *syslog_p;
format_buffer_idx += 1;
if (isdigit (format_buffer[format_buffer_idx])) {
cutoff = atoi (&format_buffer[format_buffer_idx]);
}
while (isdigit (format_buffer[format_buffer_idx])) {
format_buffer_idx += 1;
}
switch (format_buffer[format_buffer_idx]) {
case 's':
- normal_p = subsys;
- syslog_p = subsys;
+ normal_p = logsys_loggers[subsysid].subsys;
+ syslog_p = logsys_loggers[subsysid].subsys;
break;
case 'n':
normal_p = function_name;
syslog_p = function_name;
break;
case 'f':
normal_p = file_name;
syslog_p = file_name;
break;
case 'l':
sprintf (line_no, "%d", file_line);
normal_p = line_no;
syslog_p = line_no;
break;
case 't':
gettimeofday (&tv, NULL);
(void)strftime (char_time, sizeof (char_time), "%b %d %T", localtime ((time_t *)&tv.tv_sec));
normal_p = char_time;
/*
* syslog does timestamping on its own.
* also strip extra space in case.
*/
syslog_p = "";
break;
case 'b':
normal_p = buffer;
syslog_p = buffer;
break;
case 'p':
normal_p = logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT].subsys;
syslog_p = "";
break;
default:
normal_p = "";
syslog_p = "";
break;
}
normal_len = strcpy_cutoff (normal_output_buffer + normal_output_buffer_idx,
normal_p, cutoff,
(sizeof (normal_output_buffer)
- normal_output_buffer_idx));
normal_output_buffer_idx += normal_len;
syslog_len = strcpy_cutoff (syslog_output_buffer + syslog_output_buffer_idx,
syslog_p, cutoff,
(sizeof (syslog_output_buffer)
- syslog_output_buffer_idx));
syslog_output_buffer_idx += syslog_len;
format_buffer_idx += 1;
}
if ((normal_output_buffer_idx == sizeof (normal_output_buffer)) ||
(syslog_output_buffer_idx == sizeof (syslog_output_buffer))) {
break;
}
}
normal_output_buffer[normal_output_buffer_idx] = '\0';
syslog_output_buffer[syslog_output_buffer_idx] = '\0';
/*
* Output to syslog
*/
if ((logsys_loggers[subsysid].mode & LOGSYS_MODE_OUTPUT_SYSLOG) &&
((level <= logsys_loggers[subsysid].syslog_priority) ||
(logsys_loggers[subsysid].debug != 0))) {
syslog (level | logsys_loggers[subsysid].syslog_facility, "%s", syslog_output_buffer);
}
/*
* Terminate string with \n \0
*/
normal_output_buffer[normal_output_buffer_idx++] = '\n';
normal_output_buffer[normal_output_buffer_idx] = '\0';
/*
* Output to configured file
*/
if (((logsys_loggers[subsysid].mode & LOGSYS_MODE_OUTPUT_FILE) &&
(logsys_loggers[subsysid].logfile_fp != NULL)) &&
((level <= logsys_loggers[subsysid].logfile_priority) ||
(logsys_loggers[subsysid].debug != 0))) {
/*
* Output to a file
*/
if ((fwrite (normal_output_buffer, strlen (normal_output_buffer), 1,
logsys_loggers[subsysid].logfile_fp) < 1) ||
(fflush (logsys_loggers[subsysid].logfile_fp) == EOF)) {
char tmpbuffer[1024];
/*
* if we are here, it's bad.. it's really really bad.
* Best thing would be to light a candle in a church
* and pray.
*/
snprintf(tmpbuffer, sizeof(tmpbuffer),
"LOGSYS EMERGENCY: %s Unable to write to %s.",
logsys_loggers[subsysid].subsys,
logsys_loggers[subsysid].logfile);
pthread_mutex_lock (&logsys_config_mutex);
logsys_close_logfile(subsysid);
logsys_loggers[subsysid].mode &= ~LOGSYS_MODE_OUTPUT_FILE;
pthread_mutex_unlock (&logsys_config_mutex);
- log_printf_to_logs(logsys_loggers[subsysid].subsys,
+ log_printf_to_logs(
+ LOGSYS_ENCODE_RECID(
+ LOGSYS_LEVEL_EMERG,
+ subsysid,
+ LOGSYS_RECID_LOG),
__FILE__, __FUNCTION__, __LINE__,
- LOGSYS_LEVEL_EMERG, 0, tmpbuffer);
+ tmpbuffer);
}
}
/*
* Output to stderr
*/
if ((logsys_loggers[subsysid].mode & LOGSYS_MODE_OUTPUT_STDERR) &&
((level <= logsys_loggers[subsysid].logfile_priority) ||
(logsys_loggers[subsysid].debug != 0))) {
if (write (STDERR_FILENO, normal_output_buffer, strlen (normal_output_buffer)) < 0) {
char tmpbuffer[1024];
/*
* if we are here, it's bad.. it's really really bad.
* Best thing would be to light 20 candles for each saint
* in the calendar and pray a lot...
*/
pthread_mutex_lock (&logsys_config_mutex);
logsys_loggers[subsysid].mode &= ~LOGSYS_MODE_OUTPUT_STDERR;
pthread_mutex_unlock (&logsys_config_mutex);
snprintf(tmpbuffer, sizeof(tmpbuffer),
"LOGSYS EMERGENCY: %s Unable to write to STDERR.",
logsys_loggers[subsysid].subsys);
- log_printf_to_logs(logsys_loggers[subsysid].subsys,
- __FILE__, __FUNCTION__, __LINE__,
- LOGSYS_LEVEL_EMERG, 0, tmpbuffer);
+ log_printf_to_logs(
+ LOGSYS_ENCODE_RECID(
+ LOGSYS_LEVEL_EMERG,
+ subsysid,
+ LOGSYS_RECID_LOG),
+ __FILE__, __FUNCTION__, __LINE__,
+ tmpbuffer);
}
}
}
static void record_print (const char *buf)
{
	/*
	 * Decode a copied-out flight-data record and hand it to
	 * log_printf_to_logs.  Record layout (post-image, no separate
	 * level word -- the level lives inside rec_ident):
	 *   [0] record size in words  [1] rec_ident  [2] file line
	 *   [3] sequence number, then (length, payload) pairs.
	 */
	const int *buf_uint32t = (const int *)buf;
	unsigned int rec_size = buf_uint32t[0];
	unsigned int rec_ident = buf_uint32t[1];
	unsigned int file_line = buf_uint32t[2];
	unsigned int i;
	unsigned int words_processed;
	unsigned int arg_size_idx;
	const void *arguments[64];
	unsigned int arg_count;

	arg_size_idx = 4;
	words_processed = 4;
	arg_count = 0;

	for (i = 0; words_processed < rec_size; i++) {
		arguments[arg_count++] = &buf_uint32t[arg_size_idx + 1];
		arg_size_idx += buf_uint32t[arg_size_idx] + 1;
		/*
		 * NOTE(review): this reads the length word at the *new*
		 * arg_size_idx, i.e. the next argument's length --
		 * inherited from the original; confirm against the
		 * record writer before changing.
		 */
		words_processed += buf_uint32t[arg_size_idx] + 1;
	}

	/*
	 * (char *)arguments[0] -> subsystem
	 * (char *)arguments[1] -> file_name
	 * (char *)arguments[2] -> function_name
	 * (char *)arguments[3] -> message
	 */
	log_printf_to_logs (
		rec_ident,
		(char *)arguments[1],
		(char *)arguments[2],
		file_line,
		(char *)arguments[3]);
}
static int record_read (char *buf, int rec_idx, int *log_msg) {
unsigned int rec_size;
- unsigned int level;
unsigned int rec_ident;
int firstcopy, secondcopy;
rec_size = flt_data[rec_idx];
- level = flt_data[(rec_idx + 1) % flt_data_size];
- rec_ident = flt_data[(rec_idx + 2) % flt_data_size];
+ rec_ident = flt_data[(rec_idx + 1) % flt_data_size];
/*
* Not a log record
*/
- if (rec_ident != LOGSYS_RECID_LOG) {
+ if (LOGSYS_DECODE_RECID(rec_ident) != LOGSYS_RECID_LOG) {
*log_msg = 0;
return ((rec_idx + rec_size) % flt_data_size);
}
/*
* A log record
*/
*log_msg = 1;
firstcopy = rec_size;
secondcopy = 0;
if (firstcopy + rec_idx > flt_data_size) {
firstcopy = flt_data_size - rec_idx;
secondcopy -= firstcopy - rec_size;
}
memcpy (&buf[0], &flt_data[rec_idx], firstcopy << 2);
if (secondcopy) {
memcpy (&buf[(firstcopy << 2)], &flt_data[0], secondcopy << 2);
}
return ((rec_idx + rec_size) % flt_data_size);
}
/*
 * Wake the logging worker thread, if any.  No-op when logging runs
 * unthreaded.
 */
static inline void wthread_signal (void)
{
	if (wthread_active == 0) {
		return;
	}
	pthread_mutex_lock (&logsys_cond_mutex);
	pthread_cond_signal (&logsys_cond);
	pthread_mutex_unlock (&logsys_cond_mutex);
}
/*
 * Block until wthread_signal() posts new work.  NOTE(review): plain
 * pthread_cond_wait with no predicate loop -- spurious wakeups are
 * harmless here because the caller re-scans the pending queue.
 */
static inline void wthread_wait (void)
{
	pthread_mutex_lock (&logsys_cond_mutex);
	pthread_cond_wait (&logsys_cond, &logsys_cond_mutex);
	pthread_mutex_unlock (&logsys_cond_mutex);
}
/*
 * Like wthread_wait() but the caller already holds logsys_cond_mutex
 * (used by wthread_create during startup).  The mutex is released
 * before returning.
 */
static inline void wthread_wait_locked (void)
{
	pthread_cond_wait (&logsys_cond, &logsys_cond_mutex);
	pthread_mutex_unlock (&logsys_cond_mutex);
}
static void *logsys_worker_thread (void *data) __attribute__((__noreturn__));
static void *logsys_worker_thread (void *data)
{
	/*
	 * Background thread: drain the flight-data ring and print every
	 * pending log record.  Exits only via pthread_exit when
	 * wthread_should_exit is set.
	 */
	int log_msg;
	char buf[COMBINE_BUFFER_SIZE];
	int yield_counter;

	/*
	 * Signal wthread_create that the initialization process may continue
	 */
	wthread_signal ();
	logsys_lock();
	log_rec_idx = flt_data[FDTAIL_INDEX];
	logsys_unlock();

	for (;;) {
		wthread_wait ();
		/*
		 * Reset once per wakeup.  The original declared this
		 * inside the drain loop, so it restarted at 1 for every
		 * record and the sched_yield() below never triggered.
		 */
		yield_counter = 1;

		/*
		 * Process any pending log messages here
		 */
		for (;;) {
			logsys_lock();
			if (log_requests_lost > 0) {
				printf ("lost %d log requests\n", log_requests_lost);
				log_requests_pending -= log_requests_lost;
				log_requests_lost = 0;
			}
			if (log_requests_pending == 0) {
				logsys_unlock();
				break;
			}
			log_rec_idx = record_read (buf, log_rec_idx, &log_msg);
			if (log_msg) {
				log_requests_pending -= 1;
			}
			logsys_unlock();

			/*
			 * print the stored buffer
			 */
			if (log_msg) {
				record_print (buf);
				/* yield periodically so we don't starve writers */
				if (yield_counter++ > logsys_after_log_ops_yield) {
					yield_counter = 0;
					sched_yield ();
				}
			}
		}

		if (wthread_should_exit) {
			pthread_exit (NULL);
		}
	}
}
static void wthread_create (void)
{
	/*
	 * Start the logging worker thread (idempotent) and wait for it
	 * to signal that it is running.
	 */
	int res;

	if (wthread_active) {
		return;
	}

	wthread_active = 1;

	pthread_mutex_init (&logsys_cond_mutex, NULL);
	pthread_cond_init (&logsys_cond, NULL);
	pthread_mutex_lock (&logsys_cond_mutex);

	res = pthread_create (&logsys_thread_id, NULL,
		logsys_worker_thread, NULL);
	if (res != 0) {
		/*
		 * The original ignored res: on pthread_create failure
		 * wthread_wait_locked would block forever on a condition
		 * nobody will ever signal.  Fall back to unthreaded
		 * logging instead.
		 */
		wthread_active = 0;
		pthread_mutex_unlock (&logsys_cond_mutex);
		return;
	}

	/*
	 * Wait for thread to be started
	 */
	wthread_wait_locked ();
}
static int _logsys_config_subsys_get_unlocked (const char *subsys)
{
	/*
	 * Look up a subsystem slot by name; NULL names map to the main
	 * system slot.  Returns the slot id or -1 if not found.
	 *
	 * Caller holds logsys_config_mutex.  The original released that
	 * mutex here on a successful match -- a bug: every caller also
	 * unlocks afterwards (double unlock, undefined behavior) and
	 * callers that keep working on logsys_loggers lost their
	 * protection.  An "_unlocked" helper must not touch the lock.
	 */
	unsigned int i;

	if (!subsys) {
		return LOGSYS_MAX_SUBSYS_COUNT;
	}

	for (i = 0; i <= LOGSYS_MAX_SUBSYS_COUNT; i++) {
		if (strcmp (logsys_loggers[i].subsys, subsys) == 0) {
			return i;
		}
	}

	return (-1);
}
/*
 * (Re)open the syslog connection so the main system's current name
 * and facility take effect.
 */
static void syslog_facility_reconf (void)
{
	closelog();
	openlog(logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT].subsys,
		LOG_CONS|LOG_PID,
		logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT].syslog_facility);
}
/*
* this is always invoked within the mutex, so it's safe to parse the
* whole thing as we need.
*/
static void logsys_close_logfile (
	int subsysid)
{
	/*
	 * Detach subsysid from its logfile.  The FILE* and the path
	 * string may be shared by several subsystems logging to the
	 * same file, so only the last user performs the fclose/free.
	 * Caller must hold logsys_config_mutex.
	 */
	int i;

	if ((logsys_loggers[subsysid].logfile_fp == NULL) &&
	    (logsys_loggers[subsysid].logfile == NULL)) {
		return;
	}

	/*
	 * if there is another subsystem or system using the same fp,
	 * then we clean our own structs, but we can't close the file
	 * as it is in use by somebody else.
	 * Only the last users will be allowed to perform the fclose.
	 */
	for (i = 0; i <= LOGSYS_MAX_SUBSYS_COUNT; i++) {
		if ((logsys_loggers[i].logfile_fp == logsys_loggers[subsysid].logfile_fp) &&
		    (i != subsysid)) {
			logsys_loggers[subsysid].logfile = NULL;
			logsys_loggers[subsysid].logfile_fp = NULL;
			return;
		}
	}

	/*
	 * if we are here, we are the last users of that fp, so we can safely
	 * close it and release the shared path string.
	 */
	fclose (logsys_loggers[subsysid].logfile_fp);
	logsys_loggers[subsysid].logfile_fp = NULL;
	free (logsys_loggers[subsysid].logfile);
	logsys_loggers[subsysid].logfile = NULL;
}
/*
* we need a version that can work when somebody else is already
* holding a config mutex lock or we will never get out of here
*/
static int logsys_config_file_set_unlocked (
	int subsysid,
	const char **error_string,
	const char *file)
{
	/*
	 * Point subsysid's file output at `file` (NULL detaches any
	 * current file).  If another subsystem already logs to the same
	 * path, its FILE* and path string are shared.  On failure -1 is
	 * returned and *error_string points at a static buffer.
	 * Caller must hold logsys_config_mutex.
	 */
	static char error_string_response[512];
	int i;

	logsys_close_logfile(subsysid);

	if ((file == NULL) ||
	    (strcmp(logsys_loggers[subsysid].subsys, "") == 0)) {
		return (0);
	}

	if (strlen(file) >= PATH_MAX) {
		/* message typo fixed: "exceed ... lenght" */
		snprintf (error_string_response,
			sizeof(error_string_response),
			"%s: logfile name exceeds maximum system filename length\n",
			logsys_loggers[subsysid].subsys);
		*error_string = error_string_response;
		return (-1);
	}

	/* share the handle of a subsystem already logging to this path */
	for (i = 0; i <= LOGSYS_MAX_SUBSYS_COUNT; i++) {
		if ((logsys_loggers[i].logfile != NULL) &&
		    (strcmp (logsys_loggers[i].logfile, file) == 0) &&
		    (i != subsysid)) {
			logsys_loggers[subsysid].logfile =
				logsys_loggers[i].logfile;
			logsys_loggers[subsysid].logfile_fp =
				logsys_loggers[i].logfile_fp;
			return (0);
		}
	}

	logsys_loggers[subsysid].logfile = strdup(file);
	if (logsys_loggers[subsysid].logfile == NULL) {
		snprintf (error_string_response,
			sizeof(error_string_response),
			"Unable to allocate memory for logfile '%s'\n",
			file);
		*error_string = error_string_response;
		return (-1);
	}

	logsys_loggers[subsysid].logfile_fp = fopen (file, "a+");
	if (logsys_loggers[subsysid].logfile_fp == NULL) {
		free(logsys_loggers[subsysid].logfile);
		logsys_loggers[subsysid].logfile = NULL;
		snprintf (error_string_response,
			sizeof(error_string_response),
			"Can't open logfile '%s' for reason (%s).\n",
			file, strerror (errno));
		*error_string = error_string_response;
		return (-1);
	}

	return (0);
}
static void logsys_subsys_init (
	const char *subsys,
	int subsysid)
{
	/*
	 * Bind a subsystem name to slot subsysid.  Before the main
	 * system is configured the slot is only queued for init;
	 * afterwards it inherits a full copy of the main system's
	 * settings.  Caller must hold logsys_config_mutex.
	 */
	if (logsys_system_needs_init == LOGSYS_LOGGER_NEEDS_INIT) {
		logsys_loggers[subsysid].init_status =
			LOGSYS_LOGGER_NEEDS_INIT;
	} else {
		memcpy(&logsys_loggers[subsysid],
		       &logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT],
		       sizeof(logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT]));
		logsys_loggers[subsysid].init_status =
			LOGSYS_LOGGER_INIT_DONE;
	}
	/*
	 * snprintf guarantees NUL termination; the original strncpy
	 * left subsys unterminated when the name filled the buffer
	 * exactly.
	 */
	snprintf (logsys_loggers[subsysid].subsys,
		LOGSYS_MAX_SUBSYS_NAMELEN, "%s", subsys);
}
/*
* Internal API - exported
*/
/*
 * Configure the "main system" logger slot (LOGSYS_MAX_SUBSYS_COUNT)
 * and finish initialization of any subsystem registered before setup.
 * Returns 0 on success, -1 on an invalid name or logfile failure.
 */
int _logsys_system_setup(
	const char *mainsystem,
	unsigned int mode,
	unsigned int debug,
	const char *logfile,
	int logfile_priority,
	int syslog_facility,
	int syslog_priority)
{
	int i;
	const char *errstr;
	char tempsubsys[LOGSYS_MAX_SUBSYS_NAMELEN];

	/* name must fit the fixed-size subsys field */
	if ((mainsystem == NULL) ||
	    (strlen(mainsystem) >= LOGSYS_MAX_SUBSYS_NAMELEN)) {
		return -1;
	}

	i = LOGSYS_MAX_SUBSYS_COUNT;

	pthread_mutex_lock (&logsys_config_mutex);

	snprintf(logsys_loggers[i].subsys,
		LOGSYS_MAX_SUBSYS_NAMELEN,
		"%s", mainsystem);

	logsys_loggers[i].mode = mode;

	logsys_loggers[i].debug = debug;

	/* NOTE(review): errstr from a failed file set is not surfaced to callers */
	if (logsys_config_file_set_unlocked (i, &errstr, logfile) < 0) {
		pthread_mutex_unlock (&logsys_config_mutex);
		return (-1);
	}
	logsys_loggers[i].logfile_priority = logfile_priority;

	logsys_loggers[i].syslog_facility = syslog_facility;
	logsys_loggers[i].syslog_priority = syslog_priority;
	syslog_facility_reconf();

	logsys_loggers[i].init_status = LOGSYS_LOGGER_INIT_DONE;

	logsys_system_needs_init = LOGSYS_LOGGER_INIT_DONE;

	/*
	 * Re-init subsystems that registered before the main system was
	 * configured so they inherit the settings above.  tempsubsys is
	 * needed because logsys_subsys_init memcpy()s over the slot.
	 */
	for (i = 0; i < LOGSYS_MAX_SUBSYS_COUNT; i++) {
		if ((strcmp (logsys_loggers[i].subsys, "") != 0) &&
		    (logsys_loggers[i].init_status ==
		     LOGSYS_LOGGER_NEEDS_INIT)) {
			strncpy (tempsubsys, logsys_loggers[i].subsys,
				LOGSYS_MAX_SUBSYS_NAMELEN);
			logsys_subsys_init(tempsubsys, i);
		}
	}

	pthread_mutex_unlock (&logsys_config_mutex);

	return (0);
}
/*
 * Register (or look up) a subsystem by name and return its slot id.
 * NOTE(review): declared unsigned int yet returns -1 on a bad name or
 * a full table; callers must compare against (unsigned)-1.  An int
 * return would be cleaner but would change the public interface.
 */
unsigned int _logsys_subsys_create (const char *subsys)
{
	int i;

	if ((subsys == NULL) ||
	    (strlen(subsys) >= LOGSYS_MAX_SUBSYS_NAMELEN)) {
		return -1;
	}

	pthread_mutex_lock (&logsys_config_mutex);

	/* already registered? return the existing slot */
	i = _logsys_config_subsys_get_unlocked (subsys);
	if ((i > -1) && (i < LOGSYS_MAX_SUBSYS_COUNT)) {
		pthread_mutex_unlock (&logsys_config_mutex);
		return i;
	}

	/* otherwise claim the first free slot */
	for (i = 0; i < LOGSYS_MAX_SUBSYS_COUNT; i++) {
		if (strcmp (logsys_loggers[i].subsys, "") == 0) {
			logsys_subsys_init(subsys, i);
			break;
		}
	}

	if (i >= LOGSYS_MAX_SUBSYS_COUNT) {
		i = -1;
	}

	pthread_mutex_unlock (&logsys_config_mutex);
	return i;
}
/*
 * Start the background logging thread, but only when the mode is
 * THREADED and no fork is pending (logsys_fork_completed clears the
 * FORK bit and calls back in).  Always returns 0.
 * NOTE(review): logsys_atexit is not declared in this file --
 * presumably the public logsys header provides it; confirm.
 */
int _logsys_wthread_create (void)
{
	if (((logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT].mode & LOGSYS_MODE_FORK) == 0) &&
	    ((logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT].mode & LOGSYS_MODE_THREADED) != 0)) {
		wthread_create();
		atexit (logsys_atexit);
	}
	return (0);
}
int _logsys_rec_init (unsigned int size)
{
	/*
	 * Allocate the flight-data ring: `size` words of payload plus
	 * two trailing words used as the head and tail indexes
	 * (FDHEAD_INDEX / FDTAIL_INDEX).  Returns 0, or -1 on OOM.
	 *
	 * First record starts at zero
	 * Last record ends at zero
	 */
	flt_data = malloc ((size + 2) * sizeof (unsigned int));
	if (flt_data == NULL) {
		return (-1);
	}
	flt_data_size = size;

	flt_data[FDHEAD_INDEX] = 0;
	flt_data[FDTAIL_INDEX] = 0;

#if defined(HAVE_PTHREAD_SPIN_LOCK)
	pthread_spin_init (&logsys_idx_spinlock, 0);
#endif

	return (0);
}
/*
* u32 RECORD SIZE
* u32 record ident
* u32 arg count
* u32 file line
* u32 subsys length
* buffer null terminated subsys
* u32 filename length
* buffer null terminated filename
* u32 filename length
* buffer null terminated function
* u32 arg1 length
* buffer arg1
* ... repeats length & arg
*/
void _logsys_log_rec (
- int subsysid,
+ unsigned int rec_ident,
const char *function_name,
const char *file_name,
int file_line,
- unsigned int level,
- unsigned int rec_ident,
...)
{
va_list ap;
const void *buf_args[64];
unsigned int buf_len[64];
unsigned int i;
unsigned int idx;
unsigned int arguments = 0;
unsigned int record_reclaim_size;
unsigned int index_start;
int words_written;
+ int subsysid;
record_reclaim_size = 0;
+ subsysid = LOGSYS_DECODE_SUBSYSID(rec_ident);
+
/*
* Decode VA Args
*/
- va_start (ap, rec_ident);
+ va_start (ap, file_line);
arguments = 3;
for (;;) {
buf_args[arguments] = va_arg (ap, void *);
if (buf_args[arguments] == LOGSYS_REC_END) {
break;
}
buf_len[arguments] = va_arg (ap, int);
record_reclaim_size += ((buf_len[arguments] + 3) >> 2) + 1;
arguments++;
if (arguments >= 64) {
break;
}
}
va_end (ap);
/*
* Encode logsys subsystem identity, filename, and function
*/
buf_args[0] = logsys_loggers[subsysid].subsys;
buf_len[0] = strlen (logsys_loggers[subsysid].subsys) + 1;
buf_args[1] = file_name;
buf_len[1] = strlen (file_name) + 1;
buf_args[2] = function_name;
buf_len[2] = strlen (function_name) + 1;
for (i = 0; i < 3; i++) {
record_reclaim_size += ((buf_len[i] + 3) >> 2) + 1;
}
idx = flt_data[FDHEAD_INDEX];
index_start = idx;
/*
* Reclaim data needed for record including 4 words for the header
*/
- records_reclaim (idx, record_reclaim_size + 5);
+ records_reclaim (idx, record_reclaim_size + 4);
/*
* Write record size of zero and rest of header information
*/
flt_data[idx++] = 0;
idx_word_step(idx);
- flt_data[idx++] = level;
- idx_word_step(idx);
-
flt_data[idx++] = rec_ident;
idx_word_step(idx);
flt_data[idx++] = file_line;
idx_word_step(idx);
flt_data[idx++] = records_written;
idx_word_step(idx);
/*
* Encode all of the arguments into the log message
*/
for (i = 0; i < arguments; i++) {
unsigned int bytes;
unsigned int full_words;
unsigned int total_words;
bytes = buf_len[i];
full_words = bytes >> 2;
total_words = (bytes + 3) >> 2;
flt_data[idx++] = total_words;
idx_word_step(idx);
/*
* determine if this is a wrapped write or normal write
*/
if (idx + total_words < flt_data_size) {
/*
* dont need to wrap buffer
*/
my_memcpy_32bit (&flt_data[idx], buf_args[i], full_words);
if (bytes % 4) {
my_memcpy_8bit ((char *)&flt_data[idx + full_words],
((const char *)buf_args[i]) + (full_words << 2), bytes % 4);
}
} else {
/*
* need to wrap buffer
*/
unsigned int first;
unsigned int second;
first = flt_data_size - idx;
if (first > full_words) {
first = full_words;
}
second = full_words - first;
my_memcpy_32bit (&flt_data[idx],
(const int *)buf_args[i], first);
my_memcpy_32bit (&flt_data[0],
(const int *)(((const unsigned char *)buf_args[i]) + (first << 2)),
second);
if (bytes % 4) {
my_memcpy_8bit ((char *)&flt_data[0 + second],
((const char *)buf_args[i]) + (full_words << 2), bytes % 4);
}
}
idx += total_words;
idx_buffer_step (idx);
}
words_written = idx - index_start;
if (words_written < 0) {
words_written += flt_data_size;
}
/*
* Commit the write of the record size now that the full record
* is in the memory buffer
*/
flt_data[index_start] = words_written;
/*
* If the index of the current head equals the current log_rec_idx,
* and this is not a log_printf operation, set the log_rec_idx to
* the new head position and commit the new head.
*/
logsys_lock();
- if (rec_ident == LOGSYS_RECID_LOG) {
+ if (LOGSYS_DECODE_RECID(rec_ident) == LOGSYS_RECID_LOG) {
log_requests_pending += 1;
}
if (log_requests_pending == 0) {
log_rec_idx = idx;
}
flt_data[FDHEAD_INDEX] = idx;
logsys_unlock();
records_written++;
}
void _logsys_log_vprintf (
- int subsysid,
+ unsigned int rec_ident,
const char *function_name,
const char *file_name,
int file_line,
- unsigned int level,
- unsigned int rec_ident,
const char *format,
va_list ap)
{
char logsys_print_buffer[COMBINE_BUFFER_SIZE];
unsigned int len;
+ unsigned int level;
+ int subsysid;
- if (subsysid <= -1) {
- subsysid = LOGSYS_MAX_SUBSYS_COUNT;
- }
+ subsysid = LOGSYS_DECODE_SUBSYSID(rec_ident);
+ level = LOGSYS_DECODE_LEVEL(rec_ident);
if ((level > logsys_loggers[subsysid].syslog_priority) &&
(level > logsys_loggers[subsysid].logfile_priority) &&
(logsys_loggers[subsysid].debug == 0)) {
return;
}
len = vsprintf (logsys_print_buffer, format, ap);
if (logsys_print_buffer[len - 1] == '\n') {
logsys_print_buffer[len - 1] = '\0';
len -= 1;
}
/*
* Create a log record
*/
- _logsys_log_rec (subsysid,
+ _logsys_log_rec (
+ rec_ident,
function_name,
file_name,
file_line,
- level,
- rec_ident,
logsys_print_buffer, len + 1,
LOGSYS_REC_END);
if ((logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT].mode & LOGSYS_MODE_THREADED) == 0) {
/*
* Output (and block) if the log mode is not threaded otherwise
* expect the worker thread to output the log data once signaled
*/
- log_printf_to_logs (logsys_loggers[subsysid].subsys,
- file_name, function_name, file_line, level, rec_ident,
- logsys_print_buffer);
+ log_printf_to_logs (rec_ident,
+ file_name, function_name, file_line,
+ logsys_print_buffer);
} else {
/*
* Signal worker thread to display logging output
*/
wthread_signal ();
}
}
void _logsys_log_printf (
	unsigned int rec_ident,
	const char *function_name,
	const char *file_name,
	int file_line,
	const char *format,
	...)
{
	/*
	 * printf-style front end: forwards the va_list to
	 * _logsys_log_vprintf.  (Resolves the unified-diff residue to
	 * its post-image: rec_ident replaces the old subsysid/level/
	 * rec_ident triple.)
	 */
	va_list ap;

	va_start (ap, format);
	_logsys_log_vprintf (rec_ident, function_name, file_name, file_line,
		format, ap);
	va_end (ap);
}
int _logsys_config_subsys_get (const char *subsys)
{
	/* Thread-safe lookup of a subsystem slot id by name. */
	int slot;

	pthread_mutex_lock (&logsys_config_mutex);
	slot = _logsys_config_subsys_get_unlocked (subsys);
	pthread_mutex_unlock (&logsys_config_mutex);

	return slot;
}
/*
* External Configuration and Initialization API
*/
/*
 * Called after the daemon's fork() completes: clear the FORK hold on
 * the main system and (re)try starting the logging worker thread.
 */
void logsys_fork_completed (void)
{
	logsys_loggers[LOGSYS_MAX_SUBSYS_COUNT].mode &= ~LOGSYS_MODE_FORK;
	_logsys_wthread_create ();
}
unsigned int logsys_config_mode_set (const char *subsys, unsigned int mode)
{
	/*
	 * Set the output mode for one subsystem, or for every slot when
	 * subsys is NULL.  Returns 0 on success, the (negative) lookup
	 * result when the subsystem is unknown.
	 */
	int ret;

	pthread_mutex_lock (&logsys_config_mutex);
	if (subsys == NULL) {
		int slot;

		for (slot = 0; slot <= LOGSYS_MAX_SUBSYS_COUNT; slot++) {
			logsys_loggers[slot].mode = mode;
		}
		ret = 0;
	} else {
		ret = _logsys_config_subsys_get_unlocked (subsys);
		if (ret >= 0) {
			logsys_loggers[ret].mode = mode;
			ret = 0;
		}
	}
	pthread_mutex_unlock (&logsys_config_mutex);

	return ret;
}
/*
 * Fetch the mode bits configured for @subsys.
 * Propagates the negative lookup error when the subsystem is unknown.
 */
unsigned int logsys_config_mode_get (const char *subsys)
{
	int subsys_index = _logsys_config_subsys_get (subsys);

	return (subsys_index < 0) ? subsys_index : logsys_loggers[subsys_index].mode;
}
/*
 * Point one subsystem (or, with subsys == NULL, every logger slot) at
 * the given log file.  The real work happens in
 * logsys_config_file_set_unlocked(); this wrapper handles the mutex
 * and the name lookup.  On the all-subsystems path the loop stops at
 * the first failure and returns that error; *error_string is filled
 * by the unlocked helper on failure.
 */
int logsys_config_file_set (
	const char *subsys,
	const char **error_string,
	const char *file)
{
	int i;
	int res;

	pthread_mutex_lock (&logsys_config_mutex);
	if (subsys != NULL) {
		i = _logsys_config_subsys_get_unlocked (subsys);
		if (i < 0) {
			res = i;
		} else {
			res = logsys_config_file_set_unlocked(i, error_string, file);
		}
	} else {
		for (i = 0; i <= LOGSYS_MAX_SUBSYS_COUNT; i++) {
			res = logsys_config_file_set_unlocked(i, error_string, file);
			if (res < 0) {
				break;
			}
		}
	}
	pthread_mutex_unlock (&logsys_config_mutex);

	return res;
}
/*
 * Replace the global record format string.  A NULL argument restores
 * the default "%p [%6s] %b".  Returns 0 on success, -1 when the strdup
 * of the new format fails; in that case format_buffer is left NULL,
 * matching the original semantics (the previous format is gone).
 */
int logsys_format_set (const char *format)
{
	int ret = 0;

	pthread_mutex_lock (&logsys_config_mutex);

	/* free(NULL) is a no-op, so the old NULL guard was redundant */
	free(format_buffer);
	format_buffer = strdup(format ? format : "%p [%6s] %b");
	if (format_buffer == NULL) {
		ret = -1;
	}

	pthread_mutex_unlock (&logsys_config_mutex);

	return ret;
}
/*
 * Return the live format string (not a copy).  Callers must not free
 * it; ownership stays with logsys.  NOTE(review): read is unlocked, so
 * a concurrent logsys_format_set() can free it under the caller —
 * confirm all callers run on the config path.
 */
char *logsys_format_get (void)
{
	return format_buffer;
}
/*
 * Set the syslog facility for one subsystem, or for all slots when
 * subsys is NULL.  Whenever the global slot is (re)configured the
 * syslog connection is reopened via syslog_facility_reconf() so the
 * new facility takes effect.  Returns 0 on success or the negative
 * lookup error.
 */
unsigned int logsys_config_syslog_facility_set (
	const char *subsys,
	unsigned int facility)
{
	int i;

	pthread_mutex_lock (&logsys_config_mutex);
	if (subsys != NULL) {
		i = _logsys_config_subsys_get_unlocked (subsys);
		if (i >= 0) {
			logsys_loggers[i].syslog_facility = facility;
			/* only the global slot drives the actual syslog connection */
			if (i == LOGSYS_MAX_SUBSYS_COUNT) {
				syslog_facility_reconf();
			}
			i = 0;
		}
	} else {
		for (i = 0; i <= LOGSYS_MAX_SUBSYS_COUNT; i++) {
			logsys_loggers[i].syslog_facility = facility;
		}
		syslog_facility_reconf();
		i = 0;
	}
	pthread_mutex_unlock (&logsys_config_mutex);

	return i;
}
/*
 * Set the minimum priority forwarded to syslog for one subsystem, or
 * for every logger slot when subsys is NULL.
 * Returns 0 on success or the negative lookup error.
 */
unsigned int logsys_config_syslog_priority_set (
	const char *subsys,
	unsigned int priority)
{
	int i;

	pthread_mutex_lock (&logsys_config_mutex);
	if (subsys != NULL) {
		i = _logsys_config_subsys_get_unlocked (subsys);
		if (i >= 0) {
			logsys_loggers[i].syslog_priority = priority;
			i = 0;
		}
	} else {
		for (i = 0; i <= LOGSYS_MAX_SUBSYS_COUNT; i++) {
			logsys_loggers[i].syslog_priority = priority;
		}
		i = 0;
	}
	pthread_mutex_unlock (&logsys_config_mutex);

	return i;
}
/*
 * Configure the minimum priority written to the log file, for a single
 * named subsystem or for every logger slot when subsys is NULL.
 * Returns 0 on success or the negative subsystem-lookup error.
 */
unsigned int logsys_config_logfile_priority_set (
	const char *subsys,
	unsigned int priority)
{
	int rc;

	pthread_mutex_lock (&logsys_config_mutex);
	if (subsys == NULL) {
		int slot;

		for (slot = 0; slot <= LOGSYS_MAX_SUBSYS_COUNT; slot++) {
			logsys_loggers[slot].logfile_priority = priority;
		}
		rc = 0;
	} else {
		rc = _logsys_config_subsys_get_unlocked (subsys);
		if (rc >= 0) {
			logsys_loggers[rc].logfile_priority = priority;
			rc = 0;
		}
	}
	pthread_mutex_unlock (&logsys_config_mutex);

	return rc;
}
/*
 * Toggle the debug flag (which bypasses the priority filters) for one
 * subsystem or, with subsys == NULL, for every logger slot.
 * Returns 0 on success or the negative lookup error.
 */
unsigned int logsys_config_debug_set (
	const char *subsys,
	unsigned int debug)
{
	int i;

	pthread_mutex_lock (&logsys_config_mutex);
	if (subsys != NULL) {
		i = _logsys_config_subsys_get_unlocked (subsys);
		if (i >= 0) {
			logsys_loggers[i].debug = debug;
			i = 0;
		}
	} else {
		for (i = 0; i <= LOGSYS_MAX_SUBSYS_COUNT; i++) {
			logsys_loggers[i].debug = debug;
		}
		i = 0;
	}
	pthread_mutex_unlock (&logsys_config_mutex);

	return i;
}
/*
 * Case-insensitive lookup of a syslog facility value by name.
 * Returns -1 when the name is not in the facilitynames table.
 */
int logsys_facility_id_get (const char *name)
{
	unsigned int idx = 0;

	while (facilitynames[idx].c_name != NULL) {
		if (strcasecmp(name, facilitynames[idx].c_name) == 0) {
			return (facilitynames[idx].c_val);
		}
		idx++;
	}
	return (-1);
}
/*
 * Reverse lookup: map a syslog facility value back to its name.
 * Returns NULL when the value is not in the facilitynames table.
 */
const char *logsys_facility_name_get (unsigned int facility)
{
	unsigned int idx = 0;

	while (facilitynames[idx].c_name != NULL) {
		if (facility == facilitynames[idx].c_val) {
			return (facilitynames[idx].c_name);
		}
		idx++;
	}
	return (NULL);
}
/*
 * Case-insensitive lookup of a syslog priority value by name.
 * Returns -1 when the name is not in the prioritynames table.
 */
int logsys_priority_id_get (const char *name)
{
	unsigned int i;

	for (i = 0; prioritynames[i].c_name != NULL; i++) {
		if (strcasecmp(name, prioritynames[i].c_name) == 0) {
			return (prioritynames[i].c_val);
		}
	}
	return (-1);
}
/*
 * Reverse lookup: map a syslog priority value back to its name.
 * Returns NULL when the value is not in the prioritynames table.
 */
const char *logsys_priority_name_get (unsigned int priority)
{
	unsigned int i;

	for (i = 0; prioritynames[i].c_name != NULL; i++) {
		if (priority == prioritynames[i].c_val) {
			return (prioritynames[i].c_name);
		}
	}
	return (NULL);
}
/*
 * Adjust the scheduling of the logsys worker thread.  The
 * pthread_setschedparam() call is skipped for SCHED_OTHER (res stays
 * 0).  A non-zero after_log_ops_yield replaces the count of log
 * operations performed between voluntary yields.
 * Returns the pthread_setschedparam() result, or 0 when skipped.
 */
int logsys_thread_priority_set (
	int policy,
	const struct sched_param *param,
	unsigned int after_log_ops_yield)
{
	int res = 0;

	if (policy != SCHED_OTHER) {
		res = pthread_setschedparam (logsys_thread_id, policy, param);
	}
	if (after_log_ops_yield > 0) {
		logsys_after_log_ops_yield = after_log_ops_yield;
	}

	return (res);
}
/*
 * Dump the in-memory flight-recorder buffer (flt_data plus its two
 * trailing position words) to @filename.
 * Returns 0 on success, -1 on open failure, short/failed write, or
 * failed close.
 */
int logsys_log_rec_store (const char *filename)
{
	int fd;
	ssize_t written_size;
	size_t size_to_write = (flt_data_size + 2) * sizeof (unsigned int);

	/*
	 * 0600 instead of the former 0700: this is a data file, so the
	 * execute bits were a mistake.  NOTE(review): no O_TRUNC — an
	 * existing larger file keeps its stale tail; confirm readers use
	 * the embedded position words rather than the file size.
	 */
	fd = open (filename, O_CREAT|O_RDWR, 0600);
	if (fd < 0) {
		return (-1);
	}

	written_size = write (fd, flt_data, size_to_write);
	/* close even on write failure; a failed close also fails the dump */
	if (close (fd) != 0)
		return (-1);

	if (written_size < 0) {
		return (-1);
	} else if ((size_t)written_size != size_to_write) {
		return (-1);
	}

	return (0);
}
/*
 * Shut the logging worker thread down cleanly: flag it to exit, wake
 * it, and join it so queued records are flushed before process exit.
 * No-op when the worker thread was never started.
 */
void logsys_atexit (void)
{
	if (wthread_active) {
		wthread_should_exit = 1;
		wthread_signal ();
		pthread_join (logsys_thread_id, NULL);
	}
}
/*
 * Wake the logging worker thread so it drains any pending records.
 * Asynchronous: does not wait for the flush to complete.
 */
void logsys_flush (void)
{
	wthread_signal ();
}
diff --git a/exec/main.c b/exec/main.c
index 9901788e..3f4e4253 100644
--- a/exec/main.c
+++ b/exec/main.c
@@ -1,916 +1,924 @@
/*
* Copyright (c) 2002-2006 MontaVista Software, Inc.
* Copyright (c) 2006-2009 Red Hat, Inc.
*
* All rights reserved.
*
* Author: Steven Dake (sdake@redhat.com)
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <config.h>
#include <pthread.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/uio.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <sched.h>
#include <time.h>
#include <corosync/swab.h>
#include <corosync/corotypes.h>
#include <corosync/coroipc_types.h>
#include <corosync/corodefs.h>
#include <corosync/list.h>
#include <corosync/lcr/lcr_ifact.h>
#include <corosync/totem/coropoll.h>
#include <corosync/totem/totempg.h>
#include <corosync/engine/objdb.h>
#include <corosync/engine/config.h>
#include <corosync/engine/logsys.h>
#include <corosync/coroipcs.h>
#include "quorum.h"
#include "totemsrp.h"
#include "mainconfig.h"
#include "totemconfig.h"
#include "main.h"
#include "sync.h"
#include "tlist.h"
#include "timer.h"
#include "util.h"
#include "apidef.h"
#include "service.h"
#include "schedwrk.h"
#include "version.h"
LOGSYS_DECLARE_SYSTEM ("corosync",
LOGSYS_MODE_OUTPUT_STDERR | LOGSYS_MODE_THREADED | LOGSYS_MODE_FORK,
0,
NULL,
LOG_INFO,
LOG_DAEMON,
LOG_INFO,
NULL,
1000000);
LOGSYS_DECLARE_SUBSYS ("MAIN");
#define SERVER_BACKLOG 5
static int sched_priority = 0;
static unsigned int service_count = 32;
#if defined(HAVE_PTHREAD_SPIN_LOCK)
static pthread_spinlock_t serialize_spin;
#else
static pthread_mutex_t serialize_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static struct totem_logging_configuration totem_logging_configuration;
static int num_config_modules;
static struct config_iface_ver0 *config_modules[MAX_DYNAMIC_SERVICES];
static struct objdb_iface_ver0 *objdb = NULL;
static struct corosync_api_v1 *api = NULL;
static struct ug_config ug_config;
unsigned long long *(*main_clm_get_by_nodeid) (unsigned int node_id);
hdb_handle_t corosync_poll_handle;
struct sched_param global_sched_param;
/*
 * SIGUSR2 handler: ask every loaded service that provides an
 * exec_dump_fn to dump its internal state (debugging aid).
 */
static void sigusr2_handler (int num)
{
	int i;

	for (i = 0; i < SERVICE_HANDLER_MAXIMUM_COUNT; i++) {
		if (ais_service[i] && ais_service[i]->exec_dump_fn) {
			ais_service[i]->exec_dump_fn ();
		}
	}
}
/*
* TODO this function needs some love
*/
/*
 * Orderly shutdown: unlink all services (when the API is up), stop the
 * poll loop, tear down totem and IPC, then terminate through
 * corosync_exit_error(AIS_DONE_EXIT) — this function does not return.
 */
void corosync_request_shutdown (void)
{
	if (api) {
		corosync_service_unlink_all (api);
	}
	poll_stop (0);
	totempg_finalize ();
	coroipcs_ipc_exit ();
	corosync_exit_error (AIS_DONE_EXIT);
}
/* SIGQUIT: request a clean shutdown. */
static void sigquit_handler (int num)
{
	corosync_request_shutdown ();
}

/* SIGINT: request a clean shutdown. */
static void sigintr_handler (int num)
{
	corosync_request_shutdown ();
}

/*
 * SIGSEGV: restore the default disposition, flush logsys, persist the
 * flight-recorder data for post-mortem analysis, then re-raise so the
 * process still terminates (and can dump core) with SIGSEGV.
 */
static void sigsegv_handler (int num)
{
	(void)signal (SIGSEGV, SIG_DFL);
	logsys_atexit();
	logsys_log_rec_store (LOCALSTATEDIR "/lib/corosync/fdata");
	raise (SIGSEGV);
}

/* SIGABRT: same flush-and-reraise sequence as SIGSEGV. */
static void sigabrt_handler (int num)
{
	(void)signal (SIGABRT, SIG_DFL);
	logsys_atexit();
	logsys_log_rec_store (LOCALSTATEDIR "/lib/corosync/fdata");
	raise (SIGABRT);
}
#define LOCALHOST_IP inet_addr("127.0.0.1")
hdb_handle_t corosync_group_handle;
struct totempg_group corosync_group = {
.group = "a",
.group_len = 1
};
#if defined(HAVE_PTHREAD_SPIN_LOCK)
/*
 * Global serializer guarding service dispatch: spinlock flavour when
 * the platform provides pthread spinlocks...
 */
static void serialize_lock (void)
{
	pthread_spin_lock (&serialize_spin);
}

static void serialize_unlock (void)
{
	pthread_spin_unlock (&serialize_spin);
}
#else
/* ...plain mutex fallback otherwise.  Same semantics for callers. */
static void serialize_lock (void)
{
	pthread_mutex_lock (&serialize_mutex);
}

static void serialize_unlock (void)
{
	pthread_mutex_unlock (&serialize_mutex);
}
#endif
/* Sync-engine completion callback; nothing to do at the moment. */
static void corosync_sync_completed (void)
{
}
/*
 * Fill *callbacks with the sync callbacks of the sync_id'th *loaded*
 * service: NULL slots in ais_service[] are skipped, so sync_id counts
 * only services actually present.  Returns 0 on success; when sync_id
 * is past the last loaded service, zeroes *callbacks and returns -1
 * (the sync engine uses this as the end-of-list marker).
 */
static int corosync_sync_callbacks_retrieve (int sync_id,
	struct sync_callbacks *callbacks)
{
	unsigned int ais_service_index;
	unsigned int ais_services_found = 0;

	for (ais_service_index = 0;
		ais_service_index < SERVICE_HANDLER_MAXIMUM_COUNT;
		ais_service_index++) {

		if (ais_service[ais_service_index] != NULL) {
			/* stop on the sync_id'th populated slot */
			if (ais_services_found == sync_id) {
				break;
			}
			ais_services_found += 1;
		}
	}
	if (ais_service_index == SERVICE_HANDLER_MAXIMUM_COUNT) {
		memset (callbacks, 0, sizeof (struct sync_callbacks));
		return (-1);
	}
	callbacks->name = ais_service[ais_service_index]->name;
	callbacks->sync_init = ais_service[ais_service_index]->sync_init;
	callbacks->sync_process = ais_service[ais_service_index]->sync_process;
	callbacks->sync_activate = ais_service[ais_service_index]->sync_activate;
	callbacks->sync_abort = ais_service[ais_service_index]->sync_abort;
	return (0);
}
/* Last ring id delivered by totem; updated under the serializer. */
static struct memb_ring_id corosync_ring_id;

/*
 * Totem configuration-change callback: snapshot the new ring id and
 * fan the membership change out to every loaded service that has a
 * confchg_fn, all under the global serializer lock.
 */
static void confchg_fn (
	enum totem_configuration_type configuration_type,
	const unsigned int *member_list, size_t member_list_entries,
	const unsigned int *left_list, size_t left_list_entries,
	const unsigned int *joined_list, size_t joined_list_entries,
	const struct memb_ring_id *ring_id)
{
	int i;

	serialize_lock ();
	memcpy (&corosync_ring_id, ring_id, sizeof (struct memb_ring_id));

	/*
	 * Call configuration change for all services
	 */
	for (i = 0; i < service_count; i++) {
		if (ais_service[i] && ais_service[i]->confchg_fn) {
			ais_service[i]->confchg_fn (configuration_type,
				member_list, member_list_entries,
				left_list, left_list_entries,
				joined_list, joined_list_entries, ring_id);
		}
	}
	serialize_unlock ();
}
/*
 * Intended to drop root privileges to the configured uid/gid.
 * Deliberately disabled: the early return makes the setuid/setegid
 * calls unreachable until the TODO below is resolved.
 */
static void priv_drop (void)
{
	return; /* TODO: we are still not dropping privs */
	setuid (ug_config.uid);
	setegid (ug_config.gid);
}
/*
 * Daemonize: fork (parent exits), start a new session, and redirect
 * stdin/stdout/stderr to /dev/null so the daemon is fully detached
 * from the controlling terminal.
 */
static void corosync_tty_detach (void)
{
	int fd;

	/*
	 * Disconnect from TTY if this is not a debug run
	 */
	switch (fork ()) {
		case -1:
			corosync_exit_error (AIS_DONE_FORK);
			break;
		case 0:
			/*
			 * child which is disconnected, run this process
			 */
			/*	setset();
			close (0);
			close (1);
			close (2);
			*/
			break;
		default:
			exit (0);
			break;
	}

	/* Create new session */
	(void)setsid();

	/*
	 * Map stdin/out/err to /dev/null.
	 */
	fd = open("/dev/null", O_RDWR);
	if (fd >= 0) {
		/* dup2 to 0 / 1 / 2 (stdin / stdout / stderr) */
		dup2(fd, STDIN_FILENO);  /* 0 */
		dup2(fd, STDOUT_FILENO); /* 1 */
		dup2(fd, STDERR_FILENO); /* 2 */

		/* Should be 0, but just in case it isn't... */
		if (fd > 2)
			close(fd);
	}
}
/*
 * Put the process under SCHED_RR at the maximum round-robin priority
 * (on platforms that support it).  Failures are logged as warnings
 * only; sched_priority is left at 0 when the max cannot be queried,
 * and later consumers (e.g. the timer thread) see that value.
 */
static void corosync_setscheduler (void)
{
#if ! defined(TS_CLASS) && (defined(COROSYNC_BSD) || defined(COROSYNC_LINUX) || defined(COROSYNC_SOLARIS))
	int res;

	sched_priority = sched_get_priority_max (SCHED_RR);
	if (sched_priority != -1) {
		global_sched_param.sched_priority = sched_priority;
		res = sched_setscheduler (0, SCHED_RR, &global_sched_param);
		if (res == -1) {
			log_printf (LOGSYS_LEVEL_WARNING, "Could not set SCHED_RR at priority %d: %s\n",
				global_sched_param.sched_priority, strerror (errno));
		}
	} else {
		log_printf (LOGSYS_LEVEL_WARNING, "Could not get maximum scheduler priority: %s\n", strerror (errno));
		sched_priority = 0;
	}
#else
	log_printf(LOGSYS_LEVEL_WARNING, "Scheduler priority left to default value (no OS support)\n");
#endif
}
/*
 * Raise the memory-lock rlimit to infinity and lock all current and
 * future pages into RAM so the daemon never page-faults during
 * healthchecking.  NOTE(review): the setrlimit() return value is
 * ignored — if it fails, the following mlockall() may fail too and is
 * only reported as a warning.
 */
static void corosync_mlockall (void)
{
#if !defined(COROSYNC_BSD)
	int res;
#endif
	struct rlimit rlimit;

	rlimit.rlim_cur = RLIM_INFINITY;
	rlimit.rlim_max = RLIM_INFINITY;
#ifndef COROSYNC_SOLARIS
	setrlimit (RLIMIT_MEMLOCK, &rlimit);
#else
	setrlimit (RLIMIT_VMEM, &rlimit);
#endif

#if defined(COROSYNC_BSD)
	/* under FreeBSD a process with locked page cannot call dlopen
	 * code disabled until FreeBSD bug i386/93396 was solved
	 */
	log_printf (LOGSYS_LEVEL_WARNING, "Could not lock memory of service to avoid page faults\n");
#else
	res = mlockall (MCL_CURRENT | MCL_FUTURE);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_WARNING, "Could not lock memory of service to avoid page faults: %s\n", strerror (errno));
	};
#endif
}
static void deliver_fn (
unsigned int nodeid,
const void *msg,
unsigned int msg_len,
int endian_conversion_required)
{
const coroipc_request_header_t *header;
int service;
int fn_id;
unsigned int id;
unsigned int size;
header = msg;
if (endian_conversion_required) {
id = swab32 (header->id);
size = swab32 (header->size);
} else {
id = header->id;
size = header->size;
}
/*
* Call the proper executive handler
*/
service = id >> 16;
fn_id = id & 0xffff;
if (!ais_service[service])
return;
serialize_lock();
if (endian_conversion_required) {
assert(ais_service[service]->exec_engine[fn_id].exec_endian_convert_fn != NULL);
ais_service[service]->exec_engine[fn_id].exec_endian_convert_fn
((void *)msg);
}
ais_service[service]->exec_engine[fn_id].exec_handler_fn
(msg, nodeid);
serialize_unlock();
}
/*
 * Hand out the array of loaded configuration-parser modules and its
 * element count (pointers into this file's static state; do not free).
 */
void main_get_config_modules(struct config_iface_ver0 ***modules, int *num)
{
	*modules = config_modules;
	*num = num_config_modules;
}
/*
 * Multicast an iovec array to the corosync process group.
 * Thin wrapper over totempg_groups_mcast_joined(); returns its result.
 */
int main_mcast (
	const struct iovec *iovec,
	unsigned int iov_len,
	unsigned int guarantee)
{
	return (totempg_groups_mcast_joined (corosync_group_handle, iovec, iov_len, guarantee));
}
/*
 * Return 1 when the message source's nodeid matches this node's totem
 * nodeid, 0 otherwise.  source must be non-NULL (asserted).
 */
int message_source_is_local (const mar_message_source_t *source)
{
	assert (source != NULL);

	return (source->nodeid == totempg_my_nodeid_get ()) ? 1 : 0;
}
/*
 * Stamp *source with this node's totem nodeid and the originating IPC
 * connection, zeroing the structure first.  Both pointers must be
 * non-NULL (asserted).
 */
void message_source_set (
	mar_message_source_t *source,
	void *conn)
{
	assert ((source != NULL) && (conn != NULL));
	memset (source, 0, sizeof (mar_message_source_t));
	source->nodeid = totempg_my_nodeid_get ();
	source->conn = conn;
}
/*
* Provides the glue from corosync to the IPC Service
*/
/* Per-service private-data size for coroipcs connection setup. */
static int corosync_private_data_size_get (unsigned int service)
{
	return (ais_service[service]->private_data_size);
}

/* Per-service library-connection init callback for coroipcs. */
static coroipcs_init_fn_lvalue corosync_init_fn_get (unsigned int service)
{
	return (ais_service[service]->lib_init_fn);
}

/* Per-service library-connection exit callback for coroipcs. */
static coroipcs_exit_fn_lvalue corosync_exit_fn_get (unsigned int service)
{
	return (ais_service[service]->lib_exit_fn);
}

/* Library message handler for (service, id); no bounds checks here. */
static coroipcs_handler_fn_lvalue corosync_handler_fn_get (unsigned int service, unsigned int id)
{
	return (ais_service[service]->lib_engine[id].lib_handler_fn);
}
/*
 * IPC access check: root (euid or egid 0) is always allowed; otherwise
 * the caller must match a uid or gid from the configured uidgid list.
 * Returns 1 when access is permitted, 0 otherwise.
 */
static int corosync_security_valid (int euid, int egid)
{
	struct list_head *iter;

	if (euid == 0 || egid == 0) {
		return (1);
	}

	for (iter = ug_config.uidgid_list.next; iter != &ug_config.uidgid_list; iter = iter->next) {
		struct uidgid_item *ugi = list_entry (iter, struct uidgid_item, list);

		/* either a uid or a gid match is sufficient */
		if (euid == ugi->uid || egid == ugi->gid)
			return (1);
	}

	return (0);
}
/* A service id is available when its ais_service[] slot is populated. */
static int corosync_service_available (unsigned int service)
{
	return (ais_service[service] != NULL);
}

/* Scratch carried from sending_allowed to its matching release call. */
struct sending_allowed_private_data_struct {
	int reserved_msgs;
};
/*
 * Decide whether a library request may be multicast right now.
 * Reserves totempg transmit space (recorded in the private data so
 * corosync_sending_allowed_release() can undo it), then allows the
 * send when (a) the cluster is quorate or the service permits
 * inquorate operation, and (b) flow control is either not required or
 * is satisfied (reservation succeeded and no sync in progress).
 */
static int corosync_sending_allowed (
	unsigned int service,
	unsigned int id,
	const void *msg,
	void *sending_allowed_private_data)
{
	struct sending_allowed_private_data_struct *pd =
		(struct sending_allowed_private_data_struct *)sending_allowed_private_data;
	struct iovec reserve_iovec;
	coroipc_request_header_t *header = (coroipc_request_header_t *)msg;
	int sending_allowed;

	reserve_iovec.iov_base = (char *)header;
	reserve_iovec.iov_len = header->size;

	pd->reserved_msgs = totempg_groups_joined_reserve (
		corosync_group_handle,
		&reserve_iovec, 1);

	sending_allowed =
		(corosync_quorum_is_quorate() == 1 ||
		ais_service[service]->allow_inquorate == CS_LIB_ALLOW_INQUORATE) &&
		((ais_service[service]->lib_engine[id].flow_control == CS_LIB_FLOW_CONTROL_NOT_REQUIRED) ||
		((ais_service[service]->lib_engine[id].flow_control == CS_LIB_FLOW_CONTROL_REQUIRED) &&
		(pd->reserved_msgs) &&
		(sync_in_process() == 0)));

	return (sending_allowed);
}
/*
 * Release the totempg transmit reservation taken by
 * corosync_sending_allowed() (stashed in the shared private data).
 */
static void corosync_sending_allowed_release (void *sending_allowed_private_data)
{
	struct sending_allowed_private_data_struct *pd =
		(struct sending_allowed_private_data_struct *)sending_allowed_private_data;

	totempg_groups_joined_release (pd->reserved_msgs);
}
static int ipc_subsys_id = -1;
static void ipc_log_printf (const char *format, ...) __attribute__((format(printf, 1, 2)));
static void ipc_log_printf (const char *format, ...) {
va_list ap;
va_start (ap, format);
- _logsys_log_vprintf (ipc_subsys_id, __FUNCTION__,
- __FILE__, __LINE__, LOGSYS_LEVEL_ERROR, LOGSYS_RECID_LOG, format, ap);
+ _logsys_log_vprintf (
+ LOGSYS_ENCODE_RECID(ipc_subsys_id,
+ LOGSYS_LEVEL_ERROR,
+ LOGSYS_RECID_LOG),
+ __FUNCTION__, __FILE__, __LINE__,
+ format, ap);
va_end (ap);
}
static void ipc_fatal_error(const char *error_msg) {
- _logsys_log_printf (ipc_subsys_id, __FUNCTION__,
- __FILE__, __LINE__, LOGSYS_LEVEL_ERROR, LOGSYS_RECID_LOG, "%s", error_msg);
+ _logsys_log_printf (
+ LOGSYS_ENCODE_RECID(ipc_subsys_id,
+ LOGSYS_LEVEL_ERROR,
+ LOGSYS_RECID_LOG),
+ __FUNCTION__, __FILE__, __LINE__,
+ "%s", error_msg);
exit(EXIT_FAILURE);
}
/*
 * Thin adapters bridging the coropoll callback signature (which
 * carries the poll handle) to the coroipcs handlers (which do not).
 */
static int corosync_poll_handler_accept (
	hdb_handle_t handle,
	int fd,
	int revent,
	void *context)
{
	return (coroipcs_handler_accept (fd, revent, context));
}

static int corosync_poll_handler_dispatch (
	hdb_handle_t handle,
	int fd,
	int revent,
	void *context)
{
	return (coroipcs_handler_dispatch (fd, revent, context));
}

/* Register the IPC listening socket with the main poll loop. */
static void corosync_poll_accept_add (
	int fd)
{
	poll_dispatch_add (corosync_poll_handle, fd, POLLIN|POLLNVAL, 0,
		corosync_poll_handler_accept);
}

/* Register an accepted IPC connection fd with the main poll loop. */
static void corosync_poll_dispatch_add (
	int fd,
	void *context)
{
	poll_dispatch_add (corosync_poll_handle, fd, POLLIN|POLLNVAL, context,
		corosync_poll_handler_dispatch);
}

/* Change the poll event mask of an already-registered connection fd. */
static void corosync_poll_dispatch_modify (
	int fd,
	int events)
{
	poll_dispatch_modify (corosync_poll_handle, fd, events,
		corosync_poll_handler_dispatch);
}
/*
 * Callback table handed to coroipcs_ipc_init(): wires the generic IPC
 * server to this daemon's logging, security, scheduling, poll loop,
 * serializer, and per-service dispatch glue defined above.
 */
struct coroipcs_init_state ipc_init_state = {
	.socket_name			= COROSYNC_SOCKET_NAME,
	.sched_policy			= SCHED_RR,
	.sched_param			= &global_sched_param,
	.malloc				= malloc,
	.free				= free,
	.log_printf			= ipc_log_printf,
	.fatal_error			= ipc_fatal_error,
	.security_valid			= corosync_security_valid,
	.service_available		= corosync_service_available,
	.private_data_size_get		= corosync_private_data_size_get,
	.serialize_lock			= serialize_lock,
	.serialize_unlock		= serialize_unlock,
	.sending_allowed		= corosync_sending_allowed,
	.sending_allowed_release	= corosync_sending_allowed_release,
	.poll_accept_add		= corosync_poll_accept_add,
	.poll_dispatch_add		= corosync_poll_dispatch_add,
	.poll_dispatch_modify		= corosync_poll_dispatch_modify,
	.init_fn_get			= corosync_init_fn_get,
	.exit_fn_get			= corosync_exit_fn_get,
	.handler_fn_get			= corosync_handler_fn_get
};
/*
 * Daemon entry point.  Sequence: parse -f/-p flags, daemonize, set
 * realtime scheduling and lock memory, install signal handlers, load
 * the objdb and configuration parser components, read and validate the
 * main and totem configuration, bring up totempg, link the default
 * services, initialize IPC, and finally enter the poll loop (which
 * normally never returns).
 */
int main (int argc, char **argv)
{
	const char *error_string;
	struct totem_config totem_config;
	hdb_handle_t objdb_handle;
	hdb_handle_t config_handle;
	unsigned int config_version = 0;
	void *objdb_p;
	struct config_iface_ver0 *config;
	void *config_p;
	const char *config_iface_init;
	char *config_iface;
	char *iface;
	int res, ch;
	int background, setprio;

#if defined(HAVE_PTHREAD_SPIN_LOCK)
	pthread_spin_init (&serialize_spin, 0);
#endif

	/* default configuration
	 */
	background = 1;
	setprio = 1;

	while ((ch = getopt (argc, argv, "fp")) != EOF) {
		switch (ch) {
			case 'f':
				/* foreground run: also force stderr output */
				background = 0;
				logsys_config_mode_set (NULL, LOGSYS_MODE_OUTPUT_STDERR|LOGSYS_MODE_THREADED|LOGSYS_MODE_FORK);
				break;
			case 'p':
				setprio = 0;
				break;
			default:
				fprintf(stderr, \
					"usage:\n"\
					" -f : Start application in foreground.\n"\
					" -p : Do not set process priority. \n");
				return EXIT_FAILURE;
		}
	}

	if (background)
		corosync_tty_detach ();

	/*
	 * Set round robin realtime scheduling with priority 99
	 * Lock all memory to avoid page faults which may interrupt
	 * application healthchecking
	 */
	if (setprio) {
		corosync_setscheduler ();
	}

	corosync_mlockall ();

	log_printf (LOGSYS_LEVEL_NOTICE, "Corosync Executive Service RELEASE '%s'\n", RELEASE_VERSION);
	log_printf (LOGSYS_LEVEL_NOTICE, "Copyright (C) 2002-2006 MontaVista Software, Inc and contributors.\n");
	log_printf (LOGSYS_LEVEL_NOTICE, "Copyright (C) 2006-2008 Red Hat, Inc.\n");

	(void)signal (SIGINT, sigintr_handler);
	(void)signal (SIGUSR2, sigusr2_handler);
	(void)signal (SIGSEGV, sigsegv_handler);
	(void)signal (SIGABRT, sigabrt_handler);
	(void)signal (SIGQUIT, sigquit_handler);
#if MSG_NOSIGNAL == 0
	(void)signal (SIGPIPE, SIG_IGN);
#endif

	corosync_timer_init (
		serialize_lock,
		serialize_unlock,
		sched_priority);

	log_printf (LOGSYS_LEVEL_NOTICE, "Corosync Executive Service: started and ready to provide service.\n");

	corosync_poll_handle = poll_create ();

	/*
	 * Load the object database interface
	 */
	res = lcr_ifact_reference (
		&objdb_handle,
		"objdb",
		0,
		&objdb_p,
		0);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "Corosync Executive couldn't open configuration object database component.\n");
		corosync_exit_error (AIS_DONE_OBJDB);
	}

	objdb = (struct objdb_iface_ver0 *)objdb_p;

	objdb->objdb_init ();

	/*
	 * Initialize the corosync_api_v1 definition
	 */
	apidef_init (objdb);
	api = apidef_get ();

	num_config_modules = 0;

	/*
	 * Bootstrap in the default configuration parser or use
	 * the corosync default built in parser if the configuration parser
	 * isn't overridden
	 */
	config_iface_init = getenv("COROSYNC_DEFAULT_CONFIG_IFACE");
	if (!config_iface_init) {
		config_iface_init = "corosync_parser";
	}

	/* Make a copy so we can deface it with strtok */
	if ((config_iface = strdup(config_iface_init)) == NULL) {
		log_printf (LOGSYS_LEVEL_ERROR, "exhausted virtual memory");
		corosync_exit_error (AIS_DONE_OBJDB);
	}

	/* colon-separated list: load and run every parser component */
	iface = strtok(config_iface, ":");
	while (iface)
	{
		res = lcr_ifact_reference (
			&config_handle,
			iface,
			config_version,
			&config_p,
			0);

		config = (struct config_iface_ver0 *)config_p;
		if (res == -1) {
			log_printf (LOGSYS_LEVEL_ERROR, "Corosync Executive couldn't open configuration component '%s'\n", iface);
			corosync_exit_error (AIS_DONE_MAINCONFIGREAD);
		}

		res = config->config_readconfig(objdb, &error_string);
		if (res == -1) {
			log_printf (LOGSYS_LEVEL_ERROR, "%s", error_string);
			corosync_exit_error (AIS_DONE_MAINCONFIGREAD);
		}
		log_printf (LOGSYS_LEVEL_NOTICE, "%s", error_string);
		config_modules[num_config_modules++] = config;

		iface = strtok(NULL, ":");
	}
	free(config_iface);

	res = corosync_main_config_read (objdb, &error_string, &ug_config);
	if (res == -1) {
		/*
		 * if we are here, we _must_ flush the logsys queue
		 * and try to inform that we couldn't read the config.
		 * this is a desperate attempt before certain death
		 * and there is no guarantee that we can print to stderr
		 * nor that logsys is sending the messages where we expect.
		 */
		log_printf (LOGSYS_LEVEL_ERROR, "%s", error_string);
		fprintf(stderr, "%s", error_string);
		syslog (LOGSYS_LEVEL_ERROR, "%s", error_string);
		corosync_exit_error (AIS_DONE_MAINCONFIGREAD);
	}

	/* config is read: the logsys worker thread may start now */
	logsys_fork_completed();

	if (setprio) {
		res = logsys_thread_priority_set (SCHED_RR, &global_sched_param, 10);
		if (res == -1) {
			log_printf (LOGSYS_LEVEL_ERROR,
				"Could not set logsys thread priority. Can't continue because of priority inversions.");
			corosync_exit_error (AIS_DONE_LOGSETUP);
		}
	} else {
		res = logsys_thread_priority_set (SCHED_OTHER, NULL, 1);
	}

	res = totem_config_read (objdb, &totem_config, &error_string);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "%s", error_string);
		corosync_exit_error (AIS_DONE_MAINCONFIGREAD);
	}

	res = totem_config_keyread (objdb, &totem_config, &error_string);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "%s", error_string);
		corosync_exit_error (AIS_DONE_MAINCONFIGREAD);
	}

	res = totem_config_validate (&totem_config, &error_string);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "%s", error_string);
		corosync_exit_error (AIS_DONE_MAINCONFIGREAD);
	}

	totem_config.totem_logging_configuration = totem_logging_configuration;
	totem_config.totem_logging_configuration.log_subsys_id =
		_logsys_subsys_create ("TOTEM");
	if (totem_config.totem_logging_configuration.log_subsys_id < 0) {
		log_printf (LOGSYS_LEVEL_ERROR,
			"Unable to initialize TOTEM logging subsystem\n");
		corosync_exit_error (AIS_DONE_MAINCONFIGREAD);
	}

	totem_config.totem_logging_configuration.log_level_security = LOGSYS_LEVEL_WARNING;
	totem_config.totem_logging_configuration.log_level_error = LOGSYS_LEVEL_ERROR;
	totem_config.totem_logging_configuration.log_level_warning = LOGSYS_LEVEL_WARNING;
	totem_config.totem_logging_configuration.log_level_notice = LOGSYS_LEVEL_NOTICE;
	totem_config.totem_logging_configuration.log_level_debug = LOGSYS_LEVEL_DEBUG;
	totem_config.totem_logging_configuration.log_printf = _logsys_log_printf;

	/*
	 * Sleep for a while to let other nodes in the cluster
	 * understand that this node has been away (if it was
	 * an corosync restart).
	 */

	// TODO what is this hack for? usleep(totem_config.token_timeout * 2000);

	/*
	 * if totempg_initialize doesn't have root priveleges, it cannot
	 * bind to a specific interface. This only matters if
	 * there is more then one interface in a system, so
	 * in this case, only a warning is printed
	 */
	/*
	 * Join multicast group and setup delivery
	 *  and configuration change functions
	 */
	totempg_initialize (
		corosync_poll_handle,
		&totem_config);

	totempg_groups_initialize (
		&corosync_group_handle,
		deliver_fn,
		confchg_fn);

	totempg_groups_join (
		corosync_group_handle,
		&corosync_group,
		1);

	/*
	 * This must occur after totempg is initialized because "this_ip" must be set
	 */
	res = corosync_service_defaults_link_and_init (api);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "Could not initialize default services\n");
		corosync_exit_error (AIS_DONE_INIT_SERVICES);
	}

	sync_register (corosync_sync_callbacks_retrieve, corosync_sync_completed);

	/*
	 * Drop root privleges to user 'ais'
	 * TODO: Don't really need full root capabilities;
	 *       needed capabilities are:
	 * CAP_NET_RAW (bindtodevice)
	 * CAP_SYS_NICE (setscheduler)
	 * CAP_IPC_LOCK (mlockall)
	 */
	priv_drop ();

	schedwrk_init (
		serialize_lock,
		serialize_unlock);

	ipc_subsys_id = _logsys_subsys_create ("IPC");
	if (ipc_subsys_id < 0) {
		log_printf (LOGSYS_LEVEL_ERROR,
			"Could not initialize IPC logging subsystem\n");
		corosync_exit_error (AIS_DONE_INIT_SERVICES);
	}

	coroipcs_ipc_init (&ipc_init_state);

	/*
	 * Start main processing loop
	 */
	poll_run (corosync_poll_handle);

	return EXIT_SUCCESS;
}
diff --git a/exec/totemnet.c b/exec/totemnet.c
index b1f19c4f..e7ead32e 100644
--- a/exec/totemnet.c
+++ b/exec/totemnet.c
@@ -1,2074 +1,2077 @@
/*
* Copyright (c) 2005 MontaVista Software, Inc.
* Copyright (c) 2006-2009 Red Hat, Inc.
*
* All rights reserved.
*
* Author: Steven Dake (sdake@redhat.com)
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <config.h>
#include <assert.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <netdb.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <sched.h>
#include <time.h>
#include <sys/time.h>
#include <sys/poll.h>
#include <limits.h>
#include <corosync/sq.h>
#include <corosync/list.h>
#include <corosync/hdb.h>
#include <corosync/swab.h>
#include <corosync/totem/coropoll.h>
#define LOGSYS_UTILS_ONLY 1
#include <corosync/engine/logsys.h>
#include "totemnet.h"
#include "wthread.h"
#include "crypto.h"
#ifdef HAVE_LIBNSS
#include <nss.h>
#include <pk11pub.h>
#include <pkcs11.h>
#include <prerror.h>
#endif
#ifndef MSG_NOSIGNAL
#define MSG_NOSIGNAL 0
#endif
#define MCAST_SOCKET_BUFFER_SIZE (TRANSMITS_ALLOWED * FRAME_SIZE_MAX)
#define NETIF_STATE_REPORT_UP 1
#define NETIF_STATE_REPORT_DOWN 2
#define BIND_STATE_UNBOUND 0
#define BIND_STATE_REGULAR 1
#define BIND_STATE_LOOPBACK 2
#define HMAC_HASH_SIZE 20
/*
 * On-the-wire prefix for encrypted/authenticated totemnet frames.
 * Layout is wire format — keep packed and do not reorder.
 */
struct security_header {
	unsigned char hash_digest[HMAC_HASH_SIZE]; /* The hash *MUST* be first in the data structure */
	unsigned char salt[16]; /* random number */
	char msg[0];            /* payload follows; [0] is the pre-C99 flexible-array idiom */
} __attribute__((packed));

/* Per-worker-thread scratch: frame buffer plus a private PRNG state. */
struct totemnet_mcast_thread_state {
	unsigned char iobuf[FRAME_SIZE_MAX];
	prng_state prng_state;
};

/* The three sockets one totemnet instance uses. */
struct totemnet_socket {
	int mcast_recv;
	int mcast_send;
	int token;
};
/*
 * Per-ring network instance state.
 * NOTE(review): this span is rendered as a diff hunk; the '-'/'+' lines
 * show the logger callback changing from a (subsys, ..., level, rec_ident)
 * signature to a single packed rec_ident argument.
 */
struct totemnet_instance {
/* crypto state for the libtomcrypt SOBER128/SHA1-HMAC (mode 0) engine */
hmac_state totemnet_hmac_state;
prng_state totemnet_prng_state;
#ifdef HAVE_LIBNSS
/* imported cipher and HMAC keys for the NSS (mode 1) engine */
PK11SymKey *nss_sym_key;
PK11SymKey *nss_sym_key_sign;
#endif
unsigned char totemnet_private_key[1024];
unsigned int totemnet_private_key_len;
hdb_handle_t totemnet_poll_handle;
struct totem_interface *totem_interface;
int netif_state_report;
int netif_bind_state;
struct worker_thread_group worker_thread_group;
void *context;
/* upcall delivering a received (already authenticated/decrypted) message */
void (*totemnet_deliver_fn) (
void *context,
const void *msg,
unsigned int msg_len);
/* upcall reporting that the bound interface address changed */
void (*totemnet_iface_change_fn) (
void *context,
const struct totem_ip_address *iface_address);
/*
 * Function and data used to log messages
 */
int totemnet_log_level_security;
int totemnet_log_level_error;
int totemnet_log_level_warning;
int totemnet_log_level_notice;
int totemnet_log_level_debug;
int totemnet_subsys_id;
- void (*totemnet_log_printf) (int subsys,
+ void (*totemnet_log_printf) (
+ unsigned int rec_ident,
const char *function,
const char *file,
- int line, unsigned int level,
- unsigned int rec_ident,
+ int line,
const char *format,
- ...)__attribute__((format(printf, 7, 8)));
+ ...)__attribute__((format(printf, 5, 6)));
hdb_handle_t handle;
/* receive buffers; the _flush pair is used while draining the socket */
char iov_buffer[FRAME_SIZE_MAX];
char iov_buffer_flush[FRAME_SIZE_MAX];
struct iovec totemnet_iov_recv;
struct iovec totemnet_iov_recv_flush;
struct totemnet_socket totemnet_sockets;
struct totem_ip_address mcast_address;
/* traffic counters */
int stats_sent;
int stats_recv;
int stats_delv;
int stats_remcasts;
int stats_orf_token;
struct timeval stats_tv_start;
struct totem_ip_address my_id;
int firstrun;
poll_timer_handle timer_netif_check_timeout;
unsigned int my_memb_entries;
int flushing;
struct totem_config *totem_config;
struct totem_ip_address token_target;
};
/*
 * Unit of work queued to the multicast worker threads: an unowned pointer
 * to the outgoing message, its length, and the owning instance.
 */
struct work_item {
const void *msg;
unsigned int msg_len;
struct totemnet_instance *instance;
};
static void netif_down_check (struct totemnet_instance *instance);
static int totemnet_build_sockets (
struct totemnet_instance *instance,
struct totem_ip_address *bindnet_address,
struct totem_ip_address *mcastaddress,
struct totemnet_socket *sockets,
struct totem_ip_address *bound_to);
static struct totem_ip_address localhost;
DECLARE_HDB_DATABASE (totemnet_instance_database,NULL);
/*
 * Reset an instance to its initial state: zero everything, request
 * reporting of both interface-up and interface-down transitions, and
 * wire the receive iovecs to their static buffers.
 */
static void totemnet_instance_initialize (struct totemnet_instance *instance)
{
	memset (instance, 0, sizeof (struct totemnet_instance));

	/* report both interface state transitions */
	instance->netif_state_report =
		NETIF_STATE_REPORT_UP | NETIF_STATE_REPORT_DOWN;

	/*
	 * There is always atleast 1 processor
	 */
	instance->my_memb_entries = 1;

	instance->totemnet_iov_recv.iov_base = instance->iov_buffer;
	instance->totemnet_iov_recv.iov_len = FRAME_SIZE_MAX;
	instance->totemnet_iov_recv_flush.iov_base = instance->iov_buffer_flush;
	instance->totemnet_iov_recv_flush.iov_len = FRAME_SIZE_MAX;
}
/*
 * Log through the instance's logger callback.  The severity level,
 * subsystem id and record type are packed into a single rec_ident word
 * via LOGSYS_ENCODE_RECID.
 * NOTE(review): rendered as a diff hunk; '-' lines are the old
 * 7-argument form, '+' lines the new rec_ident-based form.
 */
#define log_printf(level, format, args...) \
do { \
- instance->totemnet_log_printf (instance->totemnet_subsys_id, \
+ instance->totemnet_log_printf ( \
+ LOGSYS_ENCODE_RECID(level, \
+ instance->totemnet_subsys_id, \
+ LOGSYS_RECID_LOG), \
__FUNCTION__, __FILE__, __LINE__, \
- level, LOGSYS_RECID_LOG, (const char *)format, ##args); \
+ (const char *)format, ##args); \
} while (0);
/*
 * Authenticate a received datagram with HMAC-SHA1 and, on success,
 * decrypt it in place with the SOBER-128 stream cipher (mode 0).
 * The MAC, cipher and IV keys are derived from the node's private key
 * plus the salt carried in the packet's security header.
 * Returns 0 on success, -1 if the digest does not match.
 */
static int authenticate_and_decrypt_sober (
struct totemnet_instance *instance,
struct iovec *iov,
unsigned int iov_len)
{
unsigned char keys[48];
struct security_header *header = iov[0].iov_base;
prng_state keygen_prng_state;
prng_state stream_prng_state;
unsigned char *hmac_key = &keys[32];
unsigned char *cipher_key = &keys[16];
unsigned char *initial_vector = &keys[0];
unsigned char digest_comparison[HMAC_HASH_SIZE];
unsigned long len;
/*
 * Generate MAC, CIPHER, IV keys from private key
 */
memset (keys, 0, sizeof (keys));
sober128_start (&keygen_prng_state);
sober128_add_entropy (instance->totemnet_private_key,
instance->totemnet_private_key_len, &keygen_prng_state);
sober128_add_entropy (header->salt, sizeof (header->salt), &keygen_prng_state);
sober128_read (keys, sizeof (keys), &keygen_prng_state);
/*
 * Setup stream cipher
 */
sober128_start (&stream_prng_state);
sober128_add_entropy (cipher_key, 16, &stream_prng_state);
sober128_add_entropy (initial_vector, 16, &stream_prng_state);
/*
 * Authenticate contents of message (everything after the digest field)
 */
hmac_init (&instance->totemnet_hmac_state, DIGEST_SHA1, hmac_key, 16);
hmac_process (&instance->totemnet_hmac_state,
(unsigned char *)iov->iov_base + HMAC_HASH_SIZE,
iov->iov_len - HMAC_HASH_SIZE);
len = hash_descriptor[DIGEST_SHA1]->hashsize;
assert (HMAC_HASH_SIZE >= len);
hmac_done (&instance->totemnet_hmac_state, digest_comparison, &len);
if (memcmp (digest_comparison, header->hash_digest, len) != 0) {
return (-1);
}
/*
 * Decrypt the contents of the message with the cipher key;
 * sober128_read XORs the keystream over the buffer in place.
 */
sober128_read ((unsigned char*)iov->iov_base +
sizeof (struct security_header),
iov->iov_len - sizeof (struct security_header),
&stream_prng_state);
return (0);
}
/*
 * Seed the SOBER-128 PRNG used for salt generation by the mode 0
 * (libtomcrypt) security engine.
 */
static void init_sober_crypto(
	struct totemnet_instance *instance)
{
	log_printf(instance->totemnet_log_level_notice,
		"Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).\n");

	rng_make_prng (128, PRNG_SOBER, &instance->totemnet_prng_state, NULL);
}
#ifdef HAVE_LIBNSS
/*
 * Flatten an iovec array into a single newly allocated buffer.
 *
 * On success returns the malloc'ed buffer and stores its total length in
 * *buf_size; the caller owns the buffer and must free() it.
 * Returns NULL if the allocation fails.
 *
 * Fix: the loop index was a signed int compared against the unsigned
 * iov_len parameter; use matching unsigned/size_t types throughout.
 */
static unsigned char *copy_from_iovec(
	const struct iovec *iov,
	unsigned int iov_len,
	size_t *buf_size)
{
	unsigned int i;
	size_t bufptr = 0;
	size_t buflen = 0;
	unsigned char *newbuf;

	for (i = 0; i < iov_len; i++) {
		buflen += iov[i].iov_len;
	}

	newbuf = malloc(buflen);
	if (!newbuf) {
		return NULL;
	}

	for (i = 0; i < iov_len; i++) {
		memcpy(newbuf + bufptr, iov[i].iov_base, iov[i].iov_len);
		bufptr += iov[i].iov_len;
	}

	*buf_size = buflen;
	return newbuf;
}
/*
 * Scatter a contiguous buffer back into an iovec array.
 *
 * At most buf_size bytes are copied.  If the buffer is shorter than the
 * total iovec capacity, the length of the element holding the final byte
 * is shrunk to match and the remaining elements are left untouched.
 *
 * Fixes: bufptr was initialized twice; the loop index was a signed int
 * compared against the unsigned iov_len parameter.
 */
static void copy_to_iovec(
	struct iovec *iov,
	unsigned int iov_len,
	const unsigned char *buf,
	size_t buf_size)
{
	unsigned int i;
	size_t copylen;
	size_t bufptr = 0;

	for (i = 0; i < iov_len; i++) {
		copylen = iov[i].iov_len;
		if (bufptr + copylen > buf_size) {
			copylen = buf_size - bufptr;
		}
		memcpy(iov[i].iov_base, buf + bufptr, copylen);
		bufptr += copylen;
		if (iov[i].iov_len != copylen) {
			/* buffer exhausted mid-element: record the short length */
			iov[i].iov_len = copylen;
			return;
		}
	}
}
/*
 * Initialise the NSS (mode 1) security engine: bring up NSS without a
 * certificate database, locate PKCS#11 slots for the configured cipher
 * and for SHA1-HMAC, and import the private key as both a cipher key and
 * a signing key.  On any failure the function logs and returns, leaving
 * the corresponding nss_sym_key pointers NULL.
 */
static void init_nss_crypto(
struct totemnet_instance *instance)
{
PK11SlotInfo* aes_slot = NULL;
PK11SlotInfo* sha1_slot = NULL;
SECItem key_item;
SECStatus rv;
log_printf(instance->totemnet_log_level_notice,
"Initializing transmit/receive security: NSS AES128CBC/SHA1HMAC (mode 1).\n");
rv = NSS_NoDB_Init(".");
if (rv != SECSuccess)
{
log_printf(instance->totemnet_log_level_security, "NSS initialization failed (err %d)\n",
PR_GetError());
goto out;
}
aes_slot = PK11_GetBestSlot(instance->totem_config->crypto_crypt_type, NULL);
if (aes_slot == NULL)
{
log_printf(instance->totemnet_log_level_security, "Unable to find security slot (err %d)\n",
PR_GetError());
goto out;
}
sha1_slot = PK11_GetBestSlot(CKM_SHA_1_HMAC, NULL);
if (sha1_slot == NULL)
{
log_printf(instance->totemnet_log_level_security, "Unable to find security slot (err %d)\n",
PR_GetError());
goto out;
}
/*
 * Make the private key into a SymKey that we can use
 */
key_item.type = siBuffer;
key_item.data = instance->totem_config->private_key;
key_item.len = 32; /* NOTE(review): 32 bytes is 256 bits, not the 128 bits the original comment claimed — confirm intended key length */
instance->nss_sym_key = PK11_ImportSymKey(aes_slot,
instance->totem_config->crypto_crypt_type,
PK11_OriginUnwrap, CKA_ENCRYPT|CKA_DECRYPT,
&key_item, NULL);
if (instance->nss_sym_key == NULL)
{
log_printf(instance->totemnet_log_level_security, "Failure to import key into NSS (err %d)\n",
PR_GetError());
goto out;
}
instance->nss_sym_key_sign = PK11_ImportSymKey(sha1_slot,
CKM_SHA_1_HMAC,
PK11_OriginUnwrap, CKA_SIGN,
&key_item, NULL);
if (instance->nss_sym_key_sign == NULL) {
log_printf(instance->totemnet_log_level_security, "Failure to import key into NSS (err %d)\n",
PR_GetError());
goto out;
}
out:
return;
}
/*
 * Encrypt the gathered iovec payload with the configured NSS cipher
 * (mode 1) and prepend a security_header carrying the random IV (salt)
 * and an HMAC-SHA1 digest computed over salt + ciphertext.
 *
 * buf receives the complete wire image; *buf_len its final length
 * (ciphertext plus security header).  Returns 0 on success, -1 on any
 * NSS failure.
 *
 * Fix: the original leaked the flattened input copy (inbuf) and the
 * PKCS#11 parameter (nss_sec_param) on several error paths; all
 * intermediate resources are now released via a single cleanup path.
 */
static int encrypt_and_sign_nss (
	struct totemnet_instance *instance,
	unsigned char *buf,
	size_t *buf_len,
	const struct iovec *iovec,
	unsigned int iov_len)
{
	PK11Context* enc_context = NULL;
	SECStatus rv1, rv2;
	int tmp1_outlen;
	unsigned int tmp2_outlen;
	unsigned char *inbuf = NULL;
	unsigned char *data;
	unsigned char *outdata;
	size_t datalen;
	SECItem no_params;
	SECItem iv_item;
	struct security_header *header;
	SECItem *nss_sec_param = NULL;
	unsigned char nss_iv_data[16];
	SECStatus rv;
	int result = -1;

	no_params.type = siBuffer;
	no_params.data = 0;
	no_params.len = 0;
	tmp1_outlen = tmp2_outlen = 0;

	inbuf = copy_from_iovec(iovec, iov_len, &datalen);
	if (!inbuf) {
		log_printf(instance->totemnet_log_level_security, "malloc error copying buffer from iovec\n");
		return -1;
	}

	/* skip the caller-supplied security_header placeholder */
	data = inbuf + sizeof (struct security_header);
	datalen -= sizeof (struct security_header);

	outdata = buf + sizeof (struct security_header);
	header = (struct security_header *)buf;

	/* random IV, also stored in the header as the salt */
	rv = PK11_GenerateRandom (
		nss_iv_data,
		sizeof (nss_iv_data));
	if (rv != SECSuccess) {
		log_printf(instance->totemnet_log_level_security,
			"Failure to generate a random number %d\n",
			PR_GetError());
	}

	memcpy(header->salt, nss_iv_data, sizeof(nss_iv_data));
	iv_item.type = siBuffer;
	iv_item.data = nss_iv_data;
	iv_item.len = sizeof (nss_iv_data);

	nss_sec_param = PK11_ParamFromIV (
		instance->totem_config->crypto_crypt_type,
		&iv_item);
	if (nss_sec_param == NULL) {
		log_printf(instance->totemnet_log_level_security,
			"Failure to set up PKCS11 param (err %d)\n",
			PR_GetError());
		goto out;
	}

	/*
	 * Create cipher context for encryption
	 */
	enc_context = PK11_CreateContextBySymKey (
		instance->totem_config->crypto_crypt_type,
		CKA_ENCRYPT,
		instance->nss_sym_key,
		nss_sec_param);
	if (!enc_context) {
		char err[1024];
		PR_GetErrorText(err);
		err[PR_GetErrorTextLength()] = 0;
		log_printf(instance->totemnet_log_level_security,
			"PK11_CreateContext failed (encrypt) crypt_type=%d (err %d): %s\n",
			instance->totem_config->crypto_crypt_type,
			PR_GetError(), err);
		goto out;
	}

	rv1 = PK11_CipherOp(enc_context, outdata,
		&tmp1_outlen, FRAME_SIZE_MAX - sizeof(struct security_header),
		data, datalen);
	rv2 = PK11_DigestFinal(enc_context, outdata + tmp1_outlen, &tmp2_outlen,
		FRAME_SIZE_MAX - tmp1_outlen);
	PK11_DestroyContext(enc_context, PR_TRUE);
	*buf_len = tmp1_outlen + tmp2_outlen;
	free(inbuf);
	inbuf = NULL;

	if (rv1 != SECSuccess || rv2 != SECSuccess)
		goto out;

	/*
	 * Now do the digest: HMAC over the 16-byte salt plus the
	 * ciphertext (outdata - 16 points at header->salt).
	 */
	enc_context = PK11_CreateContextBySymKey(CKM_SHA_1_HMAC,
		CKA_SIGN, instance->nss_sym_key_sign, &no_params);
	if (!enc_context) {
		char err[1024];
		PR_GetErrorText(err);
		err[PR_GetErrorTextLength()] = 0;
		log_printf(instance->totemnet_log_level_security, "encrypt: PK11_CreateContext failed (digest) err %d: %s\n",
			PR_GetError(), err);
		goto out;
	}
	PK11_DigestBegin(enc_context);
	rv1 = PK11_DigestOp(enc_context, outdata - 16, *buf_len + 16);
	rv2 = PK11_DigestFinal(enc_context, header->hash_digest, &tmp2_outlen, sizeof(header->hash_digest));
	PK11_DestroyContext(enc_context, PR_TRUE);

	if (rv1 != SECSuccess || rv2 != SECSuccess)
		goto out;

	*buf_len = *buf_len + sizeof(struct security_header);
	result = 0;

out:
	free(inbuf);
	if (nss_sec_param) {
		SECITEM_FreeItem(nss_sec_param, PR_TRUE);
	}
	return result;
}
/*
 * Verify the HMAC-SHA1 digest of a received mode 1 (NSS) datagram and,
 * if it matches, decrypt the payload and scatter the plaintext back into
 * the caller's iovec(s).  Returns 0 on success, -1 on any failure.
 *
 * NOTE(review): when iov_len > 1 the flattened copy (inbuf) is leaked on
 * the early error returns before the final free() — worth fixing.
 */
static int authenticate_and_decrypt_nss (
struct totemnet_instance *instance,
struct iovec *iov,
unsigned int iov_len)
{
PK11Context* enc_context = NULL;
SECStatus rv1, rv2;
int tmp1_outlen;
unsigned int tmp2_outlen;
unsigned char outbuf[FRAME_SIZE_MAX];
unsigned char digest[HMAC_HASH_SIZE];
unsigned char *outdata;
int result_len;
unsigned char *data;
unsigned char *inbuf;
size_t datalen;
struct security_header *header = iov[0].iov_base;
SECItem no_params;
SECItem ivdata;
no_params.type = siBuffer;
no_params.data = 0;
no_params.len = 0;
tmp1_outlen = tmp2_outlen = 0;
if (iov_len > 1) {
inbuf = copy_from_iovec(iov, iov_len, &datalen);
if (!inbuf) {
log_printf(instance->totemnet_log_level_security, "malloc error copying buffer from iovec\n");
return -1;
}
}
else {
inbuf = iov[0].iov_base;
datalen = iov[0].iov_len;
}
/*
 * Start 16 bytes before the payload so the digest covers the salt
 * plus the ciphertext, matching what the sender signed.
 */
data = inbuf + sizeof (struct security_header) - 16;
datalen = datalen - sizeof (struct security_header) + 16;
outdata = outbuf + sizeof (struct security_header);
/* Check the digest */
enc_context = PK11_CreateContextBySymKey (
CKM_SHA_1_HMAC, CKA_SIGN,
instance->nss_sym_key_sign,
&no_params);
if (!enc_context) {
char err[1024];
PR_GetErrorText(err);
err[PR_GetErrorTextLength()] = 0;
log_printf(instance->totemnet_log_level_security, "PK11_CreateContext failed (check digest) err %d: %s\n",
PR_GetError(), err);
return -1;
}
PK11_DigestBegin(enc_context);
rv1 = PK11_DigestOp(enc_context, data, datalen);
rv2 = PK11_DigestFinal(enc_context, digest, &tmp2_outlen, sizeof(digest));
PK11_DestroyContext(enc_context, PR_TRUE);
if (rv1 != SECSuccess || rv2 != SECSuccess) {
log_printf(instance->totemnet_log_level_security, "Digest check failed\n");
return -1;
}
if (memcmp(digest, header->hash_digest, tmp2_outlen) != 0) {
log_printf(instance->totemnet_log_level_error, "Digest does not match\n");
return -1;
}
/*
 * Get rid of salt
 */
data += 16;
datalen -= 16;
/* Create cipher context for decryption; the salt doubles as the IV */
ivdata.type = siBuffer;
ivdata.data = header->salt;
ivdata.len = sizeof(header->salt);
enc_context = PK11_CreateContextBySymKey(
instance->totem_config->crypto_crypt_type,
CKA_DECRYPT,
instance->nss_sym_key, &ivdata);
if (!enc_context) {
log_printf(instance->totemnet_log_level_security,
"PK11_CreateContext (decrypt) failed (err %d)\n",
PR_GetError());
return -1;
}
rv1 = PK11_CipherOp(enc_context, outdata, &tmp1_outlen,
sizeof(outbuf) - sizeof (struct security_header),
data, datalen);
if (rv1 != SECSuccess) {
log_printf(instance->totemnet_log_level_security,
"PK11_CipherOp (decrypt) failed (err %d)\n",
PR_GetError());
}
rv2 = PK11_DigestFinal(enc_context, outdata + tmp1_outlen, &tmp2_outlen,
sizeof(outbuf) - tmp1_outlen);
PK11_DestroyContext(enc_context, PR_TRUE);
result_len = tmp1_outlen + tmp2_outlen + sizeof (struct security_header);
/* Copy it back to the buffer */
copy_to_iovec(iov, iov_len, outbuf, result_len);
if (iov_len > 1)
free(inbuf);
if (rv1 != SECSuccess || rv2 != SECSuccess)
return -1;
return 0;
}
#endif
/*
 * Encrypt the gathered iovec payload with the SOBER-128 stream cipher
 * (mode 0) and sign it with HMAC-SHA1, writing the complete wire image
 * (security_header + ciphertext) into buf and its length into *buf_len.
 * iovec[0] is the caller's security_header placeholder and is not
 * copied; the header is built directly in buf.  Always returns 0.
 */
static int encrypt_and_sign_sober (
struct totemnet_instance *instance,
unsigned char *buf,
size_t *buf_len,
const struct iovec *iovec,
unsigned int iov_len)
{
int i;
unsigned char *addr;
unsigned char keys[48];
struct security_header *header;
unsigned char *hmac_key = &keys[32];
unsigned char *cipher_key = &keys[16];
unsigned char *initial_vector = &keys[0];
unsigned long len;
size_t outlen = 0;
hmac_state hmac_st;
prng_state keygen_prng_state;
prng_state stream_prng_state;
prng_state *prng_state_in = &instance->totemnet_prng_state;
header = (struct security_header *)buf;
addr = buf + sizeof (struct security_header);
memset (keys, 0, sizeof (keys));
memset (header->salt, 0, sizeof (header->salt));
/*
 * Generate MAC, CIPHER, IV keys from private key
 */
sober128_read (header->salt, sizeof (header->salt), prng_state_in);
sober128_start (&keygen_prng_state);
sober128_add_entropy (instance->totemnet_private_key,
instance->totemnet_private_key_len,
&keygen_prng_state);
sober128_add_entropy (header->salt, sizeof (header->salt),
&keygen_prng_state);
sober128_read (keys, sizeof (keys), &keygen_prng_state);
/*
 * Setup stream cipher
 */
sober128_start (&stream_prng_state);
sober128_add_entropy (cipher_key, 16, &stream_prng_state);
sober128_add_entropy (initial_vector, 16, &stream_prng_state);
outlen = sizeof (struct security_header);
/*
 * Copy remainder of message, then encrypt it
 * (skip iovec[0]: that is the security_header placeholder)
 */
for (i = 1; i < iov_len; i++) {
memcpy (addr, iovec[i].iov_base, iovec[i].iov_len);
addr += iovec[i].iov_len;
outlen += iovec[i].iov_len;
}
/*
 * Encrypt message by XORing stream cipher data
 */
sober128_read (buf + sizeof (struct security_header),
outlen - sizeof (struct security_header),
&stream_prng_state);
memset (&hmac_st, 0, sizeof (hmac_st));
/*
 * Sign the contents of the message with the hmac key and store signature in message
 * (covers everything after the digest field, i.e. salt + ciphertext)
 */
hmac_init (&hmac_st, DIGEST_SHA1, hmac_key, 16);
hmac_process (&hmac_st,
buf + HMAC_HASH_SIZE,
outlen - HMAC_HASH_SIZE);
len = hash_descriptor[DIGEST_SHA1]->hashsize;
hmac_done (&hmac_st, header->hash_digest, &len);
*buf_len = outlen;
return 0;
}
/*
 * Dispatch encryption to the configured crypto engine.  SOBER is always
 * used when backwards-compatible (ACCEPT_OLD) operation is requested.
 * Returns the engine's result, or -1 if no engine matches.
 */
static int encrypt_and_sign_worker (
	struct totemnet_instance *instance,
	unsigned char *buf,
	size_t *buf_len,
	const struct iovec *iovec,
	unsigned int iov_len)
{
	const struct totem_config *cfg = instance->totem_config;

	if (cfg->crypto_type == TOTEM_CRYPTO_SOBER ||
	    cfg->crypto_accept == TOTEM_CRYPTO_ACCEPT_OLD) {
		return encrypt_and_sign_sober(instance, buf, buf_len, iovec, iov_len);
	}

#ifdef HAVE_LIBNSS
	if (cfg->crypto_type == TOTEM_CRYPTO_NSS) {
		return encrypt_and_sign_nss(instance, buf, buf_len, iovec, iov_len);
	}
#endif

	return -1;
}
/*
 * Strip the trailing crypto-type byte from a received datagram and
 * authenticate/decrypt it with the matching engine.  If that fails,
 * retry treating the whole packet (type byte included) as a SOBER
 * message for interoperability with older aisexec senders.
 * Returns 0 on success, -1 on failure.
 */
static int authenticate_and_decrypt (
	struct totemnet_instance *instance,
	struct iovec *iov,
	unsigned int iov_len)
{
	int res = -1;
	struct iovec *last = &iov[iov_len - 1];
	unsigned char *endbuf = last->iov_base;
	unsigned char type;

	/*
	 * The final byte carries the encryption type; drop it before
	 * decrypting.
	 */
	type = endbuf[last->iov_len - 1];
	last->iov_len -= 1;

	if (type == TOTEM_CRYPTO_SOBER) {
		res = authenticate_and_decrypt_sober(instance, iov, iov_len);
	}

	/*
	 * Higher crypto modes are only attempted when NEW is accepted.
	 */
	if (instance->totem_config->crypto_accept == TOTEM_CRYPTO_ACCEPT_NEW) {
#ifdef HAVE_LIBNSS
		if (type == TOTEM_CRYPTO_NSS) {
			res = authenticate_and_decrypt_nss(instance, iov, iov_len);
		}
#endif
	}

	/*
	 * Fall back: the packet may come from aisexec and carry no type
	 * byte at all, so restore the length and decrypt the whole thing.
	 */
	if (res == -1) {
		last->iov_len += 1;
		res = authenticate_and_decrypt_sober(instance, iov, iov_len);
	}

	return res;
}
/*
 * Initialise the crypto engines.  ACCEPT_NEW requires every available
 * engine; ACCEPT_OLD only ever uses SOBER128, so stop after that.
 */
static void init_crypto(
	struct totemnet_instance *instance)
{
	init_sober_crypto(instance);

	if (instance->totem_config->crypto_accept == TOTEM_CRYPTO_ACCEPT_OLD) {
		return;
	}

#ifdef HAVE_LIBNSS
	init_nss_crypto(instance);
#endif
}
/*
 * Select the transmit crypto mode for an instance.
 * Returns 0 on success, ENOENT for an unknown handle, and -1 when the
 * type is invalid or OLD compatibility mode pins the crypto type.
 */
int totemnet_crypto_set (hdb_handle_t handle,
	unsigned int type)
{
	struct totemnet_instance *instance;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		res = ENOENT;
		goto error_exit;
	}

	if (instance->totem_config->crypto_accept == TOTEM_CRYPTO_ACCEPT_OLD) {
		/* crypto type is fixed while OLD compatibility is selected */
		res = -1;
	} else {
		/*
		 * Validate crypto algorithm
		 */
		switch (type) {
		case TOTEM_CRYPTO_SOBER:
			log_printf(instance->totemnet_log_level_security,
				"Transmit security set to: libtomcrypt SOBER128/SHA1HMAC (mode 0)");
			break;
		case TOTEM_CRYPTO_NSS:
			log_printf(instance->totemnet_log_level_security,
				"Transmit security set to: NSS AES128CBC/SHA1HMAC (mode 1)");
			break;
		default:
			res = -1;
			break;
		}
	}

	hdb_handle_put (&totemnet_instance_database, handle);

error_exit:
	return res;
}
/*
 * Send a datagram to a single node (used for token passing).  When
 * secauth is enabled the payload is encrypted/signed and a trailing byte
 * identifying the crypto mode is appended (0 unless ACCEPT_NEW allows
 * advertising the real mode).  Send errors are intentionally ignored:
 * totemsrp recovers from lost tokens.
 */
static inline void ucast_sendmsg (
struct totemnet_instance *instance,
struct totem_ip_address *system_to,
const void *msg,
unsigned int msg_len)
{
struct msghdr msg_ucast;
int res = 0;
size_t buf_len;
unsigned char sheader[sizeof (struct security_header)];
unsigned char encrypt_data[FRAME_SIZE_MAX];
struct iovec iovec_encrypt[2];
const struct iovec *iovec_sendmsg;
struct sockaddr_storage sockaddr;
struct iovec iovec;
unsigned int iov_len;
int addrlen;
if (instance->totem_config->secauth == 1) {
iovec_encrypt[0].iov_base = sheader;
iovec_encrypt[0].iov_len = sizeof (struct security_header);
iovec_encrypt[1].iov_base = (void *)msg;
iovec_encrypt[1].iov_len = msg_len;
/*
 * Encrypt and digest the message
 */
encrypt_and_sign_worker (
instance,
encrypt_data,
&buf_len,
iovec_encrypt,
2);
if (instance->totem_config->crypto_accept == TOTEM_CRYPTO_ACCEPT_NEW) {
encrypt_data[buf_len++] = instance->totem_config->crypto_type;
}
else {
encrypt_data[buf_len++] = 0;
}
iovec_encrypt[0].iov_base = encrypt_data;
iovec_encrypt[0].iov_len = buf_len;
iovec_sendmsg = &iovec_encrypt[0];
iov_len = 1;
} else {
iovec.iov_base = (void *)msg;
iovec.iov_len = msg_len;
iovec_sendmsg = &iovec;
iov_len = 1;
}
/*
 * Build unicast message
 */
totemip_totemip_to_sockaddr_convert(system_to,
instance->totem_interface->ip_port, &sockaddr, &addrlen);
msg_ucast.msg_name = &sockaddr;
msg_ucast.msg_namelen = addrlen;
msg_ucast.msg_iov = (void *) iovec_sendmsg;
msg_ucast.msg_iovlen = iov_len;
#if !defined(COROSYNC_SOLARIS)
msg_ucast.msg_control = 0;
msg_ucast.msg_controllen = 0;
msg_ucast.msg_flags = 0;
#else
msg_ucast.msg_accrights = NULL;
msg_ucast.msg_accrightslen = 0;
#endif
/*
 * Transmit unicast message
 * An error here is recovered by totemsrp
 */
res = sendmsg (instance->totemnet_sockets.mcast_send, &msg_ucast,
MSG_NOSIGNAL);
}
/*
 * Send a datagram to the configured multicast address.  When secauth is
 * enabled the payload is encrypted/signed and a trailing byte
 * identifying the crypto mode is appended (0 unless ACCEPT_NEW allows
 * advertising the real mode).  Send errors are intentionally ignored:
 * totemsrp recovers from lost messages.
 */
static inline void mcast_sendmsg (
struct totemnet_instance *instance,
const void *msg,
unsigned int msg_len)
{
struct msghdr msg_mcast;
int res = 0;
size_t buf_len;
unsigned char sheader[sizeof (struct security_header)];
unsigned char encrypt_data[FRAME_SIZE_MAX];
struct iovec iovec_encrypt[2];
struct iovec iovec;
const struct iovec *iovec_sendmsg;
struct sockaddr_storage sockaddr;
unsigned int iov_len;
int addrlen;
if (instance->totem_config->secauth == 1) {
iovec_encrypt[0].iov_base = sheader;
iovec_encrypt[0].iov_len = sizeof (struct security_header);
iovec_encrypt[1].iov_base = (void *)msg;
iovec_encrypt[1].iov_len = msg_len;
/*
 * Encrypt and digest the message
 */
encrypt_and_sign_worker (
instance,
encrypt_data,
&buf_len,
iovec_encrypt,
2);
if (instance->totem_config->crypto_accept == TOTEM_CRYPTO_ACCEPT_NEW) {
encrypt_data[buf_len++] = instance->totem_config->crypto_type;
}
else {
encrypt_data[buf_len++] = 0;
}
iovec_encrypt[0].iov_base = encrypt_data;
iovec_encrypt[0].iov_len = buf_len;
iovec_sendmsg = &iovec_encrypt[0];
iov_len = 1;
} else {
iovec.iov_base = (void *)msg;
iovec.iov_len = msg_len;
iovec_sendmsg = &iovec;
iov_len = 1;
}
/*
 * Build multicast message
 */
totemip_totemip_to_sockaddr_convert(&instance->mcast_address,
instance->totem_interface->ip_port, &sockaddr, &addrlen);
msg_mcast.msg_name = &sockaddr;
msg_mcast.msg_namelen = addrlen;
msg_mcast.msg_iov = (void *) iovec_sendmsg;
msg_mcast.msg_iovlen = iov_len;
#if !defined(COROSYNC_SOLARIS)
msg_mcast.msg_control = 0;
msg_mcast.msg_controllen = 0;
msg_mcast.msg_flags = 0;
#else
msg_mcast.msg_accrights = NULL;
msg_mcast.msg_accrightslen = 0;
#endif
/*
 * Transmit multicast message
 * An error here is recovered by totemsrp
 */
res = sendmsg (instance->totemnet_sockets.mcast_send, &msg_mcast,
MSG_NOSIGNAL);
}
static void totemnet_mcast_thread_state_constructor (
void *totemnet_mcast_thread_state_in)
{
struct totemnet_mcast_thread_state *totemnet_mcast_thread_state =
(struct totemnet_mcast_thread_state *)totemnet_mcast_thread_state_in;
memset (totemnet_mcast_thread_state, 0,
sizeof (totemnet_mcast_thread_state));
rng_make_prng (128, PRNG_SOBER,
&totemnet_mcast_thread_state->prng_state, NULL);
}
/*
 * Worker-thread entry: encrypt (if secauth) and transmit one queued
 * multicast message using the thread's private staging buffer.
 * Send errors are intentionally ignored; the comment below notes they
 * are recovered at the totem layer.
 * NOTE(review): unlike mcast_sendmsg(), no trailing crypto-type byte is
 * appended here — confirm this asymmetry is intended.
 */
static void totemnet_mcast_worker_fn (void *thread_state, void *work_item_in)
{
struct work_item *work_item = (struct work_item *)work_item_in;
struct totemnet_mcast_thread_state *totemnet_mcast_thread_state =
(struct totemnet_mcast_thread_state *)thread_state;
struct totemnet_instance *instance = work_item->instance;
struct msghdr msg_mcast;
unsigned char sheader[sizeof (struct security_header)];
int res = 0;
size_t buf_len;
struct iovec iovec_enc[2];
struct iovec iovec;
struct sockaddr_storage sockaddr;
int addrlen;
if (instance->totem_config->secauth == 1) {
iovec_enc[0].iov_base = sheader;
iovec_enc[0].iov_len = sizeof (struct security_header);
iovec_enc[1].iov_base = (void *)work_item->msg;
iovec_enc[1].iov_len = work_item->msg_len;
/*
 * Encrypt and digest the message
 */
encrypt_and_sign_worker (
instance,
totemnet_mcast_thread_state->iobuf,
&buf_len,
iovec_enc, 2);
iovec.iov_base = totemnet_mcast_thread_state->iobuf;
iovec.iov_len = buf_len;
} else {
iovec.iov_base = (void *)work_item->msg;
iovec.iov_len = work_item->msg_len;
}
totemip_totemip_to_sockaddr_convert(&instance->mcast_address,
instance->totem_interface->ip_port, &sockaddr, &addrlen);
msg_mcast.msg_name = &sockaddr;
msg_mcast.msg_namelen = addrlen;
msg_mcast.msg_iov = &iovec;
msg_mcast.msg_iovlen = 1;
#if !defined(COROSYNC_SOLARIS)
msg_mcast.msg_control = 0;
msg_mcast.msg_controllen = 0;
msg_mcast.msg_flags = 0;
#else
msg_mcast.msg_accrights = NULL;
msg_mcast.msg_accrightslen = 0;
#endif
/*
 * Transmit multicast message
 * An error here is recovered by totemnet
 */
res = sendmsg (instance->totemnet_sockets.mcast_send, &msg_mcast,
MSG_NOSIGNAL);
}
/*
 * Tear down an instance: stop the worker threads, then close every open
 * socket and remove its poll dispatch entry.
 * Returns 0 on success or ENOENT when the handle is unknown.
 */
int totemnet_finalize (
	hdb_handle_t handle)
{
	struct totemnet_instance *instance;
	struct totemnet_socket *sockets;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		res = ENOENT;
		goto error_exit;
	}

	worker_thread_group_exit (&instance->worker_thread_group);

	sockets = &instance->totemnet_sockets;

	if (sockets->mcast_recv > 0) {
		close (sockets->mcast_recv);
		poll_dispatch_delete (instance->totemnet_poll_handle,
			sockets->mcast_recv);
	}
	if (sockets->mcast_send > 0) {
		close (sockets->mcast_send);
	}
	if (sockets->token > 0) {
		close (sockets->token);
		poll_dispatch_delete (instance->totemnet_poll_handle,
			sockets->token);
	}

	hdb_handle_put (&totemnet_instance_database, handle);

error_exit:
	return (res);
}
/*
 * Only designed to work with a message with one iov
 *
 * Poll callback for the multicast-receive and token sockets: read one
 * datagram (non-blocking), authenticate/decrypt it when secauth is on,
 * and hand the plaintext to the totemnet_deliver_fn upcall.  Always
 * returns 0 so the fd stays registered; the iovec length is restored to
 * FRAME_SIZE_MAX before returning so the buffer is reusable.
 */
static int net_deliver_fn (
hdb_handle_t handle,
int fd,
int revents,
void *data)
{
struct totemnet_instance *instance = (struct totemnet_instance *)data;
struct msghdr msg_recv;
struct iovec *iovec;
struct security_header *security_header;
struct sockaddr_storage system_from;
int bytes_received;
int res = 0;
unsigned char *msg_offset;
unsigned int size_delv;
/* while flushing, receive into the dedicated flush buffer */
if (instance->flushing == 1) {
iovec = &instance->totemnet_iov_recv_flush;
} else {
iovec = &instance->totemnet_iov_recv;
}
/*
 * Receive datagram
 */
msg_recv.msg_name = &system_from;
msg_recv.msg_namelen = sizeof (struct sockaddr_storage);
msg_recv.msg_iov = iovec;
msg_recv.msg_iovlen = 1;
#if !defined(COROSYNC_SOLARIS)
msg_recv.msg_control = 0;
msg_recv.msg_controllen = 0;
msg_recv.msg_flags = 0;
#else
msg_recv.msg_accrights = NULL;
msg_recv.msg_accrightslen = 0;
#endif
bytes_received = recvmsg (fd, &msg_recv, MSG_NOSIGNAL | MSG_DONTWAIT);
if (bytes_received == -1) {
return (0);
} else {
instance->stats_recv += bytes_received;
}
/* a secauth packet must at least hold the security header */
if ((instance->totem_config->secauth == 1) &&
(bytes_received < sizeof (struct security_header))) {
log_printf (instance->totemnet_log_level_security, "Received message is too short... ignoring %d.\n", bytes_received);
return (0);
}
security_header = (struct security_header *)iovec->iov_base;
iovec->iov_len = bytes_received;
if (instance->totem_config->secauth == 1) {
/*
 * Authenticate and if authenticated, decrypt datagram
 */
res = authenticate_and_decrypt (instance, iovec, 1);
if (res == -1) {
log_printf (instance->totemnet_log_level_security, "Received message has invalid digest... ignoring.\n");
log_printf (instance->totemnet_log_level_security,
"Invalid packet data\n");
iovec->iov_len = FRAME_SIZE_MAX;
return 0;
}
msg_offset = (unsigned char *)iovec->iov_base +
sizeof (struct security_header);
size_delv = bytes_received - sizeof (struct security_header);
} else {
msg_offset = iovec->iov_base;
size_delv = bytes_received;
}
/*
 * Handle incoming message
 */
instance->totemnet_deliver_fn (
instance->context,
msg_offset,
size_delv);
iovec->iov_len = FRAME_SIZE_MAX;
return (0);
}
/*
 * Thin wrapper around totemip_iface_check() that supplies this
 * instance's clear_node_high_bit configuration.
 */
static int netif_determine (
	struct totemnet_instance *instance,
	struct totem_ip_address *bindnet,
	struct totem_ip_address *bound_to,
	int *interface_up,
	int *interface_num)
{
	return (totemip_iface_check (bindnet, bound_to,
		interface_up, interface_num,
		instance->totem_config->clear_node_high_bit));
}
/*
 * If the interface is up, the sockets for totem are built. If the interface is down
 * this function is requeued in the timer list to retry building the sockets later.
 *
 * State machine: while the interface is down we bind to localhost
 * (BIND_STATE_LOOPBACK) and keep polling; once it is up we rebind to the
 * real address (BIND_STATE_REGULAR) and, in single-member rings, keep a
 * timer running to detect the interface going down again.
 */
static void timer_function_netif_check_timeout (
void *data)
{
struct totemnet_instance *instance = (struct totemnet_instance *)data;
int res;
int interface_up;
int interface_num;
struct totem_ip_address *bind_address;
/*
 * Build sockets for every interface
 */
netif_determine (instance,
&instance->totem_interface->bindnet,
&instance->totem_interface->boundto,
&interface_up, &interface_num);
/*
 * If the network interface isn't back up and we are already
 * in loopback mode, add timer to check again and return
 */
if ((instance->netif_bind_state == BIND_STATE_LOOPBACK &&
interface_up == 0) ||
(instance->my_memb_entries == 1 &&
instance->netif_bind_state == BIND_STATE_REGULAR &&
interface_up == 1)) {
poll_timer_add (instance->totemnet_poll_handle,
instance->totem_config->downcheck_timeout,
(void *)instance,
timer_function_netif_check_timeout,
&instance->timer_netif_check_timeout);
/*
 * Add a timer to check for a downed regular interface
 */
return;
}
/* close any sockets from the previous binding before rebuilding */
if (instance->totemnet_sockets.mcast_recv > 0) {
close (instance->totemnet_sockets.mcast_recv);
poll_dispatch_delete (instance->totemnet_poll_handle,
instance->totemnet_sockets.mcast_recv);
}
if (instance->totemnet_sockets.mcast_send > 0) {
close (instance->totemnet_sockets.mcast_send);
}
if (instance->totemnet_sockets.token > 0) {
close (instance->totemnet_sockets.token);
poll_dispatch_delete (instance->totemnet_poll_handle,
instance->totemnet_sockets.token);
}
if (interface_up == 0) {
/*
 * Interface is not up
 */
instance->netif_bind_state = BIND_STATE_LOOPBACK;
bind_address = &localhost;
/*
 * Add a timer to retry building interfaces and request memb_gather_enter
 */
poll_timer_add (instance->totemnet_poll_handle,
instance->totem_config->downcheck_timeout,
(void *)instance,
timer_function_netif_check_timeout,
&instance->timer_netif_check_timeout);
} else {
/*
 * Interface is up
 */
instance->netif_bind_state = BIND_STATE_REGULAR;
bind_address = &instance->totem_interface->bindnet;
}
/*
 * Create and bind the multicast and unicast sockets
 * NOTE(review): res from totemnet_build_sockets() is never checked.
 */
res = totemnet_build_sockets (instance,
&instance->mcast_address,
bind_address,
&instance->totemnet_sockets,
&instance->totem_interface->boundto);
poll_dispatch_add (
instance->totemnet_poll_handle,
instance->totemnet_sockets.mcast_recv,
POLLIN, instance, net_deliver_fn);
poll_dispatch_add (
instance->totemnet_poll_handle,
instance->totemnet_sockets.token,
POLLIN, instance, net_deliver_fn);
totemip_copy (&instance->my_id, &instance->totem_interface->boundto);
/*
 * This reports changes in the interface to the user and totemsrp
 */
if (instance->netif_bind_state == BIND_STATE_REGULAR) {
if (instance->netif_state_report & NETIF_STATE_REPORT_UP) {
log_printf (instance->totemnet_log_level_notice,
"The network interface [%s] is now up.\n",
totemip_print (&instance->totem_interface->boundto));
instance->netif_state_report = NETIF_STATE_REPORT_DOWN;
instance->totemnet_iface_change_fn (instance->context, &instance->my_id);
}
/*
 * Add a timer to check for interface going down in single membership
 */
if (instance->my_memb_entries == 1) {
poll_timer_add (instance->totemnet_poll_handle,
instance->totem_config->downcheck_timeout,
(void *)instance,
timer_function_netif_check_timeout,
&instance->timer_netif_check_timeout);
}
} else {
if (instance->netif_state_report & NETIF_STATE_REPORT_DOWN) {
log_printf (instance->totemnet_log_level_notice,
"The network interface is down.\n");
instance->totemnet_iface_change_fn (instance->context, &instance->my_id);
}
instance->netif_state_report = NETIF_STATE_REPORT_UP;
}
}
/*
 * Check if an interface is down and reconfigure
 * totemnet waiting for it to come back up
 *
 * Simply runs the netif check/rebind state machine once immediately.
 */
static void netif_down_check (struct totemnet_instance *instance)
{
timer_function_netif_check_timeout (instance);
}
/*
 * Raise the socket's priority to TC_PRIO_INTERACTIVE so totem traffic
 * is not queued behind other local traffic.  Only effective where the
 * platform provides SO_PRIORITY (Linux); elsewhere this is a no-op.
 */
static void totemnet_traffic_control_set(struct totemnet_instance *instance, int sock)
{
#ifdef SO_PRIORITY
	int prio = 6; /* TC_PRIO_INTERACTIVE */

	if (setsockopt(sock, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(int)) != 0) {
		log_printf (instance->totemnet_log_level_warning, "Could not set traffic priority. (%s)\n", strerror (errno));
	}
#endif
}
/*
 * Create and configure the three UDP sockets totemnet uses:
 *   mcast_recv - bound to the multicast address, receives cluster traffic
 *   mcast_send - bound to the local address (port - 1), transmits multicast
 *   token      - bound to the local address, token send/receive
 * All sockets are made non-blocking with SO_REUSEADDR.  Group membership
 * is joined (or broadcast enabled), multicast loopback is turned on, and
 * send/receive are pinned to the chosen interface.
 *
 * Returns 0 on success, -1 on any failure.
 *
 * Fixes over the previous revision:
 *  - perror messages corrected ("turn off loopback" was printed while
 *    ENABLING loopback; "setp" typo)
 *  - res initialized before the loopback switch so a family with no
 *    matching case can not leave a stale value behind
 */
static int totemnet_build_sockets_ip (
	struct totemnet_instance *instance,
	struct totem_ip_address *mcast_address,
	struct totem_ip_address *bindnet_address,
	struct totemnet_socket *sockets,
	struct totem_ip_address *bound_to,
	int interface_num)
{
	struct sockaddr_storage sockaddr;
	struct ipv6_mreq mreq6;
	struct ip_mreq mreq;
	struct sockaddr_storage mcast_ss, boundto_ss;
	struct sockaddr_in6 *mcast_sin6 = (struct sockaddr_in6 *)&mcast_ss;
	struct sockaddr_in *mcast_sin = (struct sockaddr_in *)&mcast_ss;
	struct sockaddr_in *boundto_sin = (struct sockaddr_in *)&boundto_ss;
	unsigned int sendbuf_size;
	unsigned int recvbuf_size;
	unsigned int optlen = sizeof (sendbuf_size);
	int addrlen;
	int res;
	int flag;

	/*
	 * Create multicast recv socket
	 */
	sockets->mcast_recv = socket (bindnet_address->family, SOCK_DGRAM, 0);
	if (sockets->mcast_recv == -1) {
		perror ("socket");
		return (-1);
	}
	totemip_nosigpipe (sockets->mcast_recv);
	res = fcntl (sockets->mcast_recv, F_SETFL, O_NONBLOCK);
	if (res == -1) {
		log_printf (instance->totemnet_log_level_warning, "Could not set non-blocking operation on multicast socket: %s\n", strerror (errno));
		return (-1);
	}

	/*
	 * Force reuse
	 */
	flag = 1;
	if (setsockopt(sockets->mcast_recv, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof (flag)) < 0) {
		perror("setsockopt reuseaddr");
		return (-1);
	}

	/*
	 * Bind to multicast socket used for multicast receives
	 */
	totemip_totemip_to_sockaddr_convert(mcast_address,
		instance->totem_interface->ip_port, &sockaddr, &addrlen);
	res = bind (sockets->mcast_recv, (struct sockaddr *)&sockaddr, addrlen);
	if (res == -1) {
		perror ("bind mcast recv socket failed");
		return (-1);
	}

	/*
	 * Setup mcast send socket
	 */
	sockets->mcast_send = socket (bindnet_address->family, SOCK_DGRAM, 0);
	if (sockets->mcast_send == -1) {
		perror ("socket");
		return (-1);
	}
	totemip_nosigpipe (sockets->mcast_send);
	res = fcntl (sockets->mcast_send, F_SETFL, O_NONBLOCK);
	if (res == -1) {
		log_printf (instance->totemnet_log_level_warning, "Could not set non-blocking operation on multicast socket: %s\n", strerror (errno));
		return (-1);
	}

	/*
	 * Force reuse
	 */
	flag = 1;
	if (setsockopt(sockets->mcast_send, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof (flag)) < 0) {
		perror("setsockopt reuseaddr");
		return (-1);
	}

	/*
	 * Bind the send socket to the local address on ip_port - 1 so that
	 * replies/ICMP relate to the sender, not the mcast group.
	 */
	totemip_totemip_to_sockaddr_convert(bound_to, instance->totem_interface->ip_port - 1,
		&sockaddr, &addrlen);
	res = bind (sockets->mcast_send, (struct sockaddr *)&sockaddr, addrlen);
	if (res == -1) {
		perror ("bind mcast send socket failed");
		return (-1);
	}

	/*
	 * Setup unicast socket
	 */
	sockets->token = socket (bindnet_address->family, SOCK_DGRAM, 0);
	if (sockets->token == -1) {
		perror ("socket2");
		return (-1);
	}
	totemip_nosigpipe (sockets->token);
	res = fcntl (sockets->token, F_SETFL, O_NONBLOCK);
	if (res == -1) {
		log_printf (instance->totemnet_log_level_warning, "Could not set non-blocking operation on token socket: %s\n", strerror (errno));
		return (-1);
	}

	/*
	 * Force reuse
	 */
	flag = 1;
	if (setsockopt(sockets->token, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof (flag)) < 0) {
		perror("setsockopt reuseaddr");
		return (-1);
	}

	/*
	 * Bind to unicast socket used for token send/receives
	 * This has the side effect of binding to the correct interface
	 */
	totemip_totemip_to_sockaddr_convert(bound_to, instance->totem_interface->ip_port, &sockaddr, &addrlen);
	res = bind (sockets->token, (struct sockaddr *)&sockaddr, addrlen);
	if (res == -1) {
		perror ("bind token socket failed");
		return (-1);
	}

	/*
	 * Set buffer sizes to avoid overruns.  These are best effort: a
	 * failure to grow the buffers is not fatal, so the set results are
	 * deliberately not checked; the effective sizes are logged below.
	 */
	recvbuf_size = MCAST_SOCKET_BUFFER_SIZE;
	sendbuf_size = MCAST_SOCKET_BUFFER_SIZE;
	(void) setsockopt (sockets->mcast_recv, SOL_SOCKET, SO_RCVBUF, &recvbuf_size, optlen);
	(void) setsockopt (sockets->mcast_send, SOL_SOCKET, SO_SNDBUF, &sendbuf_size, optlen);

	res = getsockopt (sockets->mcast_recv, SOL_SOCKET, SO_RCVBUF, &recvbuf_size, &optlen);
	if (res == 0) {
		log_printf (instance->totemnet_log_level_notice,
			"Receive multicast socket recv buffer size (%d bytes).\n", recvbuf_size);
	}
	res = getsockopt (sockets->mcast_send, SOL_SOCKET, SO_SNDBUF, &sendbuf_size, &optlen);
	if (res == 0) {
		log_printf (instance->totemnet_log_level_notice,
			"Transmit multicast socket send buffer size (%d bytes).\n", sendbuf_size);
	}

	/*
	 * Join group membership on socket (or enable broadcast if configured)
	 */
	totemip_totemip_to_sockaddr_convert(mcast_address, instance->totem_interface->ip_port, &mcast_ss, &addrlen);
	totemip_totemip_to_sockaddr_convert(bound_to, instance->totem_interface->ip_port, &boundto_ss, &addrlen);

	if (instance->totem_config->broadcast_use == 1) {
		unsigned int broadcast = 1;

		if ((setsockopt(sockets->mcast_recv, SOL_SOCKET,
			SO_BROADCAST, &broadcast, sizeof (broadcast))) == -1) {
			perror("setting broadcast option");
			return (-1);
		}
		if ((setsockopt(sockets->mcast_send, SOL_SOCKET,
			SO_BROADCAST, &broadcast, sizeof (broadcast))) == -1) {
			perror("setting broadcast option");
			return (-1);
		}
	} else {
		switch (bindnet_address->family) {
		case AF_INET:
			memset(&mreq, 0, sizeof(mreq));
			mreq.imr_multiaddr.s_addr = mcast_sin->sin_addr.s_addr;
			mreq.imr_interface.s_addr = boundto_sin->sin_addr.s_addr;
			res = setsockopt (sockets->mcast_recv, IPPROTO_IP, IP_ADD_MEMBERSHIP,
				&mreq, sizeof (mreq));
			if (res == -1) {
				perror ("join ipv4 multicast group failed");
				return (-1);
			}
			break;
		case AF_INET6:
			memset(&mreq6, 0, sizeof(mreq6));
			memcpy(&mreq6.ipv6mr_multiaddr, &mcast_sin6->sin6_addr, sizeof(struct in6_addr));
			mreq6.ipv6mr_interface = interface_num;
			res = setsockopt (sockets->mcast_recv, IPPROTO_IPV6, IPV6_JOIN_GROUP,
				&mreq6, sizeof (mreq6));
			if (res == -1) {
				perror ("join ipv6 multicast group failed");
				return (-1);
			}
			break;
		}
	}

	/*
	 * Turn on multicast loopback so the local node also receives its
	 * own multicasts.  NOTE(review): for AF_INET, IP_MULTICAST_LOOP
	 * traditionally takes an unsigned char; passing an int works on
	 * Linux but may fail elsewhere - confirm on target platforms.
	 */
	flag = 1;
	res = 0;	/* stay 0 if the family matches no case below */
	switch (bindnet_address->family) {
	case AF_INET:
		res = setsockopt (sockets->mcast_send, IPPROTO_IP, IP_MULTICAST_LOOP,
			&flag, sizeof (flag));
		break;
	case AF_INET6:
		res = setsockopt (sockets->mcast_send, IPPROTO_IPV6, IPV6_MULTICAST_LOOP,
			&flag, sizeof (flag));
	}
	if (res == -1) {
		perror ("turn on multicast loopback");
		return (-1);
	}

	/*
	 * Set multicast packets TTL (hop limit) for IPv6
	 */
	if (bindnet_address->family == AF_INET6) {
		flag = 255;
		res = setsockopt (sockets->mcast_send, IPPROTO_IPV6, IPV6_MULTICAST_HOPS,
			&flag, sizeof (flag));
		if (res == -1) {
			perror ("set mcast hops");
			return (-1);
		}
	}

	/*
	 * Bind to a specific interface for multicast send and receive
	 */
	switch (bindnet_address->family) {
	case AF_INET:
		if (setsockopt (sockets->mcast_send, IPPROTO_IP, IP_MULTICAST_IF,
			&boundto_sin->sin_addr, sizeof (boundto_sin->sin_addr)) < 0) {
			perror ("cannot select interface");
			return (-1);
		}
		if (setsockopt (sockets->mcast_recv, IPPROTO_IP, IP_MULTICAST_IF,
			&boundto_sin->sin_addr, sizeof (boundto_sin->sin_addr)) < 0) {
			perror ("cannot select interface");
			return (-1);
		}
		break;
	case AF_INET6:
		if (setsockopt (sockets->mcast_send, IPPROTO_IPV6, IPV6_MULTICAST_IF,
			&interface_num, sizeof (interface_num)) < 0) {
			perror ("cannot select interface");
			return (-1);
		}
		if (setsockopt (sockets->mcast_recv, IPPROTO_IPV6, IPV6_MULTICAST_IF,
			&interface_num, sizeof (interface_num)) < 0) {
			perror ("cannot select interface");
			return (-1);
		}
		break;
	}

	return 0;
}
/*
 * Resolve the local interface for bindnet_address, record it as our
 * identity, then build the totemnet sockets on that interface.
 * Returns 0 on success, -1 on failure.
 */
static int totemnet_build_sockets (
	struct totemnet_instance *instance,
	struct totem_ip_address *mcast_address,
	struct totem_ip_address *bindnet_address,
	struct totemnet_socket *sockets,
	struct totem_ip_address *bound_to)
{
	int interface_num;
	int interface_up;
	int result;

	/*
	 * Determine the ip address bound to and the interface name
	 */
	result = netif_determine (instance, bindnet_address, bound_to,
		&interface_up, &interface_num);
	if (result == -1) {
		return (-1);
	}

	totemip_copy (&instance->my_id, bound_to);

	result = totemnet_build_sockets_ip (instance, mcast_address,
		bindnet_address, sockets, bound_to, interface_num);

	/* We only send out of the token socket */
	totemnet_traffic_control_set (instance, sockets->token);

	return result;
}
/*
* Totem Network interface - also does encryption/decryption
* depends on poll abstraction, POSIX, IPV4
*/
/*
* Create an instance
*/
/*
 * Create a totemnet instance: allocate its handle, configure logging and
 * crypto from totem_config, bind it to the requested interface entry and
 * kick off the initial interface down-check.
 *
 * Returns 0 on success, -1 on failure.
 *
 * BUGFIX: the previous revision shared one exit label for success and for
 * hdb_handle_create failure; on create failure it called hdb_handle_put()
 * on a never-created handle and returned 0 (success).  Create failure now
 * returns -1 without touching the handle.
 */
int totemnet_initialize (
	hdb_handle_t poll_handle,
	hdb_handle_t *handle,
	struct totem_config *totem_config,
	int interface_no,
	void *context,
	void (*deliver_fn) (
		void *context,
		const void *msg,
		unsigned int msg_len),
	void (*iface_change_fn) (
		void *context,
		const struct totem_ip_address *iface_address))
{
	struct totemnet_instance *instance;
	unsigned int res;

	res = hdb_handle_create (&totemnet_instance_database,
		sizeof (struct totemnet_instance), handle);
	if (res != 0) {
		return (-1);
	}
	res = hdb_handle_get (&totemnet_instance_database, *handle,
		(void *)&instance);
	if (res != 0) {
		goto error_destroy;
	}

	totemnet_instance_initialize (instance);

	instance->totem_config = totem_config;

	/*
	 * Configure logging
	 */
	/* NOTE(review): security level is hard-coded to 1 instead of taking
	 * totem_config->totem_logging_configuration.log_level_security -
	 * confirm this override is still intentional. */
	instance->totemnet_log_level_security = 1; //totem_config->totem_logging_configuration.log_level_security;
	instance->totemnet_log_level_error = totem_config->totem_logging_configuration.log_level_error;
	instance->totemnet_log_level_warning = totem_config->totem_logging_configuration.log_level_warning;
	instance->totemnet_log_level_notice = totem_config->totem_logging_configuration.log_level_notice;
	instance->totemnet_log_level_debug = totem_config->totem_logging_configuration.log_level_debug;
	instance->totemnet_subsys_id = totem_config->totem_logging_configuration.log_subsys_id;
	instance->totemnet_log_printf = totem_config->totem_logging_configuration.log_printf;

	/*
	 * Initialize random number generator for later use to generate salt
	 */
	memcpy (instance->totemnet_private_key, totem_config->private_key,
		totem_config->private_key_len);
	instance->totemnet_private_key_len = totem_config->private_key_len;
	init_crypto (instance);

	/*
	 * Initialize local variables for totemnet
	 */
	instance->totem_interface = &totem_config->interfaces[interface_no];
	totemip_copy (&instance->mcast_address, &instance->totem_interface->mcast_addr);
	memset (instance->iov_buffer, 0, FRAME_SIZE_MAX);

	/*
	 * If threaded send requested, initialize thread group data structure
	 */
	if (totem_config->threads) {
		worker_thread_group_init (
			&instance->worker_thread_group,
			totem_config->threads, 128,
			sizeof (struct work_item),
			sizeof (struct totemnet_mcast_thread_state),
			totemnet_mcast_thread_state_constructor,
			totemnet_mcast_worker_fn);
	}

	instance->totemnet_poll_handle = poll_handle;

	instance->totem_interface->bindnet.nodeid = instance->totem_config->node_id;

	instance->context = context;
	instance->totemnet_deliver_fn = deliver_fn;
	instance->totemnet_iface_change_fn = iface_change_fn;
	instance->handle = *handle;

	totemip_localhost (instance->mcast_address.family, &localhost);

	netif_down_check (instance);

	hdb_handle_put (&totemnet_instance_database, *handle);
	return (0);

error_destroy:
	hdb_handle_destroy (&totemnet_instance_database, *handle);
	return (-1);
}
/*
 * Record the current processor (membership) count.  With a single-node
 * membership the interface down-check timer is (re)armed, because no peer
 * traffic exists to reveal a dead interface; with more nodes it is removed.
 * Returns 0 on success, ENOENT for an unknown handle.
 */
int totemnet_processor_count_set (
	hdb_handle_t handle,
	int processor_count)
{
	struct totemnet_instance *instance;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	instance->my_memb_entries = processor_count;
	poll_timer_delete (instance->totemnet_poll_handle,
		instance->timer_netif_check_timeout);
	if (processor_count == 1) {
		poll_timer_add (instance->totemnet_poll_handle,
			instance->totem_config->downcheck_timeout,
			(void *)instance,
			timer_function_netif_check_timeout,
			&instance->timer_netif_check_timeout);
	}
	hdb_handle_put (&totemnet_instance_database, handle);

	return (0);
}
/*
 * Drain every datagram currently queued on the multicast receive socket,
 * delivering each through net_deliver_fn.  The flushing flag is raised for
 * the duration so the delivery path knows this is a synchronous drain.
 * Returns 0 on success, ENOENT for an unknown handle.
 */
int totemnet_recv_flush (hdb_handle_t handle)
{
	struct totemnet_instance *instance;
	struct pollfd ufd;
	int nfds;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	instance->flushing = 1;

	for (;;) {
		ufd.fd = instance->totemnet_sockets.mcast_recv;
		ufd.events = POLLIN;
		/* zero timeout: only look at what is already queued */
		nfds = poll (&ufd, 1, 0);
		if (nfds != 1) {
			break;
		}
		if (ufd.revents & POLLIN) {
			net_deliver_fn (0, instance->totemnet_sockets.mcast_recv,
				ufd.revents, instance);
		}
	}

	instance->flushing = 0;

	hdb_handle_put (&totemnet_instance_database, handle);
	return (0);
}
/*
 * Block until the worker-thread send group has drained its queued work.
 * Returns 0 on success, ENOENT for an unknown handle.
 */
int totemnet_send_flush (hdb_handle_t handle)
{
	struct totemnet_instance *instance;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	worker_thread_group_wait (&instance->worker_thread_group);

	hdb_handle_put (&totemnet_instance_database, handle);
	return (0);
}
/*
 * Unicast the token message to the currently configured token target
 * (see totemnet_token_target_set).
 * Returns 0 on success, ENOENT for an unknown handle.
 */
int totemnet_token_send (
	hdb_handle_t handle,
	const void *msg,
	unsigned int msg_len)
{
	struct totemnet_instance *instance;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	ucast_sendmsg (instance, &instance->token_target, msg, msg_len);

	hdb_handle_put (&totemnet_instance_database, handle);
	return (0);
}
/*
 * Multicast a message synchronously on the caller's thread, bypassing the
 * worker-thread queue.  Returns 0 on success, ENOENT for an unknown handle.
 */
int totemnet_mcast_flush_send (
	hdb_handle_t handle,
	const void *msg,
	unsigned int msg_len)
{
	struct totemnet_instance *instance;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	mcast_sendmsg (instance, msg, msg_len);

	hdb_handle_put (&totemnet_instance_database, handle);
	return (0);
}
/*
 * Multicast a message, using the worker-thread group when threaded sends
 * are configured and sending inline otherwise.
 * Returns 0 on success, ENOENT for an unknown handle.
 */
int totemnet_mcast_noflush_send (
	hdb_handle_t handle,
	const void *msg,
	unsigned int msg_len)
{
	struct totemnet_instance *instance;
	struct work_item work_item;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	if (instance->totem_config->threads == 0) {
		mcast_sendmsg (instance, msg, msg_len);
	} else {
		/*
		 * work_item lives on this stack frame; presumably
		 * worker_thread_group_work_add copies it into the queue -
		 * verify against its implementation.
		 */
		work_item.msg = msg;
		work_item.msg_len = msg_len;
		work_item.instance = instance;
		worker_thread_group_work_add (&instance->worker_thread_group,
			&work_item);
	}

	hdb_handle_put (&totemnet_instance_database, handle);
	return (0);
}
/*
 * Force an immediate interface up/down re-evaluation for this instance.
 * Returns 0 on success, ENOENT for an unknown handle.
 */
extern int totemnet_iface_check (hdb_handle_t handle)
{
	struct totemnet_instance *instance;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	timer_function_netif_check_timeout (instance);

	hdb_handle_put (&totemnet_instance_database, handle);
	return (0);
}
/*
 * Shrink the configured net_mtu by the per-datagram overhead totemnet adds:
 * the UDP/IP headers, plus the security header when authentication is on.
 */
extern void totemnet_net_mtu_adjust (struct totem_config *totem_config)
{
#define UDPIP_HEADER_SIZE (20 + 8) /* 20 bytes for ip 8 bytes for udp */
	unsigned int overhead = UDPIP_HEADER_SIZE;

	if (totem_config->secauth == 1) {
		overhead += sizeof (struct security_header);
	}
	totem_config->net_mtu -= overhead;
}
/*
 * Return a printable form of this instance's bound address, or a fixed
 * error string for an unknown handle.  The returned pointer comes from
 * totemip_print and must not be freed by the caller.
 */
const char *totemnet_iface_print (hdb_handle_t handle) {
	struct totemnet_instance *instance;
	const char *ret_char;
	int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return ("Invalid totemnet handle");
	}

	ret_char = totemip_print (&instance->my_id);

	hdb_handle_put (&totemnet_instance_database, handle);
	return (ret_char);
}
/*
 * Copy this instance's bound address into *addr.
 * Returns 0 on success, or the hdb_handle_get error code for an unknown
 * handle (addr is untouched in that case).
 */
int totemnet_iface_get (
	hdb_handle_t handle,
	struct totem_ip_address *addr)
{
	struct totemnet_instance *instance;
	unsigned int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res == 0) {
		memcpy (addr, &instance->my_id, sizeof (struct totem_ip_address));
		hdb_handle_put (&totemnet_instance_database, handle);
	}

	return (res);
}
/*
 * Record the address future token sends (totemnet_token_send) unicast to.
 * Returns 0 on success, or the hdb_handle_get error code for an unknown
 * handle.
 */
int totemnet_token_target_set (
	hdb_handle_t handle,
	const struct totem_ip_address *token_target)
{
	struct totemnet_instance *instance;
	unsigned int res;

	res = hdb_handle_get (&totemnet_instance_database, handle,
		(void *)&instance);
	if (res == 0) {
		memcpy (&instance->token_target, token_target,
			sizeof (struct totem_ip_address));
		hdb_handle_put (&totemnet_instance_database, handle);
	}

	return (res);
}
diff --git a/exec/totempg.c b/exec/totempg.c
index 7565805a..7d24ae10 100644
--- a/exec/totempg.c
+++ b/exec/totempg.c
@@ -1,1323 +1,1329 @@
/*
* Copyright (c) 2003-2005 MontaVista Software, Inc.
* Copyright (c) 2005 OSDL.
* Copyright (c) 2006-2009 Red Hat, Inc.
*
* All rights reserved.
*
* Author: Steven Dake (sdake@redhat.com)
* Author: Mark Haverkamp (markh@osdl.org)
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* FRAGMENTATION AND PACKING ALGORITHM:
*
* Assemble the entire message into one buffer
* if full fragment
* store fragment into lengths list
* for each full fragment
* multicast fragment
* set length and fragment fields of pg message
* store remaining multicast into head of fragmentation data and set lens field
*
* If a message exceeds the maximum packet size allowed by the totem
* single ring protocol, the protocol could lose forward progress.
* Statically calculating the allowed data amount doesn't work because
* the amount of data allowed depends on the number of fragments in
* each message. In this implementation, the maximum fragment size
* is dynamically calculated for each fragment added to the message.
* It is possible for a message to be two bytes short of the maximum
* packet size. This occurs when a message or collection of
* messages + the mcast header + the lens are two bytes short of the
* end of the packet. Since another len field consumes two bytes, the
* len field would consume the rest of the packet without room for data.
*
* One optimization would be to forgo the final len field and determine
* it from the size of the udp datagram. Then this condition would no
* longer occur.
*/
/*
* ASSEMBLY AND UNPACKING ALGORITHM:
*
* copy incoming packet into assembly data buffer indexed by current
* location of end of fragment
*
* if not fragmented
* deliver all messages in assembly data buffer
* else
* if msg_count > 1 and fragmented
* deliver all messages except last message in assembly data buffer
* copy last fragmented section to start of assembly data buffer
* else
* if msg_count = 1 and fragmented
* do nothing
*
*/
#include <config.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
#include <errno.h>
#include <limits.h>
#include <corosync/swab.h>
#include <corosync/hdb.h>
#include <corosync/list.h>
#include <corosync/totem/coropoll.h>
#include <corosync/totem/totempg.h>
#define LOGSYS_UTILS_ONLY 1
#include <corosync/engine/logsys.h>
#include "totemmrp.h"
#include "totemsrp.h"
#define min(a,b) ((a) < (b)) ? a : b
/* Header embedded at the start of struct totempg_mcast (see below). */
struct totempg_mcast_header {
	short version;	/* encoding version */
	short type;	/* identifies the mcast (per the struct totempg_mcast notes) */
};
/*
* totempg_mcast structure
*
* header: Identify the mcast.
* fragmented: Set if this message continues into next message
* continuation: Set if this message is a continuation from last message
* msg_count Indicates how many packed messages are contained
* in the mcast.
* Also, the size of each packed message and the messages themselves are
* appended to the end of this structure when sent.
*/
struct totempg_mcast {
	struct totempg_mcast_header header;
	unsigned char fragmented;	/* set if this message continues into next message */
	unsigned char continuation;	/* set if this message continues from last message */
	unsigned short msg_count;	/* number of packed messages that follow */
	/*
	 * short msg_len[msg_count];
	 */
	/*
	 * data for messages
	 */
};
/*
* Maximum packet size for totem pg messages
*/
#define TOTEMPG_PACKET_SIZE (totempg_totem_config->net_mtu - \
sizeof (struct totempg_mcast))
/*
* Local variables used for packing small messages
*/
static unsigned short mcast_packed_msg_lens[FRAME_SIZE_MAX];
static int mcast_packed_msg_count = 0;
static int totempg_reserved = 0;
/*
* Function and data used to log messages
*/
static int totempg_log_level_security;
static int totempg_log_level_error;
static int totempg_log_level_warning;
static int totempg_log_level_notice;
static int totempg_log_level_debug;
static int totempg_subsys_id;
-static void (*totempg_log_printf) (int subsys_id, const char *function,
- const char *file, int line, unsigned int level, unsigned int rec_ident,
- const char *format, ...) __attribute__((format(printf, 7, 8)));
+static void (*totempg_log_printf) (
+ unsigned int rec_ident,
+ const char *function,
+ const char *file,
+ int line,
+ const char *format, ...) __attribute__((format(printf, 5, 6)));
struct totem_config *totempg_totem_config;
enum throw_away_mode {
THROW_AWAY_INACTIVE,
THROW_AWAY_ACTIVE
};
struct assembly {
unsigned int nodeid;
unsigned char data[MESSAGE_SIZE_MAX];
int index;
unsigned char last_frag_num;
enum throw_away_mode throw_away_mode;
struct list_head list;
};
static void assembly_deref (struct assembly *assembly);
static int callback_token_received_fn (enum totem_callback_token_type type,
const void *data);
DECLARE_LIST_INIT(assembly_list_inuse);
DECLARE_LIST_INIT(assembly_list_free);
/*
* Staging buffer for packed messages. Messages are staged in this buffer
* before sending. Multiple messages may fit which cuts down on the
* number of mcasts sent. If a message doesn't completely fit, then
* the mcast header has a fragment bit set that says that there are more
* data to follow. fragment_size is an index into the buffer. It indicates
* the size of message data and where to place new message data.
* fragment_continuation indicates whether the first packed message in
* the buffer is a continuation of a previously packed fragment.
*/
static unsigned char *fragmentation_data;
static int fragment_size = 0;
static int fragment_continuation = 0;
static struct iovec iov_delv;
static unsigned int totempg_max_handle = 0;
struct totempg_group_instance {
void (*deliver_fn) (
unsigned int nodeid,
const void *msg,
unsigned int msg_len,
int endian_conversion_required);
void (*confchg_fn) (
enum totem_configuration_type configuration_type,
const unsigned int *member_list, size_t member_list_entries,
const unsigned int *left_list, size_t left_list_entries,
const unsigned int *joined_list, size_t joined_list_entries,
const struct memb_ring_id *ring_id);
struct totempg_group *groups;
int groups_cnt;
};
DECLARE_HDB_DATABASE (totempg_groups_instance_database,NULL);
static unsigned char next_fragment = 1;
static pthread_mutex_t totempg_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t callback_token_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mcast_msg_mutex = PTHREAD_MUTEX_INITIALIZER;
#define log_printf(level, format, args...) \
do { \
- totempg_log_printf (totempg_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, level, LOGSYS_RECID_LOG, \
+ totempg_log_printf ( \
+ LOGSYS_ENCODE_RECID(level, \
+ totempg_subsys_id, \
+ LOGSYS_RECID_LOG), \
+ __FUNCTION__, __FILE__, __LINE__, \
format, ##args); \
} while (0);
static int msg_count_send_ok (int msg_count);
static int byte_count_send_ok (int byte_count);
/*
 * Return the reassembly buffer for nodeid: an existing in-use buffer if
 * one is active, otherwise one recycled from the free list, otherwise a
 * freshly malloc'd one.  Recycled/new buffers start empty with throw-away
 * mode inactive.
 */
static struct assembly *assembly_ref (unsigned int nodeid)
{
	struct assembly *assembly;
	struct list_head *entry;

	/*
	 * An assembly already in progress for this node wins.
	 */
	for (entry = assembly_list_inuse.next;
	     entry != &assembly_list_inuse;
	     entry = entry->next) {
		assembly = list_entry (entry, struct assembly, list);
		if (assembly->nodeid == nodeid) {
			return (assembly);
		}
	}

	if (list_empty (&assembly_list_free) == 0) {
		/*
		 * Recycle the first free-list entry onto the in-use list.
		 */
		assembly = list_entry (assembly_list_free.next, struct assembly, list);
		list_del (&assembly->list);
	} else {
		/*
		 * Nothing recyclable: allocate a new buffer.
		 * TODO handle memory allocation failure here
		 */
		assembly = malloc (sizeof (struct assembly));
		assert (assembly);
		assembly->data[0] = 0;
		list_init (&assembly->list);
	}

	list_add (&assembly->list, &assembly_list_inuse);
	assembly->nodeid = nodeid;
	assembly->index = 0;
	assembly->last_frag_num = 0;
	assembly->throw_away_mode = THROW_AWAY_INACTIVE;
	return (assembly);
}
/*
 * Release an assembly buffer: move it from the in-use list back onto the
 * free list for later recycling by assembly_ref.  The memory itself is
 * never freed.
 */
static void assembly_deref (struct assembly *assembly)
{
	list_del (&assembly->list);
	list_add (&assembly->list, &assembly_list_free);
}
/*
 * Broadcast a configuration change to every registered group instance
 * that installed a confchg callback.  Handles 0..totempg_max_handle are
 * probed; unoccupied slots are skipped.
 */
static inline void app_confchg_fn (
	enum totem_configuration_type configuration_type,
	const unsigned int *member_list, size_t member_list_entries,
	const unsigned int *left_list, size_t left_list_entries,
	const unsigned int *joined_list, size_t joined_list_entries,
	const struct memb_ring_id *ring_id)
{
	struct totempg_group_instance *instance;
	unsigned int res;
	int i;

	for (i = 0; i <= totempg_max_handle; i++) {
		res = hdb_handle_get (&totempg_groups_instance_database,
			hdb_nocheck_convert (i), (void *)&instance);
		if (res != 0) {
			continue;
		}
		if (instance->confchg_fn) {
			instance->confchg_fn (
				configuration_type,
				member_list, member_list_entries,
				left_list, left_list_entries,
				joined_list, joined_list_entries,
				ring_id);
		}
		hdb_handle_put (&totempg_groups_instance_database,
			hdb_nocheck_convert (i));
	}
}
/*
 * Byte-swap the group-header portion of a packed message in place:
 * group_len[0] holds the number of group-name length fields that follow,
 * and each of those is a 16-bit value needing swab16.  If msg is not
 * 4-byte aligned the work is done in an alloca'd copy and copied back,
 * to avoid misaligned 16-bit accesses on strict-alignment CPUs.
 */
static inline void group_endian_convert (
	void *msg,
	int msg_len)
{
	unsigned short *group_len;
	int i;
	char *aligned_msg;

	/*
	 * Align data structure for sparc and ia64
	 */
	if ((size_t)msg % 4 != 0) {
		aligned_msg = alloca(msg_len);
		memcpy(aligned_msg, msg, msg_len);
	} else {
		aligned_msg = msg;
	}

	group_len = (unsigned short *)aligned_msg;
	/* group_len[0] must be swapped first: it bounds the loop below */
	group_len[0] = swab16(group_len[0]);
	for (i = 1; i < group_len[0] + 1; i++) {
		group_len[i] = swab16(group_len[i]);
	}

	/* if we worked on a copy, write the swapped bytes back */
	if (aligned_msg != msg) {
		memcpy(msg, aligned_msg, msg_len);
	}
}
/*
 * Decide whether a packed message belongs to any of this instance's
 * groups.  The message starts with group_len[0] = number of group names,
 * then that many 16-bit name lengths, then the names back to back.
 *
 * Returns 1 on a name match, 0 otherwise.  *adjust_iovec is always set to
 * the total size of this group header (lengths + names), i.e. the offset
 * of the application payload.
 */
static inline int group_matches (
	struct iovec *iovec,
	unsigned int iov_len,
	struct totempg_group *groups_b,
	unsigned int group_b_cnt,
	unsigned int *adjust_iovec)
{
	unsigned short *group_len;
	char *group_name;
	int i;
	int j;
	struct iovec iovec_aligned = { NULL, 0 };

	assert (iov_len == 1);

	/*
	 * Align data structure for sparc and ia64
	 */
	if ((size_t)iovec->iov_base % 4 != 0) {
		iovec_aligned.iov_base = alloca(iovec->iov_len);
		memcpy(iovec_aligned.iov_base, iovec->iov_base, iovec->iov_len);
		iovec_aligned.iov_len = iovec->iov_len;
		iovec = &iovec_aligned;
	}

	group_len = (unsigned short *)iovec->iov_base;
	/* names begin right after the length table */
	group_name = ((char *)iovec->iov_base) +
		sizeof (unsigned short) * (group_len[0] + 1);

	/*
	 * Calculate amount to adjust the iovec by before delivering to app
	 */
	*adjust_iovec = sizeof (unsigned short) * (group_len[0] + 1);
	for (i = 1; i < group_len[0] + 1; i++) {
		*adjust_iovec += group_len[i];
	}

	/*
	 * Determine if this message should be delivered to this instance
	 */
	for (i = 1; i < group_len[0] + 1; i++) {
		for (j = 0; j < group_b_cnt; j++) {
			if ((group_len[i] == groups_b[j].group_len) &&
				(memcmp (groups_b[j].group, group_name, group_len[i]) == 0)) {
				return (1);
			}
		}
		group_name += group_len[i];
	}
	return (0);
}
/*
 * Deliver one unpacked message to every group instance whose group list
 * matches the message's group header.  The group header is stripped off
 * (adjust_iovec) before the application's deliver_fn is invoked.
 *
 * BUGFIX: the alignment test was
 *     (char *)iovec->iov_base + adjust_iovec % 4 != 0
 * which, by C precedence, adds the boolean (adjust_iovec % 4 != 0) to the
 * base pointer and tests the (always non-NULL) result - so the alloca+copy
 * path ran unconditionally.  The address sum is now parenthesized so the
 * extra copy happens only for genuinely misaligned payloads.
 */
static inline void app_deliver_fn (
	unsigned int nodeid,
	void *msg,
	unsigned int msg_len,
	int endian_conversion_required)
{
	int i;
	struct totempg_group_instance *instance;
	struct iovec stripped_iovec;
	unsigned int adjust_iovec;
	unsigned int res;
	struct iovec *iovec;
	struct iovec aligned_iovec = { NULL, 0 };

	if (endian_conversion_required) {
		group_endian_convert (msg, msg_len);
	}

	/*
	 * TODO This function needs to be rewritten for proper alignment to avoid 3+ memory copies
	 */
	/*
	 * Align data structure for sparc and ia64
	 */
	aligned_iovec.iov_base = alloca(msg_len);
	aligned_iovec.iov_len = msg_len;
	memcpy(aligned_iovec.iov_base, msg, msg_len);
	iovec = &aligned_iovec;

	for (i = 0; i <= totempg_max_handle; i++) {
		res = hdb_handle_get (&totempg_groups_instance_database,
			hdb_nocheck_convert (i), (void *)&instance);
		if (res == 0) {
			if (group_matches (iovec, 1, instance->groups, instance->groups_cnt, &adjust_iovec)) {
				stripped_iovec.iov_len = iovec->iov_len - adjust_iovec;
				stripped_iovec.iov_base = (char *)iovec->iov_base + adjust_iovec;

				/*
				 * Copy via alloca only when the stripped payload
				 * would be misaligned for sparc and ia64
				 */
				if (((size_t)iovec->iov_base + adjust_iovec) % 4 != 0) {
					stripped_iovec.iov_base =
						alloca (stripped_iovec.iov_len);
					memcpy (stripped_iovec.iov_base,
						(char *)iovec->iov_base + adjust_iovec,
						stripped_iovec.iov_len);
				}

				instance->deliver_fn (
					nodeid,
					stripped_iovec.iov_base,
					stripped_iovec.iov_len,
					endian_conversion_required);
			}
			hdb_handle_put (&totempg_groups_instance_database, hdb_nocheck_convert(i));
		}
	}
}
/*
 * Configuration-change callback handed to totemmrp; simply fans the
 * change out to all registered group instances via app_confchg_fn.
 */
static void totempg_confchg_fn (
	enum totem_configuration_type configuration_type,
	const unsigned int *member_list, size_t member_list_entries,
	const unsigned int *left_list, size_t left_list_entries,
	const unsigned int *joined_list, size_t joined_list_entries,
	const struct memb_ring_id *ring_id)
{
// TODO optimize this
	app_confchg_fn (configuration_type,
		member_list, member_list_entries,
		left_list, left_list_entries,
		joined_list, joined_list_entries,
		ring_id);
}
/*
 * Deliver callback handed to totemmrp: unpack a totempg mcast (header,
 * per-message length table, packed message data), append the data to the
 * per-node reassembly buffer, and deliver every complete message to the
 * registered group instances.  A trailing fragment is kept in the buffer
 * until its continuation arrives; out-of-sequence continuations switch
 * the buffer into throw-away mode until the next message boundary.
 */
static void totempg_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required)
{
	struct totempg_mcast *mcast;
	unsigned short *msg_lens;
	int i;
	struct assembly *assembly;
	char header[FRAME_SIZE_MAX];
	int msg_count;
	int continuation;
	int start;
	const char *data;
	int datasize;

	assembly = assembly_ref (nodeid);
	assert (assembly);

	/*
	 * Assemble the header into one block of data and
	 * assemble the packet contents into one block of data to simplify delivery
	 */
	/* NOTE(review): msg is const but the endian swap below writes
	 * through mcast into it - relies on the caller's buffer being
	 * writable. */
	mcast = (struct totempg_mcast *)msg;
	if (endian_conversion_required) {
		mcast->msg_count = swab16 (mcast->msg_count);
	}
	msg_count = mcast->msg_count;
	/* header + length table precede the packed message data */
	datasize = sizeof (struct totempg_mcast) +
		msg_count * sizeof (unsigned short);

	/* copy header+lengths aside so the length table can be swapped
	 * without touching alignment of the original packet */
	memcpy (header, msg, datasize);
	data = msg;

	msg_lens = (unsigned short *) (header + sizeof (struct totempg_mcast));
	if (endian_conversion_required) {
		for (i = 0; i < mcast->msg_count; i++) {
			msg_lens[i] = swab16 (msg_lens[i]);
		}
	}

	/* append the packed message payload to this node's assembly buffer */
	memcpy (&assembly->data[assembly->index], &data[datasize],
		msg_len - datasize);

	/*
	 * If the last message in the buffer is a fragment, then we
	 * can't deliver it. We'll first deliver the full messages
	 * then adjust the assembly buffer so we can add the rest of the
	 * fragment when it arrives.
	 */
	msg_count = mcast->fragmented ? mcast->msg_count - 1 : mcast->msg_count;
	continuation = mcast->continuation;
	iov_delv.iov_base = &assembly->data[0];
	iov_delv.iov_len = assembly->index + msg_lens[0];

	/*
	 * Make sure that if this message is a continuation, that it
	 * matches the sequence number of the previous fragment.
	 * Also, if the first packed message is a continuation
	 * of a previous message, but the assembly buffer
	 * is empty, then we need to discard it since we can't
	 * assemble a complete message. Likewise, if this message isn't a
	 * continuation and the assembly buffer is empty, we have to discard
	 * the continued message.
	 */
	start = 0;
	if (assembly->throw_away_mode == THROW_AWAY_ACTIVE) {
		/* Throw away the first msg block */
		if (mcast->fragmented == 0 || mcast->fragmented == 1) {
			assembly->throw_away_mode = THROW_AWAY_INACTIVE;

			/* skip past the discarded first message */
			assembly->index += msg_lens[0];
			iov_delv.iov_base = &assembly->data[assembly->index];
			iov_delv.iov_len = msg_lens[1];
			start = 1;
		}
	} else
	if (assembly->throw_away_mode == THROW_AWAY_INACTIVE) {
		if (continuation == assembly->last_frag_num) {
			assembly->last_frag_num = mcast->fragmented;
			/* deliver each complete packed message in order */
			for  (i = start; i < msg_count; i++) {
				app_deliver_fn(nodeid, iov_delv.iov_base, iov_delv.iov_len,
					endian_conversion_required);
				assembly->index += msg_lens[i];
				iov_delv.iov_base = &assembly->data[assembly->index];
				if (i < (msg_count - 1)) {
					iov_delv.iov_len = msg_lens[i + 1];
				}
			}
		} else {
			/* continuation sequence mismatch: drop until boundary */
			assembly->throw_away_mode = THROW_AWAY_ACTIVE;
		}
	}

	if (mcast->fragmented == 0) {
		/*
		 * End of messages, dereference assembly struct
		 */
		assembly->last_frag_num = 0;
		assembly->index = 0;
		assembly_deref (assembly);
	} else {
		/*
		 * Message is fragmented, keep around assembly list
		 */
		if (mcast->msg_count > 1) {
			/* slide the trailing fragment to the buffer start */
			memmove (&assembly->data[0],
				&assembly->data[assembly->index],
				msg_lens[msg_count]);

			assembly->index = 0;
		}
		assembly->index += msg_lens[msg_count];
	}
}
/*
* Totem Process Group Abstraction
* depends on poll abstraction, POSIX, IPV4
*/
void *callback_token_received_handle;
int callback_token_received_fn (enum totem_callback_token_type type,
const void *data)
{
struct totempg_mcast mcast;
struct iovec iovecs[3];
int res;
pthread_mutex_lock (&mcast_msg_mutex);
if (mcast_packed_msg_count == 0) {
pthread_mutex_unlock (&mcast_msg_mutex);
return (0);
}
if (totemmrp_avail() == 0) {
pthread_mutex_unlock (&mcast_msg_mutex);
return (0);
}
mcast.fragmented = 0;
/*
* Was the first message in this buffer a continuation of a
* fragmented message?
*/
mcast.continuation = fragment_continuation;
fragment_continuation = 0;
mcast.msg_count = mcast_packed_msg_count;
iovecs[0].iov_base = &mcast;
iovecs[0].iov_len = sizeof (struct totempg_mcast);
iovecs[1].iov_base = mcast_packed_msg_lens;
iovecs[1].iov_len = mcast_packed_msg_count * sizeof (unsigned short);
iovecs[2].iov_base = &fragmentation_data[0];
iovecs[2].iov_len = fragment_size;
res = totemmrp_mcast (iovecs, 3, 0);
mcast_packed_msg_count = 0;
fragment_size = 0;
pthread_mutex_unlock (&mcast_msg_mutex);
return (0);
}
/*
* Initialize the totem process group abstraction
*/
/*
 * Initialize the totem process group layer: copy the logging settings out
 * of totem_config, allocate the packing buffer, start the multi-ring
 * protocol, and register the token callback that flushes packed messages.
 * Returns totemmrp_initialize()'s result, or -1 on allocation failure.
 */
int totempg_initialize (
	hdb_handle_t poll_handle,
	struct totem_config *totem_config)
{
	int res;

	totempg_totem_config = totem_config;

	/* Mirror the logging configuration into file-scope variables */
	totempg_log_level_security = totem_config->totem_logging_configuration.log_level_security;
	totempg_log_level_error = totem_config->totem_logging_configuration.log_level_error;
	totempg_log_level_warning = totem_config->totem_logging_configuration.log_level_warning;
	totempg_log_level_notice = totem_config->totem_logging_configuration.log_level_notice;
	totempg_log_level_debug = totem_config->totem_logging_configuration.log_level_debug;
	totempg_log_printf = totem_config->totem_logging_configuration.log_printf;
	totempg_subsys_id = totem_config->totem_logging_configuration.log_subsys_id;

	fragmentation_data = malloc (TOTEMPG_PACKET_SIZE);
	if (fragmentation_data == 0) {
		return (-1);
	}

	res = totemmrp_initialize (
		poll_handle,
		totem_config,
		totempg_deliver_fn,
		totempg_confchg_fn);

	/*
	 * NOTE(review): the callback registration and MTU adjustment below
	 * run even when totemmrp_initialize() failed -- confirm this is
	 * intentional before changing it.
	 */
	totemmrp_callback_token_create (
		&callback_token_received_handle,
		TOTEM_CALLBACK_TOKEN_RECEIVED,
		0,
		callback_token_received_fn,
		0);

	totemsrp_net_mtu_adjust (totem_config);

	return (res);
}
/*
 * Shut down the multi-ring protocol, serialized by totempg_mutex.
 */
void totempg_finalize (void)
{
	pthread_mutex_lock (&totempg_mutex);
	totemmrp_finalize ();
	pthread_mutex_unlock (&totempg_mutex);
}
/*
* Multicast a message
*/
/*
 * Multicast a message described by iovec_in, packing it into the shared
 * fragmentation buffer and transmitting full packets as they fill up.
 * Messages larger than one packet are fragmented; trailing partial data
 * stays in fragmentation_data to be flushed by callback_token_received_fn.
 * Returns the last totemmrp_mcast() result (0 if nothing was transmitted),
 * or -1 when the transmit queue cannot accept the message.
 *
 * Fix: the original code performed a second, unconditional memcpy of the
 * packet payload into fragmentation_data after the data_ptr selection
 * below.  That copy duplicated the one in the else branch and, on the
 * zero-copy path, wrote bytes into fragmentation_data that were never
 * used (fragment_size is reset right after the send).  It has been
 * removed.
 */
static int mcast_msg (
	struct iovec *iovec_in,
	unsigned int iov_len,
	int guarantee)
{
	int res = 0;
	struct totempg_mcast mcast;
	struct iovec iovecs[3];
	struct iovec iovec[64];
	int i;
	int dest, src;
	int max_packet_size = 0;
	int copy_len = 0;
	int copy_base = 0;
	int total_size = 0;

	pthread_mutex_lock (&mcast_msg_mutex);
	totemmrp_new_msg_signal ();

	/*
	 * Remove zero length iovectors from the list
	 */
	assert (iov_len < 64);
	for (dest = 0, src = 0; src < iov_len; src++) {
		if (iovec_in[src].iov_len) {
			memcpy (&iovec[dest++], &iovec_in[src],
				sizeof (struct iovec));
		}
	}
	iov_len = dest;

	max_packet_size = TOTEMPG_PACKET_SIZE -
		(sizeof (unsigned short) * (mcast_packed_msg_count + 1));

	mcast_packed_msg_lens[mcast_packed_msg_count] = 0;

	/*
	 * Check if we would overwrite new message queue
	 */
	for (i = 0; i < iov_len; i++) {
		total_size += iovec[i].iov_len;
	}

	if (byte_count_send_ok (total_size + sizeof(unsigned short) *
		(mcast_packed_msg_count + 1)) == 0) {

		pthread_mutex_unlock (&mcast_msg_mutex);
		return (-1);
	}

	for (i = 0; i < iov_len; ) {
		mcast.fragmented = 0;
		mcast.continuation = fragment_continuation;
		copy_len = iovec[i].iov_len - copy_base;

		/*
		 * If it all fits with room left over, copy it in.
		 * We need to leave at least sizeof(short) + 1 bytes in the
		 * fragment_buffer on exit so that max_packet_size + fragment_size
		 * doesn't exceed the size of the fragment_buffer on the next call.
		 */
		if ((copy_len + fragment_size) <
			(max_packet_size - sizeof (unsigned short))) {

			memcpy (&fragmentation_data[fragment_size],
				(char *)iovec[i].iov_base + copy_base, copy_len);
			fragment_size += copy_len;
			mcast_packed_msg_lens[mcast_packed_msg_count] += copy_len;
			next_fragment = 1;
			copy_len = 0;
			copy_base = 0;
			i++;
			continue;

		/*
		 * If it just fits or is too big, then send out what fits.
		 */
		} else {
			unsigned char *data_ptr;

			copy_len = min(copy_len, max_packet_size - fragment_size);
			if (copy_len == max_packet_size) {
				/*
				 * copy_len can only equal max_packet_size when
				 * fragment_size is 0, so the packet can be sent
				 * straight from the caller's buffer - no copy.
				 */
				data_ptr = (unsigned char *)iovec[i].iov_base + copy_base;
			} else {
				data_ptr = fragmentation_data;
				memcpy (&fragmentation_data[fragment_size],
					(unsigned char *)iovec[i].iov_base + copy_base,
					copy_len);
			}

			mcast_packed_msg_lens[mcast_packed_msg_count] += copy_len;

			/*
			 * if we're not on the last iovec or the iovec is too large to
			 * fit, then indicate a fragment. This also means that the next
			 * message will have the continuation of this one.
			 */
			if ((i < (iov_len - 1)) ||
					((copy_base + copy_len) < iovec[i].iov_len)) {
				if (!next_fragment) {
					next_fragment++;
				}
				fragment_continuation = next_fragment;
				mcast.fragmented = next_fragment++;
				assert(fragment_continuation != 0);
				assert(mcast.fragmented != 0);
			} else {
				fragment_continuation = 0;
			}

			/*
			 * assemble the message and send it
			 */
			mcast.msg_count = ++mcast_packed_msg_count;
			iovecs[0].iov_base = &mcast;
			iovecs[0].iov_len = sizeof(struct totempg_mcast);
			iovecs[1].iov_base = mcast_packed_msg_lens;
			iovecs[1].iov_len = mcast_packed_msg_count *
				sizeof(unsigned short);
			iovecs[2].iov_base = data_ptr;
			iovecs[2].iov_len = max_packet_size;
			assert (totemmrp_avail() > 0);
			res = totemmrp_mcast (iovecs, 3, guarantee);

			/*
			 * Recalculate counts and indexes for the next.
			 */
			mcast_packed_msg_lens[0] = 0;
			mcast_packed_msg_count = 0;
			fragment_size = 0;
			max_packet_size = TOTEMPG_PACKET_SIZE - (sizeof(unsigned short));

			/*
			 * If the iovec all fit, go to the next iovec
			 */
			if ((copy_base + copy_len) == iovec[i].iov_len) {
				copy_len = 0;
				copy_base = 0;
				i++;

			/*
			 * Continue with the rest of the current iovec.
			 */
			} else {
				copy_base += copy_len;
			}
		}
	}

	/*
	 * Bump only if we added message data. This may be zero if
	 * the last buffer just fit into the fragmentation_data buffer
	 * and we were at the last iovec.
	 */
	if (mcast_packed_msg_lens[mcast_packed_msg_count]) {
		mcast_packed_msg_count++;
	}

	pthread_mutex_unlock (&mcast_msg_mutex);
	return (res);
}
/*
* Determine if a message of msg_size could be queued
*/
/*
 * Return non-zero when msg_count additional messages would still fit in
 * the transmit queue after honoring outstanding reservations.
 */
static int msg_count_send_ok (
	int msg_count)
{
	int free_slots;

	/* slots in the queue, minus reservations, minus one for safety */
	free_slots = totemmrp_avail () - totempg_reserved - 1;

	return (free_slots > msg_count);
}
/*
 * Return non-zero when a message of byte_count bytes (which packs into
 * one packet per net_mtu-25 bytes, rounded up) fits in the transmit queue.
 *
 * Fix: msg_count was declared unsigned int while avail is a signed int
 * that can legitimately be -1 (totemmrp_avail() == 0).  The comparison
 * then promoted avail to a huge unsigned value and wrongly reported the
 * send as OK exactly when the queue was full.  Both operands are now
 * signed.
 */
static int byte_count_send_ok (
	int byte_count)
{
	int msg_count = 0;
	int avail = 0;

	avail = totemmrp_avail () - 1;

	msg_count = (byte_count / (totempg_totem_config->net_mtu - 25)) + 1;

	return (avail > msg_count);
}
/*
 * Reserve transmit-queue slots for a message of msg_size bytes and
 * return how many were reserved (one per net_mtu-25 bytes, rounded up).
 * Undone by send_release().
 */
static int send_reserve (
	int msg_size)
{
	unsigned int packets_needed = 0;

	packets_needed = (msg_size / (totempg_totem_config->net_mtu - 25)) + 1;
	totempg_reserved += packets_needed;

	return (packets_needed);
}
/*
 * Return msg_count previously reserved transmit-queue slots
 * (see send_reserve) to the pool.
 */
static void send_release (
	int msg_count)
{
	totempg_reserved -= msg_count;
}
/*
 * Register a token callback with totemmrp, serialized by
 * callback_token_mutex.  Returns totemmrp_callback_token_create()'s
 * result; the new callback handle is stored through handle_out.
 */
int totempg_callback_token_create (
	void **handle_out,
	enum totem_callback_token_type type,
	int delete,
	int (*callback_fn) (enum totem_callback_token_type type, const void *),
	const void *data)
{
	unsigned int res;

	pthread_mutex_lock (&callback_token_mutex);
	res = totemmrp_callback_token_create (handle_out, type, delete,
		callback_fn, data);
	pthread_mutex_unlock (&callback_token_mutex);
	return (res);
}
/*
 * Unregister a token callback previously created by
 * totempg_callback_token_create(), under the same mutex.
 */
void totempg_callback_token_destroy (
	void *handle_out)
{
	pthread_mutex_lock (&callback_token_mutex);
	totemmrp_callback_token_destroy (handle_out);
	pthread_mutex_unlock (&callback_token_mutex);
}
/*
* vi: set autoindent tabstop=4 shiftwidth=4 :
*/
/*
 * Create a process-group instance: allocate a handle in the instance
 * database, record the caller's deliver/confchg callbacks, and start
 * with an empty group list.  Returns 0 on success, -1 on failure.
 */
int totempg_groups_initialize (
	hdb_handle_t *handle,

	void (*deliver_fn) (
		unsigned int nodeid,
		const void *msg,
		unsigned int msg_len,
		int endian_conversion_required),

	void (*confchg_fn) (
		enum totem_configuration_type configuration_type,
		const unsigned int *member_list, size_t member_list_entries,
		const unsigned int *left_list, size_t left_list_entries,
		const unsigned int *joined_list, size_t joined_list_entries,
		const struct memb_ring_id *ring_id))
{
	struct totempg_group_instance *instance;
	unsigned int res;

	pthread_mutex_lock (&totempg_mutex);
	res = hdb_handle_create (&totempg_groups_instance_database,
		sizeof (struct totempg_group_instance), handle);
	if (res != 0) {
		goto error_exit;
	}

	/* Remember the largest handle handed out so far */
	if (*handle > totempg_max_handle) {
		totempg_max_handle = *handle;
	}

	res = hdb_handle_get (&totempg_groups_instance_database, *handle,
		(void *)&instance);
	if (res != 0) {
		goto error_destroy;
	}

	instance->deliver_fn = deliver_fn;
	instance->confchg_fn = confchg_fn;
	instance->groups = 0;
	instance->groups_cnt = 0;

	hdb_handle_put (&totempg_groups_instance_database, *handle);

	pthread_mutex_unlock (&totempg_mutex);
	return (0);

error_destroy:
	hdb_handle_destroy (&totempg_groups_instance_database, *handle);

error_exit:
	pthread_mutex_unlock (&totempg_mutex);
	return (-1);
}
/*
 * Append group_cnt groups to the instance's subscription list.
 * Returns 0 on success, or a non-zero error (hdb lookup failure, or
 * ENOMEM when the group array cannot be grown).
 *
 * Fix: the original returned straight from the realloc-failure path
 * without calling hdb_handle_put(), leaking a reference on the handle.
 */
int totempg_groups_join (
	hdb_handle_t handle,
	const struct totempg_group *groups,
	size_t group_cnt)
{
	struct totempg_group_instance *instance;
	struct totempg_group *new_groups;
	unsigned int res;

	pthread_mutex_lock (&totempg_mutex);
	res = hdb_handle_get (&totempg_groups_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}

	/*
	 * Grow the group array; on failure the old array is untouched and
	 * remains owned by the instance.
	 */
	new_groups = realloc (instance->groups,
		sizeof (struct totempg_group) *
		(instance->groups_cnt + group_cnt));
	if (new_groups == 0) {
		res = ENOMEM;
		goto error_put;
	}
	memcpy (&new_groups[instance->groups_cnt],
		groups, group_cnt * sizeof (struct totempg_group));
	instance->groups = new_groups;
	instance->groups_cnt += group_cnt;

error_put:
	hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
	pthread_mutex_unlock (&totempg_mutex);
	return (res);
}
/*
 * Leave a set of groups.  NOTE(review): this currently only looks the
 * instance up and releases it again; the groups/group_cnt arguments are
 * unused and the group list is never shrunk -- the removal logic appears
 * to be unimplemented.  Returns the hdb lookup result.
 */
int totempg_groups_leave (
	hdb_handle_t handle,
	const struct totempg_group *groups,
	size_t group_cnt)
{
	struct totempg_group_instance *instance;
	unsigned int res;

	pthread_mutex_lock (&totempg_mutex);
	res = hdb_handle_get (&totempg_groups_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}

	hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
	pthread_mutex_unlock (&totempg_mutex);
	return (res);
}
#define MAX_IOVECS_FROM_APP 32
#define MAX_GROUPS_PER_MSG 32
/*
 * Multicast a message to all groups this instance has joined.  The group
 * count and group-name headers are prepended (as iovec entries) to the
 * caller's iovec before handing off to mcast_msg().  Returns
 * mcast_msg()'s result, or the hdb error when the handle lookup fails.
 *
 * Fix: added asserts guarding the fixed-size stack arrays group_len[]
 * and iovec_mcast[]; the original wrote past them for oversized
 * groups_cnt or iov_len.
 */
int totempg_groups_mcast_joined (
	hdb_handle_t handle,
	const struct iovec *iovec,
	unsigned int iov_len,
	int guarantee)
{
	struct totempg_group_instance *instance;
	unsigned short group_len[MAX_GROUPS_PER_MSG + 1];
	struct iovec iovec_mcast[MAX_GROUPS_PER_MSG + 1 + MAX_IOVECS_FROM_APP];
	int i;
	unsigned int res;

	pthread_mutex_lock (&totempg_mutex);
	res = hdb_handle_get (&totempg_groups_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}

	/*
	 * Guard the fixed-size stack arrays above against overflow.
	 */
	assert (instance->groups_cnt <= MAX_GROUPS_PER_MSG);
	assert (iov_len <= MAX_IOVECS_FROM_APP);

	/*
	 * Build group_len structure and the iovec_mcast structure
	 */
	group_len[0] = instance->groups_cnt;
	for (i = 0; i < instance->groups_cnt; i++) {
		group_len[i + 1] = instance->groups[i].group_len;
		iovec_mcast[i + 1].iov_len = instance->groups[i].group_len;
		iovec_mcast[i + 1].iov_base = (void *) instance->groups[i].group;
	}
	iovec_mcast[0].iov_len = (instance->groups_cnt + 1) * sizeof (unsigned short);
	iovec_mcast[0].iov_base = group_len;
	for (i = 0; i < iov_len; i++) {
		iovec_mcast[i + instance->groups_cnt + 1].iov_len = iovec[i].iov_len;
		iovec_mcast[i + instance->groups_cnt + 1].iov_base = iovec[i].iov_base;
	}

	res = mcast_msg (iovec_mcast, iov_len + instance->groups_cnt + 1, guarantee);
	hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
	pthread_mutex_unlock (&totempg_mutex);
	return (res);
}
/*
 * Reserve transmit-queue slots for a message addressed to all joined
 * groups plus the caller's iovec.  Takes both locks (totempg first, then
 * mcast_msg) so the reservation is consistent with in-progress packing.
 * Returns the number of slots reserved, or 0 when the message cannot be
 * queued right now (or the handle lookup failed).  Release with
 * totempg_groups_joined_release().
 */
int totempg_groups_joined_reserve (
	hdb_handle_t handle,
	const struct iovec *iovec,
	unsigned int iov_len)
{
	struct totempg_group_instance *instance;
	unsigned int size = 0;
	unsigned int i;
	unsigned int res;
	unsigned int reserved = 0;

	pthread_mutex_lock (&totempg_mutex);
	pthread_mutex_lock (&mcast_msg_mutex);
	res = hdb_handle_get (&totempg_groups_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}

	/* Total bytes: group name headers plus payload */
	for (i = 0; i < instance->groups_cnt; i++) {
		size += instance->groups[i].group_len;
	}
	for (i = 0; i < iov_len; i++) {
		size += iovec[i].iov_len;
	}

	reserved = send_reserve (size);
	/* Roll the reservation back if the queue cannot take it */
	if (msg_count_send_ok (reserved) == 0) {
		send_release (reserved);
		reserved = 0;
	}

	hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
	pthread_mutex_unlock (&mcast_msg_mutex);
	pthread_mutex_unlock (&totempg_mutex);
	return (reserved);
}
/*
 * Release msg_count slots reserved by totempg_groups_joined_reserve(),
 * under the same two locks.  Always returns 0.
 */
int totempg_groups_joined_release (int msg_count)
{
	pthread_mutex_lock (&totempg_mutex);
	pthread_mutex_lock (&mcast_msg_mutex);
	send_release (msg_count);
	pthread_mutex_unlock (&mcast_msg_mutex);
	pthread_mutex_unlock (&totempg_mutex);
	return 0;
}
/*
 * Multicast a message to an explicit list of groups (rather than the
 * instance's joined set).  Builds the same group-header iovec layout as
 * totempg_groups_mcast_joined() and hands off to mcast_msg().  Returns
 * mcast_msg()'s result, or the hdb error when the handle lookup fails.
 *
 * Fix: added asserts guarding the fixed-size stack arrays group_len[]
 * and iovec_mcast[]; the original wrote past them for oversized
 * groups_cnt or iov_len.
 */
int totempg_groups_mcast_groups (
	hdb_handle_t handle,
	int guarantee,
	const struct totempg_group *groups,
	size_t groups_cnt,
	const struct iovec *iovec,
	unsigned int iov_len)
{
	struct totempg_group_instance *instance;
	unsigned short group_len[MAX_GROUPS_PER_MSG + 1];
	struct iovec iovec_mcast[MAX_GROUPS_PER_MSG + 1 + MAX_IOVECS_FROM_APP];
	int i;
	unsigned int res;

	pthread_mutex_lock (&totempg_mutex);
	res = hdb_handle_get (&totempg_groups_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}

	/*
	 * Guard the fixed-size stack arrays above against overflow.
	 */
	assert (groups_cnt <= MAX_GROUPS_PER_MSG);
	assert (iov_len <= MAX_IOVECS_FROM_APP);

	/*
	 * Build group_len structure and the iovec_mcast structure
	 */
	group_len[0] = groups_cnt;
	for (i = 0; i < groups_cnt; i++) {
		group_len[i + 1] = groups[i].group_len;
		iovec_mcast[i + 1].iov_len = groups[i].group_len;
		iovec_mcast[i + 1].iov_base = (void *) groups[i].group;
	}
	iovec_mcast[0].iov_len = (groups_cnt + 1) * sizeof (unsigned short);
	iovec_mcast[0].iov_base = group_len;
	for (i = 0; i < iov_len; i++) {
		iovec_mcast[i + groups_cnt + 1].iov_len = iovec[i].iov_len;
		iovec_mcast[i + groups_cnt + 1].iov_base = iovec[i].iov_base;
	}

	res = mcast_msg (iovec_mcast, iov_len + groups_cnt + 1, guarantee);
	hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
	pthread_mutex_unlock (&totempg_mutex);
	return (res);
}
/*
* Returns -1 if error, 0 if can't send, 1 if can send the message
*/
int totempg_groups_send_ok_groups (
	hdb_handle_t handle,
	const struct totempg_group *groups,
	size_t groups_cnt,
	const struct iovec *iovec,
	unsigned int iov_len)
{
	struct totempg_group_instance *instance;
	unsigned int size = 0;
	unsigned int i;
	unsigned int res;

	pthread_mutex_lock (&totempg_mutex);
	res = hdb_handle_get (&totempg_groups_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}

	/* Total bytes: group name headers plus payload */
	for (i = 0; i < groups_cnt; i++) {
		size += groups[i].group_len;
	}
	for (i = 0; i < iov_len; i++) {
		size += iovec[i].iov_len;
	}

	/*
	 * NOTE(review): size is a byte count, but msg_count_send_ok()
	 * compares its argument against available message slots -- verify
	 * the units are intentional.  Also note a failed hdb lookup makes
	 * this return the hdb error code, not the documented -1.
	 */
	res = msg_count_send_ok (size);

	hdb_handle_put (&totempg_groups_instance_database, handle);

error_exit:
	pthread_mutex_unlock (&totempg_mutex);
	return (res);
}
/*
 * Fetch the interface addresses, status strings and interface count known
 * for nodeid.  Thin pass-through to totemmrp_ifaces_get().
 */
int totempg_ifaces_get (
	unsigned int nodeid,
	struct totem_ip_address *interfaces,
	char ***status,
	unsigned int *iface_count)
{
	int res;

	res = totemmrp_ifaces_get (
		nodeid,
		interfaces,
		status,
		iface_count);

	return (res);
}
/*
 * Select the crypto mode; pass-through to totemmrp_crypto_set().
 */
int totempg_crypto_set (
	unsigned int type)
{
	int res;

	res = totemmrp_crypto_set (
		type);

	return (res);
}
/*
 * Re-enable redundant rings; pass-through to totemmrp_ring_reenable().
 */
int totempg_ring_reenable (void)
{
	int res;

	res = totemmrp_ring_reenable ();

	return (res);
}
/*
 * Format nodeid's interfaces as "r(N) ip(ADDR) ..." into a static buffer
 * and return it (NOT thread-safe / not reentrant: the buffer is shared
 * across calls).  Returns a fixed error string when the node is unknown.
 *
 * Fix: the per-interface piece is now formatted with snprintf so a long
 * address string cannot overrun one_iface[64], and the ring index is
 * printed with %u to match its unsigned type.
 */
const char *totempg_ifaces_print (unsigned int nodeid)
{
	static char iface_string[256 * INTERFACE_MAX];
	char one_iface[64];
	struct totem_ip_address interfaces[INTERFACE_MAX];
	char **status;
	unsigned int iface_count;
	unsigned int i;
	int res;

	iface_string[0] = '\0';

	res = totempg_ifaces_get (nodeid, interfaces, &status, &iface_count);
	if (res == -1) {
		return ("no interface found for nodeid");
	}

	for (i = 0; i < iface_count; i++) {
		snprintf (one_iface, sizeof (one_iface), "r(%u) ip(%s) ",
			i, totemip_print (&interfaces[i]));
		strcat (iface_string, one_iface);
	}
	return (iface_string);
}
/*
 * Return this node's totem node id; pass-through to totemmrp.
 */
unsigned int totempg_my_nodeid_get (void)
{
	return (totemmrp_my_nodeid_get());
}
/*
 * Return this node's address family; pass-through to totemmrp.
 */
int totempg_my_family_get (void)
{
	return (totemmrp_my_family_get());
}
diff --git a/exec/totemrrp.c b/exec/totemrrp.c
index e0d19cab..c0eb2e7a 100644
--- a/exec/totemrrp.c
+++ b/exec/totemrrp.c
@@ -1,1759 +1,1764 @@
/*
* Copyright (c) 2005 MontaVista Software, Inc.
* Copyright (c) 2006-2009 Red Hat, Inc.
*
* All rights reserved.
*
* Author: Steven Dake (sdake@redhat.com)
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <config.h>
#include <assert.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <netdb.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <sched.h>
#include <time.h>
#include <sys/time.h>
#include <sys/poll.h>
#include <limits.h>
#include <corosync/sq.h>
#include <corosync/list.h>
#include <corosync/hdb.h>
#include <corosync/swab.h>
#include <corosync/totem/coropoll.h>
#define LOGSYS_UTILS_ONLY 1
#include <corosync/engine/logsys.h>
#include "totemnet.h"
#include "totemrrp.h"
void rrp_deliver_fn (
void *context,
const void *msg,
unsigned int msg_len);
void rrp_iface_change_fn (
void *context,
const struct totem_ip_address *iface_addr);
struct totemrrp_instance;
struct passive_instance {
struct totemrrp_instance *rrp_instance;
unsigned int *faulty;
unsigned int *token_recv_count;
unsigned int *mcast_recv_count;
unsigned char token[15000];
unsigned int token_len;
poll_timer_handle timer_expired_token;
poll_timer_handle timer_problem_decrementer;
void *totemrrp_context;
unsigned int token_xmit_iface;
unsigned int msg_xmit_iface;
};
/*
 * Per-instance state for the active replication algorithm.
 */
struct active_instance {
	struct totemrrp_instance *rrp_instance;	/* owning rrp instance */
	unsigned int *faulty;			/* per-interface fault flag */
	unsigned int *last_token_recv;		/* per-interface token receipt markers */
	unsigned int *counter_problems;		/* per-interface problem counters */
	unsigned char token[15000];		/* copy of the last token received */
	unsigned int token_len;
	unsigned int last_token_seq;		/* sequence number of the last token */
	poll_timer_handle timer_expired_token;
	poll_timer_handle timer_problem_decrementer;
	void *totemrrp_context;
};
/*
 * Dispatch table implemented by each replication mode (see none_algo,
 * passive_algo and active_algo below).  All operations take the owning
 * totemrrp_instance; .initialize may be NULL when the mode keeps no
 * per-algorithm state.
 */
struct rrp_algo {
	const char *name;

	void * (*initialize) (
		struct totemrrp_instance *rrp_instance,
		int interface_count);

	void (*mcast_recv) (
		struct totemrrp_instance *instance,
		unsigned int iface_no,
		void *context,
		const void *msg,
		unsigned int msg_len);

	void (*mcast_noflush_send) (
		struct totemrrp_instance *instance,
		const void *msg,
		unsigned int msg_len);

	void (*mcast_flush_send) (
		struct totemrrp_instance *instance,
		const void *msg,
		unsigned int msg_len);

	void (*token_recv) (
		struct totemrrp_instance *instance,
		unsigned int iface_no,
		void *context,
		const void *msg,
		unsigned int msg_len,
		unsigned int token_seqid);

	void (*token_send) (
		struct totemrrp_instance *instance,
		const void *msg,
		unsigned int msg_len);

	void (*recv_flush) (
		struct totemrrp_instance *instance);

	void (*send_flush) (
		struct totemrrp_instance *instance);

	void (*iface_check) (
		struct totemrrp_instance *instance);

	void (*processor_count_set) (
		struct totemrrp_instance *instance,
		unsigned int processor_count);

	void (*token_target_set) (
		struct totemrrp_instance *instance,
		struct totem_ip_address *token_target,
		unsigned int iface_no);

	void (*ring_reenable) (
		struct totemrrp_instance *instance);
};
struct totemrrp_instance {
hdb_handle_t totemrrp_poll_handle;
struct totem_interface *interfaces;
struct rrp_algo *rrp_algo;
void *context;
char *status[INTERFACE_MAX];
void (*totemrrp_deliver_fn) (
void *context,
const void *msg,
unsigned int msg_len);
void (*totemrrp_iface_change_fn) (
void *context,
const struct totem_ip_address *iface_addr,
unsigned int iface_no);
void (*totemrrp_token_seqid_get) (
const void *msg,
unsigned int *seqid,
unsigned int *token_is);
unsigned int (*totemrrp_msgs_missing) (void);
/*
* Function and data used to log messages
*/
int totemrrp_log_level_security;
int totemrrp_log_level_error;
int totemrrp_log_level_warning;
int totemrrp_log_level_notice;
int totemrrp_log_level_debug;
int totemrrp_subsys_id;
- void (*totemrrp_log_printf) (int subsys, const char *function,
- const char *file, int line, unsigned int level, unsigned int rec_ident,
- const char *format, ...)__attribute__((format(printf, 7, 8)));
+ void (*totemrrp_log_printf) (
+ unsigned int rec_ident,
+ const char *function,
+ const char *file,
+ int line,
+ const char *format, ...)__attribute__((format(printf, 5, 6)));
hdb_handle_t handle;
hdb_handle_t *net_handles;
void *rrp_algo_instance;
int interface_count;
hdb_handle_t poll_handle;
int processor_count;
struct totem_config *totem_config;
};
/*
* None Replication Forward Declerations
*/
static void none_mcast_recv (
struct totemrrp_instance *instance,
unsigned int iface_no,
void *context,
const void *msg,
unsigned int msg_len);
static void none_mcast_noflush_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void none_mcast_flush_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void none_token_recv (
struct totemrrp_instance *instance,
unsigned int iface_no,
void *context,
const void *msg,
unsigned int msg_len,
unsigned int token_seqid);
static void none_token_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void none_recv_flush (
struct totemrrp_instance *instance);
static void none_send_flush (
struct totemrrp_instance *instance);
static void none_iface_check (
struct totemrrp_instance *instance);
static void none_processor_count_set (
struct totemrrp_instance *instance,
unsigned int processor_count_set);
static void none_token_target_set (
struct totemrrp_instance *instance,
struct totem_ip_address *token_target,
unsigned int iface_no);
static void none_ring_reenable (
struct totemrrp_instance *instance);
/*
* Passive Replication Forward Declerations
*/
static void *passive_instance_initialize (
struct totemrrp_instance *rrp_instance,
int interface_count);
static void passive_mcast_recv (
struct totemrrp_instance *instance,
unsigned int iface_no,
void *context,
const void *msg,
unsigned int msg_len);
static void passive_mcast_noflush_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void passive_mcast_flush_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void passive_token_recv (
struct totemrrp_instance *instance,
unsigned int iface_no,
void *context,
const void *msg,
unsigned int msg_len,
unsigned int token_seqid);
static void passive_token_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void passive_recv_flush (
struct totemrrp_instance *instance);
static void passive_send_flush (
struct totemrrp_instance *instance);
static void passive_iface_check (
struct totemrrp_instance *instance);
static void passive_processor_count_set (
struct totemrrp_instance *instance,
unsigned int processor_count_set);
static void passive_token_target_set (
struct totemrrp_instance *instance,
struct totem_ip_address *token_target,
unsigned int iface_no);
static void passive_ring_reenable (
struct totemrrp_instance *instance);
/*
* Active Replication Forward Definitions
*/
static void *active_instance_initialize (
struct totemrrp_instance *rrp_instance,
int interface_count);
static void active_mcast_recv (
struct totemrrp_instance *instance,
unsigned int iface_no,
void *context,
const void *msg,
unsigned int msg_len);
static void active_mcast_noflush_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void active_mcast_flush_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void active_token_recv (
struct totemrrp_instance *instance,
unsigned int iface_no,
void *context,
const void *msg,
unsigned int msg_len,
unsigned int token_seqid);
static void active_token_send (
struct totemrrp_instance *instance,
const void *msg,
unsigned int msg_len);
static void active_recv_flush (
struct totemrrp_instance *instance);
static void active_send_flush (
struct totemrrp_instance *instance);
static void active_iface_check (
struct totemrrp_instance *instance);
static void active_processor_count_set (
struct totemrrp_instance *instance,
unsigned int processor_count_set);
static void active_token_target_set (
struct totemrrp_instance *instance,
struct totem_ip_address *token_target,
unsigned int iface_no);
static void active_ring_reenable (
struct totemrrp_instance *instance);
static void active_timer_expired_token_start (
struct active_instance *active_instance);
static void active_timer_expired_token_cancel (
struct active_instance *active_instance);
static void active_timer_problem_decrementer_start (
struct active_instance *active_instance);
static void active_timer_problem_decrementer_cancel (
struct active_instance *active_instance);
/*
 * "none" mode: single ring, no redundancy; no per-algorithm state
 * (.initialize is NULL).
 */
struct rrp_algo none_algo = {
	.name			= "none",
	.initialize		= NULL,
	.mcast_recv		= none_mcast_recv,
	.mcast_noflush_send	= none_mcast_noflush_send,
	.mcast_flush_send	= none_mcast_flush_send,
	.token_recv		= none_token_recv,
	.token_send		= none_token_send,
	.recv_flush		= none_recv_flush,
	.send_flush		= none_send_flush,
	.iface_check		= none_iface_check,
	.processor_count_set	= none_processor_count_set,
	.token_target_set	= none_token_target_set,
	.ring_reenable		= none_ring_reenable
};

/*
 * "passive" mode dispatch table.
 */
struct rrp_algo passive_algo = {
	.name			= "passive",
	.initialize		= passive_instance_initialize,
	.mcast_recv		= passive_mcast_recv,
	.mcast_noflush_send	= passive_mcast_noflush_send,
	.mcast_flush_send	= passive_mcast_flush_send,
	.token_recv		= passive_token_recv,
	.token_send		= passive_token_send,
	.recv_flush		= passive_recv_flush,
	.send_flush		= passive_send_flush,
	.iface_check		= passive_iface_check,
	.processor_count_set	= passive_processor_count_set,
	.token_target_set	= passive_token_target_set,
	.ring_reenable		= passive_ring_reenable
};

/*
 * "active" mode dispatch table.
 */
struct rrp_algo active_algo = {
	.name			= "active",
	.initialize		= active_instance_initialize,
	.mcast_recv		= active_mcast_recv,
	.mcast_noflush_send	= active_mcast_noflush_send,
	.mcast_flush_send	= active_mcast_flush_send,
	.token_recv		= active_token_recv,
	.token_send		= active_token_send,
	.recv_flush		= active_recv_flush,
	.send_flush		= active_send_flush,
	.iface_check		= active_iface_check,
	.processor_count_set	= active_processor_count_set,
	.token_target_set	= active_token_target_set,
	.ring_reenable		= active_ring_reenable
};

/* Indexed by the configured rrp mode */
struct rrp_algo *rrp_algos[] = {
	&none_algo,
	&passive_algo,
	&active_algo
};
#define RRP_ALGOS_COUNT 3
/*
* All instances in one database
*/
DECLARE_HDB_DATABASE (totemrrp_instance_database,NULL);
#define log_printf(level, format, args...) \
do { \
rrp_instance->totemrrp_log_printf ( \
- rrp_instance->totemrrp_subsys_id, \
- __FUNCTION__, __FILE__, __LINE__, level, \
- LOGSYS_RECID_LOG, format, ##args); \
+ LOGSYS_ENCODE_RECID(level, \
+ rrp_instance->totemrrp_subsys_id, \
+ LOGSYS_RECID_LOG), \
+ __FUNCTION__, __FILE__, __LINE__, \
+ format, ##args); \
} while (0);
/*
* None Replication Implementation
*/
/*
 * "none" mode mcast receive: hand the message straight to the deliver
 * callback; iface_no is ignored (only one ring exists).
 */
static void none_mcast_recv (
	struct totemrrp_instance *rrp_instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len)
{
	rrp_instance->totemrrp_deliver_fn (
		context,
		msg,
		msg_len);
}
/*
 * "none" mode flush-send: transmit on the single interface (index 0).
 */
static void none_mcast_flush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	totemnet_mcast_flush_send (instance->net_handles[0], msg, msg_len);
}
/*
 * "none" mode noflush-send: transmit on the single interface (index 0).
 */
static void none_mcast_noflush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	totemnet_mcast_noflush_send (instance->net_handles[0], msg, msg_len);
}
/*
 * "none" mode token receive: deliver directly; iface_no and token_seq
 * are ignored (no redundancy bookkeeping is needed).
 */
static void none_token_recv (
	struct totemrrp_instance *rrp_instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len,
	unsigned int token_seq)
{
	rrp_instance->totemrrp_deliver_fn (
		context,
		msg,
		msg_len);
}
/*
 * "none" mode token send: transmit on the single interface (index 0).
 */
static void none_token_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	totemnet_token_send (
		instance->net_handles[0],
		msg, msg_len);
}
/* "none" mode: flush pending receives on the single interface. */
static void none_recv_flush (struct totemrrp_instance *instance)
{
	totemnet_recv_flush (instance->net_handles[0]);
}
/* "none" mode: flush pending sends on the single interface. */
static void none_send_flush (struct totemrrp_instance *instance)
{
	totemnet_send_flush (instance->net_handles[0]);
}
/* "none" mode: check the single interface. */
static void none_iface_check (struct totemrrp_instance *instance)
{
	totemnet_iface_check (instance->net_handles[0]);
}
/* "none" mode: forward the processor count to the single interface. */
static void none_processor_count_set (
	struct totemrrp_instance *instance,
	unsigned int processor_count)
{
	totemnet_processor_count_set (instance->net_handles[0],
		processor_count);
}
/* "none" mode: set the token target on the single interface; iface_no ignored. */
static void none_token_target_set (
	struct totemrrp_instance *instance,
	struct totem_ip_address *token_target,
	unsigned int iface_no)
{
	totemnet_token_target_set (instance->net_handles[0], token_target);
}
static void none_ring_reenable (
struct totemrrp_instance *instance)
{
/*
* No operation
*/
}
/*
* Passive Replication Implementation
*/
/*
 * Allocate and zero the passive-mode state: the instance struct plus one
 * counter slot per interface for the fault flags, token counters and
 * mcast counters.  Returns the new instance, or NULL on allocation
 * failure (all partial allocations are freed).
 *
 * Fixes: the definition now carries the `static` from its forward
 * declaration; calloc replaces malloc+memset (and overflow-checks
 * count * size); the repeated free cascades collapse into one goto
 * cleanup path (free(NULL) is a no-op, and calloc of the struct
 * guarantees the pointers start NULL).
 */
static void *passive_instance_initialize (
	struct totemrrp_instance *rrp_instance,
	int interface_count)
{
	struct passive_instance *instance;

	instance = calloc (1, sizeof (struct passive_instance));
	if (instance == NULL) {
		goto error_exit;
	}

	instance->faulty = calloc (interface_count, sizeof (unsigned int));
	if (instance->faulty == NULL) {
		goto error_free;
	}

	instance->token_recv_count = calloc (interface_count, sizeof (unsigned int));
	if (instance->token_recv_count == NULL) {
		goto error_free;
	}

	instance->mcast_recv_count = calloc (interface_count, sizeof (unsigned int));
	if (instance->mcast_recv_count == NULL) {
		goto error_free;
	}

	return ((void *)instance);

error_free:
	free (instance->token_recv_count);
	free (instance->faulty);
	free (instance);
	instance = NULL;

error_exit:
	return ((void *)instance);
}
/*
 * Expiry-timer callback: re-deliver the stored copy of the last token
 * (see passive_instance.token) to keep the protocol moving.
 */
static void timer_function_passive_token_expired (void *context)
{
	struct passive_instance *passive_instance = (struct passive_instance *)context;
	struct totemrrp_instance *rrp_instance = passive_instance->rrp_instance;

	rrp_instance->totemrrp_deliver_fn (
		passive_instance->totemrrp_context,
		passive_instance->token,
		passive_instance->token_len);
}
/* TODO
static void timer_function_passive_problem_decrementer (void *context)
{
// struct passive_instance *passive_instance = (struct passive_instance *)context;
// struct totemrrp_instance *rrp_instance = passive_instance->rrp_instance;
}
*/
/*
 * Arm the token-expiry timer (rrp_token_expired_timeout from the totem
 * config); it fires timer_function_passive_token_expired.
 */
static void passive_timer_expired_token_start (
	struct passive_instance *passive_instance)
{
	poll_timer_add (
		passive_instance->rrp_instance->poll_handle,
		passive_instance->rrp_instance->totem_config->rrp_token_expired_timeout,
		(void *)passive_instance,
		timer_function_passive_token_expired,
		&passive_instance->timer_expired_token);
}
/*
 * Cancel a pending token-expiry timer.
 */
static void passive_timer_expired_token_cancel (
	struct passive_instance *passive_instance)
{
	poll_timer_delete (
		passive_instance->rrp_instance->poll_handle,
		passive_instance->timer_expired_token);
}
/*
static void passive_timer_problem_decrementer_start (
struct passive_instance *passive_instance)
{
poll_timer_add (
passive_instance->rrp_instance->poll_handle,
passive_instance->rrp_instance->totem_config->rrp_problem_count_timeout,
(void *)passive_instance,
timer_function_passive_problem_decrementer,
&passive_instance->timer_problem_decrementer);
}
static void passive_timer_problem_decrementer_cancel (
struct passive_instance *passive_instance)
{
poll_timer_delete (
passive_instance->rrp_instance->poll_handle,
passive_instance->timer_problem_decrementer);
}
*/
/*
 * Passive algorithm multicast receive handler.
 *
 * Delivers the multicast message upward, flushes a buffered token once
 * no messages are missing, and bumps the per-interface receive counter
 * used to mark a lagging interface FAULTY.
 */
static void passive_mcast_recv (
	struct totemrrp_instance *rrp_instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len)
{
	struct passive_instance *passive_instance = (struct passive_instance *)rrp_instance->rrp_algo_instance;
	unsigned int max;
	unsigned int i;

	rrp_instance->totemrrp_deliver_fn (
		context,
		msg,
		msg_len);

	if (rrp_instance->totemrrp_msgs_missing() == 0 &&
		passive_instance->timer_expired_token) {
		/*
		 * Delivers the last token
		 */
		rrp_instance->totemrrp_deliver_fn (
			passive_instance->totemrrp_context,
			passive_instance->token,
			passive_instance->token_len);
		passive_timer_expired_token_cancel (passive_instance);
	}

	/*
	 * Monitor for failures
	 * TODO doesn't handle wrap-around of the mcast recv count
	 */
	passive_instance->mcast_recv_count[iface_no] += 1;
	max = 0;
	for (i = 0; i < rrp_instance->interface_count; i++) {
		if (max < passive_instance->mcast_recv_count[i]) {
			max = passive_instance->mcast_recv_count[i];
		}
	}

	for (i = 0; i < rrp_instance->interface_count; i++) {
		if ((passive_instance->faulty[i] == 0) &&
			(max - passive_instance->mcast_recv_count[i] >
			rrp_instance->totem_config->rrp_problem_count_threshold)) {
			passive_instance->faulty[i] = 1;
			/*
			 * status[i] is a 1024-byte buffer (allocated in
			 * totemrrp_algorithm_set); snprintf bounds the write and
			 * the misspelling "adminisrtative" is fixed.
			 */
			snprintf (rrp_instance->status[i], 1024,
				"Marking ringid %u interface %s FAULTY - administrative intervention required.",
				i,
				totemnet_iface_print (rrp_instance->net_handles[i]));
			log_printf (
				rrp_instance->totemrrp_log_level_error,
				"%s",
				rrp_instance->status[i]);
		}
	}
}
/*
 * Send a multicast (flush variant) on the next non-faulty interface,
 * rotating the transmit interface round-robin.
 */
static void passive_mcast_flush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	struct passive_instance *algo = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int iface = algo->msg_xmit_iface;

	/* advance until a healthy ring is found (precondition: one exists) */
	do {
		iface = (iface + 1) % instance->interface_count;
	} while (algo->faulty[iface] == 1);
	algo->msg_xmit_iface = iface;

	totemnet_mcast_flush_send (instance->net_handles[iface], msg, msg_len);
}
/*
 * Send a multicast (no-flush variant) on the next non-faulty interface,
 * rotating the transmit interface round-robin.
 */
static void passive_mcast_noflush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	struct passive_instance *algo = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int iface = algo->msg_xmit_iface;

	/* advance until a healthy ring is found (precondition: one exists) */
	do {
		iface = (iface + 1) % instance->interface_count;
	} while (algo->faulty[iface] == 1);
	algo->msg_xmit_iface = iface;

	totemnet_mcast_noflush_send (instance->net_handles[iface], msg, msg_len);
}
/*
 * Passive algorithm token receive handler.
 *
 * Delivers the token immediately when no multicast messages are
 * missing; otherwise buffers it and arms the token-expired timer.
 * Also bumps the per-interface token receive counter used to mark a
 * lagging interface FAULTY.
 *
 * NOTE(review): token_len is not updated here before the buffered
 * token is later delivered (active_token_recv does update it) - verify
 * where passive_instance->token_len is maintained.
 */
static void passive_token_recv (
	struct totemrrp_instance *rrp_instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len,
	unsigned int token_seq)
{
	struct passive_instance *passive_instance = (struct passive_instance *)rrp_instance->rrp_algo_instance;
	unsigned int max;
	unsigned int i;

	passive_instance->totemrrp_context = context; // this should be in totemrrp_instance ? TODO

	if (rrp_instance->totemrrp_msgs_missing() == 0) {
		rrp_instance->totemrrp_deliver_fn (
			context,
			msg,
			msg_len);
	} else {
		memcpy (passive_instance->token, msg, msg_len);
		passive_timer_expired_token_start (passive_instance);
	}

	/*
	 * Monitor for failures
	 * TODO doesn't handle wrap-around of the token
	 */
	passive_instance->token_recv_count[iface_no] += 1;
	max = 0;
	for (i = 0; i < rrp_instance->interface_count; i++) {
		if (max < passive_instance->token_recv_count[i]) {
			max = passive_instance->token_recv_count[i];
		}
	}

	for (i = 0; i < rrp_instance->interface_count; i++) {
		if ((passive_instance->faulty[i] == 0) &&
			(max - passive_instance->token_recv_count[i] >
			rrp_instance->totem_config->rrp_problem_count_threshold)) {
			passive_instance->faulty[i] = 1;
			/*
			 * snprintf bounds the 1024-byte status buffer; "%u" now
			 * matches the unsigned token_seq argument; the misspelling
			 * "adminisrtative" is fixed.
			 */
			snprintf (rrp_instance->status[i], 1024,
				"Marking seqid %u ringid %u interface %s FAULTY - administrative intervention required.",
				token_seq,
				i,
				totemnet_iface_print (rrp_instance->net_handles[i]));
			log_printf (
				rrp_instance->totemrrp_log_level_error,
				"%s",
				rrp_instance->status[i]);
		}
	}
}
/*
 * Send the token on the next non-faulty interface, rotating the token
 * transmit interface round-robin.
 */
static void passive_token_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	struct passive_instance *algo = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int iface = algo->token_xmit_iface;

	/* advance until a healthy ring is found (precondition: one exists) */
	do {
		iface = (iface + 1) % instance->interface_count;
	} while (algo->faulty[iface] == 1);
	algo->token_xmit_iface = iface;

	totemnet_token_send (
		instance->net_handles[iface],
		msg, msg_len);
}
/*
 * Flush the receive path of every interface that is not marked faulty.
 */
static void passive_recv_flush (struct totemrrp_instance *instance)
{
	struct passive_instance *algo = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_recv_flush (instance->net_handles[iface]);
	}
}
/*
 * Flush the send path of every interface that is not marked faulty.
 */
static void passive_send_flush (struct totemrrp_instance *instance)
{
	struct passive_instance *algo = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_send_flush (instance->net_handles[iface]);
	}
}
/*
 * Trigger an interface check on every interface not marked faulty.
 */
static void passive_iface_check (struct totemrrp_instance *instance)
{
	struct passive_instance *algo = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_iface_check (instance->net_handles[iface]);
	}
}
/*
 * Propagate the processor count to every interface not marked faulty.
 */
static void passive_processor_count_set (
	struct totemrrp_instance *instance,
	unsigned int processor_count)
{
	struct passive_instance *algo = (struct passive_instance *)instance->rrp_algo_instance;
	unsigned int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_processor_count_set (instance->net_handles[iface],
			processor_count);
	}
}
/*
 * Set the token destination address for one specific interface; the
 * passive algorithm forwards this straight to the totemnet layer.
 */
static void passive_token_target_set (
struct totemrrp_instance *instance,
struct totem_ip_address *token_target,
unsigned int iface_no)
{
totemnet_token_target_set (instance->net_handles[iface_no], token_target);
}
/*
 * Clear all fault markers and receive counters so every ring is
 * considered healthy again.
 */
static void passive_ring_reenable (
	struct totemrrp_instance *instance)
{
	struct passive_instance *algo = (struct passive_instance *)instance->rrp_algo_instance;
	size_t bytes = sizeof (unsigned int) * instance->interface_count;

	memset (algo->faulty, 0, bytes);
	memset (algo->token_recv_count, 0, bytes);
	memset (algo->mcast_recv_count, 0, bytes);
}
/*
* Active Replication Implementation
*/
/*
 * Allocate and zero the per-algorithm state for active replication.
 *
 * Returns the new active_instance, or NULL when any allocation fails
 * (partial allocations are released on the way out).  calloc replaces
 * the malloc+memset pairs; the original mixed sizeof (int) in malloc
 * with sizeof (unsigned int) in memset - equal sizes, but now the
 * element size is stated consistently.
 */
void *active_instance_initialize (
	struct totemrrp_instance *rrp_instance,
	int interface_count)
{
	struct active_instance *instance;

	instance = calloc (1, sizeof (struct active_instance));
	if (instance == NULL) {
		goto error_exit;
	}

	instance->faulty = calloc (interface_count, sizeof (unsigned int));
	if (instance->faulty == NULL) {
		goto error_free_instance;
	}

	instance->last_token_recv = calloc (interface_count, sizeof (unsigned int));
	if (instance->last_token_recv == NULL) {
		goto error_free_faulty;
	}

	instance->counter_problems = calloc (interface_count, sizeof (unsigned int));
	if (instance->counter_problems == NULL) {
		goto error_free_last_token_recv;
	}

	instance->timer_expired_token = 0;
	instance->timer_problem_decrementer = 0;
	instance->rrp_instance = rrp_instance;
	return ((void *)instance);

error_free_last_token_recv:
	free (instance->last_token_recv);
error_free_faulty:
	free (instance->faulty);
error_free_instance:
	free (instance);
	instance = NULL;
error_exit:
	return ((void *)instance);
}
/*
 * Timer callback: periodically decay per-interface problem counters.
 * A counter reaching zero restores the "active with no faults" status;
 * while any counter is still positive the timer re-arms itself,
 * otherwise the timer handle is cleared so the next increment re-arms.
 */
static void timer_function_active_problem_decrementer (void *context)
{
struct active_instance *active_instance = (struct active_instance *)context;
struct totemrrp_instance *rrp_instance = active_instance->rrp_instance;
unsigned int problem_found = 0;
unsigned int i;
for (i = 0; i < rrp_instance->interface_count; i++) {
if (active_instance->counter_problems[i] > 0) {
problem_found = 1;
active_instance->counter_problems[i] -= 1;
if (active_instance->counter_problems[i] == 0) {
sprintf (rrp_instance->status[i],
"ring %d active with no faults", i);
} else {
sprintf (rrp_instance->status[i],
"Decrementing problem counter for iface %s to [%d of %d]",
totemnet_iface_print (rrp_instance->net_handles[i]),
active_instance->counter_problems[i],
rrp_instance->totem_config->rrp_problem_count_threshold);
}
log_printf (
rrp_instance->totemrrp_log_level_warning,
"%s",
rrp_instance->status[i]);
}
}
/* re-arm only while some counter remains positive */
if (problem_found) {
active_timer_problem_decrementer_start (active_instance);
} else {
active_instance->timer_problem_decrementer = 0;
}
}
/*
 * Timer callback: the current token was not seen on every interface in
 * time.  Charge each silent interface with a problem, mark interfaces
 * FAULTY once the count reaches rrp_problem_count_threshold, then
 * deliver the buffered token so the ring keeps making progress.
 */
static void timer_function_active_token_expired (void *context)
{
	struct active_instance *active_instance = (struct active_instance *)context;
	struct totemrrp_instance *rrp_instance = active_instance->rrp_instance;
	unsigned int i;

	for (i = 0; i < rrp_instance->interface_count; i++) {
		if (active_instance->last_token_recv[i] == 0) {
			active_instance->counter_problems[i] += 1;

			/* start the decay timer on the first recorded problem */
			if (active_instance->timer_problem_decrementer == 0) {
				active_timer_problem_decrementer_start (active_instance);
			}

			/* status[i] is a 1024-byte buffer (totemrrp_algorithm_set) */
			snprintf (rrp_instance->status[i], 1024,
				"Incrementing problem counter for seqid %d iface %s to [%d of %d]",
				active_instance->last_token_seq,
				totemnet_iface_print (rrp_instance->net_handles[i]),
				active_instance->counter_problems[i],
				rrp_instance->totem_config->rrp_problem_count_threshold);
			log_printf (
				rrp_instance->totemrrp_log_level_warning,
				"%s",
				rrp_instance->status[i]);
		}
	}
	for (i = 0; i < rrp_instance->interface_count; i++) {
		if (active_instance->counter_problems[i] >= rrp_instance->totem_config->rrp_problem_count_threshold)
		{
			active_instance->faulty[i] = 1;
			/* typo fix: "adminisrtative" -> "administrative" */
			snprintf (rrp_instance->status[i], 1024,
				"Marking seqid %d ringid %u interface %s FAULTY - administrative intervention required.",
				active_instance->last_token_seq,
				i,
				totemnet_iface_print (rrp_instance->net_handles[i]));
			log_printf (
				rrp_instance->totemrrp_log_level_error,
				"%s",
				rrp_instance->status[i]);
			active_timer_problem_decrementer_cancel (active_instance);
		}
	}

	rrp_instance->totemrrp_deliver_fn (
		active_instance->totemrrp_context,
		active_instance->token,
		active_instance->token_len);
}
/*
 * Arm the one-shot timer that fires if the active algorithm's token is
 * not seen on all interfaces within rrp_token_expired_timeout.
 */
static void active_timer_expired_token_start (
struct active_instance *active_instance)
{
poll_timer_add (
active_instance->rrp_instance->poll_handle,
active_instance->rrp_instance->totem_config->rrp_token_expired_timeout,
(void *)active_instance,
timer_function_active_token_expired,
&active_instance->timer_expired_token);
}
/*
 * Cancel a pending token-expired timer for the active algorithm.
 */
static void active_timer_expired_token_cancel (
struct active_instance *active_instance)
{
poll_timer_delete (
active_instance->rrp_instance->poll_handle,
active_instance->timer_expired_token);
}
/*
 * Arm the periodic problem-counter decay timer
 * (rrp_problem_count_timeout interval).
 */
static void active_timer_problem_decrementer_start (
struct active_instance *active_instance)
{
poll_timer_add (
active_instance->rrp_instance->poll_handle,
active_instance->rrp_instance->totem_config->rrp_problem_count_timeout,
(void *)active_instance,
timer_function_active_problem_decrementer,
&active_instance->timer_problem_decrementer);
}
/*
 * Cancel the problem-counter decay timer (done once an interface has
 * been marked FAULTY).
 */
static void active_timer_problem_decrementer_cancel (
struct active_instance *active_instance)
{
poll_timer_delete (
active_instance->rrp_instance->poll_handle,
active_instance->timer_problem_decrementer);
}
/*
* active replication
*/
/*
 * Active algorithm multicast receive: every ring carries every message,
 * so the message is passed straight up to the deliver callback.
 */
static void active_mcast_recv (
	struct totemrrp_instance *instance,
	unsigned int iface_no,
	void *context,
	const void *msg,
	unsigned int msg_len)
{
	instance->totemrrp_deliver_fn (context, msg, msg_len);
}
/*
 * Send a multicast (flush variant) on every interface that is not
 * marked faulty - active replication transmits on all healthy rings.
 */
static void active_mcast_flush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	struct active_instance *algo = (struct active_instance *)instance->rrp_algo_instance;
	int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_mcast_flush_send (instance->net_handles[iface], msg, msg_len);
	}
}
/*
 * Send a multicast (no-flush variant) on every interface that is not
 * marked faulty.
 */
static void active_mcast_noflush_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	struct active_instance *algo = (struct active_instance *)instance->rrp_algo_instance;
	int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_mcast_noflush_send (instance->net_handles[iface], msg, msg_len);
	}
}
/*
 * Active algorithm token receive: a token is delivered upward only once
 * it has been seen on every non-faulty interface; until then a timer
 * guards against interfaces that never produce it.
 */
static void active_token_recv (
struct totemrrp_instance *instance,
unsigned int iface_no,
void *context,
const void *msg,
unsigned int msg_len,
unsigned int token_seq)
{
int i;
struct active_instance *active_instance = (struct active_instance *)instance->rrp_algo_instance;
active_instance->totemrrp_context = context; // this should be in totemrrp_instance ?
/* first sighting of a newer token: buffer it and restart tracking */
if (token_seq > active_instance->last_token_seq) {
memcpy (active_instance->token, msg, msg_len);
active_instance->token_len = msg_len;
for (i = 0; i < instance->interface_count; i++) {
active_instance->last_token_recv[i] = 0;
}
active_instance->last_token_recv[iface_no] = 1;
active_timer_expired_token_start (active_instance);
}
/*
 * NOTE(review): last_token_seq is unconditionally overwritten here, so
 * the comparison just below is always true, and a stale (smaller)
 * token_seq would move last_token_seq backwards - verify the intent
 * against the Totem protocol specification.  No wrap-around handling
 * for token_seq either.
 */
active_instance->last_token_seq = token_seq;
if (token_seq == active_instance->last_token_seq) {
active_instance->last_token_recv[iface_no] = 1;
for (i = 0; i < instance->interface_count; i++) {
if ((active_instance->last_token_recv[i] == 0) &&
active_instance->faulty[i] == 0) {
return; /* don't deliver token */
}
}
/* token seen on all healthy rings: stop the timer and deliver */
active_timer_expired_token_cancel (active_instance);
instance->totemrrp_deliver_fn (
context,
msg,
msg_len);
}
}
/*
 * Send the token on every interface that is not marked faulty.
 */
static void active_token_send (
	struct totemrrp_instance *instance,
	const void *msg,
	unsigned int msg_len)
{
	struct active_instance *algo = (struct active_instance *)instance->rrp_algo_instance;
	int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_token_send (
			instance->net_handles[iface],
			msg, msg_len);
	}
}
/*
 * Flush the receive path of every interface that is not marked faulty.
 */
static void active_recv_flush (struct totemrrp_instance *instance)
{
	struct active_instance *algo = (struct active_instance *)instance->rrp_algo_instance;
	unsigned int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_recv_flush (instance->net_handles[iface]);
	}
}
/*
 * Flush the send path of every interface that is not marked faulty.
 */
static void active_send_flush (struct totemrrp_instance *instance)
{
	struct active_instance *algo = (struct active_instance *)instance->rrp_algo_instance;
	unsigned int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_send_flush (instance->net_handles[iface]);
	}
}
/*
 * Trigger an interface check on every interface not marked faulty.
 */
static void active_iface_check (struct totemrrp_instance *instance)
{
	struct active_instance *algo = (struct active_instance *)instance->rrp_algo_instance;
	unsigned int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_iface_check (instance->net_handles[iface]);
	}
}
/*
 * Propagate the processor count to every interface not marked faulty.
 */
static void active_processor_count_set (
	struct totemrrp_instance *instance,
	unsigned int processor_count)
{
	struct active_instance *algo = (struct active_instance *)instance->rrp_algo_instance;
	unsigned int iface;

	for (iface = 0; iface < instance->interface_count; iface++) {
		if (algo->faulty[iface]) {
			continue;
		}
		totemnet_processor_count_set (instance->net_handles[iface],
			processor_count);
	}
}
/*
 * Set the token destination address for one specific interface; the
 * active algorithm forwards this straight to the totemnet layer.
 */
static void active_token_target_set (
struct totemrrp_instance *instance,
struct totem_ip_address *token_target,
unsigned int iface_no)
{
totemnet_token_target_set (instance->net_handles[iface_no], token_target);
}
/*
 * Clear all fault markers, token tracking and problem counters so every
 * ring is considered healthy again.
 */
static void active_ring_reenable (
	struct totemrrp_instance *instance)
{
	struct active_instance *algo = (struct active_instance *)instance->rrp_algo_instance;
	size_t bytes = sizeof (unsigned int) * instance->interface_count;

	memset (algo->faulty, 0, bytes);
	memset (algo->last_token_recv, 0, bytes);
	memset (algo->counter_problems, 0, bytes);
}
/*
 * Per-interface context handed to totemnet so rrp_deliver_fn and
 * rrp_iface_change_fn can recover the owning instance and ring number.
 */
struct deliver_fn_context {
struct totemrrp_instance *instance;
void *context;
int iface_no;
};
/*
 * Zero-fill a freshly created totemrrp_instance.
 */
static void totemrrp_instance_initialize (struct totemrrp_instance *instance)
{
memset (instance, 0, sizeof (struct totemrrp_instance));
}
/*
 * Select the rrp algorithm named by totem_config->rrp_mode and run its
 * per-algorithm initializer, then allocate the per-interface status
 * strings.
 *
 * Returns 0 on success, -1 when no algorithm matches or a status
 * buffer cannot be allocated.
 */
static int totemrrp_algorithm_set (
	struct totem_config *totem_config,
	struct totemrrp_instance *instance)
{
	unsigned int res = -1;
	unsigned int i;

	for (i = 0; i < RRP_ALGOS_COUNT; i++) {
		if (strcmp (totem_config->rrp_mode, rrp_algos[i]->name) == 0) {
			instance->rrp_algo = rrp_algos[i];
			if (rrp_algos[i]->initialize) {
				instance->rrp_algo_instance = rrp_algos[i]->initialize (
					instance,
					totem_config->interface_count);
			}
			res = 0;
			break;
		}
	}
	for (i = 0; i < totem_config->interface_count; i++) {
		instance->status[i] = malloc (1024);
		if (instance->status[i] == NULL) {
			/*
			 * BUG FIX: the malloc result was previously passed
			 * unchecked to sprintf.
			 */
			return (-1);
		}
		snprintf (instance->status[i], 1024,
			"ring %d active with no faults", i);
	}
	return (res);
}
/*
 * totemnet receive callback: classify the incoming frame as a token or
 * a multicast (via the registered token_seqid_get hook) and dispatch it
 * to the matching handler of the selected rrp algorithm.
 */
void rrp_deliver_fn (
void *context,
const void *msg,
unsigned int msg_len)
{
unsigned int token_seqid;
unsigned int token_is;
struct deliver_fn_context *deliver_fn_context = (struct deliver_fn_context *)context;
deliver_fn_context->instance->totemrrp_token_seqid_get (
msg,
&token_seqid,
&token_is);
if (token_is) {
/*
 * Deliver to the token receiver for this rrp algorithm
 */
deliver_fn_context->instance->rrp_algo->token_recv (
deliver_fn_context->instance,
deliver_fn_context->iface_no,
deliver_fn_context->context,
msg,
msg_len,
token_seqid);
} else {
/*
 * Deliver to the mcast receiver for this rrp algorithm
 */
deliver_fn_context->instance->rrp_algo->mcast_recv (
deliver_fn_context->instance,
deliver_fn_context->iface_no,
deliver_fn_context->context,
msg,
msg_len);
}
}
/*
 * totemnet interface-change callback: forward the new address to the
 * upper layer, tagging it with this context's ring number.
 */
void rrp_iface_change_fn (
void *context,
const struct totem_ip_address *iface_addr)
{
struct deliver_fn_context *deliver_fn_context = (struct deliver_fn_context *)context;
deliver_fn_context->instance->totemrrp_iface_change_fn (
deliver_fn_context->context,
iface_addr,
deliver_fn_context->iface_no);
}
/*
 * Finalize every lower totemnet interface owned by this instance.
 * Returns 0 on success or ENOENT for an unknown handle.
 *
 * NOTE(review): net_handles, rrp_algo_instance and the status strings
 * are not freed here, and the instance handle itself is never
 * destroyed - possible leak; verify against the handle database's
 * destructor semantics.
 */
int totemrrp_finalize (
hdb_handle_t handle)
{
struct totemrrp_instance *instance;
int res = 0;
int i;
res = hdb_handle_get (&totemrrp_instance_database, handle,
(void *)&instance);
if (res != 0) {
res = ENOENT;
goto error_exit;
}
for (i = 0; i < instance->interface_count; i++) {
totemnet_finalize (instance->net_handles[i]);
}
hdb_handle_put (&totemrrp_instance_database, handle);
error_exit:
return (res);
}
/*
* Totem Redundant Ring interface
* depends on poll abstraction, POSIX, IPV4
*/
/*
* Create an instance
*/
/*
 * Create a totemrrp instance: select the rrp algorithm, wire up the
 * logging configuration and upper-layer callbacks, and initialize one
 * totemnet interface per configured ring.
 *
 * Returns 0 on success, nonzero on failure.
 */
int totemrrp_initialize (
	hdb_handle_t poll_handle,
	hdb_handle_t *handle,
	struct totem_config *totem_config,
	void *context,
	void (*deliver_fn) (
		void *context,
		const void *msg,
		unsigned int msg_len),
	void (*iface_change_fn) (
		void *context,
		const struct totem_ip_address *iface_addr,
		unsigned int iface_no),
	void (*token_seqid_get) (
		const void *msg,
		unsigned int *seqid,
		unsigned int *token_is),
	unsigned int (*msgs_missing) (void))
{
	struct totemrrp_instance *instance;
	unsigned int res;
	int i;

	res = hdb_handle_create (&totemrrp_instance_database,
		sizeof (struct totemrrp_instance), handle);
	if (res != 0) {
		/*
		 * BUG FIX: the original jumped to the success exit here,
		 * calling hdb_handle_put on a handle that was never created
		 * and returning 0 despite the failure.
		 */
		return (res);
	}
	res = hdb_handle_get (&totemrrp_instance_database, *handle,
		(void *)&instance);
	if (res != 0) {
		goto error_destroy;
	}

	totemrrp_instance_initialize (instance);

	instance->totem_config = totem_config;

	res = totemrrp_algorithm_set (
		instance->totem_config,
		instance);
	if (res == -1) {
		goto error_put;
	}

	/*
	 * Configure logging
	 */
	instance->totemrrp_log_level_security = totem_config->totem_logging_configuration.log_level_security;
	instance->totemrrp_log_level_error = totem_config->totem_logging_configuration.log_level_error;
	instance->totemrrp_log_level_warning = totem_config->totem_logging_configuration.log_level_warning;
	instance->totemrrp_log_level_notice = totem_config->totem_logging_configuration.log_level_notice;
	instance->totemrrp_log_level_debug = totem_config->totem_logging_configuration.log_level_debug;
	instance->totemrrp_subsys_id = totem_config->totem_logging_configuration.log_subsys_id;
	instance->totemrrp_log_printf = totem_config->totem_logging_configuration.log_printf;

	instance->interfaces = totem_config->interfaces;

	instance->totemrrp_poll_handle = poll_handle;

	instance->totemrrp_deliver_fn = deliver_fn;
	instance->totemrrp_iface_change_fn = iface_change_fn;
	instance->totemrrp_token_seqid_get = token_seqid_get;
	instance->totemrrp_msgs_missing = msgs_missing;
	instance->interface_count = totem_config->interface_count;

	instance->net_handles = malloc (sizeof (hdb_handle_t) * totem_config->interface_count);
	if (instance->net_handles == NULL) {
		/* BUG FIX: this allocation was previously unchecked */
		res = ENOMEM;
		goto error_put;
	}

	instance->context = context;
	instance->poll_handle = poll_handle;

	for (i = 0; i < totem_config->interface_count; i++) {
		struct deliver_fn_context *deliver_fn_context;

		deliver_fn_context = malloc (sizeof (struct deliver_fn_context));
		assert (deliver_fn_context);
		deliver_fn_context->instance = instance;
		deliver_fn_context->context = context;
		deliver_fn_context->iface_no = i;

		totemnet_initialize (
			poll_handle,
			&instance->net_handles[i],
			totem_config,
			i,
			(void *)deliver_fn_context,
			rrp_deliver_fn,
			rrp_iface_change_fn);
	}

	totemnet_net_mtu_adjust (totem_config);

	hdb_handle_put (&totemrrp_instance_database, *handle);
	return (0);

error_put:
	hdb_handle_put (&totemrrp_instance_database, *handle);
error_destroy:
	hdb_handle_destroy (&totemrrp_instance_database, *handle);
	return (res);
}
/*
 * Public entry point: record the processor count and propagate it to
 * the active rrp algorithm.  Returns 0 on success, ENOENT for an
 * unknown handle.
 */
int totemrrp_processor_count_set (
	hdb_handle_t handle,
	unsigned int processor_count)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	instance->rrp_algo->processor_count_set (instance, processor_count);
	instance->processor_count = processor_count;
	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: set the token destination address for one
 * interface.  Returns 0 on success, ENOENT for an unknown handle.
 */
int totemrrp_token_target_set (
	hdb_handle_t handle,
	struct totem_ip_address *addr,
	unsigned int iface_no)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	instance->rrp_algo->token_target_set (instance, addr, iface_no);
	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: flush receive queues via the active rrp
 * algorithm.  Returns 0 on success, ENOENT for an unknown handle.
 */
int totemrrp_recv_flush (hdb_handle_t handle)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	instance->rrp_algo->recv_flush (instance);
	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: flush send queues via the active rrp algorithm.
 * Returns 0 on success, ENOENT for an unknown handle.
 */
int totemrrp_send_flush (hdb_handle_t handle)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	instance->rrp_algo->send_flush (instance);
	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: transmit the token via the active rrp algorithm.
 * Returns 0 on success, ENOENT for an unknown handle.
 */
int totemrrp_token_send (
	hdb_handle_t handle,
	const void *msg,
	unsigned int msg_len)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	instance->rrp_algo->token_send (instance, msg, msg_len);
	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: transmit a multicast with flush semantics via the
 * active rrp algorithm.  Returns 0 on success, ENOENT for an unknown
 * handle.
 */
int totemrrp_mcast_flush_send (
	hdb_handle_t handle,
	const void *msg,
	unsigned int msg_len)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	// TODO this needs to return the result
	instance->rrp_algo->mcast_flush_send (instance, msg, msg_len);
	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: transmit a multicast without flush semantics.
 * Skipped entirely on a single-processor ring (see comment below).
 * Returns 0 on success, ENOENT for an unknown handle.
 */
int totemrrp_mcast_noflush_send (
	hdb_handle_t handle,
	const void *msg,
	unsigned int msg_len)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	/*
	 * merge detects go out through mcast_flush_send so it is safe to
	 * flush these messages if we are only one processor. This avoids
	 * an encryption/hmac and decryption/hmac
	 */
	if (instance->processor_count > 1) {
		// TODO this needs to return the result
		instance->rrp_algo->mcast_noflush_send (instance, msg, msg_len);
	}

	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: run an interface check via the active rrp
 * algorithm.  Returns 0 on success, ENOENT for an unknown handle.
 */
int totemrrp_iface_check (hdb_handle_t handle)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	instance->rrp_algo->iface_check (instance);
	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: expose the per-interface status strings and,
 * optionally, the interface count.  Returns 0 on success, ENOENT for
 * an unknown handle.
 */
int totemrrp_ifaces_get (
	hdb_handle_t handle,
	char ***status,
	unsigned int *iface_count)
{
	struct totemrrp_instance *instance;
	int res;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		return (ENOENT);
	}

	*status = instance->status;
	if (iface_count != NULL) {
		*iface_count = instance->interface_count;
	}
	hdb_handle_put (&totemrrp_instance_database, handle);
	return (0);
}
/*
 * Public entry point: change the crypto type on the first totemnet
 * interface.  Returns the totemnet result on success, ENOENT for an
 * unknown handle.
 */
int totemrrp_crypto_set (
	hdb_handle_t handle,
	unsigned int type)
{
	int res;
	struct totemrrp_instance *instance;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		/*
		 * BUG FIX: previously returned 0 (success) when the handle
		 * lookup failed; return ENOENT like the sibling entry points.
		 */
		return (ENOENT);
	}

	res = totemnet_crypto_set (instance->net_handles[0], type);

	hdb_handle_put (&totemrrp_instance_database, handle);
	return (res);
}
/*
 * Public entry point: clear fault state on every ring and reset the
 * per-interface status strings.  Returns 0 on success, ENOENT for an
 * unknown handle.
 *
 * BUG FIX: dropped the stray debugging printf to stdout that ran even
 * before the handle was validated.
 */
int totemrrp_ring_reenable (
	hdb_handle_t handle)
{
	struct totemrrp_instance *instance;
	int res = 0;
	unsigned int i;

	res = hdb_handle_get (&totemrrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		res = ENOENT;
		goto error_exit;
	}

	instance->rrp_algo->ring_reenable (instance);

	for (i = 0; i < instance->interface_count; i++) {
		/* status[i] is a 1024-byte buffer (totemrrp_algorithm_set) */
		snprintf (instance->status[i], 1024,
			"ring %d active with no faults", i);
	}

	hdb_handle_put (&totemrrp_instance_database, handle);

error_exit:
	return (res);
}
diff --git a/exec/totemsrp.c b/exec/totemsrp.c
index e14e3776..19074640 100644
--- a/exec/totemsrp.c
+++ b/exec/totemsrp.c
@@ -1,4236 +1,4241 @@
/*
* Copyright (c) 2003-2006 MontaVista Software, Inc.
* Copyright (c) 2006-2009 Red Hat, Inc.
*
* All rights reserved.
*
* Author: Steven Dake (sdake@redhat.com)
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* The first version of this code was based upon Yair Amir's PhD thesis:
* http://www.cs.jhu.edu/~yairamir/phd.ps) (ch4,5).
*
* The current version of totemsrp implements the Totem protocol specified in:
* http://citeseer.ist.psu.edu/amir95totem.html
*
* The deviations from the above published protocols are:
* - encryption of message contents with SOBER128
* - authentication of message contents with SHA1/HMAC
* - token hold mode where token doesn't rotate on unused ring - reduces cpu
* usage on 1.6ghz xeon from 35% to less then .1 % as measured by top
*/
#include <config.h>
#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <netdb.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <sys/param.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <sched.h>
#include <time.h>
#include <sys/time.h>
#include <sys/poll.h>
#include <limits.h>
#include <corosync/swab.h>
#include <corosync/cs_queue.h>
#include <corosync/sq.h>
#include <corosync/list.h>
#include <corosync/hdb.h>
#include <corosync/totem/coropoll.h>
#define LOGSYS_UTILS_ONLY 1
#include <corosync/engine/logsys.h>
#include "totemsrp.h"
#include "totemrrp.h"
#include "totemnet.h"
#include "wthread.h"
#include "crypto.h"
#define LOCALHOST_IP inet_addr("127.0.0.1")
#define QUEUE_RTR_ITEMS_SIZE_MAX 256 /* allow 256 retransmit items */
#define RETRANS_MESSAGE_QUEUE_SIZE_MAX 500 /* allow 500 messages to be queued */
#define RECEIVED_MESSAGE_QUEUE_SIZE_MAX 500 /* allow 500 messages to be queued */
#define MAXIOVS 5
#define RETRANSMIT_ENTRIES_MAX 30
#define TOKEN_SIZE_MAX 64000 /* bytes */
#define LEAVE_DUMMY_NODEID 0
/*
* Rollover handling:
* SEQNO_START_MSG is the starting sequence number after a new configuration
* This should remain zero, unless testing overflow in which case
* 0x7ffff000 and 0xfffff000 are good starting values.
*
* SEQNO_START_TOKEN is the starting sequence number after a new configuration
* for a token. This should remain zero, unless testing overflow in which
* case 0x7fffff00 or 0xffffff00 are good starting values.
*
* SEQNO_START_MSG is the starting sequence number after a new configuration
* This should remain zero, unless testing overflow in which case
* 0x7ffff000 and 0xfffff000 are good values to start with
*/
#define SEQNO_START_MSG 0x0
#define SEQNO_START_TOKEN 0x0
/*
* These can be used ot test different rollover points
* #define SEQNO_START_MSG 0xfffffe00
* #define SEQNO_START_TOKEN 0xfffffe00
*/
/*
* These can be used to test the error recovery algorithms
* #define TEST_DROP_ORF_TOKEN_PERCENTAGE 30
* #define TEST_DROP_COMMIT_TOKEN_PERCENTAGE 30
* #define TEST_DROP_MCAST_PERCENTAGE 50
* #define TEST_RECOVERY_MSG_COUNT 300
*/
/*
* we compare incoming messages to determine if their endian is
* different - if so convert them
*
* do not change
*/
#define ENDIAN_LOCAL 0xff22
/*
 * On-wire totemsrp message types, carried in message_header.type.
 */
enum message_type {
MESSAGE_TYPE_ORF_TOKEN = 0, /* Ordering, Reliability, Flow (ORF) control Token */
MESSAGE_TYPE_MCAST = 1, /* ring ordered multicast message */
MESSAGE_TYPE_MEMB_MERGE_DETECT = 2, /* merge rings if there are available rings */
MESSAGE_TYPE_MEMB_JOIN = 3, /* membership join message */
MESSAGE_TYPE_MEMB_COMMIT_TOKEN = 4, /* membership commit token */
MESSAGE_TYPE_TOKEN_HOLD_CANCEL = 5, /* cancel the holding of the token */
};
/*
 * Marks whether an mcast payload is an encapsulated copy of another
 * message (used during recovery) or ordinary application data.
 */
enum encapsulation_type {
	MESSAGE_ENCAPSULATED = 1,
	MESSAGE_NOT_ENCAPSULATED = 2
};
/*
* New membership algorithm local variables
*/
/*
 * Identity of a processor: one totem address per configured interface.
 * Note that srp_addr_equal() compares only addr[0].
 */
struct srp_addr {
	struct totem_ip_address addr[INTERFACE_MAX];
};
/*
 * One consensus vote: a processor and whether it has agreed.
 */
struct consensus_list_item {
	struct srp_addr addr;
	int set;	/* non-zero once consensus has been recorded for addr */
};
/*
 * A registered token callback, linked into the per-instance
 * token_callback_sent/received list heads.
 */
struct token_callback_instance {
	struct list_head list;
	int (*callback_fn) (enum totem_callback_token_type type, const void *);
	enum totem_callback_token_type callback_type;
	int delete;	/* NOTE(review): presumably "remove after firing" -- confirm in token_callbacks_execute */
	void *data;	/* opaque argument handed back to callback_fn */
};
/*
 * Socket pair used by the transport (presumably file descriptors; the
 * struct is not referenced in this part of the file).
 */
struct totemsrp_socket {
	int mcast;	/* multicast traffic */
	int token;	/* token traffic */
};
/*
 * Common header carried by every totemsrp packet (wire format; packed,
 * so no padding is inserted between fields).
 */
struct message_header {
	char type;	/* enum message_type */
	char encapsulated;	/* enum encapsulation_type */
	unsigned short endian_detector;	/* sender writes ENDIAN_LOCAL; mismatch => convert */
	unsigned int nodeid;
} __attribute__((packed));
/*
 * Header of a ring-ordered multicast message (wire format).  The
 * application payload follows this header in the packet.
 */
struct mcast {
	struct message_header header;
	struct srp_addr system_from;
	unsigned int seq;	/* ring-ordered sequence number */
	int this_seqno;
	struct memb_ring_id ring_id;
	unsigned int node_id;
	int guarantee;
} __attribute__((packed));
/*
* MTU - multicast message header - IP header - UDP header
*
* On lossy switches, making use of the DF UDP flag can lead to loss of
* forward progress. So the packets must be fragmented by a higher layer
*
* This layer can only handle packets of MTU size.
*/
#define FRAGMENT_SIZE (FRAME_SIZE_MAX - sizeof (struct mcast) - 20 - 8)
/*
 * One retransmit request: a missing sequence number on a given ring.
 * Carried in the ORF token's trailing rtr_list.
 */
struct rtr_item {
	struct memb_ring_id ring_id;
	unsigned int seq;
}__attribute__((packed));
/*
 * The rotating ORF (Ordering, Reliability, Flow control) token
 * (wire format).  The retransmit request list trails the fixed part
 * as a zero-length array (GNU extension; C99 would use rtr_list[]).
 */
struct orf_token {
	struct message_header header;
	unsigned int seq;
	unsigned int token_seq;
	unsigned int aru;	/* all-received-up-to sequence number */
	unsigned int aru_addr;	/* nodeid that set aru -- TODO confirm */
	struct memb_ring_id ring_id;
	unsigned int backlog;
	unsigned int fcc;	/* flow-control count -- see fcc_* fields in totemsrp_instance */
	int retrans_flg;
	int rtr_list_entries;	/* number of valid entries in rtr_list */
	struct rtr_item rtr_list[0];
}__attribute__((packed));
/*
 * Membership join message (wire format).  Two variable-length srp_addr
 * arrays follow the fixed part; end_of_memb_join marks where they start.
 */
struct memb_join {
	struct message_header header;
	struct srp_addr system_from;
	unsigned int proc_list_entries;
	unsigned int failed_list_entries;
	unsigned long long ring_seq;
	unsigned char end_of_memb_join[0];
	/*
	 * These parts of the data structure are dynamic:
	 * struct srp_addr proc_list[];
	 * struct srp_addr failed_list[];
	 */
} __attribute__((packed));
/*
 * Merge-detect message (wire format): advertises the sender's ring so
 * partitioned rings can discover each other and merge.
 */
struct memb_merge_detect {
	struct message_header header;
	struct srp_addr system_from;
	struct memb_ring_id ring_id;
} __attribute__((packed));
/*
 * Token-hold-cancel message (wire format): asks the token holder to
 * release the held token on the given ring.
 */
struct token_hold_cancel {
	struct message_header header;
	struct memb_ring_id ring_id;
} __attribute__((packed));
/*
 * Per-member recovery state carried in the commit token's memb_list.
 */
struct memb_commit_token_memb_entry {
	struct memb_ring_id ring_id;
	unsigned int aru;
	unsigned int high_delivered;
	unsigned int received_flg;
}__attribute__((packed));
/*
 * Membership commit token (wire format).  Two variable-length arrays
 * follow the fixed part; end_of_commit_token marks where they start.
 */
struct memb_commit_token {
	struct message_header header;
	unsigned int token_seq;
	struct memb_ring_id ring_id;
	unsigned int retrans_flg;
	int memb_index;	/* position of this processor in the addr list -- TODO confirm */
	int addr_entries;
	unsigned char end_of_commit_token[0];
	/*
	 * These parts of the data structure are dynamic:
	 *
	 * struct srp_addr addr[PROCESSOR_COUNT_MAX];
	 * struct memb_commit_token_memb_entry memb_list[PROCESSOR_COUNT_MAX];
	 */
}__attribute__((packed));
/*
 * Entry of the new/retransmit message queues: a multicast message and
 * its length in bytes.
 */
struct message_item {
	struct mcast *mcast;
	unsigned int msg_len;
};
/*
 * Entry of the regular/recovery sort queues: a multicast message and
 * its length in bytes (same layout as struct message_item).
 */
struct sort_queue_item {
	struct mcast *mcast;
	unsigned int msg_len;
};
/*
 * Per-thread scratch state for multicasting on token receipt: an I/O
 * buffer and a PRNG state.
 */
struct orf_token_mcast_thread_state {
	char iobuf[9000];	/* NOTE(review): sized for jumbo frames, presumably >= FRAME_SIZE_MAX -- confirm */
	prng_state prng_state;
};
/*
 * Membership protocol states (normal flow on failure:
 * operational -> gather -> commit -> recovery -> operational).
 */
enum memb_state {
	MEMB_STATE_OPERATIONAL = 1,
	MEMB_STATE_GATHER = 2,
	MEMB_STATE_COMMIT = 3,
	MEMB_STATE_RECOVERY = 4
};
struct totemsrp_instance {
int iface_changes;
/*
* Flow control mcasts and remcasts on last and current orf_token
*/
int fcc_remcast_last;
int fcc_mcast_last;
int fcc_remcast_current;
struct consensus_list_item consensus_list[PROCESSOR_COUNT_MAX];
int consensus_list_entries;
struct srp_addr my_id;
struct srp_addr my_proc_list[PROCESSOR_COUNT_MAX];
struct srp_addr my_failed_list[PROCESSOR_COUNT_MAX];
struct srp_addr my_new_memb_list[PROCESSOR_COUNT_MAX];
struct srp_addr my_trans_memb_list[PROCESSOR_COUNT_MAX];
struct srp_addr my_memb_list[PROCESSOR_COUNT_MAX];
struct srp_addr my_deliver_memb_list[PROCESSOR_COUNT_MAX];
struct srp_addr my_left_memb_list[PROCESSOR_COUNT_MAX];
int my_proc_list_entries;
int my_failed_list_entries;
int my_new_memb_entries;
int my_trans_memb_entries;
int my_memb_entries;
int my_deliver_memb_entries;
int my_left_memb_entries;
struct memb_ring_id my_ring_id;
struct memb_ring_id my_old_ring_id;
int my_aru_count;
int my_merge_detect_timeout_outstanding;
unsigned int my_last_aru;
int my_seq_unchanged;
int my_received_flg;
unsigned int my_high_seq_received;
unsigned int my_install_seq;
int my_rotation_counter;
int my_set_retrans_flg;
int my_retrans_flg_count;
unsigned int my_high_ring_delivered;
int heartbeat_timeout;
/*
* Queues used to order, deliver, and recover messages
*/
struct cs_queue new_message_queue;
struct cs_queue retrans_message_queue;
struct sq regular_sort_queue;
struct sq recovery_sort_queue;
/*
* Received up to and including
*/
unsigned int my_aru;
unsigned int my_high_delivered;
struct list_head token_callback_received_listhead;
struct list_head token_callback_sent_listhead;
char *orf_token_retransmit[TOKEN_SIZE_MAX];
int orf_token_retransmit_size;
unsigned int my_token_seq;
/*
* Timers
*/
poll_timer_handle timer_orf_token_timeout;
poll_timer_handle timer_orf_token_retransmit_timeout;
poll_timer_handle timer_orf_token_hold_retransmit_timeout;
poll_timer_handle timer_merge_detect_timeout;
poll_timer_handle memb_timer_state_gather_join_timeout;
poll_timer_handle memb_timer_state_gather_consensus_timeout;
poll_timer_handle memb_timer_state_commit_timeout;
poll_timer_handle timer_heartbeat_timeout;
/*
* Function and data used to log messages
*/
int totemsrp_log_level_security;
int totemsrp_log_level_error;
int totemsrp_log_level_warning;
int totemsrp_log_level_notice;
int totemsrp_log_level_debug;
int totemsrp_subsys_id;
- void (*totemsrp_log_printf) (int subsys,
- const char *function, const char *file,
- int line, unsigned int level, unsigned int rec_ident,
- const char *format, ...)__attribute__((format(printf, 7, 8)));;
+ void (*totemsrp_log_printf) (
+ unsigned int rec_ident,
+ const char *function,
+ const char *file,
+ int line,
+ const char *format, ...)__attribute__((format(printf, 5, 6)));;
enum memb_state memb_state;
//TODO struct srp_addr next_memb;
hdb_handle_t totemsrp_poll_handle;
struct totem_ip_address mcast_address;
void (*totemsrp_deliver_fn) (
unsigned int nodeid,
const void *msg,
unsigned int msg_len,
int endian_conversion_required);
void (*totemsrp_confchg_fn) (
enum totem_configuration_type configuration_type,
const unsigned int *member_list, size_t member_list_entries,
const unsigned int *left_list, size_t left_list_entries,
const unsigned int *joined_list, size_t joined_list_entries,
const struct memb_ring_id *ring_id);
int global_seqno;
int my_token_held;
unsigned long long token_ring_id_seq;
unsigned int last_released;
unsigned int set_aru;
int old_ring_state_saved;
int old_ring_state_aru;
unsigned int old_ring_state_high_seq_received;
int ring_saved;
unsigned int my_last_seq;
struct timeval tv_old;
hdb_handle_t totemrrp_handle;
struct totem_config *totem_config;
unsigned int use_heartbeat;
unsigned int my_trc;
unsigned int my_pbl;
unsigned int my_cbl;
};
/*
 * Dispatch table type: one handler per enum message_type value.
 */
struct message_handlers {
	int count;	/* number of valid entries in handler_functions */
	int (*handler_functions[6]) (
		struct totemsrp_instance *instance,
		const void *msg,
		size_t msg_len,
		int endian_conversion_needed);
};
/*
* forward decls
*/
static int message_handler_orf_token (
struct totemsrp_instance *instance,
const void *msg,
size_t msg_len,
int endian_conversion_needed);
static int message_handler_mcast (
struct totemsrp_instance *instance,
const void *msg,
size_t msg_len,
int endian_conversion_needed);
static int message_handler_memb_merge_detect (
struct totemsrp_instance *instance,
const void *msg,
size_t msg_len,
int endian_conversion_needed);
static int message_handler_memb_join (
struct totemsrp_instance *instance,
const void *msg,
size_t msg_len,
int endian_conversion_needed);
static int message_handler_memb_commit_token (
struct totemsrp_instance *instance,
const void *msg,
size_t msg_len,
int endian_conversion_needed);
static int message_handler_token_hold_cancel (
struct totemsrp_instance *instance,
const void *msg,
size_t msg_len,
int endian_conversion_needed);
static void totemsrp_instance_initialize (struct totemsrp_instance *instance);
static unsigned int main_msgs_missing (void);
static void main_token_seqid_get (
const void *msg,
unsigned int *seqid,
unsigned int *token_is);
static void srp_addr_copy (struct srp_addr *dest, const struct srp_addr *src);
static void srp_addr_to_nodeid (
unsigned int *nodeid_out,
struct srp_addr *srp_addr_in,
unsigned int entries);
static int srp_addr_equal (const struct srp_addr *a, const struct srp_addr *b);
static void memb_leave_message_send (struct totemsrp_instance *instance);
static void memb_ring_id_create_or_load (struct totemsrp_instance *, struct memb_ring_id *);
static void token_callbacks_execute (struct totemsrp_instance *instance, enum totem_callback_token_type type);
static void memb_state_gather_enter (struct totemsrp_instance *instance, int gather_from);
static void messages_deliver_to_app (struct totemsrp_instance *instance, int skip, unsigned int end_point);
static int orf_token_mcast (struct totemsrp_instance *instance, struct orf_token *oken,
int fcc_mcasts_allowed);
static void messages_free (struct totemsrp_instance *instance, unsigned int token_aru);
static void memb_ring_id_set_and_store (struct totemsrp_instance *instance,
const struct memb_ring_id *ring_id);
static void memb_state_commit_token_update (struct totemsrp_instance *instance, struct memb_commit_token *commit_token);
static void memb_state_commit_token_target_set (struct totemsrp_instance *instance, struct memb_commit_token *commit_token);
static int memb_state_commit_token_send (struct totemsrp_instance *instance, struct memb_commit_token *memb_commit_token);
static void memb_state_commit_token_create (struct totemsrp_instance *instance, struct memb_commit_token *commit_token);
static int token_hold_cancel_send (struct totemsrp_instance *instance);
static void orf_token_endian_convert (const struct orf_token *in, struct orf_token *out);
static void memb_commit_token_endian_convert (const struct memb_commit_token *in, struct memb_commit_token *out);
static void memb_join_endian_convert (const struct memb_join *in, struct memb_join *out);
static void mcast_endian_convert (const struct mcast *in, struct mcast *out);
static void memb_merge_detect_endian_convert (
const struct memb_merge_detect *in,
struct memb_merge_detect *out);
static void srp_addr_copy_endian_convert (struct srp_addr *out, const struct srp_addr *in);
static void timer_function_orf_token_timeout (void *data);
static void timer_function_heartbeat_timeout (void *data);
static void timer_function_token_retransmit_timeout (void *data);
static void timer_function_token_hold_retransmit_timeout (void *data);
static void timer_function_merge_detect_timeout (void *data);
void main_deliver_fn (
void *context,
const void *msg,
unsigned int msg_len);
void main_iface_change_fn (
void *context,
const struct totem_ip_address *iface_address,
unsigned int iface_no);
/*
* All instances in one database
*/
DECLARE_HDB_DATABASE (totemsrp_instance_database,NULL);
/*
 * Handler table indexed by enum message_type; entry order must match
 * the enum's numeric values (ORF token = 0 ... token hold cancel = 5).
 */
struct message_handlers totemsrp_message_handlers = {
	6,
	{
		message_handler_orf_token,
		message_handler_mcast,
		message_handler_memb_merge_detect,
		message_handler_memb_join,
		message_handler_memb_commit_token,
		message_handler_token_hold_cancel
	}
};
/* Working directory (see totemsrp_initialize): $COROSYNC_RUN_DIR or LOCALSTATEDIR/lib/corosync. */
static const char *rundir = NULL;
/*
 * Route a log record through the enclosing function's `instance`
 * logging callback, tagging it with the subsystem id and call site.
 *
 * Fixes: resolved unified-diff hunk residue (old and new callback
 * invocation were both present with -/+ prefixes) to the post-image
 * LOGSYS_ENCODE_RECID form, and dropped the trailing semicolon after
 * `while (0)` so the macro composes correctly with if/else.
 */
#define log_printf(level, format, args...) \
do { \
	instance->totemsrp_log_printf ( \
		LOGSYS_ENCODE_RECID(level, \
			instance->totemsrp_subsys_id, \
			LOGSYS_RECID_LOG), \
		__FUNCTION__, __FILE__, __LINE__, \
		format, ##args); \
} while (0)
/*
 * Reset an instance to its pre-membership defaults: zero everything,
 * then set the few fields whose starting value is non-zero.
 */
static void totemsrp_instance_initialize (struct totemsrp_instance *instance)
{
	memset (instance, 0, sizeof (struct totemsrp_instance));
	list_init (&instance->token_callback_received_listhead);
	list_init (&instance->token_callback_sent_listhead);
	instance->my_received_flg = 1;
	/* my_token_seq is unsigned; with SEQNO_START_TOKEN == 0 this
	 * deliberately wraps to 0xffffffff ("one before the first"). */
	instance->my_token_seq = SEQNO_START_TOKEN - 1;
	instance->memb_state = MEMB_STATE_OPERATIONAL;
	/* set_aru is unsigned; -1 wraps to UINT_MAX as a "not set" marker. */
	instance->set_aru = -1;
	instance->my_aru = SEQNO_START_MSG;
	instance->my_high_seq_received = SEQNO_START_MSG;
	instance->my_high_delivered = SEQNO_START_MSG;
}
/*
 * Report the token sequence id of a raw message.  For ORF tokens,
 * *token_is is set to 1 and *seqid to the token's sequence number;
 * for every other message type both outputs are zero.
 */
static void main_token_seqid_get (
	const void *msg,
	unsigned int *seqid,
	unsigned int *token_is)
{
	const struct orf_token *token = msg;

	if (token->header.type != MESSAGE_TYPE_ORF_TOKEN) {
		*seqid = 0;
		*token_is = 0;
		return;
	}
	*seqid = token->token_seq;
	*token_is = 1;
}
/*
 * Number of messages known to be missing.  Not implemented yet
 * (TODO in the original); always reports zero.
 */
static unsigned int main_msgs_missing (void)
{
	return 0;
}
/*
* Exported interfaces
*/
/*
 * Create and initialize a totemsrp protocol instance.
 *
 * poll_handle - event loop the instance's timers/sockets attach to
 * handle      - out: handle to the new instance in the instance database
 * totem_config- configuration (timeouts, interfaces, logging); retained
 *               by pointer for the life of the instance
 * deliver_fn  - callback invoked for each ordered message delivered
 * confchg_fn  - callback invoked on each configuration (membership) change
 *
 * Returns 0 on success, -1 on failure (handle creation, chdir into the
 * run directory, etc.); on failure all partially-created state is
 * released via the error_* unwind labels.
 */
int totemsrp_initialize (
	hdb_handle_t poll_handle,
	hdb_handle_t *handle,
	struct totem_config *totem_config,

	void (*deliver_fn) (
		unsigned int nodeid,
		const void *msg,
		unsigned int msg_len,
		int endian_conversion_required),

	void (*confchg_fn) (
		enum totem_configuration_type configuration_type,
		const unsigned int *member_list, size_t member_list_entries,
		const unsigned int *left_list, size_t left_list_entries,
		const unsigned int *joined_list, size_t joined_list_entries,
		const struct memb_ring_id *ring_id))
{
	struct totemsrp_instance *instance;
	unsigned int res;

	res = hdb_handle_create (&totemsrp_instance_database,
		sizeof (struct totemsrp_instance), handle);
	if (res != 0) {
		goto error_exit;
	}
	res = hdb_handle_get (&totemsrp_instance_database, *handle,
		(void *)&instance);
	if (res != 0) {
		goto error_destroy;
	}

	/*
	 * Make the run directory the working directory (presumably where
	 * ring-id state files are kept -- see memb_ring_id_create_or_load).
	 * An already-existing directory is fine (EEXIST is ignored).
	 */
	rundir = getenv ("COROSYNC_RUN_DIR");
	if (rundir == NULL) {
		rundir = LOCALSTATEDIR "/lib/corosync";
	}

	res = mkdir (rundir, 0700);
	if (res == -1 && errno != EEXIST) {
		goto error_put;
	}

	res = chdir (rundir);
	if (res == -1) {
		goto error_put;
	}

	totemsrp_instance_initialize (instance);

	instance->totem_config = totem_config;

	/*
	 * Configure logging
	 */
	instance->totemsrp_log_level_security = totem_config->totem_logging_configuration.log_level_security;
	instance->totemsrp_log_level_error = totem_config->totem_logging_configuration.log_level_error;
	instance->totemsrp_log_level_warning = totem_config->totem_logging_configuration.log_level_warning;
	instance->totemsrp_log_level_notice = totem_config->totem_logging_configuration.log_level_notice;
	instance->totemsrp_log_level_debug = totem_config->totem_logging_configuration.log_level_debug;
	instance->totemsrp_subsys_id = totem_config->totem_logging_configuration.log_subsys_id;
	instance->totemsrp_log_printf = totem_config->totem_logging_configuration.log_printf;

	/*
	 * Initialize local variables for totemsrp
	 */
	totemip_copy (&instance->mcast_address, &totem_config->interfaces[0].mcast_addr);

	/*
	 * Display totem configuration
	 */
	log_printf (instance->totemsrp_log_level_notice,
		"Token Timeout (%d ms) retransmit timeout (%d ms)\n",
		totem_config->token_timeout, totem_config->token_retransmit_timeout);
	log_printf (instance->totemsrp_log_level_notice,
		"token hold (%d ms) retransmits before loss (%d retrans)\n",
		totem_config->token_hold_timeout, totem_config->token_retransmits_before_loss_const);
	log_printf (instance->totemsrp_log_level_notice,
		"join (%d ms) send_join (%d ms) consensus (%d ms) merge (%d ms)\n",
		totem_config->join_timeout,
		totem_config->send_join_timeout,
		totem_config->consensus_timeout,
		totem_config->merge_timeout);
	log_printf (instance->totemsrp_log_level_notice,
		"downcheck (%d ms) fail to recv const (%d msgs)\n",
		totem_config->downcheck_timeout, totem_config->fail_to_recv_const);
	log_printf (instance->totemsrp_log_level_notice,
		"seqno unchanged const (%d rotations) Maximum network MTU %d\n", totem_config->seqno_unchanged_const, totem_config->net_mtu);
	log_printf (instance->totemsrp_log_level_notice,
		"window size per rotation (%d messages) maximum messages per rotation (%d messages)\n",
		totem_config->window_size, totem_config->max_messages);
	log_printf (instance->totemsrp_log_level_notice,
		"send threads (%d threads)\n", totem_config->threads);
	log_printf (instance->totemsrp_log_level_notice,
		"RRP token expired timeout (%d ms)\n",
		totem_config->rrp_token_expired_timeout);
	log_printf (instance->totemsrp_log_level_notice,
		"RRP token problem counter (%d ms)\n",
		totem_config->rrp_problem_count_timeout);
	log_printf (instance->totemsrp_log_level_notice,
		"RRP threshold (%d problem count)\n",
		totem_config->rrp_problem_count_threshold);
	log_printf (instance->totemsrp_log_level_notice,
		"RRP mode set to %s.\n", instance->totem_config->rrp_mode);
	log_printf (instance->totemsrp_log_level_notice,
		"heartbeat_failures_allowed (%d)\n", totem_config->heartbeat_failures_allowed);
	log_printf (instance->totemsrp_log_level_notice,
		"max_network_delay (%d ms)\n", totem_config->max_network_delay);

	cs_queue_init (&instance->retrans_message_queue, RETRANS_MESSAGE_QUEUE_SIZE_MAX,
		sizeof (struct message_item));

	sq_init (&instance->regular_sort_queue,
		QUEUE_RTR_ITEMS_SIZE_MAX, sizeof (struct sort_queue_item), 0);

	sq_init (&instance->recovery_sort_queue,
		QUEUE_RTR_ITEMS_SIZE_MAX, sizeof (struct sort_queue_item), 0);

	instance->totemsrp_poll_handle = poll_handle;

	instance->totemsrp_deliver_fn = deliver_fn;

	instance->totemsrp_confchg_fn = confchg_fn;
	instance->use_heartbeat = 1;

	/*
	 * Heartbeat is only usable when its total timeout (failures
	 * allowed * retransmit timeout + max network delay) stays below
	 * the token timeout; otherwise it is disabled.
	 */
	if ( totem_config->heartbeat_failures_allowed == 0 ) {
		log_printf (instance->totemsrp_log_level_notice,
			"HeartBeat is Disabled. To enable set heartbeat_failures_allowed > 0\n");
		instance->use_heartbeat = 0;
	}

	if (instance->use_heartbeat) {
		instance->heartbeat_timeout
			= (totem_config->heartbeat_failures_allowed) * totem_config->token_retransmit_timeout
				+ totem_config->max_network_delay;

		if (instance->heartbeat_timeout >= totem_config->token_timeout) {
			log_printf (instance->totemsrp_log_level_notice,
				"total heartbeat_timeout (%d ms) is not less than token timeout (%d ms)\n",
				instance->heartbeat_timeout,
				totem_config->token_timeout);
			log_printf (instance->totemsrp_log_level_notice,
				"heartbeat_timeout = heartbeat_failures_allowed * token_retransmit_timeout + max_network_delay\n");
			log_printf (instance->totemsrp_log_level_notice,
				"heartbeat timeout should be less than the token timeout. HeartBeat is Diabled !!\n");
			instance->use_heartbeat = 0;
		}
		else {
			log_printf (instance->totemsrp_log_level_notice,
				"total heartbeat_timeout (%d ms)\n", instance->heartbeat_timeout);
		}
	}

	totemrrp_initialize (
		poll_handle,
		&instance->totemrrp_handle,
		totem_config,
		instance,
		main_deliver_fn,
		main_iface_change_fn,
		main_token_seqid_get,
		main_msgs_missing);

	/*
	 * Must have net_mtu adjusted by totemrrp_initialize first
	 */
	cs_queue_init (&instance->new_message_queue,
		MESSAGE_QUEUE_MAX,
		sizeof (struct message_item));

	hdb_handle_put (&totemsrp_instance_database, *handle);
	return (0);

error_put:
	hdb_handle_put (&totemsrp_instance_database, *handle);

error_destroy:
	hdb_handle_destroy (&totemsrp_instance_database, *handle);

error_exit:
	return (-1);
}
/*
 * Shut down an instance: announce our departure to the ring, then
 * release the handle reference.  A stale handle is silently ignored.
 */
void totemsrp_finalize (
	hdb_handle_t handle)
{
	struct totemsrp_instance *instance;

	if (hdb_handle_get (&totemsrp_instance_database, handle,
	    (void *)&instance) != 0) {
		return;
	}

	memb_leave_message_send (instance);
	hdb_handle_put (&totemsrp_instance_database, handle);
}
/*
 * Look up the interface addresses of a node id.
 *
 * Searches the current membership first, then the list of members that
 * left.  On a hit, copies the node's srp_addr into *interfaces, sets
 * *iface_count, and returns 0 (res still holds the successful
 * hdb_handle_get result).  Returns -1 when the node id is unknown;
 * note the RRP status is fetched in that case too, since the not-found
 * path falls through to `finish`.
 */
int totemsrp_ifaces_get (
	hdb_handle_t handle,
	unsigned int nodeid,
	struct totem_ip_address *interfaces,
	char ***status,
	unsigned int *iface_count)
{
	struct totemsrp_instance *instance;
	int res;
	unsigned int found = 0;
	unsigned int i;

	res = hdb_handle_get (&totemsrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}

	/* Current ring members. */
	for (i = 0; i < instance->my_memb_entries; i++) {
		if (instance->my_memb_list[i].addr[0].nodeid == nodeid) {
			found = 1;
			break;
		}
	}

	if (found) {
		memcpy (interfaces, &instance->my_memb_list[i],
			sizeof (struct srp_addr));
		*iface_count = instance->totem_config->interface_count;
		goto finish;
	}

	/* Members that have left the ring. */
	for (i = 0; i < instance->my_left_memb_entries; i++) {
		if (instance->my_left_memb_list[i].addr[0].nodeid == nodeid) {
			found = 1;
			break;
		}
	}

	if (found) {
		memcpy (interfaces, &instance->my_left_memb_list[i],
			sizeof (struct srp_addr));
		*iface_count = instance->totem_config->interface_count;
	} else {
		res = -1;
	}

finish:
	totemrrp_ifaces_get (instance->totemrrp_handle, status, NULL);
	hdb_handle_put (&totemsrp_instance_database, handle);

error_exit:
	return (res);
}
/*
 * Change the crypto mode of the underlying RRP layer.  Returns the
 * totemrrp_crypto_set result, or 0 when the handle is invalid.
 */
int totemsrp_crypto_set (
	hdb_handle_t handle,
	unsigned int type)
{
	struct totemsrp_instance *inst;
	int rc;

	rc = hdb_handle_get (&totemsrp_instance_database, handle,
		(void *)&inst);
	if (rc != 0) {
		return (0);
	}

	rc = totemrrp_crypto_set (inst->totemrrp_handle, type);
	hdb_handle_put (&totemsrp_instance_database, handle);

	return (rc);
}
/*
 * Node id this instance is bound to on its first interface, or 0 when
 * the handle is invalid.
 */
unsigned int totemsrp_my_nodeid_get (
	hdb_handle_t handle)
{
	struct totemsrp_instance *inst;
	unsigned int nodeid;

	if (hdb_handle_get (&totemsrp_instance_database, handle,
	    (void *)&inst) != 0) {
		return (0);
	}

	nodeid = inst->totem_config->interfaces[0].boundto.nodeid;
	hdb_handle_put (&totemsrp_instance_database, handle);

	return (nodeid);
}
/*
 * Address family this instance is bound to on its first interface, or
 * 0 when the handle is invalid.
 */
int totemsrp_my_family_get (
	hdb_handle_t handle)
{
	struct totemsrp_instance *inst;
	int family;

	if (hdb_handle_get (&totemsrp_instance_database, handle,
	    (void *)&inst) != 0) {
		return (0);
	}

	family = inst->totem_config->interfaces[0].boundto.family;
	hdb_handle_put (&totemsrp_instance_database, handle);

	return (family);
}
/*
 * Re-enable any failed redundant rings at the RRP layer.  Returns the
 * hdb_handle_get result (0 on success).
 */
int totemsrp_ring_reenable (
	hdb_handle_t handle)
{
	struct totemsrp_instance *inst;
	int rc;

	rc = hdb_handle_get (&totemsrp_instance_database, handle,
		(void *)&inst);
	if (rc == 0) {
		totemrrp_ring_reenable (inst->totemrrp_handle);
		hdb_handle_put (&totemsrp_instance_database, handle);
	}

	return (rc);
}
/*
* Set operations for use by the membership algorithm
*/
/*
 * Processor identity comparison.  Only the first (primary) interface
 * address decides equality, exactly as the original one-iteration loop
 * did.  Returns 1 when equal, 0 otherwise.
 */
static int srp_addr_equal (const struct srp_addr *a, const struct srp_addr *b)
{
	return (totemip_equal (&a->addr[0], &b->addr[0]) != 0);
}
/*
 * Copy a processor identity: every per-interface address.
 */
static void srp_addr_copy (struct srp_addr *dest, const struct srp_addr *src)
{
	unsigned int iface;

	for (iface = 0; iface < INTERFACE_MAX; iface++) {
		totemip_copy (&dest->addr[iface], &src->addr[iface]);
	}
}
/*
 * Project a list of processor identities onto their primary-interface
 * node ids.
 */
static void srp_addr_to_nodeid (
	unsigned int *nodeid_out,
	struct srp_addr *srp_addr_in,
	unsigned int entries)
{
	unsigned int idx;

	for (idx = 0; idx < entries; idx++) {
		nodeid_out[idx] = srp_addr_in[idx].addr[0].nodeid;
	}
}
/*
 * Copy a processor identity, byte-swapping each address in the process.
 */
static void srp_addr_copy_endian_convert (struct srp_addr *out, const struct srp_addr *in)
{
	int iface;

	for (iface = 0; iface < INTERFACE_MAX; iface++) {
		totemip_copy_endian_convert (&out->addr[iface], &in->addr[iface]);
	}
}
/*
 * Discard every recorded consensus vote.
 */
static void memb_consensus_reset (struct totemsrp_instance *instance)
{
	instance->consensus_list_entries = 0;
}
/*
 * Set difference: out_list = one_list \ two_list.
 * out_list must have room for one_list_entries entries.
 */
static void memb_set_subtract (
	struct srp_addr *out_list, int *out_list_entries,
	struct srp_addr *one_list, int one_list_entries,
	struct srp_addr *two_list, int two_list_entries)
{
	int i;
	int j;
	int in_two;

	*out_list_entries = 0;
	for (i = 0; i < one_list_entries; i++) {
		in_two = 0;
		for (j = 0; j < two_list_entries; j++) {
			if (srp_addr_equal (&one_list[i], &two_list[j])) {
				in_two = 1;
				break;
			}
		}
		if (in_two == 0) {
			srp_addr_copy (&out_list[*out_list_entries], &one_list[i]);
			*out_list_entries = *out_list_entries + 1;
		}
	}
}
/*
 * Set consensus for a specific processor
 */
static void memb_consensus_set (
	struct totemsrp_instance *instance,
	const struct srp_addr *addr)
{
	int found = 0;
	int i;

	/* Leave messages carry a dummy node id; they never vote. */
	if (addr->addr[0].nodeid == LEAVE_DUMMY_NODEID)
		return;

	for (i = 0; i < instance->consensus_list_entries; i++) {
		if (srp_addr_equal(addr, &instance->consensus_list[i].addr)) {
			found = 1;
			break; /* found entry */
		}
	}
	/*
	 * When not found, i == consensus_list_entries here, so the write
	 * below appends a fresh slot; assumes the list never grows past
	 * PROCESSOR_COUNT_MAX.
	 */
	srp_addr_copy (&instance->consensus_list[i].addr, addr);
	instance->consensus_list[i].set = 1;
	if (found == 0) {
		instance->consensus_list_entries++;
	}
	return;
}
/*
 * Has the given processor already reached consensus?  Returns its
 * recorded `set` flag, or 0 when it has no entry at all.
 */
static int memb_consensus_isset (
	struct totemsrp_instance *instance,
	const struct srp_addr *addr)
{
	int idx;

	for (idx = 0; idx < instance->consensus_list_entries; idx++) {
		if (srp_addr_equal (addr, &instance->consensus_list[idx].addr)) {
			return (instance->consensus_list[idx].set);
		}
	}

	return (0);
}
/*
 * Is consensus agreed upon based upon consensus database
 */
static int memb_consensus_agreed (
	struct totemsrp_instance *instance)
{
	struct srp_addr token_memb[PROCESSOR_COUNT_MAX];
	int token_memb_entries = 0;
	int agreed = 1;
	int i;

	/* Candidate membership: processors not known to have failed. */
	memb_set_subtract (token_memb, &token_memb_entries,
		instance->my_proc_list, instance->my_proc_list_entries,
		instance->my_failed_list, instance->my_failed_list_entries);

	/* Agreement requires a consensus vote from every candidate. */
	for (i = 0; i < token_memb_entries; i++) {
		if (memb_consensus_isset (instance, &token_memb[i]) == 0) {
			agreed = 0;
			break;
		}
	}
	/* At minimum this processor itself must remain a candidate. */
	assert (token_memb_entries >= 1);

	return (agreed);
}
/*
 * Collect the processors from my_proc_list that have not yet reached
 * consensus.
 *
 * NOTE(review): comparison_list/comparison_list_entries are accepted
 * but never used -- the scan is always over instance->my_proc_list
 * (the only visible caller passes exactly that list).  Verify before
 * relying on the parameters.
 */
static void memb_consensus_notset (
	struct totemsrp_instance *instance,
	struct srp_addr *no_consensus_list,
	int *no_consensus_list_entries,
	struct srp_addr *comparison_list,
	int comparison_list_entries)
{
	int i;

	*no_consensus_list_entries = 0;

	for (i = 0; i < instance->my_proc_list_entries; i++) {
		if (memb_consensus_isset (instance, &instance->my_proc_list[i]) == 0) {
			srp_addr_copy (&no_consensus_list[*no_consensus_list_entries], &instance->my_proc_list[i]);
			*no_consensus_list_entries = *no_consensus_list_entries + 1;
		}
	}
}
/*
 * Do set1 and set2 contain exactly the same processors?  Entries may
 * appear in any order.  Returns 1 when equal, 0 otherwise.
 */
static int memb_set_equal (
	struct srp_addr *set1, int set1_entries,
	struct srp_addr *set2, int set2_entries)
{
	int i;
	int j;
	int present;

	if (set1_entries != set2_entries) {
		return (0);
	}
	/* Same cardinality, so set2 subset-of set1 implies equality. */
	for (i = 0; i < set2_entries; i++) {
		present = 0;
		for (j = 0; j < set1_entries; j++) {
			if (srp_addr_equal (&set1[j], &set2[i])) {
				present = 1;
				break;
			}
		}
		if (present == 0) {
			return (0);
		}
	}

	return (1);
}
/*
 * Is subset fully contained in fullset?  Entry order is irrelevant;
 * returns 1 when every subset entry appears in fullset, 0 otherwise.
 *
 * Fix: break out of the inner scan on the first match.  The original
 * kept comparing against the rest of fullset after a hit -- harmless
 * but wasteful, and inconsistent with the sibling set operations
 * (memb_set_equal, memb_set_merge, memb_set_and), which all break.
 */
static int memb_set_subset (
	const struct srp_addr *subset, int subset_entries,
	const struct srp_addr *fullset, int fullset_entries)
{
	int i;
	int j;
	int found = 0;

	if (subset_entries > fullset_entries) {
		return (0);
	}
	for (i = 0; i < subset_entries; i++) {
		for (j = 0; j < fullset_entries; j++) {
			if (srp_addr_equal (&subset[i], &fullset[j])) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			return (0);
		}
		found = 0;
	}
	return (1);
}
/*
 * Append to fullset every member of subset not already present
 * (union, preserving fullset's existing order and avoiding duplicates).
 */
static void memb_set_merge (
	const struct srp_addr *subset, int subset_entries,
	struct srp_addr *fullset, int *fullset_entries)
{
	int i;
	int j;
	int present;

	for (i = 0; i < subset_entries; i++) {
		present = 0;
		for (j = 0; j < *fullset_entries; j++) {
			if (srp_addr_equal (&fullset[j], &subset[i])) {
				present = 1;
				break;
			}
		}
		if (present == 0) {
			srp_addr_copy (&fullset[*fullset_entries], &subset[i]);
			*fullset_entries = *fullset_entries + 1;
		}
	}
}
/*
 * Set intersection: and = set1 ∩ set2, with result entries copied from
 * set1 (the matching set1 element, not the set2 one).
 */
static void memb_set_and (
	struct srp_addr *set1, int set1_entries,
	struct srp_addr *set2, int set2_entries,
	struct srp_addr *and, int *and_entries)
{
	int i;
	int j;
	int match;

	*and_entries = 0;
	for (i = 0; i < set2_entries; i++) {
		match = 0;
		for (j = 0; j < set1_entries; j++) {
			if (srp_addr_equal (&set1[j], &set2[i])) {
				match = 1;
				break;
			}
		}
		if (match) {
			/* j still indexes the matching set1 entry here. */
			srp_addr_copy (&and[*and_entries], &set1[j]);
			*and_entries = *and_entries + 1;
		}
	}
}
#ifdef CODE_COVERAGE
static void memb_set_print (
char *string,
struct srp_addr *list,
int list_entries)
{
int i;
int j;
printf ("List '%s' contains %d entries:\n", string, list_entries);
for (i = 0; i < list_entries; i++) {
for (j = 0; j < INTERFACE_MAX; j++) {
printf ("Address %d\n", i);
printf ("\tiface %d %s\n", j, totemip_print (&list[i].addr[j]));
printf ("family %d\n", list[i].addr[j].family);
}
}
}
#endif
/*
 * (Re)arm the token retransmit timer, cancelling any pending instance.
 */
static void reset_token_retransmit_timeout (struct totemsrp_instance *instance)
{
	poll_timer_delete (instance->totemsrp_poll_handle,
		instance->timer_orf_token_retransmit_timeout);
	poll_timer_add (instance->totemsrp_poll_handle,
		instance->totem_config->token_retransmit_timeout,
		(void *)instance,
		timer_function_token_retransmit_timeout,
		&instance->timer_orf_token_retransmit_timeout);
}
/*
 * Arm the merge-detect timer unless one is already outstanding
 * (the flag makes repeated calls idempotent).
 */
static void start_merge_detect_timeout (struct totemsrp_instance *instance)
{
	if (instance->my_merge_detect_timeout_outstanding == 0) {
		poll_timer_add (instance->totemsrp_poll_handle,
			instance->totem_config->merge_timeout,
			(void *)instance,
			timer_function_merge_detect_timeout,
			&instance->timer_merge_detect_timeout);

		instance->my_merge_detect_timeout_outstanding = 1;
	}
}
/*
 * Cancel the merge-detect timer and clear the outstanding flag so
 * start_merge_detect_timeout() can arm it again.
 */
static void cancel_merge_detect_timeout (struct totemsrp_instance *instance)
{
	poll_timer_delete (instance->totemsrp_poll_handle, instance->timer_merge_detect_timeout);
	instance->my_merge_detect_timeout_outstanding = 0;
}
/*
 * ring_state_* is used to save and restore the sort queue
 * state when a recovery operation fails (and enters gather)
 */
static void old_ring_state_save (struct totemsrp_instance *instance)
{
	/* Only the first save after a reset wins; later calls are no-ops
	 * until old_ring_state_reset() clears the flag. */
	if (instance->old_ring_state_saved == 0) {
		instance->old_ring_state_saved = 1;
		instance->old_ring_state_aru = instance->my_aru;
		instance->old_ring_state_high_seq_received = instance->my_high_seq_received;
		log_printf (instance->totemsrp_log_level_notice,
			"Saving state aru %x high seq received %x\n",
			instance->my_aru, instance->my_high_seq_received);
	}
}
/*
 * Remember the current ring id (first call after a ring_reset() wins).
 */
static void ring_save (struct totemsrp_instance *instance)
{
	if (instance->ring_saved == 0) {
		instance->ring_saved = 1;
		memcpy (&instance->my_old_ring_id, &instance->my_ring_id,
			sizeof (struct memb_ring_id));
	}
}
/*
 * Allow ring_save() to capture the ring id again.
 */
static void ring_reset (struct totemsrp_instance *instance)
{
	instance->ring_saved = 0;
}
/*
 * Roll back aru/high-seq to the values captured by old_ring_state_save()
 * (used when recovery fails and we fall back to gather).  Also zeroes
 * the ring representative address.
 */
static void ring_state_restore (struct totemsrp_instance *instance)
{
	if (instance->old_ring_state_saved) {
		totemip_zero_set(&instance->my_ring_id.rep);
		instance->my_aru = instance->old_ring_state_aru;
		instance->my_high_seq_received = instance->old_ring_state_high_seq_received;
		log_printf (instance->totemsrp_log_level_notice,
			"Restoring instance->my_aru %x my high seq received %x\n",
			instance->my_aru, instance->my_high_seq_received);
	}
}
/*
 * Allow old_ring_state_save() to capture state again.
 */
static void old_ring_state_reset (struct totemsrp_instance *instance)
{
	instance->old_ring_state_saved = 0;
}
/* (Re)arm the token-loss timer, cancelling any pending instance. */
static void reset_token_timeout (struct totemsrp_instance *instance) {
	poll_timer_delete (instance->totemsrp_poll_handle, instance->timer_orf_token_timeout);
	poll_timer_add (instance->totemsrp_poll_handle,
		instance->totem_config->token_timeout,
		(void *)instance,
		timer_function_orf_token_timeout,
		&instance->timer_orf_token_timeout);
}
/* (Re)arm the heartbeat timer, cancelling any pending instance. */
static void reset_heartbeat_timeout (struct totemsrp_instance *instance) {
	poll_timer_delete (instance->totemsrp_poll_handle, instance->timer_heartbeat_timeout);
	poll_timer_add (instance->totemsrp_poll_handle,
		instance->heartbeat_timeout,
		(void *)instance,
		timer_function_heartbeat_timeout,
		&instance->timer_heartbeat_timeout);
}
/* Cancel the token-loss timer. */
static void cancel_token_timeout (struct totemsrp_instance *instance) {
	poll_timer_delete (instance->totemsrp_poll_handle, instance->timer_orf_token_timeout);
}
/* Cancel the heartbeat timer. */
static void cancel_heartbeat_timeout (struct totemsrp_instance *instance) {
	poll_timer_delete (instance->totemsrp_poll_handle, instance->timer_heartbeat_timeout);
}
/* Cancel the token retransmit timer. */
static void cancel_token_retransmit_timeout (struct totemsrp_instance *instance)
{
	poll_timer_delete (instance->totemsrp_poll_handle, instance->timer_orf_token_retransmit_timeout);
}
/*
 * Arm the token-hold retransmit timer (fires after token_hold_timeout).
 * Unlike the reset_* helpers, no prior timer is deleted first.
 */
static void start_token_hold_retransmit_timeout (struct totemsrp_instance *instance)
{
	poll_timer_add (instance->totemsrp_poll_handle,
		instance->totem_config->token_hold_timeout,
		(void *)instance,
		timer_function_token_hold_retransmit_timeout,
		&instance->timer_orf_token_hold_retransmit_timeout);
}
/* Cancel the token-hold retransmit timer. */
static void cancel_token_hold_retransmit_timeout (struct totemsrp_instance *instance)
{
	poll_timer_delete (instance->totemsrp_poll_handle,
		instance->timer_orf_token_hold_retransmit_timeout);
}
/*
 * Consensus timer expiry: if all candidate processors agreed, restart
 * the consensus round with only our own vote recorded; otherwise mark
 * the non-responders failed and re-enter the gather state.
 */
static void memb_state_consensus_timeout_expired (
		struct totemsrp_instance *instance)
{
	struct srp_addr no_consensus_list[PROCESSOR_COUNT_MAX];
	int no_consensus_list_entries;

	if (memb_consensus_agreed (instance)) {
		memb_consensus_reset (instance);

		memb_consensus_set (instance, &instance->my_id);

		reset_token_timeout (instance); // REVIEWED
	} else {
		memb_consensus_notset (
			instance,
			no_consensus_list,
			&no_consensus_list_entries,
			instance->my_proc_list,
			instance->my_proc_list_entries);

		/* Treat every processor without consensus as failed. */
		memb_set_merge (no_consensus_list, no_consensus_list_entries,
			instance->my_failed_list, &instance->my_failed_list_entries);
		memb_state_gather_enter (instance, 0);
	}
}
static void memb_join_message_send (struct totemsrp_instance *instance);
static void memb_merge_detect_transmit (struct totemsrp_instance *instance);
/*
* Timers used for various states of the membership algorithm
*/
/*
 * Poll timer callback: the token loss timeout expired.  The reaction
 * depends on the current membership state; every case falls back to
 * GATHER with a distinct gather cause id (2-5) for logging.
 */
static void timer_function_orf_token_timeout (void *data)
{
struct totemsrp_instance *instance = data;
switch (instance->memb_state) {
case MEMB_STATE_OPERATIONAL:
log_printf (instance->totemsrp_log_level_notice,
"The token was lost in the OPERATIONAL state.\n");
/* Re-check interface status before trying to form a new ring */
totemrrp_iface_check (instance->totemrrp_handle);
memb_state_gather_enter (instance, 2);
break;
case MEMB_STATE_GATHER:
log_printf (instance->totemsrp_log_level_notice,
"The consensus timeout expired.\n");
memb_state_consensus_timeout_expired (instance);
memb_state_gather_enter (instance, 3);
break;
case MEMB_STATE_COMMIT:
log_printf (instance->totemsrp_log_level_notice,
"The token was lost in the COMMIT state.\n");
memb_state_gather_enter (instance, 4);
break;
case MEMB_STATE_RECOVERY:
log_printf (instance->totemsrp_log_level_notice,
"The token was lost in the RECOVERY state.\n");
/* Roll back to the pre-recovery ring state before regathering */
ring_state_restore (instance);
memb_state_gather_enter (instance, 5);
break;
}
}
/*
 * Poll timer callback: a heartbeat failure is treated exactly like a
 * lost token — log it and invoke the token loss handler.
 */
static void timer_function_heartbeat_timeout (void *data)
{
	struct totemsrp_instance *srp_instance = data;

	log_printf (srp_instance->totemsrp_log_level_notice,
		"HeartBeat Timer expired Invoking token loss mechanism in state %d \n",
		srp_instance->memb_state);

	timer_function_orf_token_timeout (data);
}
/*
 * Poll timer callback: the join timeout expired while forming a new
 * ring.  Resend the join message and rearm the timer.  This timer is
 * only armed in GATHER/COMMIT, so firing in any other state is a bug.
 */
static void memb_timer_function_state_gather (void *data)
{
struct totemsrp_instance *instance = data;
switch (instance->memb_state) {
case MEMB_STATE_OPERATIONAL:
case MEMB_STATE_RECOVERY:
assert (0); /* this should never happen */
break;
case MEMB_STATE_GATHER:
case MEMB_STATE_COMMIT:
memb_join_message_send (instance);
/*
 * Restart the join timeout
 */
poll_timer_delete (instance->totemsrp_poll_handle, instance->memb_timer_state_gather_join_timeout);
poll_timer_add (instance->totemsrp_poll_handle,
instance->totem_config->join_timeout,
(void *)instance,
memb_timer_function_state_gather,
&instance->memb_timer_state_gather_join_timeout);
break;
}
}
/*
 * Poll timer callback: the consensus timeout fired while gathering;
 * delegate to the consensus expiry handler.
 */
static void memb_timer_function_gather_consensus_timeout (void *data)
{
	memb_state_consensus_timeout_expired (
		(struct totemsrp_instance *)data);
}
/*
 * After a ring transition, move messages that were re-originated on
 * the recovery ring back into the regular sort queue so they can be
 * delivered to the application in the transitional configuration.
 *
 * Only encapsulated recovery messages whose inner mcast carries the
 * previous ring id (my_old_ring_id) are transferred; all others are
 * skipped.  old_ring_state_high_seq_received is advanced to the
 * highest transferred sequence number.
 */
static void deliver_messages_from_recovery_to_regular (struct totemsrp_instance *instance)
{
unsigned int i;
struct sort_queue_item *recovery_message_item;
struct sort_queue_item regular_message_item;
unsigned int range = 0;
int res;
void *ptr;
struct mcast *mcast;
log_printf (instance->totemsrp_log_level_debug,
"recovery to regular %x-%x\n", SEQNO_START_MSG + 1, instance->my_aru);
range = instance->my_aru - SEQNO_START_MSG;
/*
 * Move messages from recovery to regular sort queue
 */
// todo should i be initialized to 0 or 1 ?
for (i = 1; i <= range; i++) {
res = sq_item_get (&instance->recovery_sort_queue,
i + SEQNO_START_MSG, &ptr);
if (res != 0) {
/* Hole in the recovery queue: nothing stored at this seqno */
continue;
}
recovery_message_item = ptr;
/*
 * Convert recovery message into regular message
 */
mcast = recovery_message_item->mcast;
if (mcast->header.encapsulated == MESSAGE_ENCAPSULATED) {
/*
 * Message is a recovery message encapsulated
 * in a new ring message; strip the outer mcast
 * header to expose the original message.
 */
regular_message_item.mcast =
(struct mcast *)(((char *)recovery_message_item->mcast) + sizeof (struct mcast));
regular_message_item.msg_len =
recovery_message_item->msg_len - sizeof (struct mcast);
mcast = regular_message_item.mcast;
} else {
/*
 * TODO this case shouldn't happen
 */
continue;
}
log_printf (instance->totemsrp_log_level_debug,
"comparing if ring id is for this processors old ring seqno %d\n",
mcast->seq);
/*
 * Only add this message to the regular sort
 * queue if it was originated with the same ring
 * id as the previous ring
 */
if (memcmp (&instance->my_old_ring_id, &mcast->ring_id,
sizeof (struct memb_ring_id)) == 0) {
/*
 * NOTE(review): this overwrites the encapsulation-adjusted
 * length computed above with the full (outer) message
 * length — confirm this is intentional.
 */
regular_message_item.msg_len = recovery_message_item->msg_len;
res = sq_item_inuse (&instance->regular_sort_queue, mcast->seq);
if (res == 0) {
sq_item_add (&instance->regular_sort_queue,
&regular_message_item, mcast->seq);
/* Track the highest old-ring seqno transferred */
if (sq_lt_compare (instance->old_ring_state_high_seq_received, mcast->seq)) {
instance->old_ring_state_high_seq_received = mcast->seq;
}
}
} else {
log_printf (instance->totemsrp_log_level_notice,
"-not adding msg with seq no %x\n", mcast->seq);
}
}
}
/*
* Change states in the state machine of the membership algorithm
*/
/*
 * Enter the OPERATIONAL state: install the new membership, deliver the
 * transitional and regular configuration changes to the application,
 * flush messages recovered from the previous ring, and promote the
 * recovery sort queue to be the regular sort queue.
 */
static void memb_state_operational_enter (struct totemsrp_instance *instance)
{
struct srp_addr joined_list[PROCESSOR_COUNT_MAX];
int joined_list_entries = 0;
unsigned int aru_save;
unsigned int joined_list_totemip[PROCESSOR_COUNT_MAX];
unsigned int trans_memb_list_totemip[PROCESSOR_COUNT_MAX];
unsigned int new_memb_list_totemip[PROCESSOR_COUNT_MAX];
unsigned int left_list[PROCESSOR_COUNT_MAX];
memb_consensus_reset (instance);
old_ring_state_reset (instance);
ring_reset (instance);
deliver_messages_from_recovery_to_regular (instance);
log_printf (instance->totemsrp_log_level_debug,
"Delivering to app %x to %x\n",
instance->my_high_delivered + 1, instance->old_ring_state_high_seq_received);
/*
 * Temporarily switch my_aru to the old ring's aru while delivering
 * old-ring messages; restored from aru_save further below.
 */
aru_save = instance->my_aru;
instance->my_aru = instance->old_ring_state_aru;
messages_deliver_to_app (instance, 0, instance->old_ring_state_high_seq_received);
/*
 * Calculate joined and left list
 */
memb_set_subtract (instance->my_left_memb_list,
&instance->my_left_memb_entries,
instance->my_memb_list, instance->my_memb_entries,
instance->my_trans_memb_list, instance->my_trans_memb_entries);
memb_set_subtract (joined_list, &joined_list_entries,
instance->my_new_memb_list, instance->my_new_memb_entries,
instance->my_trans_memb_list, instance->my_trans_memb_entries);
/*
 * Install new membership
 */
instance->my_memb_entries = instance->my_new_memb_entries;
memcpy (&instance->my_memb_list, instance->my_new_memb_list,
sizeof (struct srp_addr) * instance->my_memb_entries);
instance->last_released = 0;
instance->my_set_retrans_flg = 0;
/*
 * Deliver transitional configuration to application
 */
srp_addr_to_nodeid (left_list, instance->my_left_memb_list,
instance->my_left_memb_entries);
srp_addr_to_nodeid (trans_memb_list_totemip,
instance->my_trans_memb_list, instance->my_trans_memb_entries);
instance->totemsrp_confchg_fn (TOTEM_CONFIGURATION_TRANSITIONAL,
trans_memb_list_totemip, instance->my_trans_memb_entries,
left_list, instance->my_left_memb_entries,
0, 0, &instance->my_ring_id);
// TODO we need to filter to ensure we only deliver those
// messages which are part of instance->my_deliver_memb
messages_deliver_to_app (instance, 1, instance->old_ring_state_high_seq_received);
instance->my_aru = aru_save;
/*
 * Deliver regular configuration to application
 */
srp_addr_to_nodeid (new_memb_list_totemip,
instance->my_new_memb_list, instance->my_new_memb_entries);
srp_addr_to_nodeid (joined_list_totemip, joined_list,
joined_list_entries);
instance->totemsrp_confchg_fn (TOTEM_CONFIGURATION_REGULAR,
new_memb_list_totemip, instance->my_new_memb_entries,
0, 0,
joined_list_totemip, joined_list_entries, &instance->my_ring_id);
/*
 * The recovery sort queue now becomes the regular
 * sort queue. It is necessary to copy the state
 * into the regular sort queue.
 */
sq_copy (&instance->regular_sort_queue, &instance->recovery_sort_queue);
instance->my_last_aru = SEQNO_START_MSG;
sq_items_release (&instance->regular_sort_queue, SEQNO_START_MSG - 1);
/* When making my_proc_list smaller, ensure that the
 * now non-used entries are zero-ed out. There are some suspect
 * assert's that assume that there is always 2 entries in the list.
 * These fail when my_proc_list is reduced to 1 entry (and the
 * valid [0] entry is the same as the 'unused' [1] entry).
 */
memset(instance->my_proc_list, 0,
sizeof (struct srp_addr) * instance->my_proc_list_entries);
instance->my_proc_list_entries = instance->my_new_memb_entries;
/*
 * my_memb_entries equals my_new_memb_entries at this point (set
 * above), so this copies exactly the new membership.
 */
memcpy (instance->my_proc_list, instance->my_new_memb_list,
sizeof (struct srp_addr) * instance->my_memb_entries);
instance->my_failed_list_entries = 0;
instance->my_high_delivered = instance->my_aru;
// TODO the recovery messages are leaked
log_printf (instance->totemsrp_log_level_notice,
"entering OPERATIONAL state.\n");
instance->memb_state = MEMB_STATE_OPERATIONAL;
instance->my_received_flg = 1;
return;
}
/*
 * Enter the GATHER state: merge ourselves into my_proc_list, broadcast
 * a join message, (re)arm the join and consensus timers and cancel the
 * token-related timers.  gather_from identifies the code path that
 * triggered the transition (used in the log message only).
 */
static void memb_state_gather_enter (
struct totemsrp_instance *instance,
int gather_from)
{
memb_set_merge (
&instance->my_id, 1,
instance->my_proc_list, &instance->my_proc_list_entries);
/* A processor must never appear twice in the proc list */
assert (srp_addr_equal (&instance->my_proc_list[0], &instance->my_proc_list[1]) == 0);
memb_join_message_send (instance);
/*
 * Restart the join timeout
 */
poll_timer_delete (instance->totemsrp_poll_handle, instance->memb_timer_state_gather_join_timeout);
poll_timer_add (instance->totemsrp_poll_handle,
instance->totem_config->join_timeout,
(void *)instance,
memb_timer_function_state_gather,
&instance->memb_timer_state_gather_join_timeout);
/*
 * Restart the consensus timeout
 */
poll_timer_delete (instance->totemsrp_poll_handle,
instance->memb_timer_state_gather_consensus_timeout);
poll_timer_add (instance->totemsrp_poll_handle,
instance->totem_config->consensus_timeout,
(void *)instance,
memb_timer_function_gather_consensus_timeout,
&instance->memb_timer_state_gather_consensus_timeout);
/*
 * Cancel the token loss and token retransmission timeouts
 */
cancel_token_retransmit_timeout (instance); // REVIEWED
cancel_token_timeout (instance); // REVIEWED
cancel_merge_detect_timeout (instance);
/* Start consensus tracking afresh with only our own agreement */
memb_consensus_reset (instance);
memb_consensus_set (instance, &instance->my_id);
log_printf (instance->totemsrp_log_level_notice,
"entering GATHER state from %d.\n", gather_from);
instance->memb_state = MEMB_STATE_GATHER;
return;
}
static void timer_function_token_retransmit_timeout (void *data);
/*
 * Enter the COMMIT state: save the current ring state for possible
 * restoration, fold our state into the commit token, set the token
 * target, store the new ring id and forward the commit token.  The
 * join/consensus timers are stopped and the token timers rearmed.
 */
static void memb_state_commit_enter (
struct totemsrp_instance *instance,
struct memb_commit_token *commit_token)
{
ring_save (instance);
old_ring_state_save (instance);
memb_state_commit_token_update (instance, commit_token);
memb_state_commit_token_target_set (instance, commit_token);
/* Persist the new ring id before forwarding the token */
memb_ring_id_set_and_store (instance, &commit_token->ring_id);
memb_state_commit_token_send (instance, commit_token);
instance->token_ring_id_seq = instance->my_ring_id.seq;
poll_timer_delete (instance->totemsrp_poll_handle, instance->memb_timer_state_gather_join_timeout);
instance->memb_timer_state_gather_join_timeout = 0;
poll_timer_delete (instance->totemsrp_poll_handle, instance->memb_timer_state_gather_consensus_timeout);
instance->memb_timer_state_gather_consensus_timeout = 0;
reset_token_timeout (instance); // REVIEWED
reset_token_retransmit_timeout (instance); // REVIEWED
log_printf (instance->totemsrp_log_level_notice,
"entering COMMIT state.\n");
instance->memb_state = MEMB_STATE_COMMIT;
/*
 * reset all flow control variables since we are starting a new ring
 */
instance->my_trc = 0;
instance->my_pbl = 0;
instance->my_cbl = 0;
return;
}
/*
 * Enter the RECOVERY state of the membership protocol.
 *
 * Rebuilds the regular and transitional configurations from the commit
 * token, determines whether any member of the transitional membership
 * still has undelivered old-ring messages (received_flg == 0) and, if
 * so, re-originates every old-ring message in the range
 * (low_ring_aru, old_ring_state_high_seq_received] by encapsulating it
 * in a new-ring message queued on retrans_message_queue.
 *
 * Fix: the is_originated/not_originated debug strings are 4096 bytes
 * but the loop below can append up to 1023 entries of up to 9
 * characters each (assert (range < 1024)), so the previous
 * unconditional strcat calls could overflow the stack buffers.  All
 * appends are now bounds-checked; overflowing entries are dropped from
 * the debug strings only — protocol behavior is unchanged.
 */
static void memb_state_recovery_enter (
	struct totemsrp_instance *instance,
	struct memb_commit_token *commit_token)
{
	int i;
	int local_received_flg = 1;
	unsigned int low_ring_aru;
	unsigned int range = 0;
	unsigned int messages_originated = 0;
	char is_originated[4096];
	char not_originated[4096];
	char seqno_string_hex[10];
	const struct srp_addr *addr;
	struct memb_commit_token_memb_entry *memb_list;

	/*
	 * The commit token carries the member address list followed by
	 * a per-member state entry list.
	 */
	addr = (const struct srp_addr *)commit_token->end_of_commit_token;
	memb_list = (struct memb_commit_token_memb_entry *)(addr + commit_token->addr_entries);

	log_printf (instance->totemsrp_log_level_notice,
		"entering RECOVERY state.\n");

	instance->my_high_ring_delivered = 0;
	sq_reinit (&instance->recovery_sort_queue, SEQNO_START_MSG);
	cs_queue_reinit (&instance->retrans_message_queue);

	low_ring_aru = instance->old_ring_state_high_seq_received;

	memb_state_commit_token_send (instance, commit_token);

	instance->my_token_seq = SEQNO_START_TOKEN - 1;

	/*
	 * Build regular configuration
	 */
	totemrrp_processor_count_set (
		instance->totemrrp_handle,
		commit_token->addr_entries);

	/*
	 * Build transitional configuration
	 */
	memb_set_and (instance->my_new_memb_list, instance->my_new_memb_entries,
		instance->my_memb_list, instance->my_memb_entries,
		instance->my_trans_memb_list, &instance->my_trans_memb_entries);

	for (i = 0; i < instance->my_new_memb_entries; i++) {
		log_printf (instance->totemsrp_log_level_notice,
			"position [%d] member %s:\n", i, totemip_print (&addr[i].addr[0]));
		log_printf (instance->totemsrp_log_level_notice,
			"previous ring seq %lld rep %s\n",
			memb_list[i].ring_id.seq,
			totemip_print (&memb_list[i].ring_id.rep));
		log_printf (instance->totemsrp_log_level_notice,
			"aru %x high delivered %x received flag %d\n",
			memb_list[i].aru,
			memb_list[i].high_delivered,
			memb_list[i].received_flg);
	//	assert (totemip_print (&memb_list[i].ring_id.rep) != 0);
	}

	/*
	 * Determine if any received flag is false
	 */
	for (i = 0; i < commit_token->addr_entries; i++) {
		if (memb_set_subset (&instance->my_new_memb_list[i], 1,
			instance->my_trans_memb_list, instance->my_trans_memb_entries) &&
			memb_list[i].received_flg == 0) {
			instance->my_deliver_memb_entries = instance->my_trans_memb_entries;
			memcpy (instance->my_deliver_memb_list, instance->my_trans_memb_list,
				sizeof (struct srp_addr) * instance->my_trans_memb_entries);
			local_received_flg = 0;
			break;
		}
	}
	if (local_received_flg == 1) {
		goto no_originate;
	} /* Else originate messages if we should */

	/*
	 * Calculate my_low_ring_aru, instance->my_high_ring_delivered for the transitional membership
	 */
	for (i = 0; i < commit_token->addr_entries; i++) {
		if (memb_set_subset (&instance->my_new_memb_list[i], 1,
			instance->my_deliver_memb_list,
			instance->my_deliver_memb_entries) &&
			memcmp (&instance->my_old_ring_id,
				&memb_list[i].ring_id,
				sizeof (struct memb_ring_id)) == 0) {
			if (sq_lt_compare (memb_list[i].aru, low_ring_aru)) {
				low_ring_aru = memb_list[i].aru;
			}
			if (sq_lt_compare (instance->my_high_ring_delivered, memb_list[i].high_delivered)) {
				instance->my_high_ring_delivered = memb_list[i].high_delivered;
			}
		}
	}

	/*
	 * Copy all old ring messages to instance->retrans_message_queue
	 */
	range = instance->old_ring_state_high_seq_received - low_ring_aru;
	if (range == 0) {
		/*
		 * No messages to copy
		 */
		goto no_originate;
	}
	assert (range < 1024);

	log_printf (instance->totemsrp_log_level_notice,
		"copying all old ring messages from %x-%x.\n",
		low_ring_aru + 1, instance->old_ring_state_high_seq_received);
	strcpy (not_originated, "Not Originated for recovery: ");
	strcpy (is_originated, "Originated for recovery: ");

	for (i = 1; i <= range; i++) {
		struct sort_queue_item *sort_queue_item;
		struct message_item message_item;
		void *ptr;
		int res;

		/*
		 * "%x " plus NUL fits in 10 bytes for a 32 bit value;
		 * snprintf makes the bound explicit.
		 */
		snprintf (seqno_string_hex, sizeof (seqno_string_hex),
			"%x ", low_ring_aru + i);
		res = sq_item_get (&instance->regular_sort_queue,
			low_ring_aru + i, &ptr);
		if (res != 0) {
			/* Bounds-checked append — see function header */
			if (strlen (not_originated) + strlen (seqno_string_hex) + 1 <
				sizeof (not_originated)) {
				strcat (not_originated, seqno_string_hex);
			}
			continue;
		}
		/* Bounds-checked append — see function header */
		if (strlen (is_originated) + strlen (seqno_string_hex) + 1 <
			sizeof (is_originated)) {
			strcat (is_originated, seqno_string_hex);
		}
		sort_queue_item = ptr;
		messages_originated++;
		memset (&message_item, 0, sizeof (struct message_item));
		// TODO LEAK
		message_item.mcast = malloc (10000);
		assert (message_item.mcast);

		/*
		 * Encapsulate the old ring message in a new ring message
		 * originated by this processor.
		 */
		message_item.mcast->header.type = MESSAGE_TYPE_MCAST;
		srp_addr_copy (&message_item.mcast->system_from, &instance->my_id);
		message_item.mcast->header.encapsulated = MESSAGE_ENCAPSULATED;
		message_item.mcast->header.nodeid = instance->my_id.addr[0].nodeid;
		assert (message_item.mcast->header.nodeid);
		message_item.mcast->header.endian_detector = ENDIAN_LOCAL;
		memcpy (&message_item.mcast->ring_id, &instance->my_ring_id,
			sizeof (struct memb_ring_id));
		message_item.msg_len = sort_queue_item->msg_len + sizeof (struct mcast);
		memcpy (((char *)message_item.mcast) + sizeof (struct mcast),
			sort_queue_item->mcast,
			sort_queue_item->msg_len);
		cs_queue_item_add (&instance->retrans_message_queue, &message_item);
	}
	log_printf (instance->totemsrp_log_level_notice,
		"Originated %d messages in RECOVERY.\n", messages_originated);

	/* Append the trailing newlines only if there is room left */
	if (strlen (not_originated) + 2 <= sizeof (not_originated)) {
		strcat (not_originated, "\n");
	}
	if (strlen (is_originated) + 2 <= sizeof (is_originated)) {
		strcat (is_originated, "\n");
	}
	log_printf (instance->totemsrp_log_level_notice, "%s", is_originated);
	log_printf (instance->totemsrp_log_level_notice, "%s", not_originated);
	goto originated;

no_originate:
	log_printf (instance->totemsrp_log_level_notice,
		"Did not need to originate any messages in recovery.\n");

originated:
	/*
	 * Reset sequencing state for the new ring.
	 */
	instance->my_aru = SEQNO_START_MSG;
	instance->my_aru_count = 0;
	instance->my_seq_unchanged = 0;
	instance->my_high_seq_received = SEQNO_START_MSG;
	instance->my_install_seq = SEQNO_START_MSG;
	instance->last_released = SEQNO_START_MSG;

	reset_token_timeout (instance); // REVIEWED
	reset_token_retransmit_timeout (instance); // REVIEWED

	instance->memb_state = MEMB_STATE_RECOVERY;
	return;
}
/*
 * Public API: a new message is ready to send — cancel any outstanding
 * token hold so the token resumes circulating promptly.
 *
 * Returns 0 on success, -1 if the handle could not be resolved.
 */
int totemsrp_new_msg_signal (hdb_handle_t handle)
{
	struct totemsrp_instance *instance;

	if (hdb_handle_get (&totemsrp_instance_database, handle,
		(void *)&instance) != 0) {
		return (-1);
	}

	token_hold_cancel_send (instance);

	hdb_handle_put (&totemsrp_instance_database, handle);
	return (0);
}
/*
 * Public API: queue a multicast message for transmission on the ring.
 *
 * The caller's iovec is gathered into a single allocated buffer behind
 * a struct mcast header and appended to new_message_queue; actual
 * transmission happens later, when this processor holds the token
 * (orf_token_mcast).
 *
 * Fixes:
 *  - the queue-full path returned -1 without hdb_handle_put(),
 *    leaking a handle reference on every full-queue event;
 *  - the iovec gather was unbounded: the message buffer is a fixed
 *    10000 byte allocation, so an oversized iovec overflowed the heap.
 *    The total size is now validated up front and oversized messages
 *    are rejected with -1;
 *  - the loop index is now unsigned to match iov_len.
 *
 * Returns 0 on success, -1 on failure (bad handle, full queue,
 * oversized message or allocation failure).
 */
int totemsrp_mcast (
	hdb_handle_t handle,
	struct iovec *iovec,
	unsigned int iov_len,
	int guarantee)
{
	unsigned int i;
	struct message_item message_item;
	struct totemsrp_instance *instance;
	char *addr;
	unsigned int addr_idx;
	unsigned int msg_size;
	unsigned int res;

	res = hdb_handle_get (&totemsrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}

	if (cs_queue_is_full (&instance->new_message_queue)) {
		log_printf (instance->totemsrp_log_level_warning, "queue full\n");
		goto error_put;
	}

	/*
	 * Reject messages that would not fit in the fixed-size buffer
	 * allocated below.
	 */
	msg_size = sizeof (struct mcast);
	for (i = 0; i < iov_len; i++) {
		msg_size += iovec[i].iov_len;
	}
	if (msg_size > 10000) {
		log_printf (instance->totemsrp_log_level_warning,
			"message too large for queueing\n");
		goto error_put;
	}

	memset (&message_item, 0, sizeof (struct message_item));

	/*
	 * Allocate pending item
	 */
	message_item.mcast = malloc (10000);
	if (message_item.mcast == 0) {
		goto error_put;
	}

	/*
	 * Set mcast header
	 */
	message_item.mcast->header.type = MESSAGE_TYPE_MCAST;
	message_item.mcast->header.endian_detector = ENDIAN_LOCAL;
	message_item.mcast->header.encapsulated = MESSAGE_NOT_ENCAPSULATED;
	message_item.mcast->header.nodeid = instance->my_id.addr[0].nodeid;
	assert (message_item.mcast->header.nodeid);
	message_item.mcast->guarantee = guarantee;
	srp_addr_copy (&message_item.mcast->system_from, &instance->my_id);

	/*
	 * Gather the caller's iovec into the contiguous buffer after
	 * the mcast header (size validated above).
	 */
	addr = (char *)message_item.mcast;
	addr_idx = sizeof (struct mcast);
	for (i = 0; i < iov_len; i++) {
		memcpy (&addr[addr_idx], iovec[i].iov_base, iovec[i].iov_len);
		addr_idx += iovec[i].iov_len;
	}
	message_item.msg_len = addr_idx;

	log_printf (instance->totemsrp_log_level_debug, "mcasted message added to pending queue\n");
	cs_queue_item_add (&instance->new_message_queue, &message_item);

	hdb_handle_put (&totemsrp_instance_database, handle);
	return (0);

error_put:
	hdb_handle_put (&totemsrp_instance_database, handle);
error_exit:
	return (-1);
}
/*
* Determine if there is room to queue a new message
*/
/*
 * Public API: report how many new messages may still be queued.
 * Returns the free slot count, or 0 if the handle cannot be resolved.
 */
int totemsrp_avail (hdb_handle_t handle)
{
	struct totemsrp_instance *instance;
	int free_slots = 0;

	if (hdb_handle_get (&totemsrp_instance_database, handle,
		(void *)&instance) != 0) {
		return (0);
	}

	cs_queue_avail (&instance->new_message_queue, &free_slots);

	hdb_handle_put (&totemsrp_instance_database, handle);
	return (free_slots);
}
/*
* ORF Token Management
*/
/*
* Recast message to mcast group if it is available
*/
/*
 * Re-multicast the message stored at sequence number seq, if present
 * in the active sort queue (recovery queue while in RECOVERY, regular
 * queue otherwise).
 *
 * Returns 0 on success, -1 when seq is out of range or not stored.
 */
static int orf_token_remcast (
	struct totemsrp_instance *instance,
	int seq)
{
	struct sort_queue_item *queue_item;
	struct sq *active_queue;
	void *ptr;

	active_queue = (instance->memb_state == MEMB_STATE_RECOVERY) ?
		&instance->recovery_sort_queue :
		&instance->regular_sort_queue;

	if (sq_in_range (active_queue, seq) == 0) {
		log_printf (instance->totemsrp_log_level_debug, "sq not in range\n");
		return (-1);
	}

	/*
	 * Get RTR item at seq; if not available, nothing to remcast.
	 */
	if (sq_item_get (active_queue, seq, &ptr) != 0) {
		return (-1);
	}

	queue_item = ptr;
	totemrrp_mcast_noflush_send (
		instance->totemrrp_handle,
		queue_item->mcast,
		queue_item->msg_len);

	return (0);
}
/*
* Free all freeable messages from ring
*/
/*
 * Free regular sort queue messages that every processor has received
 * and delivered.
 *
 * The release point is the oldest (by sequence order) of token_aru,
 * my_last_aru and my_high_delivered.  Messages between last_released
 * and that point have their mcast bodies freed and their queue slots
 * released, and last_released is advanced.
 */
static void messages_free (
struct totemsrp_instance *instance,
unsigned int token_aru)
{
struct sort_queue_item *regular_message;
unsigned int i;
int res;
int log_release = 0;
unsigned int release_to;
unsigned int range = 0;
/* release_to = min(token_aru, my_last_aru, my_high_delivered) */
release_to = token_aru;
if (sq_lt_compare (instance->my_last_aru, release_to)) {
release_to = instance->my_last_aru;
}
if (sq_lt_compare (instance->my_high_delivered, release_to)) {
release_to = instance->my_high_delivered;
}
/*
 * Ensure we dont try release before an already released point
 */
if (sq_lt_compare (release_to, instance->last_released)) {
return;
}
range = release_to - instance->last_released;
assert (range < 1024);
/*
 * Release retransmit list items if group aru indicates they are transmitted
 */
for (i = 1; i <= range; i++) {
void *ptr;
res = sq_item_get (&instance->regular_sort_queue,
instance->last_released + i, &ptr);
if (res == 0) {
regular_message = ptr;
free (regular_message->mcast);
}
/* Release the slot even if nothing was stored there */
sq_items_release (&instance->regular_sort_queue,
instance->last_released + i);
log_release = 1;
}
instance->last_released += range;
if (log_release) {
log_printf (instance->totemsrp_log_level_debug,
"releasing messages up to and including %x\n", release_to);
}
}
/*
 * Advance my_aru (all received up to) past every contiguously stored
 * message in the active sort queue, stopping at the first hole.  Does
 * nothing when more than 1024 sequence numbers are outstanding.
 */
static void update_aru (
	struct totemsrp_instance *instance)
{
	struct sq *active_queue;
	unsigned int contiguous = 0;
	unsigned int base;
	unsigned int span;
	unsigned int idx;
	void *ptr;

	active_queue = (instance->memb_state == MEMB_STATE_RECOVERY) ?
		&instance->recovery_sort_queue :
		&instance->regular_sort_queue;

	span = instance->my_high_seq_received - instance->my_aru;
	if (span > 1024) {
		return;
	}

	base = instance->my_aru;
	for (idx = 1; idx <= span; idx++) {
		/*
		 * A missing item is a hole: everything before it is the
		 * new contiguous prefix.
		 */
		if (sq_item_get (active_queue, base + idx, &ptr) != 0) {
			break;
		}
		contiguous = idx;
	}

	instance->my_aru += contiguous;
}
/*
* Multicasts pending messages onto the ring (requires orf_token possession)
*/
/*
 * Multicasts pending messages onto the ring (requires orf_token
 * possession).  Up to fcc_mcasts_allowed messages are taken from the
 * pending queue (retrans_message_queue in RECOVERY, new_message_queue
 * otherwise), stamped with the next token sequence numbers and the
 * current ring id, stored in the matching sort queue and multicast.
 *
 * Returns the number of messages multicast.
 */
static int orf_token_mcast (
struct totemsrp_instance *instance,
struct orf_token *token,
int fcc_mcasts_allowed)
{
struct message_item *message_item = 0;
struct cs_queue *mcast_queue;
struct sq *sort_queue;
struct sort_queue_item sort_queue_item;
struct sort_queue_item *sort_queue_item_ptr;
struct mcast *mcast;
unsigned int fcc_mcast_current;
if (instance->memb_state == MEMB_STATE_RECOVERY) {
mcast_queue = &instance->retrans_message_queue;
sort_queue = &instance->recovery_sort_queue;
reset_token_retransmit_timeout (instance); // REVIEWED
} else {
mcast_queue = &instance->new_message_queue;
sort_queue = &instance->regular_sort_queue;
}
for (fcc_mcast_current = 0; fcc_mcast_current < fcc_mcasts_allowed; fcc_mcast_current++) {
if (cs_queue_is_empty (mcast_queue)) {
break;
}
message_item = (struct message_item *)cs_queue_item_get (mcast_queue);
/* preincrement required by algo */
/*
 * Do not originate messages while a membership change is in
 * progress with old ring state saved.
 */
if (instance->old_ring_state_saved &&
(instance->memb_state == MEMB_STATE_GATHER ||
instance->memb_state == MEMB_STATE_COMMIT)) {
log_printf (instance->totemsrp_log_level_debug,
"not multicasting at seqno is %d\n",
token->seq);
return (0);
}
message_item->mcast->seq = ++token->seq;
message_item->mcast->this_seqno = instance->global_seqno++;
/*
 * Build IO vector
 */
memset (&sort_queue_item, 0, sizeof (struct sort_queue_item));
sort_queue_item.mcast = message_item->mcast;
sort_queue_item.msg_len = message_item->msg_len;
mcast = sort_queue_item.mcast;
memcpy (&mcast->ring_id, &instance->my_ring_id, sizeof (struct memb_ring_id));
/*
 * Add message to retransmit queue
 */
sort_queue_item_ptr = sq_item_add (sort_queue,
&sort_queue_item, message_item->mcast->seq);
totemrrp_mcast_noflush_send (
instance->totemrrp_handle,
message_item->mcast,
message_item->msg_len);
/*
 * Delete item from pending queue
 */
cs_queue_item_remove (mcast_queue);
/*
 * If messages mcasted, deliver any new messages to totempg
 */
instance->my_high_seq_received = token->seq;
}
update_aru (instance);
/*
 * Return 1 if more messages are available for single node clusters
 */
return (fcc_mcast_current);
}
/*
 * Remulticasts messages in orf_token's retransmit list (requires orf_token)
 * Modifies orf_token's rtr list to include retransmits required by this process
 */
/*
 * Service the token's retransmit request (RTR) list.
 *
 * First remulticasts, within the *fcc_allowed budget, every listed
 * message from the current ring that this processor holds (removing
 * satisfied entries from the list), then appends the sequence numbers
 * this processor is itself missing.  *fcc_allowed is reduced by the
 * number of remulticasts performed; that count is also returned.
 *
 * NOTE(review): retransmit_msg is 1024 bytes and receives one "%x "
 * entry per RTR list item via strcat — confirm RETRANSMIT_ENTRIES_MAX
 * keeps the total below the buffer size.
 */
static int orf_token_rtr (
struct totemsrp_instance *instance,
struct orf_token *orf_token,
unsigned int *fcc_allowed)
{
unsigned int res;
unsigned int i, j;
unsigned int found;
unsigned int total_entries;
struct sq *sort_queue;
struct rtr_item *rtr_list;
unsigned int range = 0;
char retransmit_msg[1024];
char value[64];
if (instance->memb_state == MEMB_STATE_RECOVERY) {
sort_queue = &instance->recovery_sort_queue;
} else {
sort_queue = &instance->regular_sort_queue;
}
rtr_list = &orf_token->rtr_list[0];
strcpy (retransmit_msg, "Retransmit List: ");
if (orf_token->rtr_list_entries) {
log_printf (instance->totemsrp_log_level_debug,
"Retransmit List %d\n", orf_token->rtr_list_entries);
for (i = 0; i < orf_token->rtr_list_entries; i++) {
sprintf (value, "%x ", rtr_list[i].seq);
strcat (retransmit_msg, value);
}
strcat (retransmit_msg, "\n");
log_printf (instance->totemsrp_log_level_notice,
"%s", retransmit_msg);
}
total_entries = orf_token->rtr_list_entries;
/*
 * Retransmit messages on orf_token's RTR list from RTR queue
 */
for (instance->fcc_remcast_current = 0, i = 0;
instance->fcc_remcast_current < *fcc_allowed && i < orf_token->rtr_list_entries;) {
/*
 * If this retransmit request isn't from this configuration,
 * try next rtr entry
 */
if (memcmp (&rtr_list[i].ring_id, &instance->my_ring_id,
sizeof (struct memb_ring_id)) != 0) {
i += 1;
continue;
}
res = orf_token_remcast (instance, rtr_list[i].seq);
if (res == 0) {
/*
 * Multicasted message, so no need to copy to new retransmit list
 */
orf_token->rtr_list_entries -= 1;
assert (orf_token->rtr_list_entries >= 0);
/* Compact the list over the satisfied entry */
memmove (&rtr_list[i], &rtr_list[i + 1],
sizeof (struct rtr_item) * (orf_token->rtr_list_entries));
instance->fcc_remcast_current++;
} else {
i += 1;
}
}
*fcc_allowed = *fcc_allowed - instance->fcc_remcast_current;
/*
 * Add messages to retransmit to RTR list
 * but only retry if there is room in the retransmit list
 */
range = instance->my_high_seq_received - instance->my_aru;
assert (range < 100000);
for (i = 1; (orf_token->rtr_list_entries < RETRANSMIT_ENTRIES_MAX) &&
(i <= range); i++) {
/*
 * Ensure message is within the sort queue range
 */
res = sq_in_range (sort_queue, instance->my_aru + i);
if (res == 0) {
break;
}
/*
 * Find if a message is missing from this processor
 */
res = sq_item_inuse (sort_queue, instance->my_aru + i);
if (res == 0) {
/*
 * Determine if missing message is already in retransmit list
 */
found = 0;
for (j = 0; j < orf_token->rtr_list_entries; j++) {
if (instance->my_aru + i == rtr_list[j].seq) {
found = 1;
}
}
if (found == 0) {
/*
 * Missing message not found in current retransmit list so add it
 */
memcpy (&rtr_list[orf_token->rtr_list_entries].ring_id,
&instance->my_ring_id, sizeof (struct memb_ring_id));
rtr_list[orf_token->rtr_list_entries].seq = instance->my_aru + i;
orf_token->rtr_list_entries++;
}
}
}
return (instance->fcc_remcast_current);
}
/*
 * Resend the saved copy of the most recently forwarded token.
 */
static void token_retransmit (struct totemsrp_instance *instance)
{
	totemrrp_token_send (
		instance->totemrrp_handle,
		instance->orf_token_retransmit,
		instance->orf_token_retransmit_size);
}
/*
 * Retransmit the regular token to the next processor if no mcast
 * message or token has been received within the token retransmit
 * period
 */
/*
 * Poll timer callback: the token retransmit period elapsed without
 * traffic — resend the saved token and rearm the timer.  In GATHER no
 * token is circulating, so nothing is done.
 */
static void timer_function_token_retransmit_timeout (void *data)
{
	struct totemsrp_instance *instance = data;

	switch (instance->memb_state) {
	case MEMB_STATE_COMMIT:
	case MEMB_STATE_OPERATIONAL:
	case MEMB_STATE_RECOVERY:
		token_retransmit (instance);
		reset_token_retransmit_timeout (instance); // REVIEWED
		break;
	case MEMB_STATE_GATHER:
		break;
	}
}
/*
 * Poll timer callback: the token hold period expired — pass the held
 * token along.  Only meaningful in OPERATIONAL and RECOVERY.
 */
static void timer_function_token_hold_retransmit_timeout (void *data)
{
	struct totemsrp_instance *instance = data;

	switch (instance->memb_state) {
	case MEMB_STATE_GATHER:
	case MEMB_STATE_COMMIT:
		break;
	case MEMB_STATE_OPERATIONAL:
	case MEMB_STATE_RECOVERY:
		token_retransmit (instance);
		break;
	}
}
/*
 * Poll timer callback: merge detection.  While OPERATIONAL, the ring
 * representative periodically transmits a merge detect message; all
 * other states (and non-representatives) do nothing.
 */
static void timer_function_merge_detect_timeout(void *data)
{
	struct totemsrp_instance *instance = data;
	int is_ring_rep;

	instance->my_merge_detect_timeout_outstanding = 0;

	switch (instance->memb_state) {
	case MEMB_STATE_OPERATIONAL:
		is_ring_rep = totemip_equal (&instance->my_ring_id.rep,
			&instance->my_id.addr[0]);
		if (is_ring_rep) {
			memb_merge_detect_transmit (instance);
		}
		break;
	case MEMB_STATE_GATHER:
	case MEMB_STATE_COMMIT:
	case MEMB_STATE_RECOVERY:
		break;
	}
}
/*
* Send orf_token to next member (requires orf_token)
*/
/*
 * Save a copy of the token for possible retransmission and, unless
 * forward_token is 0, send it to the next processor in the ring.
 *
 * NOTE(review): the retransmit copy is taken before header.nodeid is
 * overwritten below, so the saved copy retains the nodeid the token
 * arrived with — confirm that is intended.
 *
 * Returns 0.
 */
static int token_send (
struct totemsrp_instance *instance,
struct orf_token *orf_token,
int forward_token)
{
int res = 0;
unsigned int orf_token_size;
/* Token size is the fixed part plus the variable RTR list */
orf_token_size = sizeof (struct orf_token) +
(orf_token->rtr_list_entries * sizeof (struct rtr_item));
memcpy (instance->orf_token_retransmit, orf_token, orf_token_size);
instance->orf_token_retransmit_size = orf_token_size;
orf_token->header.nodeid = instance->my_id.addr[0].nodeid;
assert (orf_token->header.nodeid);
if (forward_token == 0) {
return (0);
}
totemrrp_token_send (instance->totemrrp_handle,
orf_token,
orf_token_size);
return (res);
}
static int token_hold_cancel_send (struct totemsrp_instance *instance)
{
struct token_hold_cancel token_hold_cancel;
/*
* Only cancel if the token is currently held
*/
if (instance->my_token_held == 0) {
return (0);
}
instance->my_token_held = 0;
/*
* Build message
*/
token_hold_cancel.header.type = MESSAGE_TYPE_TOKEN_HOLD_CANCEL;
token_hold_cancel.header.endian_detector = ENDIAN_LOCAL;
token_hold_cancel.header.nodeid = instance->my_id.addr[0].nodeid;
memcpy (&token_hold_cancel.ring_id, &instance->my_ring_id,
sizeof (struct memb_ring_id));
assert (token_hold_cancel.header.nodeid);
totemrrp_mcast_flush_send (instance->totemrrp_handle, &token_hold_cancel,
sizeof (struct token_hold_cancel));
return (0);
}
/*
 * Originate the first regular token after a new ring has formed.
 *
 * Cleanup: the original code assigned retrans_flg/my_set_retrans_flg
 * to 1 and aru to 0 and then immediately overwrote all three values —
 * those dead stores are removed; behavior is unchanged.
 *
 * Returns the result of token_send().
 */
static int orf_token_send_initial (struct totemsrp_instance *instance)
{
	struct orf_token orf_token;
	int res;

	orf_token.header.type = MESSAGE_TYPE_ORF_TOKEN;
	orf_token.header.endian_detector = ENDIAN_LOCAL;
	orf_token.header.encapsulated = 0;
	orf_token.header.nodeid = instance->my_id.addr[0].nodeid;
	assert (orf_token.header.nodeid);
	orf_token.seq = SEQNO_START_MSG;
	orf_token.token_seq = SEQNO_START_TOKEN;

	/*
	 * The retransmit flag is only set while recovery messages are
	 * still pending transmission.
	 */
	if (cs_queue_is_empty (&instance->retrans_message_queue) == 1) {
		orf_token.retrans_flg = 0;
	} else {
		orf_token.retrans_flg = 1;
	}
	instance->my_set_retrans_flg = orf_token.retrans_flg;

	/*
	 * Nothing delivered yet: aru starts one before the first
	 * message sequence number.
	 */
	orf_token.aru = SEQNO_START_MSG - 1;
	orf_token.aru_addr = instance->my_id.addr[0].nodeid;

	memcpy (&orf_token.ring_id, &instance->my_ring_id, sizeof (struct memb_ring_id));
	orf_token.fcc = 0;
	orf_token.backlog = 0;
	orf_token.rtr_list_entries = 0;

	res = token_send (instance, &orf_token, 1);

	return (res);
}
/*
 * Fold this processor's state into the circulating commit token.
 *
 * Copies the token's member address list into my_new_memb_list,
 * records our old ring id, aru, received flag and high delivered in
 * our slot (memb_index), then recomputes the highest aru among all
 * entries sharing our old ring id and clears the received flag of any
 * entry (including ours) whose aru lags behind it.  Finally stamps our
 * nodeid on the token and advances memb_index.
 */
static void memb_state_commit_token_update (
struct totemsrp_instance *instance,
struct memb_commit_token *commit_token)
{
struct srp_addr *addr;
struct memb_commit_token_memb_entry *memb_list;
unsigned int high_aru;
unsigned int i;
/* Address list then member state list follow the fixed token part */
addr = (struct srp_addr *)commit_token->end_of_commit_token;
memb_list = (struct memb_commit_token_memb_entry *)(addr + commit_token->addr_entries);
memcpy (instance->my_new_memb_list, addr,
sizeof (struct srp_addr) * commit_token->addr_entries);
instance->my_new_memb_entries = commit_token->addr_entries;
memcpy (&memb_list[commit_token->memb_index].ring_id,
&instance->my_old_ring_id, sizeof (struct memb_ring_id));
assert (!totemip_zero_check(&instance->my_old_ring_id.rep));
memb_list[commit_token->memb_index].aru = instance->old_ring_state_aru;
/*
 * TODO high delivered is really instance->my_aru, but with safe this
 * could change?
 */
instance->my_received_flg =
(instance->my_aru == instance->my_high_seq_received);
memb_list[commit_token->memb_index].received_flg = instance->my_received_flg;
memb_list[commit_token->memb_index].high_delivered = instance->my_high_delivered;
/*
 * find high aru up to current memb_index for all matching ring ids
 * if any ring id matching memb_index has aru less then high aru set
 * received flag for that entry to false
 */
high_aru = memb_list[commit_token->memb_index].aru;
for (i = 0; i <= commit_token->memb_index; i++) {
if (memcmp (&memb_list[commit_token->memb_index].ring_id,
&memb_list[i].ring_id,
sizeof (struct memb_ring_id)) == 0) {
if (sq_lt_compare (high_aru, memb_list[i].aru)) {
high_aru = memb_list[i].aru;
}
}
}
/* Second pass: clear received flags of entries lagging high_aru */
for (i = 0; i <= commit_token->memb_index; i++) {
if (memcmp (&memb_list[commit_token->memb_index].ring_id,
&memb_list[i].ring_id,
sizeof (struct memb_ring_id)) == 0) {
if (sq_lt_compare (memb_list[i].aru, high_aru)) {
memb_list[i].received_flg = 0;
if (i == commit_token->memb_index) {
instance->my_received_flg = 0;
}
}
}
}
commit_token->header.nodeid = instance->my_id.addr[0].nodeid;
commit_token->memb_index += 1;
assert (commit_token->memb_index <= commit_token->addr_entries);
assert (commit_token->header.nodeid);
}
static void memb_state_commit_token_target_set (
struct totemsrp_instance *instance,
struct memb_commit_token *commit_token)
{
struct srp_addr *addr;
unsigned int i;
addr = (struct srp_addr *)commit_token->end_of_commit_token;
for (i = 0; i < instance->totem_config->interface_count; i++) {
totemrrp_token_target_set (
instance->totemrrp_handle,
&addr[commit_token->memb_index %
commit_token->addr_entries].addr[i],
i);
}
}
/*
 * Forward the commit token to the next processor on the ring.  The
 * token (fixed header plus per-member address and status tails) is
 * copied aside first so it can be retransmitted if lost, and the
 * retransmit timer is armed.  Always returns 0.
 */
static int memb_state_commit_token_send (
	struct totemsrp_instance *instance,
	struct memb_commit_token *commit_token)
{
	unsigned int token_bytes;

	commit_token->token_seq++;

	/* fixed header + (srp_addr + memb entry) per listed member */
	token_bytes = sizeof (struct memb_commit_token) +
		((sizeof (struct srp_addr) +
		  sizeof (struct memb_commit_token_memb_entry)) *
		 commit_token->addr_entries);

	/*
	 * Make a copy for retransmission if necessary
	 */
	memcpy (instance->orf_token_retransmit, commit_token, token_bytes);
	instance->orf_token_retransmit_size = token_bytes;

	totemrrp_token_send (instance->totemrrp_handle,
		commit_token,
		token_bytes);

	/*
	 * Request retransmission of the commit token in case it is lost
	 */
	reset_token_retransmit_timeout (instance);
	return (0);
}
/*
 * Return non-zero when this processor has the lowest address among the
 * live members (proc list minus failed list), i.e. when it should act
 * as ring representative.
 */
static int memb_lowest_in_config (struct totemsrp_instance *instance)
{
	struct srp_addr live_memb[PROCESSOR_COUNT_MAX];
	int live_memb_entries = 0;
	struct totem_ip_address *lowest;
	int idx;

	memb_set_subtract (live_memb, &live_memb_entries,
		instance->my_proc_list, instance->my_proc_list_entries,
		instance->my_failed_list, instance->my_failed_list_entries);

	/*
	 * find representative by searching for smallest identifier
	 */
	lowest = &live_memb[0].addr[0];
	for (idx = 1; idx < live_memb_entries; idx++) {
		if (totemip_compare (lowest, &live_memb[idx].addr[0]) > 0) {
			lowest = &live_memb[idx].addr[0];
		}
	}

	return (totemip_compare (lowest, &instance->my_id.addr[0]) == 0);
}
/*
 * qsort() comparator ordering srp_addr entries by their first
 * interface address.
 */
static int srp_addr_compare (const void *a, const void *b)
{
	const struct srp_addr *left = a;
	const struct srp_addr *right = b;

	return (totemip_compare (&left->addr[0], &right->addr[0]));
}
/*
 * Build a fresh commit token with this processor as ring
 * representative.  The member list is the current proc list minus the
 * failed list, sorted by address so the token circulates the ring in a
 * deterministic order.  The variable-length tail (address list followed
 * by per-member status entries) is written directly after the fixed
 * header; the status entries start zeroed and are filled in as the
 * token visits each member.
 */
static void memb_state_commit_token_create (
	struct totemsrp_instance *instance,
	struct memb_commit_token *commit_token)
{
	struct srp_addr token_memb[PROCESSOR_COUNT_MAX];
	struct srp_addr *addr;
	struct memb_commit_token_memb_entry *memb_list;
	int token_memb_entries = 0;

	log_printf (instance->totemsrp_log_level_notice,
		"Creating commit token because I am the rep.\n");

	memb_set_subtract (token_memb, &token_memb_entries,
		instance->my_proc_list, instance->my_proc_list_entries,
		instance->my_failed_list, instance->my_failed_list_entries);

	memset (commit_token, 0, sizeof (struct memb_commit_token));
	commit_token->header.type = MESSAGE_TYPE_MEMB_COMMIT_TOKEN;
	commit_token->header.endian_detector = ENDIAN_LOCAL;
	commit_token->header.encapsulated = 0;
	commit_token->header.nodeid = instance->my_id.addr[0].nodeid;
	assert (commit_token->header.nodeid);

	/* new ring id: this node as rep, sequence advanced past the old ring */
	totemip_copy(&commit_token->ring_id.rep, &instance->my_id.addr[0]);
	commit_token->ring_id.seq = instance->token_ring_id_seq + 4;

	/*
	 * This qsort is necessary to ensure the commit token traverses
	 * the ring in the proper order
	 */
	qsort (token_memb, token_memb_entries, sizeof (struct srp_addr),
		srp_addr_compare);

	commit_token->memb_index = 0;
	commit_token->addr_entries = token_memb_entries;

	/* lay out the tail: address list first, then the memb entry array */
	addr = (struct srp_addr *)commit_token->end_of_commit_token;
	memb_list = (struct memb_commit_token_memb_entry *)(addr + commit_token->addr_entries);

	memcpy (addr, token_memb,
		token_memb_entries * sizeof (struct srp_addr));
	memset (memb_list, 0,
		sizeof (struct memb_commit_token_memb_entry) * token_memb_entries);
}
/*
 * Multicast a JOIN message advertising this processor's current proc
 * and failed lists.  The two lists are appended after the fixed
 * memb_join header in the send buffer.
 *
 * NOTE(review): the 10000-byte stack buffer is assumed large enough
 * for both lists (PROCESSOR_COUNT_MAX entries) - confirm the bound.
 */
static void memb_join_message_send (struct totemsrp_instance *instance)
{
	char memb_join_data[10000];
	struct memb_join *memb_join = (struct memb_join *)memb_join_data;
	char *addr;
	unsigned int addr_idx;

	memb_join->header.type = MESSAGE_TYPE_MEMB_JOIN;
	memb_join->header.endian_detector = ENDIAN_LOCAL;
	memb_join->header.encapsulated = 0;
	memb_join->header.nodeid = instance->my_id.addr[0].nodeid;
	assert (memb_join->header.nodeid);

	assert (srp_addr_equal (&instance->my_proc_list[0], &instance->my_proc_list[1]) == 0);

	memb_join->ring_seq = instance->my_ring_id.seq;
	memb_join->proc_list_entries = instance->my_proc_list_entries;
	memb_join->failed_list_entries = instance->my_failed_list_entries;
	srp_addr_copy (&memb_join->system_from, &instance->my_id);

	/*
	 * This mess adds the joined and failed processor lists into the join
	 * message
	 */
	addr = (char *)memb_join;
	addr_idx = sizeof (struct memb_join);
	/* proc list immediately after the header ... */
	memcpy (&addr[addr_idx],
		instance->my_proc_list,
		instance->my_proc_list_entries *
			sizeof (struct srp_addr));
	addr_idx +=
		instance->my_proc_list_entries *
		sizeof (struct srp_addr);
	/* ... then the failed list */
	memcpy (&addr[addr_idx],
		instance->my_failed_list,
		instance->my_failed_list_entries *
			sizeof (struct srp_addr));
	addr_idx +=
		instance->my_failed_list_entries *
		sizeof (struct srp_addr);

	/* random delay to de-synchronize joins from multiple nodes */
	if (instance->totem_config->send_join_timeout) {
		usleep (random() % (instance->totem_config->send_join_timeout * 1000));
	}

	totemrrp_mcast_flush_send (
		instance->totemrrp_handle,
		memb_join,
		addr_idx);
}
/*
 * Announce a graceful shutdown: send a JOIN message in which this node
 * has been moved from the member list to the failed list, sourced from
 * the LEAVE_DUMMY_NODEID so surviving nodes reconfigure without
 * waiting for a token timeout.
 */
static void memb_leave_message_send (struct totemsrp_instance *instance)
{
	char memb_join_data[10000];
	struct memb_join *memb_join = (struct memb_join *)memb_join_data;
	char *addr;
	unsigned int addr_idx;
	int active_memb_entries;
	struct srp_addr active_memb[PROCESSOR_COUNT_MAX];

	log_printf (instance->totemsrp_log_level_debug,
		"sending join/leave message\n");

	/*
	 * add us to the failed list, and remove us from
	 * the members list
	 */
	memb_set_merge(
		&instance->my_id, 1,
		instance->my_failed_list, &instance->my_failed_list_entries);
	memb_set_subtract (active_memb, &active_memb_entries,
		instance->my_proc_list, instance->my_proc_list_entries,
		&instance->my_id, 1);

	memb_join->header.type = MESSAGE_TYPE_MEMB_JOIN;
	memb_join->header.endian_detector = ENDIAN_LOCAL;
	memb_join->header.encapsulated = 0;
	/* dummy nodeid marks this as a leave, not a real join */
	memb_join->header.nodeid = LEAVE_DUMMY_NODEID;

	memb_join->ring_seq = instance->my_ring_id.seq;
	memb_join->proc_list_entries = active_memb_entries;
	memb_join->failed_list_entries = instance->my_failed_list_entries;
	srp_addr_copy (&memb_join->system_from, &instance->my_id);
	memb_join->system_from.addr[0].nodeid = LEAVE_DUMMY_NODEID;

	// TODO: CC Maybe use the actual join send routine.
	/*
	 * This mess adds the joined and failed processor lists into the join
	 * message
	 */
	addr = (char *)memb_join;
	addr_idx = sizeof (struct memb_join);
	/* active member list (without us) after the header ... */
	memcpy (&addr[addr_idx],
		active_memb,
		active_memb_entries *
			sizeof (struct srp_addr));
	addr_idx +=
		active_memb_entries *
		sizeof (struct srp_addr);
	/* ... then the failed list (now including us) */
	memcpy (&addr[addr_idx],
		instance->my_failed_list,
		instance->my_failed_list_entries *
			sizeof (struct srp_addr));
	addr_idx +=
		instance->my_failed_list_entries *
		sizeof (struct srp_addr);

	if (instance->totem_config->send_join_timeout) {
		usleep (random() % (instance->totem_config->send_join_timeout * 1000));
	}

	totemrrp_mcast_flush_send (
		instance->totemrrp_handle,
		memb_join,
		addr_idx);
}
static void memb_merge_detect_transmit (struct totemsrp_instance *instance)
{
struct memb_merge_detect memb_merge_detect;
memb_merge_detect.header.type = MESSAGE_TYPE_MEMB_MERGE_DETECT;
memb_merge_detect.header.endian_detector = ENDIAN_LOCAL;
memb_merge_detect.header.encapsulated = 0;
memb_merge_detect.header.nodeid = instance->my_id.addr[0].nodeid;
srp_addr_copy (&memb_merge_detect.system_from, &instance->my_id);
memcpy (&memb_merge_detect.ring_id, &instance->my_ring_id,
sizeof (struct memb_ring_id));
assert (memb_merge_detect.header.nodeid);
totemrrp_mcast_flush_send (instance->totemrrp_handle,
&memb_merge_detect,
sizeof (struct memb_merge_detect));
}
/*
 * Load the persisted ring sequence number for this node from
 * <rundir>/ringid_<ip>, or create the file with seq 0 on first run.
 * On any other open error the seq is left untouched and a warning is
 * logged.  Finishes by installing this node as ring rep and caching
 * the sequence in the instance.
 *
 * Fixes over the original:
 *  - open() may legitimately return fd 0; test for >= 0, not > 0.
 *  - when creating the file fails, do not write() to / close() fd -1.
 */
static void memb_ring_id_create_or_load (
	struct totemsrp_instance *instance,
	struct memb_ring_id *memb_ring_id)
{
	int fd;
	int res;
	char filename[256];

	snprintf (filename, sizeof(filename), "%s/ringid_%s",
		rundir, totemip_print (&instance->my_id.addr[0]));
	fd = open (filename, O_RDONLY, 0700);
	if (fd >= 0) {
		res = read (fd, &memb_ring_id->seq, sizeof (unsigned long long));
		assert (res == sizeof (unsigned long long));
		close (fd);
	} else
	if (errno == ENOENT) {
		/* first boot on this node: seed the file with seq 0 */
		memb_ring_id->seq = 0;
		umask(0);
		fd = open (filename, O_CREAT|O_RDWR, 0700);
		if (fd == -1) {
			log_printf (instance->totemsrp_log_level_warning,
				"Couldn't create %s %s\n", filename, strerror (errno));
		} else {
			res = write (fd, &memb_ring_id->seq, sizeof (unsigned long long));
			assert (res == sizeof (unsigned long long));
			close (fd);
		}
	} else {
		log_printf (instance->totemsrp_log_level_warning,
			"Couldn't open %s %s\n", filename, strerror (errno));
	}

	totemip_copy(&memb_ring_id->rep, &instance->my_id.addr[0]);
	assert (!totemip_zero_check(&memb_ring_id->rep));
	instance->token_ring_id_seq = memb_ring_id->seq;
}
/*
 * Adopt ring_id as the instance's current ring and persist its
 * sequence number to the per-node ringid file, so a restarted node
 * resumes with a sequence higher than any it previously used.
 * Failure to open the file is fatal (assert).
 */
static void memb_ring_id_set_and_store (
	struct totemsrp_instance *instance,
	const struct memb_ring_id *ring_id)
{
	char filename[256];
	int fd;
	int res;

	memcpy (&instance->my_ring_id, ring_id, sizeof (struct memb_ring_id));

	snprintf (filename, sizeof(filename), "%s/ringid_%s",
		rundir, totemip_print (&instance->my_id.addr[0]));

	/* try plain write first; fall back to creating the file */
	fd = open (filename, O_WRONLY, 0777);
	if (fd == -1) {
		fd = open (filename, O_CREAT|O_RDWR, 0777);
	}
	if (fd == -1) {
		log_printf (instance->totemsrp_log_level_warning,
			"Couldn't store new ring id %llx to stable storage (%s)\n",
			instance->my_ring_id.seq, strerror (errno));
		assert (0);
		return;
	}
	log_printf (instance->totemsrp_log_level_notice,
		"Storing new sequence id for ring %llx\n", instance->my_ring_id.seq);
	//assert (fd > 0);
	res = write (fd, &instance->my_ring_id.seq, sizeof (unsigned long long));
	assert (res == sizeof (unsigned long long));
	close (fd);
}
/*
 * Register a callback to run when the token is received or sent.
 * The returned *handle_out is later passed to
 * totemsrp_callback_token_destroy().  When 'delete' is non-zero the
 * callback is removed after it first succeeds (see
 * token_callbacks_execute).  Returns 0 on success (also when the
 * instance handle cannot be resolved, preserving historical behavior),
 * -1 on allocation failure.
 *
 * Fix over the original: on malloc failure the instance handle
 * reference obtained via hdb_handle_get() was never released.
 */
int totemsrp_callback_token_create (
	hdb_handle_t handle,
	void **handle_out,
	enum totem_callback_token_type type,
	int delete,
	int (*callback_fn) (enum totem_callback_token_type type, const void *),
	const void *data)
{
	struct token_callback_instance *callback_handle;
	struct totemsrp_instance *instance;
	unsigned int res;

	res = hdb_handle_get (&totemsrp_instance_database, handle,
		(void *)&instance);
	if (res != 0) {
		goto error_exit;
	}
	token_hold_cancel_send (instance);

	callback_handle = malloc (sizeof (struct token_callback_instance));
	if (callback_handle == 0) {
		/* release the reference taken above before bailing out */
		hdb_handle_put (&totemsrp_instance_database, handle);
		return (-1);
	}
	*handle_out = (void *)callback_handle;
	list_init (&callback_handle->list);
	callback_handle->callback_fn = callback_fn;
	callback_handle->data = (void *) data;
	callback_handle->callback_type = type;
	callback_handle->delete = delete;
	switch (type) {
	case TOTEM_CALLBACK_TOKEN_RECEIVED:
		list_add (&callback_handle->list, &instance->token_callback_received_listhead);
		break;
	case TOTEM_CALLBACK_TOKEN_SENT:
		list_add (&callback_handle->list, &instance->token_callback_sent_listhead);
		break;
	}

	hdb_handle_put (&totemsrp_instance_database, handle);

error_exit:
	return (0);
}
/*
 * Unregister and free a token callback previously created by
 * totemsrp_callback_token_create().  *handle_out is cleared; a NULL
 * *handle_out is a no-op.  The 'handle' parameter is unused.
 */
void totemsrp_callback_token_destroy (hdb_handle_t handle, void **handle_out)
{
	struct token_callback_instance *callback_instance;

	if (*handle_out == 0) {
		return;
	}

	callback_instance = (struct token_callback_instance *)*handle_out;
	list_del (&callback_instance->list);
	free (callback_instance);
	*handle_out = 0;
}
/*
 * Run every callback registered for the given token event (received or
 * sent).  One-shot callbacks (delete == 1) are unlinked *before* the
 * call; if the callback returns -1 they are re-linked so they run
 * again on the next token, otherwise they are freed.  Persistent
 * callbacks stay registered regardless of return value.
 */
static void token_callbacks_execute (
	struct totemsrp_instance *instance,
	enum totem_callback_token_type type)
{
	struct list_head *list;
	struct list_head *list_next;
	struct list_head *callback_listhead = 0;
	struct token_callback_instance *token_callback_instance;
	int res;
	int del;

	switch (type) {
	case TOTEM_CALLBACK_TOKEN_RECEIVED:
		callback_listhead = &instance->token_callback_received_listhead;
		break;
	case TOTEM_CALLBACK_TOKEN_SENT:
		callback_listhead = &instance->token_callback_sent_listhead;
		break;
	default:
		assert (0);
	}

	/* list_next is captured up front because the entry may be
	 * unlinked and freed during the iteration */
	for (list = callback_listhead->next; list != callback_listhead;
		list = list_next) {

		token_callback_instance = list_entry (list, struct token_callback_instance, list);
		list_next = list->next;
		del = token_callback_instance->delete;
		if (del == 1) {
			list_del (list);
		}

		res = token_callback_instance->callback_fn (
			token_callback_instance->callback_type,
			token_callback_instance->data);
		/*
		 * This callback failed to execute, try it again on the next token
		 */
		if (res == -1 && del == 1) {
			list_add (list, callback_listhead);
		} else	if (del) {
			free (token_callback_instance);
		}
	}
}
/*
 * Flow control functions
 */

/*
 * Number of messages queued for transmission in the current membership
 * state: the new-message queue while operational, the retransmit queue
 * during recovery, zero in all other states.
 */
static unsigned int backlog_get (struct totemsrp_instance *instance)
{
	switch (instance->memb_state) {
	case MEMB_STATE_OPERATIONAL:
		return (cs_queue_used (&instance->new_message_queue));
	case MEMB_STATE_RECOVERY:
		return (cs_queue_used (&instance->retrans_message_queue));
	default:
		return (0);
	}
}
/*
 * Compute how many new multicasts this processor may originate on this
 * token rotation: capped by max_messages, by the remaining window
 * (window_size - token->fcc), and - when a ring-wide backlog exists -
 * by a share of the window proportional to our previous backlog.
 * Also refreshes instance->my_cbl (current backlog) as a side effect.
 */
static int fcc_calculate (
	struct totemsrp_instance *instance,
	struct orf_token *token)
{
	unsigned int transmits_allowed;
	unsigned int backlog_calc;

	transmits_allowed = instance->totem_config->max_messages;

	/* NOTE(review): assumes token->fcc <= window_size; if that
	 * invariant is ever violated this unsigned subtraction wraps -
	 * confirm it is maintained by fcc_token_update(). */
	if (transmits_allowed > instance->totem_config->window_size - token->fcc) {
		transmits_allowed = instance->totem_config->window_size - token->fcc;
	}

	instance->my_cbl = backlog_get (instance);

	/*
	 * Only do backlog calculation if there is a backlog otherwise
	 * we would result in div by zero
	 */
	if (token->backlog + instance->my_cbl - instance->my_pbl) {
		backlog_calc = (instance->totem_config->window_size * instance->my_pbl) /
			(token->backlog + instance->my_cbl - instance->my_pbl);
		if (backlog_calc > 0 && transmits_allowed > backlog_calc) {
			transmits_allowed = backlog_calc;
		}
	}

	return (transmits_allowed);
}
/*
 * don't overflow the RTR sort queue
 *
 * Zero out the transmit allowance when mcasting *transmits_allowed
 * more messages could push token->seq past the span of sequence
 * numbers the retransmit sort queue can hold beyond last_released.
 *
 * Fix over the original: the sanity assert computed an all-unsigned
 * expression and compared it >= 0, which is vacuously true; the
 * rearranged comparison below can actually fire if the configuration
 * makes the queue span negative.
 */
static void fcc_rtr_limit (
	struct totemsrp_instance *instance,
	struct orf_token *token,
	unsigned int *transmits_allowed)
{
	assert (QUEUE_RTR_ITEMS_SIZE_MAX >=
		*transmits_allowed + instance->totem_config->window_size);

	if (sq_lt_compare (instance->last_released +
		QUEUE_RTR_ITEMS_SIZE_MAX - *transmits_allowed -
		instance->totem_config->window_size,

		token->seq)) {
			*transmits_allowed = 0;
	}
}
/*
 * Fold this processor's contribution into the token's flow-control
 * fields before forwarding: replace our previous transmit count
 * (my_trc) with this rotation's, and adjust the ring backlog by the
 * change in our local backlog since the last rotation.
 */
static void fcc_token_update (
	struct totemsrp_instance *instance,
	struct orf_token *token,
	unsigned int msgs_transmitted)
{
	token->fcc += msgs_transmitted - instance->my_trc;
	token->backlog += instance->my_cbl - instance->my_pbl;
	/* NOTE(review): if token->backlog is an unsigned type this
	 * assert is always true - confirm the field's declaration. */
	assert (token->backlog >= 0);
	instance->my_trc = msgs_transmitted;
	instance->my_pbl = instance->my_cbl;
}
/*
 * Message Handlers
 */

/* Timestamp of the previous token arrival; only read/written inside
 * message_handler_orf_token under #ifdef GIVEINFO profiling. */
struct timeval tv_old;
/*
 * message handler called when TOKEN message type received
 */
/*
 * Core ORF token handler.  On each token visit this processor:
 * copies the token aside, tracks ring-activity for merge detection,
 * decides whether to hold the token (rep with idle ring), retransmits
 * requested messages, multicasts new messages subject to flow control,
 * updates the token's aru/flow-control fields, detects fail-to-receive,
 * drives the recovery-state retrans/install handshake, forwards the
 * token, and finally delivers any newly-orderable messages to the
 * application.  Returns 0 in all paths (discarded tokens included).
 */
static int message_handler_orf_token (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	char token_storage[1500];
	char token_convert[1500];
	struct orf_token *token = NULL;
	int forward_token;
	unsigned int transmits_allowed;
	unsigned int mcasted_retransmit;
	unsigned int mcasted_regular;
	unsigned int last_aru;

#ifdef GIVEINFO
	struct timeval tv_current;
	struct timeval tv_diff;

	gettimeofday (&tv_current, NULL);
	timersub (&tv_current, &tv_old, &tv_diff);
	memcpy (&tv_old, &tv_current, sizeof (struct timeval));

	log_printf (instance->totemsrp_log_level_notice,
		"Time since last token %0.4f ms\n",
		(((float)tv_diff.tv_sec) * 1000) + ((float)tv_diff.tv_usec)
			/ 1000.0);
#endif

#ifdef TEST_DROP_ORF_TOKEN_PERCENTAGE
	if (random()%100 < TEST_DROP_ORF_TOKEN_PERCENTAGE) {
		return (0);
	}
#endif

	if (endian_conversion_needed) {
		orf_token_endian_convert ((struct orf_token *)msg,
			(struct orf_token *)token_convert);
		msg = (struct orf_token *)token_convert;
	}

	/*
	 * Make copy of token and retransmit list in case we have
	 * to flush incoming messages from the kernel queue
	 */
	token = (struct orf_token *)token_storage;
	memcpy (token, msg, sizeof (struct orf_token));
	memcpy (&token->rtr_list[0], (char *)msg + sizeof (struct orf_token),
		sizeof (struct rtr_item) * RETRANSMIT_ENTRIES_MAX);

	/*
	 * Handle merge detection timeout: an unchanged token seq means no
	 * new multicasts happened during the whole rotation.
	 */
	if (token->seq == instance->my_last_seq) {
		start_merge_detect_timeout (instance);
		instance->my_seq_unchanged += 1;
	} else {
		cancel_merge_detect_timeout (instance);
		cancel_token_hold_retransmit_timeout (instance);
		instance->my_seq_unchanged = 0;
	}

	instance->my_last_seq = token->seq;

#ifdef TEST_RECOVERY_MSG_COUNT
	if (instance->memb_state == MEMB_STATE_OPERATIONAL && token->seq > TEST_RECOVERY_MSG_COUNT) {
		return (0);
	}
#endif

	totemrrp_recv_flush (instance->totemrrp_handle);

	/*
	 * Determine if we should hold (in reality drop) the token.
	 * The rep holds at > seqno_unchanged_const, others at >=, so the
	 * rep is the last to hold.
	 */
	instance->my_token_held = 0;
	if (totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0]) &&
		instance->my_seq_unchanged > instance->totem_config->seqno_unchanged_const) {
		instance->my_token_held = 1;
	} else
	if (!totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0]) &&
		instance->my_seq_unchanged >= instance->totem_config->seqno_unchanged_const) {
		instance->my_token_held = 1;
	}

	/*
	 * Hold onto token when there is no activity on ring and
	 * this processor is the ring rep
	 */
	forward_token = 1;
	if (totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0])) {
		if (instance->my_token_held) {
			forward_token = 0;
		}
	}

	token_callbacks_execute (instance, TOTEM_CALLBACK_TOKEN_RECEIVED);

	switch (instance->memb_state) {
	case MEMB_STATE_COMMIT:
		 /* Discard token */
		break;

	case MEMB_STATE_OPERATIONAL:
		messages_free (instance, token->aru);
	case MEMB_STATE_GATHER:
		/*
		 * DO NOT add break, we use different free mechanism in recovery state
		 */

	case MEMB_STATE_RECOVERY:
		last_aru = instance->my_last_aru;
		instance->my_last_aru = token->aru;

		/*
		 * Discard tokens from another configuration
		 */
		if (memcmp (&token->ring_id, &instance->my_ring_id,
			sizeof (struct memb_ring_id)) != 0) {

			if ((forward_token)
				&& instance->use_heartbeat) {
				reset_heartbeat_timeout(instance);
			}
			else {
				cancel_heartbeat_timeout(instance);
			}

			return (0); /* discard token */
		}

		/*
		 * Discard retransmitted tokens
		 */
		if (sq_lte_compare (token->token_seq, instance->my_token_seq)) {
			/*
			 * If this processor receives a retransmitted token, it is sure
		 	 * the previous processor is still alive.  As a result, it can
			 * reset its token timeout.  If some processor previous to that
			 * has failed, it will eventually not execute a reset of the
			 * token timeout, and will cause a reconfiguration to occur.
			 */
			reset_token_timeout (instance);

			if ((forward_token)
				&& instance->use_heartbeat) {
				reset_heartbeat_timeout(instance);
			}
			else {
				cancel_heartbeat_timeout(instance);
			}

			return (0); /* discard token */
		}

		/* retransmit requested messages, then multicast new ones
		 * within the flow-control allowance */
		transmits_allowed = fcc_calculate (instance, token);
		mcasted_retransmit = orf_token_rtr (instance, token, &transmits_allowed);
		fcc_rtr_limit (instance, token, &transmits_allowed);
		mcasted_regular = orf_token_mcast (instance, token, transmits_allowed);
		fcc_token_update (instance, token, mcasted_retransmit +
			mcasted_regular);

		/*
		 * Lower the token's aru to ours if we are behind, or take
		 * over the aru when we set it last rotation (aru_addr is us)
		 * or no one has claimed it yet.
		 */
		if (sq_lt_compare (instance->my_aru, token->aru) ||
			instance->my_id.addr[0].nodeid ==  token->aru_addr ||
			token->aru_addr == 0) {

			token->aru = instance->my_aru;
			if (token->aru == token->seq) {
				token->aru_addr = 0;
			} else {
				token->aru_addr = instance->my_id.addr[0].nodeid;
			}
		}
		if (token->aru == last_aru && token->aru_addr != 0) {
			instance->my_aru_count += 1;
		} else {
			instance->my_aru_count = 0;
		}

		/*
		 * Fail-to-receive: the ring's aru has been stuck (held back
		 * by some other node) for too many rotations.
		 */
		if (instance->my_aru_count > instance->totem_config->fail_to_recv_const &&
			token->aru_addr != instance->my_id.addr[0].nodeid) {

			log_printf (instance->totemsrp_log_level_error,
				"FAILED TO RECEIVE\n");
// TODO if we fail to receive, it may be possible to end with a gather
// state of proc == failed = 0 entries
/* THIS IS A BIG TODO
			memb_set_merge (&token->aru_addr, 1,
				instance->my_failed_list,
				&instance->my_failed_list_entries);
*/

			ring_state_restore (instance);
			memb_state_gather_enter (instance, 6);
		} else {
			instance->my_token_seq = token->token_seq;
			token->token_seq += 1;

			if (instance->memb_state == MEMB_STATE_RECOVERY) {
				/*
				 * instance->my_aru == instance->my_high_seq_received means this processor
				 * has recovered all messages it can recover
				 * (ie: its retrans queue is empty)
				 */
				if (cs_queue_is_empty (&instance->retrans_message_queue) == 0) {

					if (token->retrans_flg == 0) {
						token->retrans_flg = 1;
						instance->my_set_retrans_flg = 1;
					}
				} else
				if (token->retrans_flg == 1 && instance->my_set_retrans_flg) {
					token->retrans_flg = 0;
				}
				log_printf (instance->totemsrp_log_level_debug,
					"token retrans flag is %d my set retrans flag%d retrans queue empty %d count %d, aru %x\n",
					token->retrans_flg, instance->my_set_retrans_flg,
					cs_queue_is_empty (&instance->retrans_message_queue),
					instance->my_retrans_flg_count, token->aru);
				if (token->retrans_flg == 0) {
					instance->my_retrans_flg_count += 1;
				} else {
					instance->my_retrans_flg_count = 0;
				}
				if (instance->my_retrans_flg_count == 2) {
					instance->my_install_seq = token->seq;
				}
				log_printf (instance->totemsrp_log_level_debug,
					"install seq %x aru %x high seq received %x\n",
					instance->my_install_seq, instance->my_aru, instance->my_high_seq_received);
				if (instance->my_retrans_flg_count >= 2 &&
					instance->my_received_flg == 0 &&
					sq_lte_compare (instance->my_install_seq, instance->my_aru)) {
					instance->my_received_flg = 1;
					instance->my_deliver_memb_entries = instance->my_trans_memb_entries;
					memcpy (instance->my_deliver_memb_list, instance->my_trans_memb_list,
						sizeof (struct totem_ip_address) * instance->my_trans_memb_entries);
				}
				if (instance->my_retrans_flg_count >= 3 &&
					sq_lte_compare (instance->my_install_seq, token->aru)) {
					instance->my_rotation_counter += 1;
				} else {
					instance->my_rotation_counter = 0;
				}
				if (instance->my_rotation_counter == 2) {
				log_printf (instance->totemsrp_log_level_debug,
					"retrans flag count %x token aru %x install seq %x aru %x %x\n",
					instance->my_retrans_flg_count, token->aru, instance->my_install_seq,
					instance->my_aru, token->seq);

					/* recovery complete: install the new ring */
					memb_state_operational_enter (instance);
					instance->my_rotation_counter = 0;
					instance->my_retrans_flg_count = 0;
				}
			}

			totemrrp_send_flush (instance->totemrrp_handle);
			token_send (instance, token, forward_token);

#ifdef GIVEINFO
			gettimeofday (&tv_current, NULL);
			timersub (&tv_current, &tv_old, &tv_diff);
			memcpy (&tv_old, &tv_current, sizeof (struct timeval));
			log_printf (instance->totemsrp_log_level_notice,
				"I held %0.4f ms\n",
				((float)tv_diff.tv_usec) / 1000.0);
#endif
			if (instance->memb_state == MEMB_STATE_OPERATIONAL) {
				messages_deliver_to_app (instance, 0,
					instance->my_high_seq_received);
			}

			/*
			 * Deliver messages after token has been transmitted
			 * to improve performance
			 */
			reset_token_timeout (instance); // REVIEWED
			reset_token_retransmit_timeout (instance); // REVIEWED

			if (totemip_equal(&instance->my_id.addr[0], &instance->my_ring_id.rep) &&
				instance->my_token_held == 1) {

				start_token_hold_retransmit_timeout (instance);
			}

			token_callbacks_execute (instance, TOTEM_CALLBACK_TOKEN_SENT);
		}
		break;
	}

	if ((forward_token)
		&& instance->use_heartbeat) {
		reset_heartbeat_timeout(instance);
	}
	else {
		cancel_heartbeat_timeout(instance);
	}

	return (0);
}
/*
 * Deliver ordered messages from the regular sort queue to the
 * application, advancing instance->my_high_delivered up to (at most)
 * end_point.  Delivery stops at the first hole unless 'skip' is set,
 * in which case holes are passed over and messages not originated by a
 * member of my_deliver_memb_list are silently skipped (used during
 * recovery).  Performs any needed endian conversion per message.
 */
static void messages_deliver_to_app (
	struct totemsrp_instance *instance,
	int skip,
	unsigned int end_point)
{
	struct sort_queue_item *sort_queue_item_p;
	unsigned int i;
	int res;
	struct mcast *mcast_in;
	struct mcast mcast_header;
	unsigned int range = 0;
	int endian_conversion_required;
	unsigned int my_high_delivered_stored = 0;

	range = end_point - instance->my_high_delivered;

	if (range) {
		log_printf (instance->totemsrp_log_level_debug,
			"Delivering %x to %x\n", instance->my_high_delivered,
			end_point);
	}
	assert (range < 10240);
	my_high_delivered_stored = instance->my_high_delivered;

	/*
	 * Deliver messages in order from rtr queue to pending delivery queue
	 */
	for (i = 1; i <= range; i++) {

		void *ptr = 0;

		/*
		 * If out of range of sort queue, stop assembly
		 */
		res = sq_in_range (&instance->regular_sort_queue,
			my_high_delivered_stored + i);
		if (res == 0) {
			break;
		}

		res = sq_item_get (&instance->regular_sort_queue,
			my_high_delivered_stored + i, &ptr);
		/*
		 * If hole, stop assembly
		 */
		if (res != 0 && skip == 0) {
			break;
		}

		instance->my_high_delivered = my_high_delivered_stored + i;

		/* hole with skip set: advance past it without delivering */
		if (res != 0) {
			continue;
		}

		sort_queue_item_p = ptr;

		mcast_in = sort_queue_item_p->mcast;
		assert (mcast_in != (struct mcast *)0xdeadbeef);

		endian_conversion_required = 0;
		if (mcast_in->header.endian_detector != ENDIAN_LOCAL) {
			endian_conversion_required = 1;
			mcast_endian_convert (mcast_in, &mcast_header);
		} else {
			memcpy (&mcast_header, mcast_in, sizeof (struct mcast));
		}

		/*
		 * Skip messages not originated in instance->my_deliver_memb
		 */
		if (skip &&
			memb_set_subset (&mcast_header.system_from,
				1,
				instance->my_deliver_memb_list,
				instance->my_deliver_memb_entries) == 0) {

			instance->my_high_delivered = my_high_delivered_stored + i;

			continue;
		}

		/*
		 * Message found
		 */
		log_printf (instance->totemsrp_log_level_debug,
			"Delivering MCAST message with seq %x to pending delivery queue\n",
			mcast_header.seq);

		/*
		 * Message is locally originated multicast
		 */
		instance->totemsrp_deliver_fn (
			mcast_header.header.nodeid,
			((char *)sort_queue_item_p->mcast) + sizeof (struct mcast),
			sort_queue_item_p->msg_len - sizeof (struct mcast),
			endian_conversion_required);
	}
}
/*
 * recv message handler called when MCAST message type received
 */

/*
 * Handle an incoming multicast: route it to the recovery or regular
 * sort queue (encapsulated messages belong to recovery), trigger a
 * gather when the message comes from a foreign ring configuration,
 * otherwise store a copy in the sort queue, refresh my_aru, and - in
 * the operational state - deliver newly-orderable messages.  Returns 0
 * normally, -1 only on allocation failure (corrected later by
 * retransmission).
 */
static int message_handler_mcast (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	struct sort_queue_item sort_queue_item;
	struct sq *sort_queue;
	struct mcast mcast_header;

	if (endian_conversion_needed) {
		mcast_endian_convert (msg, &mcast_header);
	} else {
		memcpy (&mcast_header, msg, sizeof (struct mcast));
	}

	if (mcast_header.header.encapsulated == MESSAGE_ENCAPSULATED) {
		sort_queue = &instance->recovery_sort_queue;
	} else {
		sort_queue = &instance->regular_sort_queue;
	}

	assert (msg_len < FRAME_SIZE_MAX);

#ifdef TEST_DROP_MCAST_PERCENTAGE
	if (random()%100 < TEST_DROP_MCAST_PERCENTAGE) {
		printf ("dropping message %d\n", mcast_header.seq);
		return (0);
	} else {
		printf ("accepting message %d\n", mcast_header.seq);
	}
#endif

	/* a multicast from another node proves the token moved on;
	 * no need to retransmit it ourselves */
	if (srp_addr_equal (&mcast_header.system_from, &instance->my_id) == 0) {
		cancel_token_retransmit_timeout (instance);
	}

	/*
	 * If the message is foreign execute the switch below
	 */
	if (memcmp (&instance->my_ring_id, &mcast_header.ring_id,
		sizeof (struct memb_ring_id)) != 0) {

		switch (instance->memb_state) {
		case MEMB_STATE_OPERATIONAL:
			memb_set_merge (
				&mcast_header.system_from, 1,
				instance->my_proc_list, &instance->my_proc_list_entries);
			memb_state_gather_enter (instance, 7);
			break;

		case MEMB_STATE_GATHER:
			if (!memb_set_subset (
				&mcast_header.system_from,
				1,
				instance->my_proc_list,
				instance->my_proc_list_entries)) {

				memb_set_merge (&mcast_header.system_from, 1,
					instance->my_proc_list, &instance->my_proc_list_entries);
				memb_state_gather_enter (instance, 8);
				return (0);
			}
			break;

		case MEMB_STATE_COMMIT:
			/* discard message */
			break;

		case MEMB_STATE_RECOVERY:
			/* discard message */
			break;
		}
		return (0);
	}

	log_printf (instance->totemsrp_log_level_debug,
		"Received ringid(%s:%lld) seq %x\n",
		totemip_print (&mcast_header.ring_id.rep),
		mcast_header.ring_id.seq,
		mcast_header.seq);

	/*
	 * Add mcast message to rtr queue if not already in rtr queue
	 * otherwise free io vectors
	 */
	if (msg_len > 0 && msg_len < FRAME_SIZE_MAX &&
		sq_in_range (sort_queue, mcast_header.seq) &&
		sq_item_inuse (sort_queue, mcast_header.seq) == 0) {

		/*
		 * Allocate new multicast memory block
		 */
// TODO LEAK
		sort_queue_item.mcast = malloc (msg_len);
		if (sort_queue_item.mcast == NULL) {
			return (-1); /* error here is corrected by the algorithm */
		}
		memcpy (sort_queue_item.mcast, msg, msg_len);
		sort_queue_item.msg_len = msg_len;

		if (sq_lt_compare (instance->my_high_seq_received,
			mcast_header.seq)) {
			instance->my_high_seq_received = mcast_header.seq;
		}

		sq_item_add (sort_queue, &sort_queue_item, mcast_header.seq);
	}

	update_aru (instance);
	if (instance->memb_state == MEMB_STATE_OPERATIONAL) {
		messages_deliver_to_app (instance, 0, instance->my_high_seq_received);
	}

/* TODO remove from retrans message queue for old ring in recovery state */
	return (0);
}
/*
 * Handle a merge-detect message.  A merge-detect from our own ring is
 * ignored; one from a foreign ring merges the sender into the proc
 * list and (re-)enters the gather state when operational, or when
 * gathering and the sender is not yet known.  Commit and recovery
 * states ignore it.  Always returns 0.
 */
static int message_handler_memb_merge_detect (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	struct memb_merge_detect memb_merge_detect;

	if (endian_conversion_needed) {
		memb_merge_detect_endian_convert (msg, &memb_merge_detect);
	} else {
		memcpy (&memb_merge_detect, msg,
			sizeof (struct memb_merge_detect));
	}

	/*
	 * do nothing if this is a merge detect from this configuration
	 */
	if (memcmp (&instance->my_ring_id, &memb_merge_detect.ring_id,
		sizeof (struct memb_ring_id)) == 0) {

		return (0);
	}

	/*
	 * Execute merge operation
	 */
	switch (instance->memb_state) {
	case MEMB_STATE_OPERATIONAL:
		memb_set_merge (&memb_merge_detect.system_from, 1,
			instance->my_proc_list, &instance->my_proc_list_entries);
		memb_state_gather_enter (instance, 9);
		break;

	case MEMB_STATE_GATHER:
		if (!memb_set_subset (
			&memb_merge_detect.system_from,
			1,
			instance->my_proc_list,
			instance->my_proc_list_entries)) {

			memb_set_merge (&memb_merge_detect.system_from, 1,
				instance->my_proc_list, &instance->my_proc_list_entries);
			memb_state_gather_enter (instance, 10);
			return (0);
		}
		break;

	case MEMB_STATE_COMMIT:
		/* do nothing in commit */
		break;

	case MEMB_STATE_RECOVERY:
		/* do nothing in recovery */
		break;
	}
	return (0);
}
/*
 * Process a received JOIN message against our own proc/failed lists:
 *  - lists identical to ours: record consensus from the sender; if
 *    consensus is reached and we have the lowest address, create the
 *    commit token and enter the commit state;
 *  - sender's lists a subset of ours, or sender is on our failed
 *    list: nothing to learn, ignore;
 *  - otherwise: merge the sender's lists into ours (if the sender
 *    claims WE failed, only the sender is marked failed) and
 *    (re-)enter gather.
 * Returns 1 when the gather state was entered, 0 otherwise.
 */
static int memb_join_process (
	struct totemsrp_instance *instance,
	const struct memb_join *memb_join)
{
	/* NOTE(review): declared as an array of pointers, giving
	 * pointer-size * TOKEN_SIZE_MAX bytes of (pointer-aligned)
	 * scratch - presumably only TOKEN_SIZE_MAX bytes were intended;
	 * over-allocation is harmless but confirm intent. */
	unsigned char *commit_token_storage[TOKEN_SIZE_MAX];
	struct memb_commit_token *my_commit_token =
		(struct memb_commit_token *)commit_token_storage;
	struct srp_addr *proc_list;
	struct srp_addr *failed_list;

	/* the two lists follow the fixed header in the message */
	proc_list = (struct srp_addr *)memb_join->end_of_memb_join;
	failed_list = proc_list + memb_join->proc_list_entries;

	if (memb_set_equal (proc_list,
		memb_join->proc_list_entries,
		instance->my_proc_list,
		instance->my_proc_list_entries) &&

	memb_set_equal (failed_list,
		memb_join->failed_list_entries,
		instance->my_failed_list,
		instance->my_failed_list_entries)) {

		memb_consensus_set (instance, &memb_join->system_from);

		if (memb_consensus_agreed (instance) &&
			memb_lowest_in_config (instance)) {

			memb_state_commit_token_create (instance, my_commit_token);

			memb_state_commit_enter (instance, my_commit_token);
		} else {
			return (0);
		}
	} else
	if (memb_set_subset (proc_list,
		memb_join->proc_list_entries,
		instance->my_proc_list,
		instance->my_proc_list_entries) &&

		memb_set_subset (failed_list,
		memb_join->failed_list_entries,
		instance->my_failed_list,
		instance->my_failed_list_entries)) {

		return (0);
	} else
	if (memb_set_subset (&memb_join->system_from, 1,
		instance->my_failed_list, instance->my_failed_list_entries)) {

		return (0);
	} else {
		memb_set_merge (proc_list,
			memb_join->proc_list_entries,
			instance->my_proc_list, &instance->my_proc_list_entries);

		if (memb_set_subset (
			&instance->my_id, 1,
			failed_list, memb_join->failed_list_entries)) {

			memb_set_merge (
				&memb_join->system_from, 1,
				instance->my_failed_list, &instance->my_failed_list_entries);
		} else {
			memb_set_merge (failed_list,
				memb_join->failed_list_entries,
				instance->my_failed_list, &instance->my_failed_list_entries);
		}
		memb_state_gather_enter (instance, 11);
		return (1); /* gather entered */
	}
	return (0); /* gather not entered */
}
/*
 * Byte-swap a JOIN message from foreign endianness into host order,
 * including the proc and failed srp_addr lists that trail the fixed
 * header.  The list counts must be converted first since they locate
 * the failed list within the tail.
 */
static void memb_join_endian_convert (const struct memb_join *in, struct memb_join *out)
{
	int i;
	struct srp_addr *in_proc_list;
	struct srp_addr *in_failed_list;
	struct srp_addr *out_proc_list;
	struct srp_addr *out_failed_list;

	out->header.type = in->header.type;
	out->header.endian_detector = ENDIAN_LOCAL;
	out->header.nodeid = swab32 (in->header.nodeid);
	srp_addr_copy_endian_convert (&out->system_from, &in->system_from);
	out->proc_list_entries = swab32 (in->proc_list_entries);
	out->failed_list_entries = swab32 (in->failed_list_entries);
	out->ring_seq = swab64 (in->ring_seq);

	/* locate both tails using the already-converted counts */
	in_proc_list = (struct srp_addr *)in->end_of_memb_join;
	in_failed_list = in_proc_list + out->proc_list_entries;
	out_proc_list = (struct srp_addr *)out->end_of_memb_join;
	out_failed_list = out_proc_list + out->proc_list_entries;

	for (i = 0; i < out->proc_list_entries; i++) {
		srp_addr_copy_endian_convert (&out_proc_list[i], &in_proc_list[i]);
	}

	for (i = 0; i < out->failed_list_entries; i++) {
		srp_addr_copy_endian_convert (&out_failed_list[i], &in_failed_list[i]);
	}
}
/*
 * Convert a memb_commit_token from foreign (wire) byte order into host
 * byte order.  The token carries two variable-length arrays after the
 * fixed part: addr_entries srp_addr entries, immediately followed by
 * addr_entries memb_commit_token_memb_entry records.
 *
 * NOTE(review): addr_entries is wire data and is not bounds-checked
 * against the message length here — presumably validated by the caller;
 * confirm.
 */
static void memb_commit_token_endian_convert (const struct memb_commit_token *in, struct memb_commit_token *out)
{
	int i;
	struct srp_addr *in_addr = (struct srp_addr *)in->end_of_commit_token;
	struct srp_addr *out_addr = (struct srp_addr *)out->end_of_commit_token;
	struct memb_commit_token_memb_entry *in_memb_list;
	struct memb_commit_token_memb_entry *out_memb_list;

	out->header.type = in->header.type;
	out->header.endian_detector = ENDIAN_LOCAL;
	out->header.nodeid = swab32 (in->header.nodeid);
	out->token_seq = swab32 (in->token_seq);
	totemip_copy_endian_convert(&out->ring_id.rep, &in->ring_id.rep);
	out->ring_id.seq = swab64 (in->ring_id.seq);
	out->retrans_flg = swab32 (in->retrans_flg);
	out->memb_index = swab32 (in->memb_index);
	out->addr_entries = swab32 (in->addr_entries);

	/*
	 * The memb entry array follows the addr array; out->addr_entries
	 * has already been swapped above, so it is safe to use here.
	 */
	in_memb_list = (struct memb_commit_token_memb_entry *)(in_addr + out->addr_entries);
	out_memb_list = (struct memb_commit_token_memb_entry *)(out_addr + out->addr_entries);
	for (i = 0; i < out->addr_entries; i++) {
		srp_addr_copy_endian_convert (&out_addr[i], &in_addr[i]);

		/*
		 * Only convert the memb entry if it has been set
		 * (family == 0 marks an unused slot).
		 */
		if (in_memb_list[i].ring_id.rep.family != 0) {
			totemip_copy_endian_convert (&out_memb_list[i].ring_id.rep,
				&in_memb_list[i].ring_id.rep);
			out_memb_list[i].ring_id.seq =
				swab64 (in_memb_list[i].ring_id.seq);
			out_memb_list[i].aru = swab32 (in_memb_list[i].aru);
			out_memb_list[i].high_delivered = swab32 (in_memb_list[i].high_delivered);
			out_memb_list[i].received_flg = swab32 (in_memb_list[i].received_flg);
		}
	}
}
/*
 * Translate an orf_token received in foreign byte order into the host
 * representation.  All fixed fields are byte-swapped, then every entry
 * of the embedded retransmit-request list is converted.
 */
static void orf_token_endian_convert (const struct orf_token *in, struct orf_token *out)
{
	int idx;

	/* Fixed message header. */
	out->header.type = in->header.type;
	out->header.endian_detector = ENDIAN_LOCAL;
	out->header.nodeid = swab32 (in->header.nodeid);

	/* Ring identifier carried by the token. */
	totemip_copy_endian_convert(&out->ring_id.rep, &in->ring_id.rep);
	out->ring_id.seq = swab64 (in->ring_id.seq);

	/* Sequencing and flow-control fields. */
	out->seq = swab32 (in->seq);
	out->token_seq = swab32 (in->token_seq);
	out->aru = swab32 (in->aru);
	out->aru_addr = swab32(in->aru_addr);
	out->fcc = swab32 (in->fcc);
	out->backlog = swab32 (in->backlog);
	out->retrans_flg = swab32 (in->retrans_flg);
	out->rtr_list_entries = swab32 (in->rtr_list_entries);

	/* Convert each retransmit request entry carried on the token. */
	for (idx = 0; idx < out->rtr_list_entries; idx++) {
		totemip_copy_endian_convert(&out->rtr_list[idx].ring_id.rep, &in->rtr_list[idx].ring_id.rep);
		out->rtr_list[idx].ring_id.seq = swab64 (in->rtr_list[idx].ring_id.seq);
		out->rtr_list[idx].seq = swab32 (in->rtr_list[idx].seq);
	}
}
/*
 * Translate a multicast message header received in foreign byte order
 * into the host representation.  The encapsulated flag and type are
 * single bytes and are copied verbatim.
 */
static void mcast_endian_convert (const struct mcast *in, struct mcast *out)
{
	/* Single-byte fields need no swapping. */
	out->header.type = in->header.type;
	out->header.encapsulated = in->header.encapsulated;
	out->header.endian_detector = ENDIAN_LOCAL;
	out->header.nodeid = swab32 (in->header.nodeid);

	/* Ring identifier of the ring the message was sent on. */
	totemip_copy_endian_convert(&out->ring_id.rep, &in->ring_id.rep);
	out->ring_id.seq = swab64 (in->ring_id.seq);

	/* Message sequencing and origin. */
	out->seq = swab32 (in->seq);
	out->this_seqno = swab32 (in->this_seqno);
	out->node_id = swab32 (in->node_id);
	out->guarantee = swab32 (in->guarantee);
	srp_addr_copy_endian_convert (&out->system_from, &in->system_from);
}
/*
 * Translate a merge-detect message received in foreign byte order into
 * the host representation.
 */
static void memb_merge_detect_endian_convert (
	const struct memb_merge_detect *in,
	struct memb_merge_detect *out)
{
	/* Header fields. */
	out->header.type = in->header.type;
	out->header.endian_detector = ENDIAN_LOCAL;
	out->header.nodeid = swab32 (in->header.nodeid);

	/* Originating ring and sender address. */
	srp_addr_copy_endian_convert (&out->system_from, &in->system_from);
	totemip_copy_endian_convert(&out->ring_id.rep, &in->ring_id.rep);
	out->ring_id.seq = swab64 (in->ring_id.seq);
}
/*
 * Handle a received memb_join message: feed it into the membership
 * state machine.  The reaction depends on the current membership state;
 * see the switch below.  Always returns 0.
 */
static int message_handler_memb_join (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	const struct memb_join *memb_join;
	/* Scratch copy used only when the message needs byte swapping. */
	struct memb_join *memb_join_convert = alloca (msg_len);
	int gather_entered;

	if (endian_conversion_needed) {
		memb_join = memb_join_convert;
		memb_join_endian_convert (msg, memb_join_convert);
	} else {
		memb_join = msg;
	}

	/* Track the highest ring sequence number seen so far. */
	if (instance->token_ring_id_seq < memb_join->ring_seq) {
		instance->token_ring_id_seq = memb_join->ring_seq;
	}
	switch (instance->memb_state) {
	case MEMB_STATE_OPERATIONAL:
		/*
		 * A join while operational means membership is changing;
		 * enter gather if memb_join_process didn't already.
		 */
		gather_entered = memb_join_process (instance,
			memb_join);
		if (gather_entered == 0) {
			memb_state_gather_enter (instance, 12);
		}
		break;

	case MEMB_STATE_GATHER:
		memb_join_process (instance, memb_join);
		break;

	case MEMB_STATE_COMMIT:
		/*
		 * Only react to joins from members of the new ring with a
		 * ring sequence at least as new as ours; stale or foreign
		 * joins are ignored.
		 */
		if (memb_set_subset (&memb_join->system_from,
			1,
			instance->my_new_memb_list,
			instance->my_new_memb_entries) &&

			memb_join->ring_seq >= instance->my_ring_id.seq) {

			memb_join_process (instance, memb_join);
			memb_state_gather_enter (instance, 13);
		}
		break;

	case MEMB_STATE_RECOVERY:
		/*
		 * Same filter as COMMIT, but the in-progress recovery must
		 * first be rolled back before re-entering gather.
		 */
		if (memb_set_subset (&memb_join->system_from,
			1,
			instance->my_new_memb_list,
			instance->my_new_memb_entries) &&

			memb_join->ring_seq >= instance->my_ring_id.seq) {

			ring_state_restore (instance);

			memb_join_process (instance, memb_join);
			memb_state_gather_enter (instance, 14);
		}
		break;
	}
	return (0);
}
/*
 * Handle a received membership commit token.  The commit token circles
 * the proposed new ring; depending on the current membership state it
 * either advances the state machine (GATHER -> COMMIT -> RECOVERY) or
 * is discarded.  Always returns 0.
 */
static int message_handler_memb_commit_token (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	struct memb_commit_token *memb_commit_token_convert = alloca (msg_len);
	struct memb_commit_token *memb_commit_token;
	struct srp_addr sub[PROCESSOR_COUNT_MAX];
	int sub_entries;

	struct srp_addr *addr;
	struct memb_commit_token_memb_entry *memb_list;

	log_printf (instance->totemsrp_log_level_debug,
		"got commit token\n");

	/*
	 * Always work on a local copy: either byte-swapped or memcpy'd,
	 * so the token can be modified/forwarded safely.
	 */
	if (endian_conversion_needed) {
		memb_commit_token_endian_convert (msg, memb_commit_token_convert);
	} else {
		memcpy (memb_commit_token_convert, msg, msg_len);
	}
	memb_commit_token = memb_commit_token_convert;
	/* Variable-length tail: addr_entries addresses, then memb entries. */
	addr = (struct srp_addr *)memb_commit_token->end_of_commit_token;
	memb_list = (struct memb_commit_token_memb_entry *)(addr + memb_commit_token->addr_entries);

#ifdef TEST_DROP_COMMIT_TOKEN_PERCENTAGE
	if (random()%100 < TEST_DROP_COMMIT_TOKEN_PERCENTAGE) {
		return (0);
	}
#endif
	switch (instance->memb_state) {
		case MEMB_STATE_OPERATIONAL:
			/* discard token */
			break;

		case MEMB_STATE_GATHER:
			/*
			 * Enter commit only if the token's member list matches
			 * exactly our (proc - failed) set and it belongs to a
			 * newer ring than the current one.
			 */
			memb_set_subtract (sub, &sub_entries,
				instance->my_proc_list, instance->my_proc_list_entries,
				instance->my_failed_list, instance->my_failed_list_entries);

			if (memb_set_equal (addr,
				memb_commit_token->addr_entries,
				sub,
				sub_entries) &&

				memb_commit_token->ring_id.seq > instance->my_ring_id.seq) {

				memb_state_commit_enter (instance, memb_commit_token);
			}
			break;

		case MEMB_STATE_COMMIT:
			/*
			 * If retransmitted commit tokens are sent on this ring
			 * filter them out and only enter recovery once the
			 * commit token has traversed the array.  This is
			 * determined by :
			 * memb_commit_token->memb_index == memb_commit_token->addr_entries) {
			 */
			if (memb_commit_token->ring_id.seq == instance->my_ring_id.seq &&
				memb_commit_token->memb_index == memb_commit_token->addr_entries) {

				memb_state_recovery_enter (instance, memb_commit_token);
			}
			break;

		case MEMB_STATE_RECOVERY:
			/*
			 * Only the ring representative originates the first ORF
			 * token for the new ring.
			 */
			if (totemip_equal (&instance->my_id.addr[0], &instance->my_ring_id.rep)) {
				log_printf (instance->totemsrp_log_level_notice,
					"Sending initial ORF token\n");

				// TODO convert instead of initiate
				orf_token_send_initial (instance);
				reset_token_timeout (instance); // REVIEWED
				reset_token_retransmit_timeout (instance); // REVIEWED
			}
			break;
	}
	return (0);
}
/*
 * Handle a token-hold-cancel message: another node wants the token to
 * circulate again without the hold delay.  Always returns 0.
 *
 * NOTE(review): endian_conversion_needed is unused here — the ring id
 * is only compared bytewise, which presumably still matches when both
 * nodes share the ring; confirm this is intentional.
 */
static int message_handler_token_hold_cancel (
	struct totemsrp_instance *instance,
	const void *msg,
	size_t msg_len,
	int endian_conversion_needed)
{
	const struct token_hold_cancel *cancel_msg = msg;

	/* Ignore cancels that refer to a ring other than the current one. */
	if (memcmp (&cancel_msg->ring_id, &instance->my_ring_id,
		sizeof (struct memb_ring_id)) != 0) {
		return (0);
	}

	instance->my_seq_unchanged = 0;

	/* Only the ring representative retransmits the token right away. */
	if (totemip_equal(&instance->my_ring_id.rep, &instance->my_id.addr[0])) {
		timer_function_token_retransmit_timeout (instance);
	}

	return (0);
}
/*
 * Entry point for every message received from the network layer.
 * Validates the length and message type, then dispatches to the
 * handler registered for that type.  Messages whose endian detector
 * differs from the local one are flagged for conversion.
 */
void main_deliver_fn (
	void *context,
	const void *msg,
	unsigned int msg_len)
{
	struct totemsrp_instance *instance = context;
	const struct message_header *message_header = msg;

	if (msg_len < sizeof (struct message_header)) {
		log_printf (instance->totemsrp_log_level_security,
			"Received message is too short... ignoring %u.\n",
			(unsigned int)msg_len);
		return;
	}

	/*
	 * The type comes straight off the wire.  Check the lower bound as
	 * well as the upper one: a negative value would otherwise index
	 * before the start of the handler table.
	 */
	if ((int)message_header->type < 0 ||
	    (int)message_header->type >= totemsrp_message_handlers.count) {
		log_printf (instance->totemsrp_log_level_security, "Type of received message is wrong... ignoring %d.\n", (int)message_header->type);
		return;
	}

	/*
	 * Handle incoming message
	 */
	totemsrp_message_handlers.handler_functions[(int)message_header->type] (
		instance,
		msg,
		msg_len,
		message_header->endian_detector != ENDIAN_LOCAL);
}
/*
 * Callback invoked by the lower network layer when interface iface_no
 * has been bound to iface_addr.  Records the address, creates or loads
 * the ring sequence id on the first interface, and starts membership
 * formation once all configured interfaces are up.
 *
 * NOTE(review): iface_no is used to index fixed-size addr[] arrays with
 * no bounds check — presumably the caller guarantees
 * iface_no < interface_count; confirm.
 */
void main_iface_change_fn (
	void *context,
	const struct totem_ip_address *iface_addr,
	unsigned int iface_no)
{
	struct totemsrp_instance *instance = context;

	totemip_copy (&instance->my_id.addr[iface_no], iface_addr);
	assert (instance->my_id.addr[iface_no].nodeid);

	totemip_copy (&instance->my_memb_list[0].addr[iface_no], iface_addr);

	/* First interface up: create or restore the ring sequence id. */
	if (instance->iface_changes++ == 0) {
		memb_ring_id_create_or_load (instance, &instance->my_ring_id);
		log_printf (
			instance->totemsrp_log_level_notice,
			"Created or loaded sequence id %lld.%s for this ring.\n",
			instance->my_ring_id.seq,
			totemip_print (&instance->my_ring_id.rep));
	}
	/* All configured interfaces are up: start forming membership. */
	if (instance->iface_changes >= instance->totem_config->interface_count) {
		memb_state_gather_enter (instance, 15);
	}
}
/*
 * Reduce the configured network MTU by the totemsrp multicast header
 * overhead so that upper layers see the usable payload size.
 */
void totemsrp_net_mtu_adjust (struct totem_config *totem_config) {
	totem_config->net_mtu = totem_config->net_mtu - sizeof (struct mcast);
}
diff --git a/include/corosync/engine/logsys.h b/include/corosync/engine/logsys.h
index 58e24cdf..b34b327b 100644
--- a/include/corosync/engine/logsys.h
+++ b/include/corosync/engine/logsys.h
@@ -1,437 +1,431 @@
/*
* Copyright (c) 2002-2004 MontaVista Software, Inc.
* Copyright (c) 2006-2009 Red Hat, Inc.
*
* Author: Steven Dake (sdake@redhat.com)
* Author: Lon Hohberger (lhh@redhat.com)
* Author: Fabio M. Di Nitto (fdinitto@redhat.com)
*
* All rights reserved.
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LOGSYS_H_DEFINED
#define LOGSYS_H_DEFINED
#include <stdarg.h>
#include <stdlib.h>
#include <syslog.h>
#include <pthread.h>
#include <limits.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* All of the LOGSYS_MODE's can be ORed together for combined behavior
*
* FORK and THREADED are ignored for SUBSYSTEMS
*/
#define LOGSYS_MODE_OUTPUT_FILE (1<<0)
#define LOGSYS_MODE_OUTPUT_STDERR (1<<1)
#define LOGSYS_MODE_OUTPUT_SYSLOG (1<<2)
#define LOGSYS_MODE_FORK (1<<3)
#define LOGSYS_MODE_THREADED (1<<4)
/*
* Log priorities, compliant with syslog and SA Forum Log spec.
*/
#define LOGSYS_LEVEL_EMERG LOG_EMERG
#define LOGSYS_LEVEL_ALERT LOG_ALERT
#define LOGSYS_LEVEL_CRIT LOG_CRIT
#define LOGSYS_LEVEL_ERROR LOG_ERR
#define LOGSYS_LEVEL_WARNING LOG_WARNING
#define LOGSYS_LEVEL_NOTICE LOG_NOTICE
#define LOGSYS_LEVEL_INFO LOG_INFO
#define LOGSYS_LEVEL_DEBUG LOG_DEBUG
/*
* All of the LOGSYS_RECID's are mutually exclusive. Only one RECID at any time
* can be specified.
*
* RECID_LOG indicates a message that should be sent to log. Anything else
* is stored only in the flight recorder.
*/
#define LOGSYS_RECID_MAX ((UINT_MAX) >> LOGSYS_SUBSYSID_END)
#define LOGSYS_RECID_LOG (LOGSYS_RECID_MAX - 1)
#define LOGSYS_RECID_ENTER (LOGSYS_RECID_MAX - 2)
#define LOGSYS_RECID_LEAVE (LOGSYS_RECID_MAX - 3)
#define LOGSYS_RECID_TRACE1 (LOGSYS_RECID_MAX - 4)
#define LOGSYS_RECID_TRACE2 (LOGSYS_RECID_MAX - 5)
#define LOGSYS_RECID_TRACE3 (LOGSYS_RECID_MAX - 6)
#define LOGSYS_RECID_TRACE4 (LOGSYS_RECID_MAX - 7)
#define LOGSYS_RECID_TRACE5 (LOGSYS_RECID_MAX - 8)
#define LOGSYS_RECID_TRACE6 (LOGSYS_RECID_MAX - 9)
#define LOGSYS_RECID_TRACE7 (LOGSYS_RECID_MAX - 10)
#define LOGSYS_RECID_TRACE8 (LOGSYS_RECID_MAX - 11)
/*
* Internal APIs that must be globally exported
* (External API below)
*/
/*
* logsys_logger bits
*
* SUBSYS_COUNT defines the maximum number of subsystems
* SUBSYS_NAMELEN defines the maximum len of a subsystem name
*/
#define LOGSYS_MAX_SUBSYS_COUNT 64
#define LOGSYS_MAX_SUBSYS_NAMELEN 64
/*
* rec_ident explained:
*
* rec_ident is an unsigned int and carries bitfields information
* on subsys_id, log priority (level) and type of message (RECID).
*
* level values are imported from syslog.h.
* At the time of writing it's a 3 bits value (0 to 7).
*
* subsys_id is any value between 0 and 64 (LOGSYS_MAX_SUBSYS_COUNT)
*
* RECID identifies the type of message. A set of predefined values
* are available via logsys, but other custom values can be defined
* by users.
*
* ----
* bitfields:
*
* 0 - 2 level
* 3 - 9 subsysid
* 10 - n RECID
*/
#define LOGSYS_LEVEL_END (3)
#define LOGSYS_SUBSYSID_END (LOGSYS_LEVEL_END + 7)
#define LOGSYS_RECID_LEVEL_MASK (LOG_PRIMASK)
#define LOGSYS_RECID_SUBSYSID_MASK ((2 << (LOGSYS_SUBSYSID_END - 1)) - \
(LOG_PRIMASK + 1))
#define LOGSYS_RECID_RECID_MASK (UINT_MAX - \
(LOGSYS_RECID_SUBSYSID_MASK + LOG_PRIMASK))
#define LOGSYS_ENCODE_RECID(level,subsysid,recid) \
(((recid) << LOGSYS_SUBSYSID_END) | \
((subsysid) << LOGSYS_LEVEL_END) | \
(level))
#define LOGSYS_DECODE_LEVEL(rec_ident) \
((rec_ident) & LOGSYS_RECID_LEVEL_MASK)
#define LOGSYS_DECODE_SUBSYSID(rec_ident) \
(((rec_ident) & LOGSYS_RECID_SUBSYSID_MASK) >> LOGSYS_LEVEL_END)
#define LOGSYS_DECODE_RECID(rec_ident) \
(((rec_ident) & LOGSYS_RECID_RECID_MASK) >> LOGSYS_SUBSYSID_END)
#ifndef LOGSYS_UTILS_ONLY
extern int _logsys_system_setup(
const char *mainsystem,
unsigned int mode,
unsigned int debug,
const char *logfile,
int logfile_priority,
int syslog_facility,
int syslog_priority);
extern int _logsys_config_subsys_get (
const char *subsys);
extern unsigned int _logsys_subsys_create (const char *subsys);
extern int _logsys_rec_init (unsigned int size);
extern void _logsys_log_vprintf (
- int subsysid,
+ unsigned int rec_ident,
const char *function_name,
const char *file_name,
int file_line,
- unsigned int level,
- unsigned int rec_ident,
const char *format,
- va_list ap) __attribute__((format(printf, 7, 0)));
+ va_list ap) __attribute__((format(printf, 5, 0)));
extern void _logsys_log_printf (
- int subsysid,
+ unsigned int rec_ident,
const char *function_name,
const char *file_name,
int file_line,
- unsigned int level,
- unsigned int rec_ident,
const char *format,
- ...) __attribute__((format(printf, 7, 8)));
+ ...) __attribute__((format(printf, 5, 6)));
extern void _logsys_log_rec (
- int subsysid,
+ unsigned int rec_ident,
const char *function_name,
const char *file_name,
int file_line,
- unsigned int level,
- unsigned int rec_ident,
...);
extern int _logsys_wthread_create (void);
-static unsigned int logsys_subsys_id __attribute__((unused)) = -1;
+static int logsys_subsys_id __attribute__((unused)) = LOGSYS_MAX_SUBSYS_COUNT;
/*
* External API - init
* See below:
*
* LOGSYS_DECLARE_SYSTEM
* LOGSYS_DECLARE_SUBSYS
*
*/
extern void logsys_fork_completed (void);
extern void logsys_atexit (void);
/*
* External API - misc
*/
extern void logsys_flush (void);
extern int logsys_log_rec_store (const char *filename);
/*
* External API - configuration
*/
/*
* configuration bits that can only be done for the whole system
*/
extern int logsys_format_set (
const char *format);
extern char *logsys_format_get (void);
/*
* per system/subsystem settings.
*
* NOTE: once a subsystem is created and configured, changing
* the default does NOT affect the subsystems.
*
* Pass a NULL subsystem to change them all
*/
extern unsigned int logsys_config_syslog_facility_set (
const char *subsys,
unsigned int facility);
extern unsigned int logsys_config_syslog_priority_set (
const char *subsys,
unsigned int priority);
extern unsigned int logsys_config_mode_set (
const char *subsys,
unsigned int mode);
extern unsigned int logsys_config_mode_get (
const char *subsys);
/*
* to close a logfile, just invoke this function with a NULL
* file or if you want to change logfile, the old one will
* be closed for you.
*/
extern int logsys_config_file_set (
const char *subsys,
const char **error_string,
const char *file);
extern unsigned int logsys_config_logfile_priority_set (
const char *subsys,
unsigned int priority);
/*
* enabling debug, disable message priority filtering.
* everything is sent everywhere. priority values
* for file and syslog are not overwritten.
*/
extern unsigned int logsys_config_debug_set (
const char *subsys,
unsigned int value);
/*
* External API - helpers
*
* convert facility/priority to/from name/values
*/
extern int logsys_facility_id_get (
const char *name);
extern const char *logsys_facility_name_get (
unsigned int facility);
extern int logsys_priority_id_get (
const char *name);
extern const char *logsys_priority_name_get (
unsigned int priority);
extern int logsys_thread_priority_set (
int policy,
const struct sched_param *param,
unsigned int after_log_ops_yield);
/*
* External definitions
*/
extern void *logsys_rec_end;
#define LOGSYS_REC_END (&logsys_rec_end)
#define LOGSYS_DECLARE_SYSTEM(name,mode,debug,file,file_priority, \
syslog_facility,syslog_priority,format,rec_size) \
__attribute__ ((constructor)) \
static void logsys_system_init (void) \
{ \
if (_logsys_system_setup (name,mode,debug,file,file_priority, \
syslog_facility,syslog_priority) < 0) { \
fprintf (stderr, \
"Unable to setup logging system: %s.\n", name); \
exit (-1); \
} \
\
if (logsys_format_set (format) < 0) { \
fprintf (stderr, \
"Unable to setup logging format.\n"); \
exit (-1); \
} \
\
if (_logsys_rec_init (rec_size) < 0) { \
fprintf (stderr, \
"Unable to initialize log flight recorder.\n"); \
exit (-1); \
} \
\
if (_logsys_wthread_create() < 0) { \
fprintf (stderr, \
"Unable to initialize logging thread.\n"); \
exit (-1); \
} \
}
#define LOGSYS_DECLARE_SUBSYS(subsys) \
__attribute__ ((constructor)) \
static void logsys_subsys_init (void) \
{ \
logsys_subsys_id = \
_logsys_subsys_create ((subsys)); \
if (logsys_subsys_id == -1) { \
fprintf (stderr, \
"Unable to create logging subsystem: %s.\n", subsys); \
exit (-1); \
} \
}
#define log_rec(rec_ident, args...) \
do { \
- _logsys_log_rec (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, rec_ident, 0, ##args, \
+ _logsys_log_rec (rec_ident, __FUNCTION__, \
+ __FILE__, __LINE__, ##args, \
LOGSYS_REC_END); \
} while(0)
#define log_printf(level, format, args...) \
do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, level, LOGSYS_RECID_LOG, \
+ _logsys_log_printf ( \
+ LOGSYS_ENCODE_RECID(level, \
+ logsys_subsys_id, \
+ LOGSYS_RECID_LOG), \
+ __FUNCTION__, __FILE__, __LINE__, \
format, ##args); \
} while(0)
#define ENTER() do { \
- _logsys_log_rec (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_ENTER, LOGSYS_REC_END); \
+ _logsys_log_rec ( \
+ LOGSYS_ENCODE_RECID(LOGSYS_LEVEL_DEBUG, \
+ logsys_subsys_id, \
+ LOGSYS_RECID_ENTER), \
+ __FUNCTION__, __FILE__, __LINE__, LOGSYS_REC_END); \
} while(0)
#define LEAVE() do { \
- _logsys_log_rec (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_LEAVE, LOGSYS_REC_END); \
+ _logsys_log_rec ( \
+ LOGSYS_ENCODE_RECID(LOGSYS_LEVEL_DEBUG, \
+ logsys_subsys_id, \
+ LOGSYS_RECID_LEAVE), \
+ __FUNCTION__, __FILE__, __LINE__, LOGSYS_REC_END); \
+} while(0)
+
+#define TRACE(recid, format, args...) do { \
+ _logsys_log_printf ( \
+ LOGSYS_ENCODE_RECID(LOGSYS_LEVEL_DEBUG, \
+ logsys_subsys_id, \
+ recid), \
+ __FUNCTION__, __FILE__, __LINE__, \
+ format, ##args); \
} while(0)
#define TRACE1(format, args...) do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_TRACE1, format, ##args); \
+ TRACE(LOGSYS_RECID_TRACE1, format, ##args); \
} while(0)
#define TRACE2(format, args...) do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_TRACE2, format, ##args); \
+ TRACE(LOGSYS_RECID_TRACE2, format, ##args); \
} while(0)
#define TRACE3(format, args...) do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_TRACE3, format, ##args); \
+ TRACE(LOGSYS_RECID_TRACE3, format, ##args); \
} while(0)
#define TRACE4(format, args...) do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_TRACE4, format, ##args); \
+ TRACE(LOGSYS_RECID_TRACE4, format, ##args); \
} while(0)
#define TRACE5(format, args...) do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_TRACE5, format, ##args); \
+ TRACE(LOGSYS_RECID_TRACE5, format, ##args); \
} while(0)
#define TRACE6(format, args...) do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_TRACE6, format, ##args); \
+ TRACE(LOGSYS_RECID_TRACE6, format, ##args); \
} while(0)
#define TRACE7(format, args...) do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_TRACE7, format, ##args); \
+ TRACE(LOGSYS_RECID_TRACE7, format, ##args); \
} while(0)
#define TRACE8(format, args...) do { \
- _logsys_log_printf (logsys_subsys_id, __FUNCTION__, \
- __FILE__, __LINE__, LOGSYS_LEVEL_DEBUG, \
- LOGSYS_RECID_TRACE8, format, ##args); \
+ TRACE(LOGSYS_RECID_TRACE8, format, ##args); \
} while(0)
#endif /* LOGSYS_UTILS_ONLY */
#ifdef __cplusplus
}
#endif
#endif /* LOGSYS_H_DEFINED */
diff --git a/include/corosync/totem/totem.h b/include/corosync/totem/totem.h
index 5808d595..6c113d17 100644
--- a/include/corosync/totem/totem.h
+++ b/include/corosync/totem/totem.h
@@ -1,187 +1,185 @@
/*
* Copyright (c) 2005 MontaVista Software, Inc.
* Copyright (c) 2006-2009 Red Hat, Inc.
*
* Author: Steven Dake (sdake@redhat.com)
*
* All rights reserved.
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TOTEM_H_DEFINED
#define TOTEM_H_DEFINED
#include "totemip.h"
#ifndef MESSAGE_SIZE_MAX
#define MESSAGE_SIZE_MAX 1024*1024 /* (1MB) */
#endif /* MESSAGE_SIZE_MAX */
#ifndef MESSAGE_QUEUE_MAX
#define MESSAGE_QUEUE_MAX MESSAGE_SIZE_MAX / totem_config->net_mtu
#endif /* MESSAGE_QUEUE_MAX */
#define PROCESSOR_COUNT_MAX 384
#define FRAME_SIZE_MAX 9000
#define TRANSMITS_ALLOWED 16
#define SEND_THREADS_MAX 16
#define INTERFACE_MAX 2
struct totem_interface {
struct totem_ip_address bindnet;
struct totem_ip_address boundto;
struct totem_ip_address mcast_addr;
uint16_t ip_port;
};
struct totem_logging_configuration {
- void (*log_printf) (
- int subsys,
- const char *function_name,
- const char *file_name,
- int file_line,
- unsigned int level,
- unsigned int tag,
- const char *format,
- ...) __attribute__((format(printf, 7, 8)));
+ void (*log_printf) (
+ unsigned int rec_ident,
+ const char *function_name,
+ const char *file_name,
+ int file_line,
+ const char *format,
+ ...) __attribute__((format(printf, 5, 6)));
int log_level_security;
int log_level_error;
int log_level_warning;
int log_level_notice;
int log_level_debug;
int log_subsys_id;
};
enum { TOTEM_PRIVATE_KEY_LEN = 128 };
enum { TOTEM_RRP_MODE_BYTES = 64 };
struct totem_config {
int version;
/*
* network
*/
struct totem_interface *interfaces;
unsigned int interface_count;
unsigned int node_id;
unsigned int clear_node_high_bit;
/*
* key information
*/
unsigned char private_key[TOTEM_PRIVATE_KEY_LEN];
unsigned int private_key_len;
/*
* Totem configuration parameters
*/
unsigned int token_timeout;
unsigned int token_retransmit_timeout;
unsigned int token_hold_timeout;
unsigned int token_retransmits_before_loss_const;
unsigned int join_timeout;
unsigned int send_join_timeout;
unsigned int consensus_timeout;
unsigned int merge_timeout;
unsigned int downcheck_timeout;
unsigned int fail_to_recv_const;
unsigned int seqno_unchanged_const;
unsigned int rrp_token_expired_timeout;
unsigned int rrp_problem_count_timeout;
unsigned int rrp_problem_count_threshold;
char rrp_mode[TOTEM_RRP_MODE_BYTES];
struct totem_logging_configuration totem_logging_configuration;
void (*log_rec) (
int subsysid,
const char *function_name,
const char *file_name,
int file_line,
unsigned int rec_ident,
...);
unsigned int secauth;
unsigned int net_mtu;
unsigned int threads;
unsigned int heartbeat_failures_allowed;
unsigned int max_network_delay;
unsigned int window_size;
unsigned int max_messages;
const char *vsf_type;
unsigned int broadcast_use;
enum { TOTEM_CRYPTO_SOBER=0, TOTEM_CRYPTO_NSS } crypto_type;
enum { TOTEM_CRYPTO_ACCEPT_OLD=0, TOTEM_CRYPTO_ACCEPT_NEW } crypto_accept;
int crypto_crypt_type;
int crypto_sign_type;
};
#define TOTEM_CONFIGURATION_TYPE
enum totem_configuration_type {
TOTEM_CONFIGURATION_REGULAR,
TOTEM_CONFIGURATION_TRANSITIONAL
};
#define TOTEM_CALLBACK_TOKEN_TYPE
enum totem_callback_token_type {
TOTEM_CALLBACK_TOKEN_RECEIVED = 1,
TOTEM_CALLBACK_TOKEN_SENT = 2
};
#define MEMB_RING_ID
struct memb_ring_id {
struct totem_ip_address rep;
unsigned long long seq;
} __attribute__((packed));
#endif /* TOTEM_H_DEFINED */
diff --git a/test/logsysrec.c b/test/logsysrec.c
index 56457ba2..5d2163ff 100644
--- a/test/logsysrec.c
+++ b/test/logsysrec.c
@@ -1,66 +1,70 @@
/*
* Copyright (c) 2008 Red Hat, Inc.
*
* All rights reserved.
*
* Author: Steven Dake (sdake@redhat.com)
*
* This software licensed under BSD license, the text of which follows:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the MontaVista Software, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <config.h>
#include <stdio.h>
#include <stdint.h>
#include <corosync/engine/logsys.h>
LOGSYS_DECLARE_SYSTEM ("logtest_rec",
LOGSYS_MODE_OUTPUT_STDERR | LOGSYS_MODE_THREADED,
0,
NULL,
LOG_INFO,
LOG_DAEMON,
LOG_INFO,
NULL,
100000);
#define LOGREC_ID_CHECKPOINT_CREATE 2
#define LOGREC_ARGS_CHECKPOINT_CREATE 2
int main(int argc, char **argv)
{
int i;
for (i = 0; i < 10; i++) {
- log_printf (LOGSYS_LEVEL_NOTICE, "This is a test of %s\n", "stringparse");
+ log_printf (LOGSYS_LEVEL_NOTICE,
+ "This is a test of %s(%d)\n", "stringparse", i);
- log_rec (LOGREC_ID_CHECKPOINT_CREATE, "record1", 8, "record22", 9, "record333", 10, "record444", 11, LOGSYS_REC_END);
+ log_rec (LOGSYS_ENCODE_RECID(LOGSYS_LEVEL_NOTICE,
+ logsys_subsys_id,
+ LOGREC_ID_CHECKPOINT_CREATE),
+ "record1", 8, "record22", 9, "record333", 10, "record444", 11, LOGSYS_REC_END);
}
logsys_log_rec_store ("fdata");
return 0;
}
diff --git a/tools/corosync-fplay.c b/tools/corosync-fplay.c
index e75d239e..4a2dc0fc 100644
--- a/tools/corosync-fplay.c
+++ b/tools/corosync-fplay.c
@@ -1,509 +1,510 @@
#include <config.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <corosync/engine/logsys.h>
unsigned int flt_data_size = 1000000;
unsigned int *flt_data;
#define FDHEAD_INDEX (flt_data_size)
#define FDTAIL_INDEX (flt_data_size + 1)
#define TOTEMIP_ADDRLEN (sizeof(struct in6_addr))
struct totem_ip_address {
unsigned int nodeid;
unsigned short family;
unsigned char addr[TOTEMIP_ADDRLEN];
} __attribute__((packed));
struct memb_ring_id {
struct totem_ip_address rep;
unsigned long long seq;
} __attribute__((packed));
static const char *totemip_print(const struct totem_ip_address *addr)
{
static char buf[INET6_ADDRSTRLEN];
return inet_ntop(addr->family, addr->addr, buf, sizeof(buf));
}
/*
 * Copy len bytes of str (which need not be NUL-terminated) into a
 * static buffer and return it as a NUL-terminated C string.
 *
 * len is clamped to the buffer capacity minus one: the length comes
 * from flight-recorder records on disk, so an oversized or corrupt
 * value would otherwise overflow the buffer (and len == sizeof(buf)
 * would leave it unterminated).
 *
 * Returns a pointer to a static buffer — not thread safe, and each
 * call overwrites the previous result.
 */
static char *print_string_len (const unsigned char *str, unsigned int len)
{
	unsigned int i;
	static char buf[1024];

	if (len > sizeof (buf) - 1) {
		len = sizeof (buf) - 1;
	}

	memset (buf, 0, sizeof (buf));
	for (i = 0; i < len; i++) {
		buf[i] = str[i];
	}
	return (buf);
}
/* Flight-recorder printer: "my_should_sync set" record. */
static void sync_printer_confchg_set_sync (const void **record)
{
	const unsigned int *should_sync_val = record[0];

	printf ("Setting my_should_sync to %d\n", *should_sync_val);
}
/* Flight-recorder printer: "my_sync_state set" record. */
static void sync_printer_set_sync_state (const void **record)
{
	const unsigned int *sync_state_val = record[0];

	printf ("Setting my_sync_state to %d\n", *sync_state_val);
}
/* Print a SYNC "retrieve current state" record; record[0] is the state. */
static void sync_printer_process_currentstate (const void **record)
{
	const unsigned int *state = record[0];

	printf ("Retrieving my_sync_state %d\n", *state);
}
/* Print a SYNC "get my_should_sync" record; record[0] is the value read. */
static void sync_printer_process_get_shouldsync (const void **record)
{
	const unsigned int *should_sync = record[0];

	printf ("Getting my_should_sync %d\n", *should_sync);
}
/* Print a checkpoint-release record: name, name length, checkpoint id and
 * the node the release came from. */
static void sync_printer_checkpoint_release (const void **record)
{
	const unsigned char *ckpt_name = record[0];
	const uint16_t *len = record[1];
	const unsigned int *id = record[2];
	const unsigned int *releasing_node = record[3];

	printf ("Checkpoint release name=[%s] id=[%d] from=[%d] len=[%d]\n",
		print_string_len (ckpt_name, *len),
		*id,
		*releasing_node,
		*len);
}
/* Print a checkpoint-transmit record: transmitting node, name and id. */
static void sync_printer_checkpoint_transmit (const void **record)
{
	const unsigned char *ckpt_name = record[0];
	const uint16_t *len = record[1];
	const unsigned int *id = record[2];
	const unsigned int *xmit = record[3];

	printf ("xmit_id=[%d] Checkpoint transmit name=[%s] id=[%d]\n",
		*xmit, print_string_len (ckpt_name, *len), *id);
}
/* Print a section-transmit record.  Two printf calls are needed because
 * print_string_len() returns a single static buffer. */
static void sync_printer_section_transmit (const void **record)
{
	const unsigned char *checkpoint = record[0];
	const uint16_t *checkpoint_len = record[1];
	const unsigned int *id = record[2];
	const unsigned int *xmit = record[3];
	const unsigned char *section = record[4];
	const uint16_t *section_len = record[5];

	printf ("xmit_id=[%d] Section transmit checkpoint name=[%s] id=[%d] ",
		*xmit, print_string_len (checkpoint, *checkpoint_len), *id);
	printf ("section=[%s]\n",
		print_string_len (section, *section_len));
}
/* Print a checkpoint-receive record: transmitting node, name and id. */
static void sync_printer_checkpoint_receive (const void **record)
{
	const unsigned char *checkpoint = record[0];
	const uint16_t *checkpoint_len = record[1];
	const unsigned int *id = record[2];
	const unsigned int *xmit = record[3];

	printf ("xmit_id=[%d] Checkpoint receive checkpoint name=[%s] id=[%d]\n",
		*xmit, print_string_len (checkpoint, *checkpoint_len), *id);
}
/* Print a section-receive record.  Note: unlike the transmit variant, the
 * section-name length word here is a full unsigned int in the record. */
static void sync_printer_section_receive (const void **record)
{
	const unsigned char *checkpoint = record[0];
	const uint16_t *checkpoint_len = record[1];
	const unsigned int *id = record[2];
	const unsigned int *xmit = record[3];
	const unsigned char *section = record[4];
	const unsigned int *section_len = record[5];

	printf ("xmit_id=[%d] Section receive checkpoint name=[%s] id=[%d] ",
		*xmit, print_string_len (checkpoint, *checkpoint_len), *id);
	printf ("section=[%s]\n",
		print_string_len (section, *section_len));
}
/* Print a SYNC configuration-change record: the ring id followed by the
 * list of member node addresses (stored as raw IPv4 addresses). */
static void sync_printer_confchg_fn (const void **record)
{
	const unsigned int *node_ids = record[0];
	const unsigned int *node_count = record[1];
	const struct memb_ring_id *ring_id = record[2];
	struct in_addr member_addr;
	unsigned int idx;

	printf ("sync confchg fn ringid [ip=%s seq=%lld]\n",
		totemip_print (&ring_id->rep),
		ring_id->seq);
	printf ("members [%d]:\n", *node_count);
	for (idx = 0; idx < *node_count; idx++) {
		member_addr.s_addr = node_ids[idx];
		printf ("\tmember [%s]\n", inet_ntoa (member_addr));
	}
}
/* Print a TOTEM srp-mcast record; record[0] is the message id. */
static void printer_totemsrp_mcast (const void **record)
{
	const unsigned int *message_id = record[0];

	printf ("totemsrp_mcast %d\n", *message_id);
}
/* Print a TOTEM srp-deliver record; record[0] is the message id. */
static void printer_totemsrp_delv (const void **record)
{
	const unsigned int *message_id = record[0];

	printf ("totemsrp_delv %d\n", *message_id);
}
/* Print a TOTEM pg-mcast fragmentation record (seven fields). */
static void printer_totempg_mcast_fits (const void **record)
{
	const unsigned int *index_val = record[0];
	const unsigned int *iov_len = record[1];
	const unsigned int *copy_len = record[2];
	const unsigned int *frag_size = record[3];
	const unsigned int *max_pkt_size = record[4];
	const unsigned int *copy_base = record[5];
	const unsigned char *next_frag = record[6];

	printf ("totempg_mcast index=[%d] iov_len=[%d] copy_len=[%d] fragment_size=[%d] max_packet_size=[%d] copy_base=[%d] next_fragment[%d]\n",
		*index_val, *iov_len, *copy_len, *frag_size,
		*max_pkt_size, *copy_base, *next_frag);
}
/* Print a SYNC service-process record: the current ring id and the ring id
 * being synchronized to.  Two printf calls because totemip_print() returns
 * a single static buffer. */
static void sync_printer_service_process (const void **record)
{
	const struct memb_ring_id *current_ring = record[0];
	const struct memb_ring_id *target_ring = record[1];

	printf ("sync service process callback ringid [ip=%s seq=%lld] ",
		totemip_print (&current_ring->rep),
		current_ring->seq);
	printf ("sync ringid [ip=%s seq=%lld]\n",
		totemip_print (&target_ring->rep),
		target_ring->seq);
}
struct printer_subsys_record_print {
int ident;
void (*print_fn)(const void **record);
int record_length;
};
struct printer_subsys {
const char *subsys;
struct printer_subsys_record_print *record_printers;
int record_printers_count;
};
#define LOGREC_ID_SYNC_CONFCHG_FN 0
#define LOGREC_ID_SYNC_SERVICE_PROCESS 1
/*
* CKPT subsystem
*/
#define LOGREC_ID_CONFCHG_SETSYNC 0
#define LOGREC_ID_SETSYNCSTATE 1
#define LOGREC_ID_SYNC_PROCESS_CURRENTSTATE 2
#define LOGREC_ID_SYNC_PROCESS_GETSHOULDSYNC 3
#define LOGREC_ID_SYNC_CHECKPOINT_TRANSMIT 4
#define LOGREC_ID_SYNC_SECTION_TRANSMIT 5
#define LOGREC_ID_SYNC_CHECKPOINT_RECEIVE 6
#define LOGREC_ID_SYNC_SECTION_RECEIVE 7
#define LOGREC_ID_SYNC_CHECKPOINT_RELEASE 8
#define LOGREC_ID_TOTEMSRP_MCAST 0
#define LOGREC_ID_TOTEMSRP_DELV 1
#define LOGREC_ID_TOTEMPG_MCAST_FITS 2
/* Printer table for records logged by the SYNC service.
 * NOTE(review): record_length is 28 for every entry and is never read
 * anywhere in this tool — confirm before relying on it. */
static struct printer_subsys_record_print record_print_sync[] = {
	{
		.ident = LOGREC_ID_SYNC_CONFCHG_FN,
		.print_fn = sync_printer_confchg_fn,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SYNC_SERVICE_PROCESS,
		.print_fn = sync_printer_service_process,
		.record_length = 28
	}
};

/* Printer table for records logged by the CKPT (checkpoint) service. */
static struct printer_subsys_record_print record_print_ckpt[] = {
	{
		.ident = LOGREC_ID_CONFCHG_SETSYNC,
		.print_fn = sync_printer_confchg_set_sync,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SETSYNCSTATE,
		.print_fn = sync_printer_set_sync_state,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SYNC_PROCESS_CURRENTSTATE,
		.print_fn = sync_printer_process_currentstate,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SYNC_PROCESS_GETSHOULDSYNC,
		.print_fn = sync_printer_process_get_shouldsync,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SYNC_CHECKPOINT_TRANSMIT,
		.print_fn = sync_printer_checkpoint_transmit,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SYNC_SECTION_TRANSMIT,
		.print_fn = sync_printer_section_transmit,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SYNC_CHECKPOINT_RECEIVE,
		.print_fn = sync_printer_checkpoint_receive,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SYNC_SECTION_RECEIVE,
		.print_fn = sync_printer_section_receive,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_SYNC_CHECKPOINT_RELEASE,
		.print_fn = sync_printer_checkpoint_release,
		.record_length = 28
	}
};

/* Printer table for records logged by the TOTEM protocol layers. */
static struct printer_subsys_record_print record_print_totem[] = {
	{
		.ident = LOGREC_ID_TOTEMSRP_MCAST,
		.print_fn = printer_totemsrp_mcast,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_TOTEMSRP_DELV,
		.print_fn = printer_totemsrp_delv,
		.record_length = 28
	},
	{
		.ident = LOGREC_ID_TOTEMPG_MCAST_FITS,
		.print_fn = printer_totempg_mcast_fits,
		.record_length = 28
	}
};

/* Master table: maps a subsystem tag to its printer table.  Searched
 * linearly by logsys_rec_print(). */
static struct printer_subsys printer_subsystems[] = {
	{
		.subsys = "SYNC",
		.record_printers = record_print_sync,
		.record_printers_count = sizeof (record_print_sync) / sizeof (struct printer_subsys_record_print)
	},
	{
		.subsys = "CKPT",
		.record_printers = record_print_ckpt,
		.record_printers_count = sizeof (record_print_ckpt) / sizeof (struct printer_subsys_record_print)
	},
	{
		.subsys = "TOTEM",
		.record_printers = record_print_totem,
		.record_printers_count = sizeof (record_print_totem) / sizeof (struct printer_subsys_record_print)
	}
};

/* Number of entries in printer_subsystems. */
static unsigned int printer_subsys_count =
	sizeof (printer_subsystems) / sizeof (struct printer_subsys);
/* Scratch buffer the current record is assembled into, in 32-bit words. */
static unsigned int g_record[10000];

/*
 * Copy the record starting at word index rec_idx out of the flt_data ring
 * into g_record, re-joining it if it wraps past the end of the ring.
 * The first word of a record is its total length in words.
 * Returns the ring index of the next record.
 */
static int logsys_rec_get (int rec_idx) {
	unsigned int rec_size = flt_data[rec_idx];
	int head_words = rec_size;
	int wrap_words = 0;

	if (head_words + rec_idx > flt_data_size) {
		/* Record wraps: take what fits up to the end of the ring,
		 * then the remainder from the start. */
		head_words = flt_data_size - rec_idx;
		wrap_words = rec_size - head_words;
	}
	memcpy (&g_record[0], &flt_data[rec_idx], head_words << 2);
	if (wrap_words) {
		memcpy (&g_record[head_words], &flt_data[0], wrap_words << 2);
	}
	return ((rec_idx + rec_size) % flt_data_size);
}
/*
 * Decode one record (already copied into g_record by logsys_rec_get) and
 * print a human-readable line for it.  First tries the per-subsystem
 * printer tables; otherwise falls back on the generic LOGSYS_RECID_* types.
 * NOTE(review): this span still carries unified-diff '-'/'+' markers from
 * the patch it was extracted from; the '+' lines are the post-patch code,
 * which packs the level into rec_ident instead of a separate word.
 */
static void logsys_rec_print (const void *record)
{
const unsigned int *buf_uint32t = record;
unsigned int rec_size;
unsigned int rec_ident;
unsigned int level;
unsigned int line;
unsigned int arg_size_idx;
unsigned int i;
unsigned int j;
unsigned int rec_idx = 0;
unsigned int record_number;
unsigned int words_processed;
unsigned int found;
const char *arguments[64];
int arg_count = 0;
/* Post-patch header layout: [size][rec_ident][line][record number]. */
rec_size = buf_uint32t[rec_idx];
- level = buf_uint32t[rec_idx+1];
- rec_ident = buf_uint32t[rec_idx+2];
- line = buf_uint32t[rec_idx+3];
- record_number = buf_uint32t[rec_idx+4];
+ rec_ident = buf_uint32t[rec_idx+1];
+ line = buf_uint32t[rec_idx+2];
+ record_number = buf_uint32t[rec_idx+3];
+
+ level = LOGSYS_DECODE_LEVEL(rec_ident);
printf ("rec=[%d] ", record_number);
- arg_size_idx = rec_idx + 5;
- words_processed = 5;
+ arg_size_idx = rec_idx + 4;
+ words_processed = 4;
/* Each argument is one length word followed by that many data words. */
for (i = 0; words_processed < rec_size; i++) {
arguments[arg_count++] =
(const char *)&buf_uint32t[arg_size_idx + 1];
words_processed += buf_uint32t[arg_size_idx] + 1;
arg_size_idx += buf_uint32t[arg_size_idx] + 1;
}
/* NOTE(review): 'found' is written here but never read; 'level' is also
 * unused after being decoded. */
found = 0;
/* arguments[0] is the subsystem tag; arguments[3] onward is the payload
 * handed to the subsystem-specific printer. */
for (i = 0; i < printer_subsys_count; i++) {
if (strcmp (arguments[0], printer_subsystems[i].subsys) == 0) {
for (j = 0; j < printer_subsystems[i].record_printers_count; j++) {
if (rec_ident == printer_subsystems[i].record_printers[j].ident) {
printer_subsystems[i].record_printers[j].print_fn ((const void **)&arguments[3]);
return;
}
}
}
}
/* No subsystem printer matched: print by generic record id. */
- switch(rec_ident) {
+ switch(LOGSYS_DECODE_RECID(rec_ident)) {
case LOGSYS_RECID_LOG:
printf ("Log Message=%s\n", arguments[3]);
break;
case LOGSYS_RECID_ENTER:
printf ("ENTERING function [%s] line [%d]\n", arguments[2], line);
break;
case LOGSYS_RECID_LEAVE:
printf ("LEAVING function [%s] line [%d]\n", arguments[2], line);
break;
case LOGSYS_RECID_TRACE1:
printf ("Tracing(1) Messsage=%s\n", arguments[3]);
break;
case LOGSYS_RECID_TRACE2:
printf ("Tracing(2) Messsage=%s\n", arguments[3]);
break;
case LOGSYS_RECID_TRACE3:
printf ("Tracing(3) Messsage=%s\n", arguments[3]);
break;
case LOGSYS_RECID_TRACE4:
printf ("Tracing(4) Messsage=%s\n", arguments[3]);
break;
case LOGSYS_RECID_TRACE5:
printf ("Tracing(5) Messsage=%s\n", arguments[3]);
break;
case LOGSYS_RECID_TRACE6:
printf ("Tracing(6) Messsage=%s\n", arguments[3]);
break;
case LOGSYS_RECID_TRACE7:
printf ("Tracing(7) Messsage=%s\n", arguments[3]);
break;
case LOGSYS_RECID_TRACE8:
printf ("Tracing(8) Messsage=%s\n", arguments[3]);
break;
default:
printf ("Unknown record type found subsys=[%s] ident=[%d]\n",
- arguments[0], rec_ident);
+ arguments[0], LOGSYS_DECODE_RECID(rec_ident));
break;
}
#ifdef COMPILE_OUT
printf ("\n");
#endif
}
/**
 * corosync-fplay: replay the flight-data file.
 *
 * Loads the ring buffer from LOCALSTATEDIR/lib/corosync/fdata, then walks
 * the records from tail (oldest) to head (newest), printing each one.
 *
 * Returns 0 on success, EXIT_FAILURE on open/alloc/read error.
 */
int main (void)
{
	/* BUG FIX: fd was declared 'unsigned int', so the (fd < 0) check on
	 * open() failure below could never fire and a failed open was then
	 * passed to read(). */
	int fd;
	int rec_idx;
	int end_rec;
	int record_count = 1;
	ssize_t n_read;
	const char *data_file = LOCALSTATEDIR "/lib/corosync/fdata";
	size_t n_required = (flt_data_size + 2) * sizeof (unsigned int);

	if ((fd = open (data_file, O_RDONLY)) < 0) {
		fprintf (stderr, "failed to open %s: %s\n",
			data_file, strerror (errno));
		return EXIT_FAILURE;
	}
	if ((flt_data = malloc (n_required)) == NULL) {
		fprintf (stderr, "exhausted virtual memory\n");
		close (fd);
		return EXIT_FAILURE;
	}
	n_read = read (fd, flt_data, n_required);
	close (fd);
	if (n_read < 0) {
		fprintf (stderr, "reading %s failed: %s\n",
			data_file, strerror (errno));
		return EXIT_FAILURE;
	}
	/* n_read >= 0 here, so the cast avoids a signed/unsigned compare. */
	if ((size_t) n_read != n_required) {
		printf ("Warning: read %lu bytes, but expected %lu\n",
			(unsigned long) n_read, (unsigned long) n_required);
	}

	/* Tail indexes the oldest record, head one past the newest. */
	rec_idx = flt_data[FDTAIL_INDEX];
	end_rec = flt_data[FDHEAD_INDEX];

	printf ("Starting replay: head [%d] tail [%d]\n",
		flt_data[FDHEAD_INDEX],
		flt_data[FDTAIL_INDEX]);

	for (;;) {
		rec_idx = logsys_rec_get (rec_idx);
		logsys_rec_print (g_record);
		if (rec_idx == end_rec) {
			break;
		}
		record_count += 1;
	}
	printf ("Finishing replay: records found [%d]\n", record_count);
	free (flt_data);
	return (0);
}

File Metadata

Mime Type
text/x-diff
Expires
Mon, Feb 24, 2:48 PM (6 h, 39 s ago)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
1464271
Default Alt Text
(356 KB)

Event Timeline