diff --git a/exec/totempg.c b/exec/totempg.c index e2ce17f0..f723decf 100644 --- a/exec/totempg.c +++ b/exec/totempg.c @@ -1,1338 +1,1338 @@ /* * Copyright (c) 2003-2005 MontaVista Software, Inc. * Copyright (c) 2005 OSDL. * Copyright (c) 2006-2009 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * Author: Mark Haverkamp (markh@osdl.org) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * FRAGMENTATION AND PACKING ALGORITHM: * * Assemble the entire message into one buffer * if full fragment * store fragment into lengths list * for each full fragment * multicast fragment * set length and fragment fields of pg mesage * store remaining multicast into head of fragmentation data and set lens field * * If a message exceeds the maximum packet size allowed by the totem * single ring protocol, the protocol could lose forward progress. * Statically calculating the allowed data amount doesn't work because * the amount of data allowed depends on the number of fragments in * each message. In this implementation, the maximum fragment size * is dynamically calculated for each fragment added to the message. * It is possible for a message to be two bytes short of the maximum * packet size. This occurs when a message or collection of * messages + the mcast header + the lens are two bytes short of the * end of the packet. Since another len field consumes two bytes, the * len field would consume the rest of the packet without room for data. * * One optimization would be to forgo the final len field and determine * it from the size of the udp datagram. Then this condition would no * longer occur. */ /* * ASSEMBLY AND UNPACKING ALGORITHM: * * copy incoming packet into assembly data buffer indexed by current * location of end of fragment * * if not fragmented * deliver all messages in assembly data buffer * else * if msg_count > 1 and fragmented * deliver all messages except last message in assembly data buffer * copy last fragmented section to start of assembly data buffer * else * if msg_count = 1 and fragmented * do nothing * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "totemmrp.h" #include "totemsrp.h" #define min(a,b) ((a) < (b)) ? a : b struct totempg_mcast_header { short version; short type; }; /* * totempg_mcast structure * * header: Identify the mcast. 
* fragmented: Set if this message continues into next message * continuation: Set if this message is a continuation from last message * msg_count Indicates how many packed messages are contained * in the mcast. * Also, the size of each packed message and the messages themselves are * appended to the end of this structure when sent. */ struct totempg_mcast { struct totempg_mcast_header header; unsigned char fragmented; unsigned char continuation; unsigned short msg_count; /* * short msg_len[msg_count]; */ /* * data for messages */ }; /* * Maximum packet size for totem pg messages */ #define TOTEMPG_PACKET_SIZE (totempg_totem_config->net_mtu - \ sizeof (struct totempg_mcast)) /* * Local variables used for packing small messages */ static unsigned short mcast_packed_msg_lens[FRAME_SIZE_MAX]; static int mcast_packed_msg_count = 0; static int totempg_reserved = 0; /* * Function and data used to log messages */ static int totempg_log_level_security; static int totempg_log_level_error; static int totempg_log_level_warning; static int totempg_log_level_notice; static int totempg_log_level_debug; static int totempg_subsys_id; static void (*totempg_log_printf) (int subsys_id, const char *function, const char *file, int line, unsigned int level, const char *format, ...) __attribute__((format(printf, 6, 7))); struct totem_config *totempg_totem_config; struct assembly { unsigned int nodeid; unsigned char data[MESSAGE_SIZE_MAX]; int index; unsigned char last_frag_num; struct list_head list; }; static void assembly_deref (struct assembly *assembly); static int callback_token_received_fn (enum totem_callback_token_type type, const void *data); enum throw_away_mode_t { THROW_AWAY_INACTIVE, THROW_AWAY_ACTIVE }; static enum throw_away_mode_t throw_away_mode = THROW_AWAY_INACTIVE; DECLARE_LIST_INIT(assembly_list_inuse); DECLARE_LIST_INIT(assembly_list_free); /* * Staging buffer for packed messages. Messages are staged in this buffer * before sending. 
Multiple messages may fit which cuts down on the * number of mcasts sent. If a message doesn't completely fit, then * the mcast header has a fragment bit set that says that there are more * data to follow. fragment_size is an index into the buffer. It indicates * the size of message data and where to place new message data. * fragment_contuation indicates whether the first packed message in * the buffer is a continuation of a previously packed fragment. */ static unsigned char *fragmentation_data; static int fragment_size = 0; static int fragment_continuation = 0; static struct iovec iov_delv; static unsigned int totempg_max_handle = 0; struct totempg_group_instance { void (*deliver_fn) ( unsigned int nodeid, struct iovec *iovec, unsigned int iov_len, int endian_conversion_required); void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id); struct totempg_group *groups; int groups_cnt; }; static struct hdb_handle_database totempg_groups_instance_database = { .handle_count = 0, .handles = 0, .iterator = 0, .mutex = PTHREAD_MUTEX_INITIALIZER }; static unsigned char next_fragment = 1; static pthread_mutex_t totempg_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t callback_token_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_mutex_t mcast_msg_mutex = PTHREAD_MUTEX_INITIALIZER; #define log_printf(level, format, args...) 
\ do { \ totempg_log_printf (totempg_subsys_id, __FUNCTION__, \ __FILE__, __LINE__, level, format, ##args); \ } while (0); static int msg_count_send_ok (int msg_count); static int byte_count_send_ok (int byte_count); static struct assembly *assembly_ref (unsigned int nodeid) { struct assembly *assembly; struct list_head *list; /* * Search inuse list for node id and return assembly buffer if found */ for (list = assembly_list_inuse.next; list != &assembly_list_inuse; list = list->next) { assembly = list_entry (list, struct assembly, list); if (nodeid == assembly->nodeid) { return (assembly); } } /* * Nothing found in inuse list get one from free list if available */ if (list_empty (&assembly_list_free) == 0) { assembly = list_entry (assembly_list_free.next, struct assembly, list); list_del (&assembly->list); list_add (&assembly->list, &assembly_list_inuse); assembly->nodeid = nodeid; return (assembly); } /* * Nothing available in inuse or free list, so allocate a new one */ assembly = malloc (sizeof (struct assembly)); memset (assembly, 0, sizeof (struct assembly)); /* * TODO handle memory allocation failure here */ assert (assembly); assembly->nodeid = nodeid; list_init (&assembly->list); list_add (&assembly->list, &assembly_list_inuse); return (assembly); } static void assembly_deref (struct assembly *assembly) { list_del (&assembly->list); list_add (&assembly->list, &assembly_list_free); } static inline void app_confchg_fn ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id) { int i; struct totempg_group_instance *instance; unsigned int res; for (i = 0; i <= totempg_max_handle; i++) { res = hdb_handle_get (&totempg_groups_instance_database, hdb_nocheck_convert (i), (void *)&instance); if (res == 0) { if (instance->confchg_fn) { instance->confchg_fn ( 
configuration_type, member_list, member_list_entries, left_list, left_list_entries, joined_list, joined_list_entries, ring_id); } hdb_handle_put (&totempg_groups_instance_database, hdb_nocheck_convert (i)); } } } static inline void group_endian_convert ( struct iovec *iovec) { unsigned short *group_len; int i; struct iovec iovec_aligned = { NULL, 0 }; struct iovec *iovec_swab; /* * Align data structure for sparc and ia64 */ if ((size_t)iovec->iov_base % 4 != 0) { iovec_aligned.iov_base = alloca(iovec->iov_len); memcpy(iovec_aligned.iov_base, iovec->iov_base, iovec->iov_len); iovec_aligned.iov_len = iovec->iov_len; iovec_swab = &iovec_aligned; } else { iovec_swab = iovec; } group_len = (unsigned short *)iovec_swab->iov_base; group_len[0] = swab16(group_len[0]); for (i = 1; i < group_len[0] + 1; i++) { group_len[i] = swab16(group_len[i]); } if (iovec_swab == &iovec_aligned) { memcpy(iovec->iov_base, iovec_aligned.iov_base, iovec->iov_len); } } static inline int group_matches ( struct iovec *iovec, unsigned int iov_len, struct totempg_group *groups_b, unsigned int group_b_cnt, unsigned int *adjust_iovec) { unsigned short *group_len; char *group_name; int i; int j; struct iovec iovec_aligned = { NULL, 0 }; assert (iov_len == 1); /* * Align data structure for sparc and ia64 */ if ((size_t)iovec->iov_base % 4 != 0) { iovec_aligned.iov_base = alloca(iovec->iov_len); memcpy(iovec_aligned.iov_base, iovec->iov_base, iovec->iov_len); iovec_aligned.iov_len = iovec->iov_len; iovec = &iovec_aligned; } group_len = (unsigned short *)iovec->iov_base; group_name = ((char *)iovec->iov_base) + sizeof (unsigned short) * (group_len[0] + 1); /* * Calculate amount to adjust the iovec by before delivering to app */ *adjust_iovec = sizeof (unsigned short) * (group_len[0] + 1); for (i = 1; i < group_len[0] + 1; i++) { *adjust_iovec += group_len[i]; } /* * Determine if this message should be delivered to this instance */ for (i = 1; i < group_len[0] + 1; i++) { for (j = 0; j < group_b_cnt; 
j++) { if ((group_len[i] == groups_b[j].group_len) && (memcmp (groups_b[j].group, group_name, group_len[i]) == 0)) { return (1); } } group_name += group_len[i]; } return (0); } static inline void app_deliver_fn ( unsigned int nodeid, struct iovec *iovec, unsigned int iov_len, int endian_conversion_required) { int i; struct totempg_group_instance *instance; struct iovec stripped_iovec; unsigned int adjust_iovec; unsigned int res; struct iovec aligned_iovec = { NULL, 0 }; if (endian_conversion_required) { group_endian_convert (iovec); } /* * Align data structure for sparc and ia64 */ aligned_iovec.iov_base = alloca(iovec->iov_len); aligned_iovec.iov_len = iovec->iov_len; memcpy(aligned_iovec.iov_base, iovec->iov_base, iovec->iov_len); iovec = &aligned_iovec; for (i = 0; i <= totempg_max_handle; i++) { res = hdb_handle_get (&totempg_groups_instance_database, hdb_nocheck_convert (i), (void *)&instance); if (res == 0) { assert (iov_len == 1); if (group_matches (iovec, iov_len, instance->groups, instance->groups_cnt, &adjust_iovec)) { stripped_iovec.iov_len = iovec->iov_len - adjust_iovec; // stripped_iovec.iov_base = (char *)iovec->iov_base + adjust_iovec; /* * Align data structure for sparc and ia64 */ if ((char *)iovec->iov_base + adjust_iovec % 4 != 0) { /* * Deal with misalignment */ stripped_iovec.iov_base = alloca (stripped_iovec.iov_len); memcpy (stripped_iovec.iov_base, (char *)iovec->iov_base + adjust_iovec, stripped_iovec.iov_len); } instance->deliver_fn ( nodeid, &stripped_iovec, iov_len, endian_conversion_required); } hdb_handle_put (&totempg_groups_instance_database, hdb_nocheck_convert(i)); } } } static void totempg_confchg_fn ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id) { // TODO optimize this app_confchg_fn (configuration_type, 
member_list, member_list_entries, left_list, left_list_entries, joined_list, joined_list_entries, ring_id); } static void totempg_deliver_fn ( unsigned int nodeid, struct iovec *iovec, unsigned int iov_len, int endian_conversion_required) { struct totempg_mcast *mcast; unsigned short *msg_lens; int i; struct assembly *assembly; char header[FRAME_SIZE_MAX]; int h_index; int a_i = 0; int msg_count; int continuation; int start; assembly = assembly_ref (nodeid); assert (assembly); /* * Assemble the header into one block of data and * assemble the packet contents into one block of data to simplify delivery */ if (iov_len == 1) { /* * This message originated from external processor * because there is only one iovec for the full msg. */ char *data; int datasize; mcast = (struct totempg_mcast *)iovec[0].iov_base; if (endian_conversion_required) { mcast->msg_count = swab16 (mcast->msg_count); } msg_count = mcast->msg_count; datasize = sizeof (struct totempg_mcast) + msg_count * sizeof (unsigned short); memcpy (header, iovec[0].iov_base, datasize); assert(iovec); data = iovec[0].iov_base; msg_lens = (unsigned short *) (header + sizeof (struct totempg_mcast)); if (endian_conversion_required) { for (i = 0; i < mcast->msg_count; i++) { msg_lens[i] = swab16 (msg_lens[i]); } } memcpy (&assembly->data[assembly->index], &data[datasize], iovec[0].iov_len - datasize); } else { /* * The message originated from local processor * becasue there is greater than one iovec for then full msg. 
*/ h_index = 0; for (i = 0; i < 2; i++) { memcpy (&header[h_index], iovec[i].iov_base, iovec[i].iov_len); h_index += iovec[i].iov_len; } mcast = (struct totempg_mcast *)header; // TODO make sure we are using a copy of mcast not the actual data itself msg_lens = (unsigned short *) (header + sizeof (struct totempg_mcast)); for (i = 2; i < iov_len; i++) { a_i = assembly->index; assert (iovec[i].iov_len + a_i <= MESSAGE_SIZE_MAX); memcpy (&assembly->data[a_i], iovec[i].iov_base, iovec[i].iov_len); a_i += msg_lens[i - 2]; } iov_len -= 2; } /* * If the last message in the buffer is a fragment, then we * can't deliver it. We'll first deliver the full messages * then adjust the assembly buffer so we can add the rest of the * fragment when it arrives. */ msg_count = mcast->fragmented ? mcast->msg_count - 1 : mcast->msg_count; continuation = mcast->continuation; iov_delv.iov_base = &assembly->data[0]; iov_delv.iov_len = assembly->index + msg_lens[0]; /* * Make sure that if this message is a continuation, that it * matches the sequence number of the previous fragment. * Also, if the first packed message is a continuation * of a previous message, but the assembly buffer * is empty, then we need to discard it since we can't * assemble a complete message. Likewise, if this message isn't a * continuation and the assembly buffer is empty, we have to discard * the continued message. 
*/ start = 0; if (throw_away_mode == THROW_AWAY_ACTIVE) { /* Throw away the first msg block */ if (mcast->fragmented == 0 || mcast->fragmented == 1) { throw_away_mode = THROW_AWAY_INACTIVE; assembly->index += msg_lens[0]; iov_delv.iov_base = &assembly->data[assembly->index]; iov_delv.iov_len = msg_lens[1]; start = 1; } } else if (throw_away_mode == THROW_AWAY_INACTIVE) { if (continuation == assembly->last_frag_num) { assembly->last_frag_num = mcast->fragmented; for (i = start; i < msg_count; i++) { app_deliver_fn(nodeid, &iov_delv, 1, endian_conversion_required); assembly->index += msg_lens[i]; iov_delv.iov_base = &assembly->data[assembly->index]; if (i < (msg_count - 1)) { iov_delv.iov_len = msg_lens[i + 1]; } } } else { throw_away_mode = THROW_AWAY_ACTIVE; } } if (mcast->fragmented == 0) { /* * End of messages, dereference assembly struct */ assembly->last_frag_num = 0; assembly->index = 0; assembly_deref (assembly); } else { /* * Message is fragmented, keep around assembly list */ if (mcast->msg_count > 1) { memmove (&assembly->data[0], &assembly->data[assembly->index], msg_lens[msg_count]); assembly->index = 0; } assembly->index += msg_lens[msg_count]; } } /* * Totem Process Group Abstraction * depends on poll abstraction, POSIX, IPV4 */ void *callback_token_received_handle; int callback_token_received_fn (enum totem_callback_token_type type, const void *data) { struct totempg_mcast mcast; struct iovec iovecs[3]; int res; pthread_mutex_lock (&mcast_msg_mutex); if (mcast_packed_msg_count == 0) { pthread_mutex_unlock (&mcast_msg_mutex); return (0); } if (totemmrp_avail() == 0) { pthread_mutex_unlock (&mcast_msg_mutex); return (0); } mcast.fragmented = 0; /* * Was the first message in this buffer a continuation of a * fragmented message? 
*/ mcast.continuation = fragment_continuation; fragment_continuation = 0; mcast.msg_count = mcast_packed_msg_count; iovecs[0].iov_base = &mcast; iovecs[0].iov_len = sizeof (struct totempg_mcast); iovecs[1].iov_base = mcast_packed_msg_lens; iovecs[1].iov_len = mcast_packed_msg_count * sizeof (unsigned short); iovecs[2].iov_base = &fragmentation_data[0]; iovecs[2].iov_len = fragment_size; res = totemmrp_mcast (iovecs, 3, 0); mcast_packed_msg_count = 0; fragment_size = 0; pthread_mutex_unlock (&mcast_msg_mutex); return (0); } /* * Initialize the totem process group abstraction */ int totempg_initialize ( hdb_handle_t poll_handle, struct totem_config *totem_config) { int res; totempg_totem_config = totem_config; totempg_log_level_security = totem_config->totem_logging_configuration.log_level_security; totempg_log_level_error = totem_config->totem_logging_configuration.log_level_error; totempg_log_level_warning = totem_config->totem_logging_configuration.log_level_warning; totempg_log_level_notice = totem_config->totem_logging_configuration.log_level_notice; totempg_log_level_debug = totem_config->totem_logging_configuration.log_level_debug; totempg_log_printf = totem_config->totem_logging_configuration.log_printf; totempg_subsys_id = totem_config->totem_logging_configuration.log_subsys_id; fragmentation_data = malloc (TOTEMPG_PACKET_SIZE); if (fragmentation_data == 0) { return (-1); } res = totemmrp_initialize ( poll_handle, totem_config, totempg_deliver_fn, totempg_confchg_fn); totemmrp_callback_token_create ( &callback_token_received_handle, TOTEM_CALLBACK_TOKEN_RECEIVED, 0, callback_token_received_fn, 0); totemsrp_net_mtu_adjust (totem_config); return (res); } void totempg_finalize (void) { pthread_mutex_lock (&totempg_mutex); totemmrp_finalize (); pthread_mutex_unlock (&totempg_mutex); } /* * Multicast a message */ static int mcast_msg ( struct iovec *iovec_in, unsigned int iov_len, int guarantee) { int res = 0; struct totempg_mcast mcast; struct iovec iovecs[3]; 
struct iovec iovec[64]; int i; int dest, src; int max_packet_size = 0; int copy_len = 0; int copy_base = 0; int total_size = 0; pthread_mutex_lock (&mcast_msg_mutex); totemmrp_new_msg_signal (); /* * Remove zero length iovectors from the list */ assert (iov_len < 64); for (dest = 0, src = 0; src < iov_len; src++) { if (iovec_in[src].iov_len) { memcpy (&iovec[dest++], &iovec_in[src], sizeof (struct iovec)); } } iov_len = dest; max_packet_size = TOTEMPG_PACKET_SIZE - (sizeof (unsigned short) * (mcast_packed_msg_count + 1)); mcast_packed_msg_lens[mcast_packed_msg_count] = 0; /* * Check if we would overwrite new message queue */ for (i = 0; i < iov_len; i++) { total_size += iovec[i].iov_len; } if (byte_count_send_ok (total_size + sizeof(unsigned short) * (mcast_packed_msg_count+1)) == 0) { pthread_mutex_unlock (&mcast_msg_mutex); return(-1); } for (i = 0; i < iov_len; ) { mcast.fragmented = 0; mcast.continuation = fragment_continuation; copy_len = iovec[i].iov_len - copy_base; /* * If it all fits with room left over, copy it in. * We need to leave at least sizeof(short) + 1 bytes in the * fragment_buffer on exit so that max_packet_size + fragment_size * doesn't exceed the size of the fragment_buffer on the next call. */ if ((copy_len + fragment_size) < (max_packet_size - sizeof (unsigned short))) { memcpy (&fragmentation_data[fragment_size], (char *)iovec[i].iov_base + copy_base, copy_len); fragment_size += copy_len; mcast_packed_msg_lens[mcast_packed_msg_count] += copy_len; next_fragment = 1; copy_len = 0; copy_base = 0; i++; continue; /* * If it just fits or is too big, then send out what fits. 
*/ } else { unsigned char *data_ptr; copy_len = min(copy_len, max_packet_size - fragment_size); if( copy_len == max_packet_size ) data_ptr = (unsigned char *)iovec[i].iov_base + copy_base; else { data_ptr = fragmentation_data; memcpy (&fragmentation_data[fragment_size], (unsigned char *)iovec[i].iov_base + copy_base, copy_len); } memcpy (&fragmentation_data[fragment_size], (unsigned char *)iovec[i].iov_base + copy_base, copy_len); mcast_packed_msg_lens[mcast_packed_msg_count] += copy_len; /* * if we're not on the last iovec or the iovec is too large to * fit, then indicate a fragment. This also means that the next * message will have the continuation of this one. */ if ((i < (iov_len - 1)) || ((copy_base + copy_len) < iovec[i].iov_len)) { if (!next_fragment) { next_fragment++; } fragment_continuation = next_fragment; mcast.fragmented = next_fragment++; assert(fragment_continuation != 0); assert(mcast.fragmented != 0); } else { fragment_continuation = 0; } /* * assemble the message and send it */ mcast.msg_count = ++mcast_packed_msg_count; iovecs[0].iov_base = &mcast; iovecs[0].iov_len = sizeof(struct totempg_mcast); iovecs[1].iov_base = mcast_packed_msg_lens; iovecs[1].iov_len = mcast_packed_msg_count * sizeof(unsigned short); iovecs[2].iov_base = data_ptr; iovecs[2].iov_len = max_packet_size; assert (totemmrp_avail() > 0); res = totemmrp_mcast (iovecs, 3, guarantee); /* * Recalculate counts and indexes for the next. */ mcast_packed_msg_lens[0] = 0; mcast_packed_msg_count = 0; fragment_size = 0; max_packet_size = TOTEMPG_PACKET_SIZE - (sizeof(unsigned short)); /* * If the iovec all fit, go to the next iovec */ if ((copy_base + copy_len) == iovec[i].iov_len) { copy_len = 0; copy_base = 0; i++; /* * Continue with the rest of the current iovec. */ } else { copy_base += copy_len; } } } /* * Bump only if we added message data. This may be zero if * the last buffer just fit into the fragmentation_data buffer * and we were at the last iovec. 
*/ if (mcast_packed_msg_lens[mcast_packed_msg_count]) { mcast_packed_msg_count++; } pthread_mutex_unlock (&mcast_msg_mutex); return (res); } /* * Determine if a message of msg_size could be queued */ static int msg_count_send_ok ( int msg_count) { int avail = 0; avail = totemmrp_avail () - totempg_reserved - 1; return (avail > msg_count); } static int byte_count_send_ok ( int byte_count) { unsigned int msg_count = 0; int avail = 0; avail = totemmrp_avail () - 1; msg_count = (byte_count / (totempg_totem_config->net_mtu - 25)) + 1; return (avail > msg_count); } static int send_reserve ( int msg_size) { unsigned int msg_count = 0; msg_count = (msg_size / (totempg_totem_config->net_mtu - 25)) + 1; totempg_reserved += msg_count; return (msg_count); } static void send_release ( int msg_count) { totempg_reserved -= msg_count; } int totempg_callback_token_create ( void **handle_out, enum totem_callback_token_type type, int delete, int (*callback_fn) (enum totem_callback_token_type type, const void *), const void *data) { unsigned int res; pthread_mutex_lock (&callback_token_mutex); res = totemmrp_callback_token_create (handle_out, type, delete, callback_fn, data); pthread_mutex_unlock (&callback_token_mutex); return (res); } void totempg_callback_token_destroy ( void *handle_out) { pthread_mutex_lock (&callback_token_mutex); totemmrp_callback_token_destroy (handle_out); pthread_mutex_unlock (&callback_token_mutex); } /* * vi: set autoindent tabstop=4 shiftwidth=4 : */ int totempg_groups_initialize ( hdb_handle_t *handle, void (*deliver_fn) ( unsigned int nodeid, struct iovec *iovec, unsigned int iov_len, int endian_conversion_required), void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)) { struct totempg_group_instance *instance; unsigned int 
res; pthread_mutex_lock (&totempg_mutex); res = hdb_handle_create (&totempg_groups_instance_database, sizeof (struct totempg_group_instance), handle); if (res != 0) { goto error_exit; } if (*handle > totempg_max_handle) { totempg_max_handle = *handle; } res = hdb_handle_get (&totempg_groups_instance_database, *handle, (void *)&instance); if (res != 0) { goto error_destroy; } instance->deliver_fn = deliver_fn; instance->confchg_fn = confchg_fn; instance->groups = 0; instance->groups_cnt = 0; hdb_handle_put (&totempg_groups_instance_database, *handle); pthread_mutex_unlock (&totempg_mutex); return (0); error_destroy: hdb_handle_destroy (&totempg_groups_instance_database, *handle); error_exit: pthread_mutex_unlock (&totempg_mutex); return (-1); } int totempg_groups_join ( hdb_handle_t handle, const struct totempg_group *groups, size_t group_cnt) { struct totempg_group_instance *instance; struct totempg_group *new_groups; unsigned int res; pthread_mutex_lock (&totempg_mutex); res = hdb_handle_get (&totempg_groups_instance_database, handle, (void *)&instance); if (res != 0) { goto error_exit; } new_groups = realloc (instance->groups, sizeof (struct totempg_group) * (instance->groups_cnt + group_cnt)); if (new_groups == 0) { res = ENOMEM; goto error_exit; } memcpy (&new_groups[instance->groups_cnt], groups, group_cnt * sizeof (struct totempg_group)); instance->groups = new_groups; instance->groups_cnt = instance->groups_cnt = group_cnt; hdb_handle_put (&totempg_groups_instance_database, handle); error_exit: pthread_mutex_unlock (&totempg_mutex); return (res); } int totempg_groups_leave ( hdb_handle_t handle, const struct totempg_group *groups, size_t group_cnt) { struct totempg_group_instance *instance; unsigned int res; pthread_mutex_lock (&totempg_mutex); res = hdb_handle_get (&totempg_groups_instance_database, handle, (void *)&instance); if (res != 0) { goto error_exit; } hdb_handle_put (&totempg_groups_instance_database, handle); error_exit: pthread_mutex_unlock 
(&totempg_mutex); return (res); } #define MAX_IOVECS_FROM_APP 32 #define MAX_GROUPS_PER_MSG 32 int totempg_groups_mcast_joined ( hdb_handle_t handle, const struct iovec *iovec, unsigned int iov_len, int guarantee) { struct totempg_group_instance *instance; unsigned short group_len[MAX_GROUPS_PER_MSG + 1]; struct iovec iovec_mcast[MAX_GROUPS_PER_MSG + 1 + MAX_IOVECS_FROM_APP]; int i; unsigned int res; pthread_mutex_lock (&totempg_mutex); res = hdb_handle_get (&totempg_groups_instance_database, handle, (void *)&instance); if (res != 0) { goto error_exit; } /* * Build group_len structure and the iovec_mcast structure */ group_len[0] = instance->groups_cnt; for (i = 0; i < instance->groups_cnt; i++) { group_len[i + 1] = instance->groups[i].group_len; iovec_mcast[i + 1].iov_len = instance->groups[i].group_len; iovec_mcast[i + 1].iov_base = (void *) instance->groups[i].group; } iovec_mcast[0].iov_len = (instance->groups_cnt + 1) * sizeof (unsigned short); iovec_mcast[0].iov_base = group_len; for (i = 0; i < iov_len; i++) { iovec_mcast[i + instance->groups_cnt + 1].iov_len = iovec[i].iov_len; iovec_mcast[i + instance->groups_cnt + 1].iov_base = iovec[i].iov_base; } res = mcast_msg (iovec_mcast, iov_len + instance->groups_cnt + 1, guarantee); hdb_handle_put (&totempg_groups_instance_database, handle); error_exit: pthread_mutex_unlock (&totempg_mutex); return (res); } int totempg_groups_joined_reserve ( hdb_handle_t handle, const struct iovec *iovec, unsigned int iov_len) { struct totempg_group_instance *instance; unsigned int size = 0; unsigned int i; unsigned int res; unsigned int reserved = 0; pthread_mutex_lock (&totempg_mutex); pthread_mutex_lock (&mcast_msg_mutex); res = hdb_handle_get (&totempg_groups_instance_database, handle, (void *)&instance); if (res != 0) { goto error_exit; } for (i = 0; i < instance->groups_cnt; i++) { size += instance->groups[i].group_len; } for (i = 0; i < iov_len; i++) { size += iovec[i].iov_len; } reserved = send_reserve (size); if 
(msg_count_send_ok (reserved) == 0) { send_release (reserved); reserved = 0; } hdb_handle_put (&totempg_groups_instance_database, handle); error_exit: pthread_mutex_unlock (&mcast_msg_mutex); pthread_mutex_unlock (&totempg_mutex); return (reserved); } int totempg_groups_joined_release (int msg_count) { pthread_mutex_lock (&totempg_mutex); pthread_mutex_lock (&mcast_msg_mutex); send_release (msg_count); pthread_mutex_unlock (&mcast_msg_mutex); pthread_mutex_unlock (&totempg_mutex); return 0; } int totempg_groups_mcast_groups ( hdb_handle_t handle, int guarantee, const struct totempg_group *groups, size_t groups_cnt, const struct iovec *iovec, - size_t iov_len) + unsigned int iov_len) { struct totempg_group_instance *instance; unsigned short group_len[MAX_GROUPS_PER_MSG + 1]; struct iovec iovec_mcast[MAX_GROUPS_PER_MSG + 1 + MAX_IOVECS_FROM_APP]; int i; unsigned int res; pthread_mutex_lock (&totempg_mutex); res = hdb_handle_get (&totempg_groups_instance_database, handle, (void *)&instance); if (res != 0) { goto error_exit; } /* * Build group_len structure and the iovec_mcast structure */ group_len[0] = groups_cnt; for (i = 0; i < groups_cnt; i++) { group_len[i + 1] = groups[i].group_len; iovec_mcast[i + 1].iov_len = groups[i].group_len; iovec_mcast[i + 1].iov_base = (void *) groups[i].group; } iovec_mcast[0].iov_len = (groups_cnt + 1) * sizeof (unsigned short); iovec_mcast[0].iov_base = group_len; for (i = 0; i < iov_len; i++) { iovec_mcast[i + groups_cnt + 1].iov_len = iovec[i].iov_len; iovec_mcast[i + groups_cnt + 1].iov_base = iovec[i].iov_base; } res = mcast_msg (iovec_mcast, iov_len + groups_cnt + 1, guarantee); hdb_handle_put (&totempg_groups_instance_database, handle); error_exit: pthread_mutex_unlock (&totempg_mutex); return (res); } /* * Returns -1 if error, 0 if can't send, 1 if can send the message */ int totempg_groups_send_ok_groups ( hdb_handle_t handle, const struct totempg_group *groups, size_t groups_cnt, const struct iovec *iovec, - size_t iov_len) 
+ unsigned int iov_len) { struct totempg_group_instance *instance; unsigned int size = 0; unsigned int i; unsigned int res; pthread_mutex_lock (&totempg_mutex); res = hdb_handle_get (&totempg_groups_instance_database, handle, (void *)&instance); if (res != 0) { goto error_exit; } for (i = 0; i < groups_cnt; i++) { size += groups[i].group_len; } for (i = 0; i < iov_len; i++) { size += iovec[i].iov_len; } res = msg_count_send_ok (size); hdb_handle_put (&totempg_groups_instance_database, handle); error_exit: pthread_mutex_unlock (&totempg_mutex); return (res); } int totempg_ifaces_get ( unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count) { int res; res = totemmrp_ifaces_get ( nodeid, interfaces, status, iface_count); return (res); } int totempg_ring_reenable (void) { int res; res = totemmrp_ring_reenable (); return (res); } const char *totempg_ifaces_print (unsigned int nodeid) { static char iface_string[256 * INTERFACE_MAX]; char one_iface[64]; struct totem_ip_address interfaces[INTERFACE_MAX]; char **status; unsigned int iface_count; unsigned int i; int res; iface_string[0] = '\0'; res = totempg_ifaces_get (nodeid, interfaces, &status, &iface_count); if (res == -1) { return ("no interface found for nodeid"); } for (i = 0; i < iface_count; i++) { sprintf (one_iface, "r(%d) ip(%s) ", i, totemip_print (&interfaces[i])); strcat (iface_string, one_iface); } return (iface_string); } unsigned int totempg_my_nodeid_get (void) { return (totemmrp_my_nodeid_get()); } int totempg_my_family_get (void) { return (totemmrp_my_family_get()); } diff --git a/include/corosync/evs.h b/include/corosync/evs.h index f17903a3..68705ca8 100644 --- a/include/corosync/evs.h +++ b/include/corosync/evs.h @@ -1,169 +1,169 @@ /* * Copyright (c) 2004 MontaVista Software, Inc. * * All rights reserved. 
* * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef COROSYNC_EVS_H_DEFINED #define COROSYNC_EVS_H_DEFINED #include #include #include /** * @defgroup corosync Other API services provided by corosync */ /** * @addtogroup evs_corosync * * @{ */ typedef uint64_t evs_handle_t; typedef enum { EVS_TYPE_UNORDERED, /* not implemented */ EVS_TYPE_FIFO, /* same as agreed */ EVS_TYPE_AGREED, EVS_TYPE_SAFE /* not implemented */ } evs_guarantee_t; #define TOTEMIP_ADDRLEN (sizeof(struct in6_addr)) /** These are the things that get passed around */ struct evs_address { unsigned int nodeid; unsigned short family; unsigned char addr[TOTEMIP_ADDRLEN]; }; struct evs_group { char key[32]; }; typedef void (*evs_deliver_fn_t) ( unsigned int nodeid, const void *msg, size_t msg_len); typedef void (*evs_confchg_fn_t) ( unsigned int *member_list, size_t member_list_entries, unsigned int *left_list, size_t left_list_entries, unsigned int *joined_list, size_t joined_list_entries); typedef struct { evs_deliver_fn_t evs_deliver_fn; evs_confchg_fn_t evs_confchg_fn; } evs_callbacks_t; /** @} */ /* * Create a new evs connection */ cs_error_t evs_initialize ( evs_handle_t *handle, evs_callbacks_t *callbacks); /* * Close the evs handle */ cs_error_t evs_finalize ( evs_handle_t handle); /* * Get a file descriptor on which to poll. evs_handle_t is NOT a * file descriptor and may not be used directly. */ cs_error_t evs_fd_get ( evs_handle_t handle, int *fd); /* * Dispatch messages and configuration changes */ cs_error_t evs_dispatch ( evs_handle_t handle, cs_dispatch_flags_t dispatch_types); /* * Join one or more groups. * messages multicasted with evs_mcast_joined will be sent to every * group that has been joined on handle handle. 
Any message multicasted * to a group that has been previously joined will be delivered in evs_dispatch */ cs_error_t evs_join ( evs_handle_t handle, const struct evs_group *groups, size_t group_cnt); /* * Leave one or more groups */ cs_error_t evs_leave ( evs_handle_t handle, const struct evs_group *groups, size_t group_cnt); /* * Multicast to groups joined with evs_join. * The iovec described by iovec will be multicasted to all groups joined with * the evs_join interface for handle. */ cs_error_t evs_mcast_joined ( evs_handle_t handle, evs_guarantee_t guarantee, const struct iovec *iovec, - size_t iov_len); + unsigned int iov_len); /* * Multicast to specified groups. * Messages will be multicast to groups specified in the api call and not those * that have been joined (unless they are in the groups parameter). */ cs_error_t evs_mcast_groups ( evs_handle_t handle, evs_guarantee_t guarantee, const struct evs_group *groups, size_t group_cnt, const struct iovec *iovec, - size_t iov_len); + unsigned int iov_len); /* * Get membership information from evs */ cs_error_t evs_membership_get ( evs_handle_t handle, unsigned int *local_nodeid, unsigned int *member_list, size_t *member_list_entries); #endif /* COROSYNC_EVS_H_DEFINED */ diff --git a/include/corosync/totem/totempg.h b/include/corosync/totem/totempg.h index b0f489c0..20e1189a 100644 --- a/include/corosync/totem/totempg.h +++ b/include/corosync/totem/totempg.h @@ -1,150 +1,150 @@ /* * Copyright (c) 2003-2005 MontaVista Software, Inc. * Copyright (c) 2006-2007, 2009 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 
* - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef TOTEMPG_H_DEFINED #define TOTEMPG_H_DEFINED #include #include "totem.h" #include "coropoll.h" #include struct totempg_group { const void *group; int group_len; }; #define TOTEMPG_AGREED 0 #define TOTEMPG_SAFE 1 /* * Totem Single Ring Protocol * depends on poll abstraction, POSIX, IPV4 */ /* * Initialize the totem process groups abstraction */ extern int totempg_initialize ( hdb_handle_t poll_handle, struct totem_config *totem_config ); extern void totempg_finalize (void); extern int totempg_callback_token_create (void **handle_out, enum totem_callback_token_type type, int delete, int (*callback_fn) (enum totem_callback_token_type type, const void *), const void *data); extern void totempg_callback_token_destroy (void *handle); /* * Initialize a groups instance */ extern int totempg_groups_initialize ( hdb_handle_t *handle, void (*deliver_fn) ( unsigned int nodeid, struct iovec *iovec, unsigned int iov_len, int endian_conversion_required), void (*confchg_fn) ( enum totem_configuration_type configuration_type, const unsigned int *member_list, size_t member_list_entries, const unsigned int *left_list, size_t left_list_entries, const unsigned int *joined_list, size_t joined_list_entries, const struct memb_ring_id *ring_id)); extern int totempg_groups_finalize ( hdb_handle_t handle); extern int totempg_groups_join ( hdb_handle_t handle, const struct totempg_group *groups, size_t group_cnt); extern int totempg_groups_leave ( hdb_handle_t handle, const struct totempg_group *groups, size_t group_cnt); extern int totempg_groups_mcast_joined ( hdb_handle_t handle, const struct iovec *iovec, unsigned int iov_len, int guarantee); extern int totempg_groups_joined_reserve ( hdb_handle_t handle, const struct iovec *iovec, unsigned int iov_len); extern int totempg_groups_joined_release ( int msg_count); extern int totempg_groups_mcast_groups ( hdb_handle_t handle, int guarantee, const struct totempg_group *groups, size_t groups_cnt, const struct iovec *iovec, - size_t 
iov_len); + unsigned int iov_len); extern int totempg_groups_send_ok_groups ( hdb_handle_t handle, const struct totempg_group *groups, size_t groups_cnt, const struct iovec *iovec, - size_t iov_len); + unsigned int iov_len); extern int totempg_ifaces_get ( unsigned int nodeid, struct totem_ip_address *interfaces, char ***status, unsigned int *iface_count); extern const char *totempg_ifaces_print (unsigned int nodeid); extern unsigned int totempg_my_nodeid_get (void); extern int totempg_my_family_get (void); extern int totempg_ring_reenable (void); #endif /* TOTEMPG_H_DEFINED */ diff --git a/lib/evs.c b/lib/evs.c index 5053b7a6..6f968358 100644 --- a/lib/evs.c +++ b/lib/evs.c @@ -1,581 +1,581 @@ /* * vi: set autoindent tabstop=4 shiftwidth=4 : * Copyright (c) 2004-2005 MontaVista Software, Inc. * Copyright (c) 2006-2007, 2009 Red Hat, Inc. * * All rights reserved. * * Author: Steven Dake (sdake@redhat.com) * * This software licensed under BSD license, the text of which follows: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the MontaVista Software, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * Provides an extended virtual synchrony API using the corosync executive */ #include #include #include #include #include #include #include #include #include #include #include #include #include #undef MIN #define MIN(x,y) ((x) < (y) ? (x) : (y)) struct evs_inst { void *ipc_ctx; int finalize; evs_callbacks_t callbacks; pthread_mutex_t response_mutex; pthread_mutex_t dispatch_mutex; }; struct res_overlay { mar_res_header_t header __attribute__((aligned(8))); char data[512000]; }; static void evs_instance_destructor (void *instance); static struct saHandleDatabase evs_handle_t_db = { .handleCount = 0, .handles = 0, .mutex = PTHREAD_MUTEX_INITIALIZER, .handleInstanceDestructor = evs_instance_destructor }; /* * Clean up function for an evt instance (saEvtInitialize) handle */ static void evs_instance_destructor (void *instance) { struct evs_inst *evs_inst = instance; pthread_mutex_destroy (&evs_inst->response_mutex); pthread_mutex_destroy (&evs_inst->dispatch_mutex); } /** * @defgroup evs_coroipcc The extended virtual synchrony passthrough API * @ingroup coroipcc * * @{ */ /** * test * @param handle The handle of evs initialize * @param callbacks The callbacks for evs_initialize * @returns EVS_OK */ evs_error_t evs_initialize ( evs_handle_t *handle, evs_callbacks_t *callbacks) { cs_error_t error; struct evs_inst *evs_inst; error = saHandleCreate (&evs_handle_t_db, sizeof (struct evs_inst), handle); if (error != CS_OK) { goto error_no_destroy; } 
error = saHandleInstanceGet (&evs_handle_t_db, *handle, (void *)&evs_inst); if (error != CS_OK) { goto error_destroy; } error = coroipcc_service_connect (IPC_SOCKET_NAME, EVS_SERVICE, &evs_inst->ipc_ctx); if (error != EVS_OK) { goto error_put_destroy; } memcpy (&evs_inst->callbacks, callbacks, sizeof (evs_callbacks_t)); pthread_mutex_init (&evs_inst->response_mutex, NULL); pthread_mutex_init (&evs_inst->dispatch_mutex, NULL); saHandleInstancePut (&evs_handle_t_db, *handle); return (CS_OK); error_put_destroy: saHandleInstancePut (&evs_handle_t_db, *handle); error_destroy: saHandleDestroy (&evs_handle_t_db, *handle); error_no_destroy: return (error); } evs_error_t evs_finalize ( evs_handle_t handle) { struct evs_inst *evs_inst; cs_error_t error; error = saHandleInstanceGet (&evs_handle_t_db, handle, (void *)&evs_inst); if (error != CS_OK) { return (error); } pthread_mutex_lock (&evs_inst->response_mutex); /* * Another thread has already started finalizing */ if (evs_inst->finalize) { pthread_mutex_unlock (&evs_inst->response_mutex); saHandleInstancePut (&evs_handle_t_db, handle); return (EVS_ERR_BAD_HANDLE); } evs_inst->finalize = 1; coroipcc_service_disconnect (evs_inst->ipc_ctx); pthread_mutex_unlock (&evs_inst->response_mutex); saHandleDestroy (&evs_handle_t_db, handle); saHandleInstancePut (&evs_handle_t_db, handle); return (EVS_OK); } evs_error_t evs_fd_get ( evs_handle_t handle, int *fd) { cs_error_t error; struct evs_inst *evs_inst; error = saHandleInstanceGet (&evs_handle_t_db, handle, (void *)&evs_inst); if (error != CS_OK) { return (error); } *fd = coroipcc_fd_get (evs_inst->ipc_ctx); saHandleInstancePut (&evs_handle_t_db, handle); return (CS_OK); } evs_error_t evs_dispatch ( evs_handle_t handle, cs_dispatch_flags_t dispatch_types) { int timeout = -1; cs_error_t error; int cont = 1; /* always continue do loop except when set to 0 */ int dispatch_avail; struct evs_inst *evs_inst; struct res_evs_confchg_callback *res_evs_confchg_callback; struct 
res_evs_deliver_callback *res_evs_deliver_callback; evs_callbacks_t callbacks; struct res_overlay dispatch_data; int ignore_dispatch = 0; error = saHandleInstanceGet (&evs_handle_t_db, handle, (void *)&evs_inst); if (error != CS_OK) { return (error); } /* * Timeout instantly for SA_DISPATCH_ONE or SA_DISPATCH_ALL and * wait indefinately for SA_DISPATCH_BLOCKING */ if (dispatch_types == EVS_DISPATCH_ALL) { timeout = 0; } do { dispatch_avail = coroipcc_dispatch_recv (evs_inst->ipc_ctx, (void *)&dispatch_data, sizeof (dispatch_data), timeout); if (dispatch_avail == -1) { error = CS_ERR_LIBRARY; goto error_nounlock; } pthread_mutex_lock (&evs_inst->dispatch_mutex); /* * Handle has been finalized in another thread */ if (evs_inst->finalize == 1) { error = EVS_OK; pthread_mutex_unlock (&evs_inst->dispatch_mutex); goto error_unlock; } if (dispatch_avail == 0 && dispatch_types == EVS_DISPATCH_ALL) { pthread_mutex_unlock (&evs_inst->dispatch_mutex); break; /* exit do while cont is 1 loop */ } else if (dispatch_avail == 0) { pthread_mutex_unlock (&evs_inst->dispatch_mutex); continue; /* next dispatch event */ } /* * Make copy of callbacks, message data, unlock instance, and call callback * A risk of this dispatch method is that the callback routines may * operate at the same time that evsFinalize has been called. 
*/ memcpy (&callbacks, &evs_inst->callbacks, sizeof (evs_callbacks_t)); pthread_mutex_unlock (&evs_inst->dispatch_mutex); /* * Dispatch incoming message */ switch (dispatch_data.header.id) { case MESSAGE_RES_EVS_DELIVER_CALLBACK: res_evs_deliver_callback = (struct res_evs_deliver_callback *)&dispatch_data; callbacks.evs_deliver_fn ( res_evs_deliver_callback->local_nodeid, &res_evs_deliver_callback->msg, res_evs_deliver_callback->msglen); break; case MESSAGE_RES_EVS_CONFCHG_CALLBACK: res_evs_confchg_callback = (struct res_evs_confchg_callback *)&dispatch_data; callbacks.evs_confchg_fn ( res_evs_confchg_callback->member_list, res_evs_confchg_callback->member_list_entries, res_evs_confchg_callback->left_list, res_evs_confchg_callback->left_list_entries, res_evs_confchg_callback->joined_list, res_evs_confchg_callback->joined_list_entries); break; default: error = CS_ERR_LIBRARY; goto error_nounlock; break; } /* * Determine if more messages should be processed * */ switch (dispatch_types) { case EVS_DISPATCH_ONE: if (ignore_dispatch) { ignore_dispatch = 0; } else { cont = 0; } break; case EVS_DISPATCH_ALL: if (ignore_dispatch) { ignore_dispatch = 0; } break; case EVS_DISPATCH_BLOCKING: break; } } while (cont); error_unlock: saHandleInstancePut (&evs_handle_t_db, handle); error_nounlock: return (error); } evs_error_t evs_join ( evs_handle_t handle, const struct evs_group *groups, size_t group_entries) { evs_error_t error; struct evs_inst *evs_inst; struct iovec iov[2]; struct req_lib_evs_join req_lib_evs_join; struct res_lib_evs_join res_lib_evs_join; error = saHandleInstanceGet (&evs_handle_t_db, handle, (void *)&evs_inst); if (error != EVS_OK) { return (error); } req_lib_evs_join.header.size = sizeof (struct req_lib_evs_join) + (group_entries * sizeof (struct evs_group)); req_lib_evs_join.header.id = MESSAGE_REQ_EVS_JOIN; req_lib_evs_join.group_entries = group_entries; iov[0].iov_base = &req_lib_evs_join; iov[0].iov_len = sizeof (struct req_lib_evs_join); 
iov[1].iov_base = (void*) groups; /* cast away const */ iov[1].iov_len = (group_entries * sizeof (struct evs_group)); pthread_mutex_lock (&evs_inst->response_mutex); error = coroipcc_msg_send_reply_receive (evs_inst->ipc_ctx, iov, 2, &res_lib_evs_join, sizeof (struct res_lib_evs_join)); pthread_mutex_unlock (&evs_inst->response_mutex); if (error != CS_OK) { goto error_exit; } error = res_lib_evs_join.header.error; error_exit: saHandleInstancePut (&evs_handle_t_db, handle); return (error); } evs_error_t evs_leave ( evs_handle_t handle, const struct evs_group *groups, size_t group_entries) { evs_error_t error; struct evs_inst *evs_inst; struct iovec iov[2]; struct req_lib_evs_leave req_lib_evs_leave; struct res_lib_evs_leave res_lib_evs_leave; error = saHandleInstanceGet (&evs_handle_t_db, handle, (void *)&evs_inst); if (error != CS_OK) { return (error); } req_lib_evs_leave.header.size = sizeof (struct req_lib_evs_leave) + (group_entries * sizeof (struct evs_group)); req_lib_evs_leave.header.id = MESSAGE_REQ_EVS_LEAVE; req_lib_evs_leave.group_entries = group_entries; iov[0].iov_base = &req_lib_evs_leave; iov[0].iov_len = sizeof (struct req_lib_evs_leave); iov[1].iov_base = (void *) groups; /* cast away const */ iov[1].iov_len = (group_entries * sizeof (struct evs_group)); pthread_mutex_lock (&evs_inst->response_mutex); error = coroipcc_msg_send_reply_receive (evs_inst->ipc_ctx, iov, 2, &res_lib_evs_leave, sizeof (struct res_lib_evs_leave)); pthread_mutex_unlock (&evs_inst->response_mutex); if (error != CS_OK) { goto error_exit; } error = res_lib_evs_leave.header.error; error_exit: saHandleInstancePut (&evs_handle_t_db, handle); return (error); } evs_error_t evs_mcast_joined ( evs_handle_t handle, evs_guarantee_t guarantee, const struct iovec *iovec, - size_t iov_len) + unsigned int iov_len) { int i; evs_error_t error; struct evs_inst *evs_inst; struct iovec iov[64]; struct req_lib_evs_mcast_joined req_lib_evs_mcast_joined; struct res_lib_evs_mcast_joined 
res_lib_evs_mcast_joined; int msg_len = 0; error = saHandleInstanceGet (&evs_handle_t_db, handle, (void *)&evs_inst); if (error != CS_OK) { return (error); } for (i = 0; i < iov_len; i++ ) { msg_len += iovec[i].iov_len; } req_lib_evs_mcast_joined.header.size = sizeof (struct req_lib_evs_mcast_joined) + msg_len; req_lib_evs_mcast_joined.header.id = MESSAGE_REQ_EVS_MCAST_JOINED; req_lib_evs_mcast_joined.guarantee = guarantee; req_lib_evs_mcast_joined.msg_len = msg_len; iov[0].iov_base = &req_lib_evs_mcast_joined; iov[0].iov_len = sizeof (struct req_lib_evs_mcast_joined); memcpy (&iov[1], iovec, iov_len * sizeof (struct iovec)); pthread_mutex_lock (&evs_inst->response_mutex); error = coroipcc_msg_send_reply_receive (evs_inst->ipc_ctx, iov, iov_len + 1, &res_lib_evs_mcast_joined, sizeof (struct res_lib_evs_mcast_joined)); pthread_mutex_unlock (&evs_inst->response_mutex); if (error != CS_OK) { goto error_exit; } error = res_lib_evs_mcast_joined.header.error; error_exit: saHandleInstancePut (&evs_handle_t_db, handle); return (error); } evs_error_t evs_mcast_groups ( evs_handle_t handle, evs_guarantee_t guarantee, const struct evs_group *groups, size_t group_entries, const struct iovec *iovec, - size_t iov_len) + unsigned int iov_len) { int i; evs_error_t error; struct evs_inst *evs_inst; struct iovec iov[64]; /* FIXME: what if iov_len > 62 ? 
use malloc */ struct req_lib_evs_mcast_groups req_lib_evs_mcast_groups; struct res_lib_evs_mcast_groups res_lib_evs_mcast_groups; int msg_len = 0; error = saHandleInstanceGet (&evs_handle_t_db, handle, (void *)&evs_inst); if (error != CS_OK) { return (error); } for (i = 0; i < iov_len; i++) { msg_len += iovec[i].iov_len; } req_lib_evs_mcast_groups.header.size = sizeof (struct req_lib_evs_mcast_groups) + (group_entries * sizeof (struct evs_group)) + msg_len; req_lib_evs_mcast_groups.header.id = MESSAGE_REQ_EVS_MCAST_GROUPS; req_lib_evs_mcast_groups.guarantee = guarantee; req_lib_evs_mcast_groups.msg_len = msg_len; req_lib_evs_mcast_groups.group_entries = group_entries; iov[0].iov_base = &req_lib_evs_mcast_groups; iov[0].iov_len = sizeof (struct req_lib_evs_mcast_groups); iov[1].iov_base = (void *) groups; /* cast away const */ iov[1].iov_len = (group_entries * sizeof (struct evs_group)); memcpy (&iov[2], iovec, iov_len * sizeof (struct iovec)); pthread_mutex_lock (&evs_inst->response_mutex); error = coroipcc_msg_send_reply_receive (evs_inst->ipc_ctx, iov, iov_len + 2, &res_lib_evs_mcast_groups, sizeof (struct res_lib_evs_mcast_groups)); pthread_mutex_unlock (&evs_inst->response_mutex); if (error != CS_OK) { goto error_exit; } error = res_lib_evs_mcast_groups.header.error; error_exit: saHandleInstancePut (&evs_handle_t_db, handle); return (error); } evs_error_t evs_membership_get ( evs_handle_t handle, unsigned int *local_nodeid, unsigned int *member_list, size_t *member_list_entries) { evs_error_t error; struct evs_inst *evs_inst; struct iovec iov; struct req_lib_evs_membership_get req_lib_evs_membership_get; struct res_lib_evs_membership_get res_lib_evs_membership_get; error = saHandleInstanceGet (&evs_handle_t_db, handle, (void *)&evs_inst); if (error != CS_OK) { return (error); } req_lib_evs_membership_get.header.size = sizeof (struct req_lib_evs_membership_get); req_lib_evs_membership_get.header.id = MESSAGE_REQ_EVS_MEMBERSHIP_GET; iov.iov_base = 
&req_lib_evs_membership_get; iov.iov_len = sizeof (struct req_lib_evs_membership_get); pthread_mutex_lock (&evs_inst->response_mutex); error = coroipcc_msg_send_reply_receive (evs_inst->ipc_ctx, &iov, 1, &res_lib_evs_membership_get, sizeof (struct res_lib_evs_membership_get)); pthread_mutex_unlock (&evs_inst->response_mutex); if (error != CS_OK) { goto error_exit; } error = res_lib_evs_membership_get.header.error; /* * Copy results to caller */ if (local_nodeid) { *local_nodeid = res_lib_evs_membership_get.local_nodeid; } *member_list_entries = MIN (*member_list_entries, res_lib_evs_membership_get.member_list_entries); if (member_list) { memcpy (member_list, &res_lib_evs_membership_get.member_list, *member_list_entries * sizeof (struct in_addr)); } error_exit: saHandleInstancePut (&evs_handle_t_db, handle); return (error); } /** @} */ diff --git a/man/cpg_mcast_joined.3 b/man/cpg_mcast_joined.3 index 2c7c858f..3865dda6 100644 --- a/man/cpg_mcast_joined.3 +++ b/man/cpg_mcast_joined.3 @@ -1,133 +1,133 @@ .\"/* .\" * Copyright (c) 2006 Red Hat, Inc. .\" * .\" * All rights reserved. .\" * .\" * Author: Patrick Caulfield .\" * .\" * This software licensed under BSD license, the text of which follows: .\" * .\" * Redistribution and use in source and binary forms, with or without .\" * modification, are permitted provided that the following conditions are met: .\" * .\" * - Redistributions of source code must retain the above copyright notice, .\" * this list of conditions and the following disclaimer. .\" * - Redistributions in binary form must reproduce the above copyright notice, .\" * this list of conditions and the following disclaimer in the documentation .\" * and/or other materials provided with the distribution. .\" * - Neither the name of the MontaVista Software, Inc. nor the names of its .\" * contributors may be used to endorse or promote products derived from this .\" * software without specific prior written permission. 
.\" * .\" * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF .\" * THE POSSIBILITY OF SUCH DAMAGE. .\" */ .TH CPG_MCAST_JOINED 3 3004-08-31 "corosync Man Page" "Corosync Cluster Engine Programmer's Manual" .SH NAME cpg_mcast_joined \- Multicasts to all groups joined to a handle .SH SYNOPSIS .B #include .B #include .sp .BI "int cpg_mcast_joined(cpg_handle_t " handle ", cpg_gurantee_t " guarantee ", struct iovec *" iovec ", int " iov_len "); .SH DESCRIPTION The .B cpg_mcast_joined function will multicast a message to all the processes that have been joined with the .B cpg_join(3) funtion for the same group name. Messages that are sent to any of the groups joined to the parameter .I handle will be delivered to all subscribed processes in the system. .PP The argument .I guarantee requests a delivery guarantee for the message to be sent. The cpg_guarantee_t type is defined by: .IP .RS .ne 18 .nf .ta 4n 30n 33n typedef enum { CPG_TYPE_UNORDERED, /* not implemented */ CPG_TYPE_FIFO, /* same as agreed */ CPG_TYPE_AGREED, /* implemented */ CPG_TYPE_SAFE /* not implemented */ } cpg_guarantee_t; .ta .fi .RE .IP .PP .PP The meanings of the cpg_guarantee_t typedef are: .TP .B CPG_TYPE_UNORDERED Messages are guaranteed to be delivered, but with no particular order. 
This mode is unimplemented in the CPG library. .TP .B CPG_TYPE_FIFO Messages are guaranteed to be delivered in first sent first delivery order. In fact, this guarantee is equivalent to the CPG_TYPE_AGREED guarantee. .TP .B CPG_TYPE_AGREED All processors must agree on the order of delivery. If a message is sent from two or more processors at about the same time, the delivery will occur in the same order to all processors. .TP .B CPG_TYPE_SAFE All processors must agree on the order of delivery. Further all processors must have a copy of the message before any delivery takes place. This mode is unimplemented in the CPG library. .PP The .I iovec argument describes the scatter/gather list which is used to transmit a message. This is a standard socket structure described by: .IP .RS .ne 18 .nf .ta 4n 30n 33n struct iovec { void *iov_base; /* Pointer to data. */ - size_t iov_len; /* Length of data. */ + unsigned int iov_len; /* Length of data. */ }; .ta .fi .RE .IP .PP .PP The .I iov_len argument describes the number of entires in the .I iovec argument. .SH RETURN VALUE This call returns the CPG_OK value if successful, otherwise an error is returned. .PP .SH ERRORS The errors are undocumented. .SH "SEE ALSO" .BR cpg_overview (8), .BR cpg_initialize (3), .BR cpg_finalize (3), .BR cpg_fd_get (3), .BR cpg_dispatch (3), .BR cpg_leave (3), .BR cpg_join (3), .BR cpg_membership_get (3) .PP diff --git a/man/evs_mcast_groups.3 b/man/evs_mcast_groups.3 index fca94f17..f3d28f9c 100644 --- a/man/evs_mcast_groups.3 +++ b/man/evs_mcast_groups.3 @@ -1,162 +1,162 @@ .\"/* .\" * Copyright (c) 2004 MontaVista Software, Inc. .\" * .\" * All rights reserved. 
.\" * .\" * Author: Steven Dake (sdake@redhat.com) .\" * .\" * This software licensed under BSD license, the text of which follows: .\" * .\" * Redistribution and use in source and binary forms, with or without .\" * modification, are permitted provided that the following conditions are met: .\" * .\" * - Redistributions of source code must retain the above copyright notice, .\" * this list of conditions and the following disclaimer. .\" * - Redistributions in binary form must reproduce the above copyright notice, .\" * this list of conditions and the following disclaimer in the documentation .\" * and/or other materials provided with the distribution. .\" * - Neither the name of the MontaVista Software, Inc. nor the names of its .\" * contributors may be used to endorse or promote products derived from this .\" * software without specific prior written permission. .\" * .\" * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF .\" * THE POSSIBILITY OF SUCH DAMAGE. 
.\" */ .TH EVS_MCAST_GROUPS 3 2004-08-31 "corosync Man Page" "Corosync Cluster Engine Programmer's Manual" .SH NAME evs_join \- Multicast a message to selected groups .SH SYNOPSIS .B #include .B #include .sp .BI "int evs_mcast_groups(evs_handle_t " handle ", evs_guraantee_t " guarantee ", evs_group_t *" groups ", int group_entries, struct iovec *" iovec ", int " iov_len "); .SH DESCRIPTION The .B evs_mcast_groups(3) function multicasts a message to all the groups specified in the arguemnt .I groups. Messages are delivered to all processors in the system that are described by the current configuration. .PP The argument .I handle describes a handle created with .B evs_initialize(3). .PP The argument .I guarantee requests a delivery guarantee for the message to be sent. The evs_guarantee_t type is defined by: .IP .RS .ne 18 .nf .ta 4n 30n 33n typedef enum { EVS_TYPE_UNORDERED, /* not implemented */ EVS_TYPE_FIFO, /* same as agreed */ EVS_TYPE_AGREED, /* implemented */ EVS_TYPE_SAFE /* not implemented */ } evs_guarantee_t; .ta .fi .RE .IP .PP .PP The meanings of the evs_guarantee_t typedef are: .TP .B EVS_GUARANTEE_UNORDERED Messages are guaranteed to be delivered, but with no particular order. This mode is unimplemented in the EVS library. .TP .B EVS_GUARANTEE_FIFO Messages are guaranteed to be delivered in first sent first delivery order from one one. In fact, this guarantee is actually the AGREED guarantee. .TP .B EVS_GUARANTEE_AGREED All processors must agree on the order of delivery. If a message is sent from two or more processors at about the same time, the delivery will occur in the same order to all processors. .TP .B EVS_GUARANTEE_SAFE All processors must agree on the order of delivery. Further all processors must have a copy of the message before any delivery takes place. This mode is unimplemented in the EVS library. 
.PP The .I groups argument is of the type evs_group_t which is defined by the structure: .IP .RS .ne 18 .nf .ta 4n 30n 33n typedef struct { char key[32]; } evs_group_t; .ta .fi .RE .IP .PP .PP The .I group_entries argument describes the number of entries in the .I groups argument. .PP The .I iovec argument describes the scatter/gather list which is used to transmit a message. This is a standard socket structure described by: .IP .RS .ne 18 .nf .ta 4n 30n 33n struct iovec { void *iov_base; /* Pointer to data. */ - size_t iov_len; /* Length of data. */ + unsigned int iov_len; /* Length of data. */ }; .ta .fi .RE .IP .PP .PP The .I iov_len argument describes the number of entries in the .I iovec argument. .SH RETURN VALUE This call returns the EVS_OK value if successful, otherwise an error is returned. .PP .SH ERRORS The errors are undocumented. .SH "SEE ALSO" .BR evs_overview (8), .BR evs_initialize (3), .BR evs_finalize (3), .BR evs_fd_get (3), .BR evs_dispatch (3), .BR evs_leave (3), .BR evs_join (3), .BR evs_mcast_joined (3), .BR evs_membership_get (3) .PP diff --git a/man/evs_mcast_joined.3 b/man/evs_mcast_joined.3 index 4d2eeb5e..a685e8b1 100644 --- a/man/evs_mcast_joined.3 +++ b/man/evs_mcast_joined.3 @@ -1,135 +1,135 @@ .\"/* .\" * Copyright (c) 2004 MontaVista Software, Inc. .\" * .\" * All rights reserved. .\" * .\" * Author: Steven Dake (sdake@redhat.com) .\" * .\" * This software licensed under BSD license, the text of which follows: .\" * .\" * Redistribution and use in source and binary forms, with or without .\" * modification, are permitted provided that the following conditions are met: .\" * .\" * - Redistributions of source code must retain the above copyright notice, .\" * this list of conditions and the following disclaimer. .\" * - Redistributions in binary form must reproduce the above copyright notice, .\" * this list of conditions and the following disclaimer in the documentation .\" * and/or other materials provided with the distribution.
.\" * - Neither the name of the MontaVista Software, Inc. nor the names of its .\" * contributors may be used to endorse or promote products derived from this .\" * software without specific prior written permission. .\" * .\" * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" .\" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE .\" * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE .\" * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE .\" * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR .\" * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF .\" * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS .\" * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN .\" * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) .\" * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF .\" * THE POSSIBILITY OF SUCH DAMAGE. .\" */ .TH EVS_MCAST_JOINED 3 2004-08-31 "corosync Man Page" "Corosync Cluster Engine Programmer's Manual" .SH NAME evs_mcast_joined \- Multicasts to all groups joined to a handle .SH SYNOPSIS .B #include <sys/types.h> .B #include <corosync/evs.h> .sp .BI "int evs_mcast_joined(evs_handle_t " handle ", evs_guarantee_t " guarantee ", struct iovec *" iovec ", int " iov_len "); .SH DESCRIPTION The .B evs_mcast_joined function multicasts a message to all the groups that have been joined with the .B evs_join(3) function for the argument .I handle. Messages that are sent to any of the groups joined to the parameter .I handle will be delivered to all processors in the system. .PP The argument .I guarantee requests a delivery guarantee for the message to be sent.
The evs_guarantee_t type is defined by: .IP .RS .ne 18 .nf .ta 4n 30n 33n typedef enum { EVS_TYPE_UNORDERED, /* not implemented */ EVS_TYPE_FIFO, /* same as agreed */ EVS_TYPE_AGREED, /* implemented */ EVS_TYPE_SAFE /* not implemented */ } evs_guarantee_t; .ta .fi .RE .IP .PP .PP The meanings of the evs_guarantee_t typedef are: .TP .B EVS_GUARANTEE_UNORDERED Messages are guaranteed to be delivered, but with no particular order. This mode is unimplemented in the EVS library. .TP .B EVS_GUARANTEE_FIFO Messages are guaranteed to be delivered in first sent first delivery order from one node. In fact, this guarantee is actually the AGREED guarantee. .TP .B EVS_GUARANTEE_AGREED All processors must agree on the order of delivery. If a message is sent from two or more processors at about the same time, the delivery will occur in the same order to all processors. .TP .B EVS_GUARANTEE_SAFE All processors must agree on the order of delivery. Further all processors must have a copy of the message before any delivery takes place. This mode is unimplemented in the EVS library. .PP The .I iovec argument describes the scatter/gather list which is used to transmit a message. This is a standard socket structure described by: .IP .RS .ne 18 .nf .ta 4n 30n 33n struct iovec { void *iov_base; /* Pointer to data. */ - size_t iov_len; /* Length of data. */ + unsigned int iov_len; /* Length of data. */ }; .ta .fi .RE .IP .PP .PP The .I iov_len argument describes the number of entries in the .I iovec argument. .SH RETURN VALUE This call returns the EVS_OK value if successful, otherwise an error is returned. .PP .SH ERRORS The errors are undocumented. .SH "SEE ALSO" .BR evs_overview (8), .BR evs_initialize (3), .BR evs_finalize (3), .BR evs_fd_get (3), .BR evs_dispatch (3), .BR evs_leave (3), .BR evs_join (3), .BR evs_mcast_groups (3), .BR evs_membership_get (3) .PP