diff --git a/include/qb/qbipcc.h b/include/qb/qbipcc.h index 98ba456..bdbfe03 100644 --- a/include/qb/qbipcc.h +++ b/include/qb/qbipcc.h @@ -1,217 +1,240 @@ /* * Copyright (C) 2006-2007, 2009 Red Hat, Inc. * * Author: Steven Dake * * This file is part of libqb. * * libqb is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 2.1 of the License, or * (at your option) any later version. * * libqb is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with libqb. If not, see . */ #ifndef QB_IPCC_H_DEFINED #define QB_IPCC_H_DEFINED /* *INDENT-OFF* */ #ifdef __cplusplus extern "C" { #endif /* *INDENT-ON* */ #include #include #include #include #include #include /** * @file qbipcc.h * * Client IPC API. * * @par Lifecycle of an IPC connection. * An IPC connection is made to the server with qb_ipcc_connect(). This function * connects to the server and requests channels be created for communication. * To disconnect, the client either exits or executes the function qb_ipcc_disconnect(). * * @par Synchronous communication * The function qb_ipcc_sendv_recv() sends an iovector request and receives a response. * * @par Asynchronous requests from the client * The function qb_ipcc_sendv() sends an iovector request. * The function qb_ipcc_send() sends an message buffer request. * * @par Asynchronous events from the server * The qb_ipcc_event_recv() function receives an out-of-band asyncronous message. * The asynchronous messages are queued and can provide very high out-of-band performance. 
* To determine when to call qb_ipcc_event_recv() the qb_ipcc_fd_get() call is * used to obtain a file descriptor used in the poll() or select() system calls. * * @example ipcclient.c * This is an example of how to use the client. */ typedef struct qb_ipcc_connection qb_ipcc_connection_t; /** * Create a connection to an IPC service. * * @param name name of the service. * @param max_msg_size biggest msg size. * @return NULL (error: see errno) or a connection object. + * + * @note It is recommended to do a one time check on the + * max_msg_size value using qb_ipcc_verify_dgram_max_msg_size + * _BEFORE_ calling the connect function when IPC_SOCKET is in use. + * Some distributions will allow large message buffers to be + * set on the socket, but not actually honor them because of + * kernel state values. The qb_ipcc_verify_dgram_max_msg_size + * function both sets the socket buffer size and verifies it by + * doing a send/recv. */ qb_ipcc_connection_t* qb_ipcc_connect(const char *name, size_t max_msg_size); +/** + * Test kernel dgram socket buffers to verify the largest size up + * to the max_msg_size value a single msg can be. Rounds down to the + * nearest 1k. + * + * @param max_msg_size biggest msg size. + * @return -1 if max size can not be detected, positive value + * representing the largest single msg up to max_msg_size + * that can successfully be sent over a unix dgram socket. + */ +int32_t +qb_ipcc_verify_dgram_max_msg_size(size_t max_msg_size); + + /** * Disconnect an IPC connection. * * @param c connection instance */ void qb_ipcc_disconnect(qb_ipcc_connection_t* c); /** * Get the file descriptor to poll. * * @param c connection instance * @param fd (out) file descriptor to poll */ int32_t qb_ipcc_fd_get(qb_ipcc_connection_t* c, int32_t * fd); /** * Set the maximum allowable flowcontrol value. 
* * @note the default is 1 * * @param c connection instance * @param max the max allowable flowcontrol value (1 or 2) */ int32_t qb_ipcc_fc_enable_max_set(qb_ipcc_connection_t * c, uint32_t max); /** * Send a message. * * @param c connection instance * @param msg_ptr pointer to a message to send * @param msg_len the size of the message * @return (size sent, -errno == error) * * @note the msg_ptr must include a qb_ipc_request_header at * the top of the message. The server will read the size field * to determine how much to recv. */ ssize_t qb_ipcc_send(qb_ipcc_connection_t* c, const void *msg_ptr, size_t msg_len); /** * Send a message (iovec). * * @param c connection instance * @param iov pointer to an iovec struct to send * @param iov_len the number of iovecs used * @return (size sent, -errno == error) * * @note the iov[0] must be a qb_ipc_request_header. The server will * read the size field to determine how much to recv. */ ssize_t qb_ipcc_sendv(qb_ipcc_connection_t* c, const struct iovec* iov, size_t iov_len); /** * Receive a response. * * @param c connection instance * @param msg_ptr pointer to a message buffer to receive into * @param msg_len the size of the buffer * @param ms_timeout max time to wait for a response * @return (size recv'ed, -errno == error) * * @note that msg_ptr will include a qb_ipc_response_header at * the top of the message. */ ssize_t qb_ipcc_recv(qb_ipcc_connection_t* c, void *msg_ptr, size_t msg_len, int32_t ms_timeout); /** * This is a convenience function that simply sends and then recvs. * * @param c connection instance * @param iov pointer to an iovec struct to send * @param iov_len the number of iovecs used * @param msg_ptr pointer to a message buffer to receive into * @param msg_len the size of the buffer * @param ms_timeout max time to wait for a response * * @note the iov[0] must include a qb_ipc_request_header at * the top of the message. The server will read the size field * to determine how much to recv. 
* @note that msg_ptr will include a qb_ipc_response_header at * the top of the message. * * @see qb_ipcc_sendv() qb_ipcc_recv() */ ssize_t qb_ipcc_sendv_recv(qb_ipcc_connection_t *c, const struct iovec *iov, uint32_t iov_len, void *msg_ptr, size_t msg_len, int32_t ms_timeout); /** * Receive an event. * * @param c connection instance * @param msg_ptr pointer to a message buffer to receive into * @param msg_len the size of the buffer * @param ms_timeout time in milli seconds to wait for a message * 0 == no wait, negative == block, positive == wait X ms. * @param ms_timeout max time to wait for a response * @return size of the message or error (-errno) * * @note that msg_ptr will include a qb_ipc_response_header at * the top of the message. */ ssize_t qb_ipcc_event_recv(qb_ipcc_connection_t* c, void *msg_ptr, size_t msg_len, int32_t ms_timeout); /** * Associate a "user" pointer with this connection. * * @param context the point to associate with this connection. * @param c connection instance * @see qb_ipcc_context_get() */ void qb_ipcc_context_set(qb_ipcc_connection_t *c, void *context); /** * Get the context (set previously) * * @param c connection instance * @return the context * @see qb_ipcc_context_set() */ void *qb_ipcc_context_get(qb_ipcc_connection_t *c); /** * Is the connection connected? * * @param c connection instance * @retval QB_TRUE when connected * @retval QB_FALSE when not connected */ int32_t qb_ipcc_is_connected(qb_ipcc_connection_t *c); /* *INDENT-OFF* */ #ifdef __cplusplus } #endif /* *INDENT-ON* */ #endif /* QB_IPCC_H_DEFINED */ diff --git a/lib/ipc_socket.c b/lib/ipc_socket.c index 8f1e22c..81b48cb 100644 --- a/lib/ipc_socket.c +++ b/lib/ipc_socket.c @@ -1,651 +1,737 @@ /* * Copyright (C) 2010,2013 Red Hat, Inc. * * Author: Angus Salkeld * * This file is part of libqb. 
* * libqb is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 2.1 of the License, or * (at your option) any later version. * * libqb is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with libqb. If not, see . */ #include "os_base.h" #ifdef HAVE_SYS_UN_H #include #endif /* HAVE_SYS_UN_H */ #ifdef HAVE_SYS_MMAN_H #include #endif #include #include #include #include #include "util_int.h" #include "ipc_int.h" struct ipc_us_control { int32_t sent; int32_t flow_control; }; #define SHM_CONTROL_SIZE (3 * sizeof(struct ipc_us_control)) static void set_sock_addr(struct sockaddr_un *address, const char *socket_name) { memset(address, 0, sizeof(struct sockaddr_un)); address->sun_family = AF_UNIX; #ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN address->sun_len = QB_SUN_LEN(address); #endif #if defined(QB_LINUX) || defined(QB_CYGWIN) snprintf(address->sun_path + 1, UNIX_PATH_MAX - 1, "%s", socket_name); #else snprintf(address->sun_path, UNIX_PATH_MAX, "%s/%s", SOCKETDIR, socket_name); #endif } static int32_t qb_ipc_dgram_sock_setup(const char *base_name, const char *service_name, int32_t * sock_pt) { int32_t request_fd; struct sockaddr_un local_address; int32_t res = 0; char sock_path[PATH_MAX]; request_fd = socket(PF_UNIX, SOCK_DGRAM, 0); if (request_fd == -1) { return -errno; } qb_socket_nosigpipe(request_fd); res = qb_sys_fd_nonblock_cloexec_set(request_fd); if (res < 0) { goto error_connect; } snprintf(sock_path, PATH_MAX, "%s-%s", base_name, service_name); set_sock_addr(&local_address, sock_path); res = bind(request_fd, (struct sockaddr *)&local_address, sizeof(local_address)); if (res < 0) { 
goto error_connect; } *sock_pt = request_fd; return 0; error_connect: close(request_fd); *sock_pt = -1; return res; } static int32_t set_sock_size(int sockfd, size_t max_msg_size) { int32_t rc; unsigned int optval; socklen_t optlen = sizeof(optval); rc = getsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, &optval, &optlen); - qb_util_log(LOG_DEBUG, "%d: getsockopt(%d, needed:%d) actual:%d", - rc, sockfd, max_msg_size, optval); - - if (rc == 0 && optval < max_msg_size) { + qb_util_log(LOG_TRACE, "%d: getsockopt(%d, needed:%d) actual:%d", + rc, sockfd, max_msg_size, optval); + + /* The optval <= max_msg_size check is weird... + * during testing it was discovered in some instances if the + * default optval is exactly equal to our max_msg_size, we couldn't + * actually send a message that large unless we explicitly set + * it using setsockopt... there is no good explanation for this. Most + * likely this is hitting some sort of "off by one" error in the kernel. */ + if (rc == 0 && optval <= max_msg_size) { optval = max_msg_size; optlen = sizeof(optval); rc = setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, &optval, optlen); } return rc; } +static int32_t +dgram_verify_msg_size(size_t max_msg_size) +{ + int32_t rc = -1; + int32_t sockets[2]; + int32_t tries = 0; + int32_t write_passed = 0; + int32_t read_passed = 0; + char buf[max_msg_size]; + + if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets) < 0) { + goto cleanup_socks; + } + + if (set_sock_size(sockets[0], max_msg_size) != 0) { + goto cleanup_socks; + } + if (set_sock_size(sockets[1], max_msg_size) != 0) { + goto cleanup_socks; + } + + for (tries = 0; tries < 3; tries++) { + + if (write_passed == 0) { + rc = write(sockets[1], buf, max_msg_size); + + if (rc < 0 && (errno == EAGAIN || errno == EINTR)) { + continue; + } else if (rc == max_msg_size) { + write_passed = 1; + } else { + break; + } + } + + if (read_passed == 0) { + rc = read(sockets[0], buf, max_msg_size); + + if (rc < 0 && (errno == EAGAIN || errno == EINTR)) { + continue; 
+ } else if (rc == max_msg_size) { + read_passed = 1; + } else { + break; + } + } + + if (read_passed && write_passed) { + rc = 0; + break; + } + } + + +cleanup_socks: + close(sockets[0]); + close(sockets[1]); + return rc; +} + +int32_t +qb_ipcc_verify_dgram_max_msg_size(size_t max_msg_size) +{ + int32_t i; + int32_t last = -1; + + if (dgram_verify_msg_size(max_msg_size) == 0) { + return max_msg_size; + } + + for (i = 1024; i < max_msg_size; i+=1024) { + if (dgram_verify_msg_size(i) != 0) { + break; + } + last = i; + } + + return last; +} + /* * bind to "base_name-local_name" * connect to "base_name-remote_name" * output sock_pt */ static int32_t qb_ipc_dgram_sock_connect(const char *base_name, const char *local_name, const char *remote_name, int32_t max_msg_size, int32_t * sock_pt) { char sock_path[PATH_MAX]; struct sockaddr_un remote_address; int32_t res = qb_ipc_dgram_sock_setup(base_name, local_name, sock_pt); if (res < 0) { return res; } snprintf(sock_path, PATH_MAX, "%s-%s", base_name, remote_name); set_sock_addr(&remote_address, sock_path); if (connect(*sock_pt, (struct sockaddr *)&remote_address, QB_SUN_LEN(&remote_address)) == -1) { res = -errno; goto error_connect; } return set_sock_size(*sock_pt, max_msg_size); error_connect: close(*sock_pt); *sock_pt = -1; return res; } static int32_t _finish_connecting(struct qb_ipc_one_way *one_way) { struct sockaddr_un remote_address; int res; int error; int retry = 0; set_sock_addr(&remote_address, one_way->u.us.sock_name); /* this retry loop is here to help connecting when trying to send * an event right after connection setup. 
*/ do { errno = 0; res = connect(one_way->u.us.sock, (struct sockaddr *)&remote_address, QB_SUN_LEN(&remote_address)); if (res == -1) { error = -errno; qb_util_perror(LOG_DEBUG, "error calling connect()"); retry++; usleep(100000); } } while (res == -1 && retry < 10); if (res == -1) { return error; } free(one_way->u.us.sock_name); one_way->u.us.sock_name = NULL; return set_sock_size(one_way->u.us.sock, one_way->max_msg_size); } /* * client functions * -------------------------------------------------------- */ static void qb_ipcc_us_disconnect(struct qb_ipcc_connection *c) { munmap(c->request.u.us.shared_data, SHM_CONTROL_SIZE); unlink(c->request.u.us.shared_file_name); close(c->request.u.us.sock); close(c->event.u.us.sock); } static ssize_t qb_ipc_socket_send(struct qb_ipc_one_way *one_way, const void *msg_ptr, size_t msg_len) { ssize_t rc = 0; struct ipc_us_control *ctl; ctl = (struct ipc_us_control *)one_way->u.us.shared_data; if (one_way->u.us.sock_name) { rc = _finish_connecting(one_way); if (rc < 0) { qb_util_log(LOG_ERR, "socket connect-on-send"); return rc; } } qb_sigpipe_ctl(QB_SIGPIPE_IGNORE); rc = send(one_way->u.us.sock, msg_ptr, msg_len, MSG_NOSIGNAL); if (rc == -1) { rc = -errno; if (errno != EAGAIN && errno != ENOBUFS) { qb_util_perror(LOG_ERR, "socket_send:send"); } } qb_sigpipe_ctl(QB_SIGPIPE_DEFAULT); if (ctl && rc == msg_len) { qb_atomic_int_inc(&ctl->sent); } return rc; } static ssize_t qb_ipc_socket_sendv(struct qb_ipc_one_way *one_way, const struct iovec *iov, size_t iov_len) { int32_t rc; struct ipc_us_control *ctl; ctl = (struct ipc_us_control *)one_way->u.us.shared_data; qb_sigpipe_ctl(QB_SIGPIPE_IGNORE); if (one_way->u.us.sock_name) { rc = _finish_connecting(one_way); if (rc < 0) { qb_util_perror(LOG_ERR, "socket connect-on-sendv"); return rc; } } rc = writev(one_way->u.us.sock, iov, iov_len); if (rc == -1) { rc = -errno; if (errno != EAGAIN && errno != ENOBUFS) { qb_util_perror(LOG_ERR, "socket_sendv:writev %d", one_way->u.us.sock); } } 
qb_sigpipe_ctl(QB_SIGPIPE_DEFAULT); if (ctl && rc > 0) { qb_atomic_int_inc(&ctl->sent); } return rc; } /* * recv a message of unknown size. */ static ssize_t qb_ipc_us_recv_at_most(struct qb_ipc_one_way *one_way, void *msg, size_t len, int32_t timeout) { int32_t result; int32_t final_rc = 0; int32_t to_recv = 0; char *data = msg; struct ipc_us_control *ctl = NULL; int32_t time_waited = 0; int32_t time_to_wait = timeout; if (timeout == -1) { time_to_wait = 1000; } qb_sigpipe_ctl(QB_SIGPIPE_IGNORE); retry_peek: result = recv(one_way->u.us.sock, data, sizeof(struct qb_ipc_request_header), MSG_NOSIGNAL | MSG_PEEK); if (result == -1) { if (errno == EAGAIN && (time_waited < timeout || timeout == -1)) { result = qb_ipc_us_ready(one_way, NULL, time_to_wait, POLLIN); time_waited += time_to_wait; goto retry_peek; } else { return -errno; } } if (result >= sizeof(struct qb_ipc_request_header)) { struct qb_ipc_request_header *hdr = NULL; hdr = (struct qb_ipc_request_header *)msg; to_recv = hdr->size; } result = recv(one_way->u.us.sock, data, to_recv, MSG_NOSIGNAL | MSG_WAITALL); if (result == -1) { final_rc = -errno; goto cleanup_sigpipe; } else if (result == 0) { qb_util_log(LOG_DEBUG, "recv == 0 -> ENOTCONN"); final_rc = -ENOTCONN; goto cleanup_sigpipe; } final_rc = result; ctl = (struct ipc_us_control *)one_way->u.us.shared_data; if (ctl) { (void)qb_atomic_int_dec_and_test(&ctl->sent); } cleanup_sigpipe: qb_sigpipe_ctl(QB_SIGPIPE_DEFAULT); return final_rc; } static void qb_ipc_us_fc_set(struct qb_ipc_one_way *one_way, int32_t fc_enable) { struct ipc_us_control *ctl = (struct ipc_us_control *)one_way->u.us.shared_data; qb_util_log(LOG_TRACE, "setting fc to %d", fc_enable); qb_atomic_int_set(&ctl->flow_control, fc_enable); } static int32_t qb_ipc_us_fc_get(struct qb_ipc_one_way *one_way) { struct ipc_us_control *ctl = (struct ipc_us_control *)one_way->u.us.shared_data; return qb_atomic_int_get(&ctl->flow_control); } static ssize_t qb_ipc_us_q_len_get(struct qb_ipc_one_way 
*one_way) { struct ipc_us_control *ctl = (struct ipc_us_control *)one_way->u.us.shared_data; return qb_atomic_int_get(&ctl->sent); } int32_t qb_ipcc_us_connect(struct qb_ipcc_connection * c, struct qb_ipc_connection_response * r) { int32_t res; char path[PATH_MAX]; int32_t fd_hdr; char *shm_ptr; qb_atomic_init(); c->needs_sock_for_poll = QB_FALSE; c->funcs.send = qb_ipc_socket_send; c->funcs.sendv = qb_ipc_socket_sendv; c->funcs.recv = qb_ipc_us_recv_at_most; c->funcs.fc_get = qb_ipc_us_fc_get; c->funcs.disconnect = qb_ipcc_us_disconnect; fd_hdr = qb_sys_mmap_file_open(path, r->request, SHM_CONTROL_SIZE, O_RDWR); if (fd_hdr < 0) { res = fd_hdr; errno = -fd_hdr; qb_util_perror(LOG_ERR, "couldn't open file for mmap"); return res; } (void)strlcpy(c->request.u.us.shared_file_name, r->request, NAME_MAX); shm_ptr = mmap(0, SHM_CONTROL_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd_hdr, 0); if (shm_ptr == MAP_FAILED) { res = -errno; qb_util_perror(LOG_ERR, "couldn't create mmap for header"); goto cleanup_hdr; } c->request.u.us.shared_data = shm_ptr; c->response.u.us.shared_data = shm_ptr + sizeof(struct ipc_us_control); c->event.u.us.shared_data = shm_ptr + (2 * sizeof(struct ipc_us_control)); close(fd_hdr); fd_hdr = -1; res = qb_ipc_dgram_sock_connect(r->response, "response", "request", r->max_msg_size, &c->request.u.us.sock); if (res != 0) { goto cleanup_hdr; } c->response.u.us.sock = c->request.u.us.sock; res = qb_ipc_dgram_sock_connect(r->response, "event", "event-tx", r->max_msg_size, &c->event.u.us.sock); if (res != 0) { goto cleanup_hdr; } return 0; cleanup_hdr: if (fd_hdr >= 0) { close(fd_hdr); } close(c->event.u.us.sock); close(c->request.u.us.sock); unlink(r->request); munmap(c->request.u.us.shared_data, SHM_CONTROL_SIZE); return res; } /* * service functions * -------------------------------------------------------- */ static int32_t _sock_connection_liveliness(int32_t fd, int32_t revents, void *data) { struct qb_ipcs_connection *c = (struct qb_ipcs_connection 
*)data; qb_util_log(LOG_DEBUG, "LIVENESS: fd %d event %d conn (%s)", fd, revents, c->description); if (revents & POLLNVAL) { qb_util_log(LOG_DEBUG, "NVAL conn (%s)", c->description); qb_ipcs_disconnect(c); return -EINVAL; } if (revents & POLLHUP) { qb_util_log(LOG_DEBUG, "HUP conn (%s)", c->description); qb_ipcs_disconnect(c); return -ESHUTDOWN; } return 0; } static int32_t _sock_add_to_mainloop(struct qb_ipcs_connection *c) { int res; res = c->service->poll_fns.dispatch_add(c->service->poll_priority, c->request.u.us.sock, POLLIN | POLLPRI | POLLNVAL, c, qb_ipcs_dispatch_connection_request); if (res < 0) { qb_util_log(LOG_ERR, "Error adding socket to mainloop (%s).", c->description); return res; } qb_ipcs_connection_ref(c); res = c->service->poll_fns.dispatch_add(c->service->poll_priority, c->setup.u.us.sock, POLLIN | POLLPRI | POLLNVAL, c, _sock_connection_liveliness); qb_util_log(LOG_DEBUG, "added %d to poll loop (liveness)", c->setup.u.us.sock); if (res < 0) { qb_util_perror(LOG_ERR, "Error adding setupfd to mainloop"); (void)c->service->poll_fns.dispatch_del(c->request.u.us.sock); return res; } qb_ipcs_connection_ref(c); return res; } static void _sock_rm_from_mainloop(struct qb_ipcs_connection *c) { (void)c->service->poll_fns.dispatch_del(c->request.u.us.sock); qb_ipcs_connection_unref(c); (void)c->service->poll_fns.dispatch_del(c->setup.u.us.sock); qb_ipcs_connection_unref(c); } static void qb_ipcs_us_disconnect(struct qb_ipcs_connection *c) { qb_enter(); if (c->state == QB_IPCS_CONNECTION_ESTABLISHED || c->state == QB_IPCS_CONNECTION_ACTIVE) { _sock_rm_from_mainloop(c); qb_ipcc_us_sock_close(c->setup.u.us.sock); qb_ipcc_us_sock_close(c->request.u.us.sock); qb_ipcc_us_sock_close(c->event.u.us.sock); } if (c->state == QB_IPCS_CONNECTION_SHUTTING_DOWN || c->state == QB_IPCS_CONNECTION_ACTIVE) { munmap(c->request.u.us.shared_data, SHM_CONTROL_SIZE); unlink(c->request.u.us.shared_file_name); } } static int32_t qb_ipcs_us_connect(struct qb_ipcs_service *s, struct 
qb_ipcs_connection *c, struct qb_ipc_connection_response *r) { char path[PATH_MAX]; int32_t fd_hdr; int32_t res = 0; struct ipc_us_control *ctl; char *shm_ptr; qb_util_log(LOG_DEBUG, "connecting to client (%s)", c->description); c->request.u.us.sock = c->setup.u.us.sock; c->response.u.us.sock = c->setup.u.us.sock; snprintf(r->request, NAME_MAX, "qb-%s-control-%s", s->name, c->description); snprintf(r->response, NAME_MAX, "qb-%s-%s", s->name, c->description); fd_hdr = qb_sys_mmap_file_open(path, r->request, SHM_CONTROL_SIZE, O_CREAT | O_TRUNC | O_RDWR); if (fd_hdr < 0) { res = fd_hdr; errno = -fd_hdr; qb_util_perror(LOG_ERR, "couldn't create file for mmap (%s)", c->description); return res; } (void)strlcpy(r->request, path, PATH_MAX); (void)strlcpy(c->request.u.us.shared_file_name, r->request, NAME_MAX); res = chown(r->request, c->auth.uid, c->auth.gid); if (res != 0) { /* ignore res, this is just for the compiler warnings. */ res = 0; } res = chmod(r->request, c->auth.mode); if (res != 0) { /* ignore res, this is just for the compiler warnings. 
*/ res = 0; } shm_ptr = mmap(0, SHM_CONTROL_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd_hdr, 0); if (shm_ptr == MAP_FAILED) { res = -errno; qb_util_perror(LOG_ERR, "couldn't create mmap for header (%s)", c->description); goto cleanup_hdr; } c->request.u.us.shared_data = shm_ptr; c->response.u.us.shared_data = shm_ptr + sizeof(struct ipc_us_control); c->event.u.us.shared_data = shm_ptr + (2 * sizeof(struct ipc_us_control)); ctl = (struct ipc_us_control *)c->request.u.us.shared_data; ctl->sent = 0; ctl->flow_control = 0; ctl = (struct ipc_us_control *)c->response.u.us.shared_data; ctl->sent = 0; ctl->flow_control = 0; ctl = (struct ipc_us_control *)c->event.u.us.shared_data; ctl->sent = 0; ctl->flow_control = 0; close(fd_hdr); fd_hdr = -1; /* request channel */ res = qb_ipc_dgram_sock_setup(r->response, "request", &c->request.u.us.sock); if (res < 0) { goto cleanup_hdr; } c->setup.u.us.sock_name = NULL; c->request.u.us.sock_name = NULL; /* response channel */ c->response.u.us.sock = c->request.u.us.sock; snprintf(path, PATH_MAX, "%s-%s", r->response, "response"); c->response.u.us.sock_name = strdup(path); /* event channel */ res = qb_ipc_dgram_sock_setup(r->response, "event-tx", &c->event.u.us.sock); if (res < 0) { goto cleanup_hdr; } snprintf(path, PATH_MAX, "%s-%s", r->response, "event"); c->event.u.us.sock_name = strdup(path); res = _sock_add_to_mainloop(c); if (res < 0) { goto cleanup_hdr; } return res; cleanup_hdr: free(c->response.u.us.sock_name); free(c->event.u.us.sock_name); if (fd_hdr >= 0) { close(fd_hdr); } unlink(r->request); munmap(c->request.u.us.shared_data, SHM_CONTROL_SIZE); return res; } void qb_ipcs_us_init(struct qb_ipcs_service *s) { s->funcs.connect = qb_ipcs_us_connect; s->funcs.disconnect = qb_ipcs_us_disconnect; s->funcs.recv = qb_ipc_us_recv_at_most; s->funcs.peek = NULL; s->funcs.reclaim = NULL; s->funcs.send = qb_ipc_socket_send; s->funcs.sendv = qb_ipc_socket_sendv; s->funcs.fc_set = qb_ipc_us_fc_set; s->funcs.q_len_get = 
qb_ipc_us_q_len_get; s->needs_sock_for_poll = QB_FALSE; qb_atomic_init(); } diff --git a/tests/check_ipc.c b/tests/check_ipc.c index cdce7b5..ad4888f 100644 --- a/tests/check_ipc.c +++ b/tests/check_ipc.c @@ -1,1338 +1,1384 @@ /* * Copyright (c) 2010 Red Hat, Inc. * * All rights reserved. * * Author: Angus Salkeld * * This file is part of libqb. * * libqb is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 2.1 of the License, or * (at your option) any later version. * * libqb is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with libqb. If not, see . */ #include "os_base.h" #include #include #include #include #include #include #include #include static const char *ipc_name = "ipc_test"; -#define MAX_MSG_SIZE (8192*16) + +#define DEFAULT_MAX_MSG_SIZE (8192*16) +static int CALCULATED_DGRAM_MAX_MSG_SIZE = 0; + +#define DGRAM_MAX_MSG_SIZE \ + (CALCULATED_DGRAM_MAX_MSG_SIZE == 0 ? \ + CALCULATED_DGRAM_MAX_MSG_SIZE = qb_ipcc_verify_dgram_max_msg_size(DEFAULT_MAX_MSG_SIZE) : \ + CALCULATED_DGRAM_MAX_MSG_SIZE) + +#define MAX_MSG_SIZE (ipc_type == QB_IPC_SOCKET ? DGRAM_MAX_MSG_SIZE : DEFAULT_MAX_MSG_SIZE) /* The size the giant msg's data field needs to be to make * this the largests msg we can successfully send. 
*/ #define GIANT_MSG_DATA_SIZE MAX_MSG_SIZE - sizeof(struct qb_ipc_response_header) - 8 static qb_ipcc_connection_t *conn; static enum qb_ipc_type ipc_type; -struct giant_event { - struct qb_ipc_response_header hdr __attribute__ ((aligned(8))); - char data[GIANT_MSG_DATA_SIZE] __attribute__ ((aligned(8))); - uint32_t sent_msgs __attribute__ ((aligned(8))); -} __attribute__ ((aligned(8))); - -static struct giant_event giant_event_recv; -static struct giant_event giant_event_send; - enum my_msg_ids { IPC_MSG_REQ_TX_RX, IPC_MSG_RES_TX_RX, IPC_MSG_REQ_DISPATCH, IPC_MSG_RES_DISPATCH, IPC_MSG_REQ_BULK_EVENTS, IPC_MSG_RES_BULK_EVENTS, IPC_MSG_REQ_STRESS_EVENT, IPC_MSG_RES_STRESS_EVENT, IPC_MSG_REQ_SERVER_FAIL, IPC_MSG_RES_SERVER_FAIL, IPC_MSG_REQ_SERVER_DISCONNECT, IPC_MSG_RES_SERVER_DISCONNECT, }; /* Test Cases * * 1) basic send & recv differnet message sizes * * 2) send message to start dispatch (confirm receipt) * * 3) flow control * * 4) authentication * * 5) thread safety * * 6) cleanup * * 7) service availabilty * * 8) multiple services */ static qb_loop_t *my_loop; static qb_ipcs_service_t* s1; static int32_t turn_on_fc = QB_FALSE; static int32_t fc_enabled = 89; static int32_t send_event_on_created = QB_FALSE; static int32_t disconnect_after_created = QB_FALSE; static int32_t num_bulk_events = 10; static int32_t num_stress_events = 30000; static int32_t reference_count_test = QB_FALSE; static int32_t exit_handler(int32_t rsignal, void *data) { qb_log(LOG_DEBUG, "caught signal %d", rsignal); qb_ipcs_destroy(s1); return -1; } static int32_t s1_msg_process_fn(qb_ipcs_connection_t *c, void *data, size_t size) { struct qb_ipc_request_header *req_pt = (struct qb_ipc_request_header *)data; struct qb_ipc_response_header response = { 0, }; ssize_t res; if (req_pt->id == IPC_MSG_REQ_TX_RX) { response.size = sizeof(struct qb_ipc_response_header); response.id = IPC_MSG_RES_TX_RX; response.error = 0; res = qb_ipcs_response_send(c, &response, response.size); if (res < 0) { 
qb_perror(LOG_INFO, "qb_ipcs_response_send"); } else if (res != response.size) { qb_log(LOG_DEBUG, "qb_ipcs_response_send %zd != %d", res, response.size); } if (turn_on_fc) { qb_ipcs_request_rate_limit(s1, QB_IPCS_RATE_OFF); } } else if (req_pt->id == IPC_MSG_REQ_DISPATCH) { response.size = sizeof(struct qb_ipc_response_header); response.id = IPC_MSG_RES_DISPATCH; response.error = 0; res = qb_ipcs_event_send(c, &response, sizeof(response)); if (res < 0) { qb_perror(LOG_INFO, "qb_ipcs_event_send"); } } else if (req_pt->id == IPC_MSG_REQ_BULK_EVENTS) { int32_t m; int32_t num; struct qb_ipcs_connection_stats_2 *stats; response.size = sizeof(struct qb_ipc_response_header); response.error = 0; stats = qb_ipcs_connection_stats_get_2(c, QB_FALSE); num = stats->event_q_length; free(stats); /* crazy large message */ res = qb_ipcs_event_send(c, &response, MAX_MSG_SIZE*10); ck_assert_int_eq(res, -EMSGSIZE); /* send one event before responding */ res = qb_ipcs_event_send(c, &response, sizeof(response)); ck_assert_int_eq(res, sizeof(response)); response.id++; /* send response */ response.id = IPC_MSG_RES_BULK_EVENTS; res = qb_ipcs_response_send(c, &response, response.size); ck_assert_int_eq(res, sizeof(response)); /* send the rest of the events after the response */ for (m = 1; m < num_bulk_events; m++) { res = qb_ipcs_event_send(c, &response, sizeof(response)); if (res == -EAGAIN || res == -ENOBUFS) { /* retry */ usleep(1000); m--; continue; } ck_assert_int_eq(res, sizeof(response)); response.id++; } stats = qb_ipcs_connection_stats_get_2(c, QB_FALSE); ck_assert_int_eq(stats->event_q_length - num, num_bulk_events); free(stats); } else if (req_pt->id == IPC_MSG_REQ_STRESS_EVENT) { + struct { + struct qb_ipc_response_header hdr __attribute__ ((aligned(8))); + char data[GIANT_MSG_DATA_SIZE] __attribute__ ((aligned(8))); + uint32_t sent_msgs __attribute__ ((aligned(8))); + } __attribute__ ((aligned(8))) giant_event_send; int32_t m; response.size = sizeof(struct 
qb_ipc_response_header); response.error = 0; response.id = IPC_MSG_RES_STRESS_EVENT; res = qb_ipcs_response_send(c, &response, response.size); ck_assert_int_eq(res, sizeof(response)); giant_event_send.hdr.error = 0; giant_event_send.hdr.id = IPC_MSG_RES_STRESS_EVENT; for (m = 0; m < num_stress_events; m++) { size_t sent_len = sizeof(struct qb_ipc_response_header); if (((m+1) % 1000) == 0) { - sent_len = sizeof(struct giant_event); + sent_len = sizeof(giant_event_send); giant_event_send.sent_msgs = m + 1; } giant_event_send.hdr.size = sent_len; res = qb_ipcs_event_send(c, &giant_event_send, sent_len); if (res < 0) { if (res == -EAGAIN || res == -ENOBUFS) { /* yield to the receive process */ usleep(1000); m--; continue; } else { qb_perror(LOG_DEBUG, "sending stress events"); ck_assert_int_eq(res, sent_len); } } else if (((m+1) % 1000) == 0) { qb_log(LOG_DEBUG, "SENT: %d stress events sent", m+1); } giant_event_send.hdr.id++; } } else if (req_pt->id == IPC_MSG_REQ_SERVER_FAIL) { exit(0); } else if (req_pt->id == IPC_MSG_REQ_SERVER_DISCONNECT) { qb_ipcs_disconnect(c); } return 0; } static int32_t my_job_add(enum qb_loop_priority p, void *data, qb_loop_job_dispatch_fn fn) { return qb_loop_job_add(my_loop, p, data, fn); } static int32_t my_dispatch_add(enum qb_loop_priority p, int32_t fd, int32_t events, void *data, qb_ipcs_dispatch_fn_t fn) { return qb_loop_poll_add(my_loop, p, fd, events, data, fn); } static int32_t my_dispatch_mod(enum qb_loop_priority p, int32_t fd, int32_t events, void *data, qb_ipcs_dispatch_fn_t fn) { return qb_loop_poll_mod(my_loop, p, fd, events, data, fn); } static int32_t my_dispatch_del(int32_t fd) { return qb_loop_poll_del(my_loop, fd); } static int32_t s1_connection_closed(qb_ipcs_connection_t *c) { qb_enter(); qb_leave(); return 0; } static void outq_flush (void *data) { static int i = 0; struct cs_ipcs_conn_context *cnx; cnx = qb_ipcs_context_get(data); qb_log(LOG_DEBUG,"iter %u\n", i); i++; if (i == 2) { qb_ipcs_destroy(s1); s1 = NULL; } 
/* is the reference counting is not working, this should fail * for i > 1. */ qb_ipcs_event_send(data, "test", 4); assert(memcmp(cnx, "test", 4) == 0); if (i < 5) { qb_loop_job_add(my_loop, QB_LOOP_HIGH, data, outq_flush); } else { /* this single unref should clean everything up. */ qb_ipcs_connection_unref(data); qb_log(LOG_INFO, "end of test, stopping loop"); qb_loop_stop(my_loop); } } static void s1_connection_destroyed(qb_ipcs_connection_t *c) { qb_enter(); if (reference_count_test) { struct cs_ipcs_conn_context *cnx; cnx = qb_ipcs_context_get(c); free(cnx); } else { qb_loop_stop(my_loop); } qb_leave(); } static void s1_connection_created(qb_ipcs_connection_t *c) { if (send_event_on_created) { struct qb_ipc_response_header response; int32_t res; response.size = sizeof(struct qb_ipc_response_header); response.id = IPC_MSG_RES_DISPATCH; response.error = 0; res = qb_ipcs_event_send(c, &response, sizeof(response)); ck_assert_int_eq(res, response.size); } if (reference_count_test) { struct cs_ipcs_conn_context *context; qb_ipcs_connection_ref(c); qb_loop_job_add(my_loop, QB_LOOP_HIGH, c, outq_flush); context = calloc(1, 20); memcpy(context, "test", 4); qb_ipcs_context_set(c, context); } } static void run_ipc_server(void) { int32_t res; qb_loop_signal_handle handle; struct qb_ipcs_service_handlers sh = { .connection_accept = NULL, .connection_created = s1_connection_created, .msg_process = s1_msg_process_fn, .connection_destroyed = s1_connection_destroyed, .connection_closed = s1_connection_closed, }; struct qb_ipcs_poll_handlers ph = { .job_add = my_job_add, .dispatch_add = my_dispatch_add, .dispatch_mod = my_dispatch_mod, .dispatch_del = my_dispatch_del, }; qb_loop_signal_add(my_loop, QB_LOOP_HIGH, SIGSTOP, NULL, exit_handler, &handle); qb_loop_signal_add(my_loop, QB_LOOP_HIGH, SIGTERM, NULL, exit_handler, &handle); my_loop = qb_loop_create(); s1 = qb_ipcs_create(ipc_name, 4, ipc_type, &sh); fail_if(s1 == 0); qb_ipcs_poll_handlers_set(s1, &ph); res = 
qb_ipcs_run(s1); ck_assert_int_eq(res, 0); qb_loop_run(my_loop); qb_log(LOG_DEBUG, "loop finished - done ..."); } static int32_t run_function_in_new_process(void (*run_ipc_server_fn)(void)) { pid_t pid = fork (); if (pid == -1) { fprintf (stderr, "Can't fork\n"); return -1; } if (pid == 0) { run_ipc_server_fn(); return 0; } return pid; } static int32_t stop_process(pid_t pid) { /* wait a bit for the server to shutdown by it's self */ usleep(100000); kill(pid, SIGTERM); waitpid(pid, NULL, 0); return 0; } struct my_req { struct qb_ipc_request_header hdr; char message[1024 * 1024]; }; static struct my_req request; static int32_t send_and_check(int32_t req_id, uint32_t size, int32_t ms_timeout, int32_t expect_perfection) { struct qb_ipc_response_header res_header; int32_t res; int32_t try_times = 0; request.hdr.id = req_id; request.hdr.size = sizeof(struct qb_ipc_request_header) + size; /* check that we can't send a message that is too big * and we get the right return code. */ res = qb_ipcc_send(conn, &request, MAX_MSG_SIZE*2); ck_assert_int_eq(res, -EMSGSIZE); repeat_send: res = qb_ipcc_send(conn, &request, request.hdr.size); try_times++; if (res < 0) { if (res == -EAGAIN && try_times < 10) { goto repeat_send; } else { if (res == -EAGAIN && try_times >= 10) { fc_enabled = QB_TRUE; } errno = -res; qb_perror(LOG_INFO, "qb_ipcc_send"); return res; } } if (req_id == IPC_MSG_REQ_DISPATCH) { res = qb_ipcc_event_recv(conn, &res_header, sizeof(struct qb_ipc_response_header), ms_timeout); } else { res = qb_ipcc_recv(conn, &res_header, sizeof(struct qb_ipc_response_header), ms_timeout); } if (res == -EINTR) { return -1; } if (res == -EAGAIN || res == -ETIMEDOUT) { fc_enabled = QB_TRUE; qb_perror(LOG_DEBUG, "qb_ipcc_recv"); return res; } if (expect_perfection) { ck_assert_int_eq(res, sizeof(struct qb_ipc_response_header)); ck_assert_int_eq(res_header.id, req_id + 1); ck_assert_int_eq(res_header.size, sizeof(struct qb_ipc_response_header)); } return res; } static int32_t 
recv_timeout = -1; static void test_ipc_txrx(void) { int32_t j; int32_t c = 0; size_t size; pid_t pid; pid = run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); size = QB_MIN(sizeof(struct qb_ipc_request_header), 64); for (j = 1; j < 19; j++) { size *= 2; if (size >= MAX_MSG_SIZE) break; if (send_and_check(IPC_MSG_REQ_TX_RX, size, recv_timeout, QB_TRUE) < 0) { break; } } if (turn_on_fc) { ck_assert_int_eq(fc_enabled, QB_TRUE); } qb_ipcc_disconnect(conn); stop_process(pid); } static void test_ipc_exit(void) { struct qb_ipc_request_header req_header; struct qb_ipc_response_header res_header; struct iovec iov[1]; int32_t res; int32_t c = 0; int32_t j = 0; pid_t pid; pid = run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); req_header.id = IPC_MSG_REQ_TX_RX; req_header.size = sizeof(struct qb_ipc_request_header); iov[0].iov_len = req_header.size; iov[0].iov_base = &req_header; res = qb_ipcc_sendv_recv(conn, iov, 1, &res_header, sizeof(struct qb_ipc_response_header), -1); ck_assert_int_eq(res, sizeof(struct qb_ipc_response_header)); /* kill the server */ stop_process(pid); /* * wait a bit for the server to die. 
*/ sleep(1); /* * this needs to free up the shared mem */ qb_ipcc_disconnect(conn); } START_TEST(test_ipc_exit_us) { qb_enter(); ipc_type = QB_IPC_SOCKET; ipc_name = __func__; recv_timeout = 5000; test_ipc_exit(); qb_leave(); } END_TEST START_TEST(test_ipc_exit_shm) { qb_enter(); ipc_type = QB_IPC_SHM; ipc_name = __func__; recv_timeout = 1000; test_ipc_exit(); qb_leave(); } END_TEST START_TEST(test_ipc_txrx_shm_tmo) { qb_enter(); turn_on_fc = QB_FALSE; ipc_type = QB_IPC_SHM; ipc_name = __func__; recv_timeout = 1000; test_ipc_txrx(); qb_leave(); } END_TEST START_TEST(test_ipc_txrx_shm_block) { qb_enter(); turn_on_fc = QB_FALSE; ipc_type = QB_IPC_SHM; ipc_name = __func__; recv_timeout = -1; test_ipc_txrx(); qb_leave(); } END_TEST START_TEST(test_ipc_fc_shm) { qb_enter(); turn_on_fc = QB_TRUE; ipc_type = QB_IPC_SHM; recv_timeout = 500; ipc_name = __func__; test_ipc_txrx(); qb_leave(); } END_TEST START_TEST(test_ipc_txrx_us_block) { qb_enter(); turn_on_fc = QB_FALSE; ipc_type = QB_IPC_SOCKET; ipc_name = __func__; recv_timeout = -1; test_ipc_txrx(); qb_leave(); } END_TEST START_TEST(test_ipc_txrx_us_tmo) { qb_enter(); turn_on_fc = QB_FALSE; ipc_type = QB_IPC_SOCKET; ipc_name = __func__; recv_timeout = 1000; test_ipc_txrx(); qb_leave(); } END_TEST START_TEST(test_ipc_fc_us) { qb_enter(); turn_on_fc = QB_TRUE; ipc_type = QB_IPC_SOCKET; recv_timeout = 500; ipc_name = __func__; test_ipc_txrx(); qb_leave(); } END_TEST struct my_res { struct qb_ipc_response_header hdr; char message[1024 * 1024]; }; static void test_ipc_dispatch(void) { int32_t j; int32_t c = 0; pid_t pid; int32_t size; pid = run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); size = QB_MIN(sizeof(struct qb_ipc_request_header), 64); for (j = 1; j < 19; j++) { size *= 2; if (size >= 
MAX_MSG_SIZE) break; if (send_and_check(IPC_MSG_REQ_DISPATCH, size, recv_timeout, QB_TRUE) < 0) { break; } } qb_ipcc_disconnect(conn); stop_process(pid); } START_TEST(test_ipc_disp_us) { qb_enter(); ipc_type = QB_IPC_SOCKET; ipc_name = __func__; test_ipc_dispatch(); qb_leave(); } END_TEST static int32_t events_received; static int32_t count_stress_events(int32_t fd, int32_t revents, void *data) { + struct { + struct qb_ipc_response_header hdr __attribute__ ((aligned(8))); + char data[GIANT_MSG_DATA_SIZE] __attribute__ ((aligned(8))); + uint32_t sent_msgs __attribute__ ((aligned(8))); + } __attribute__ ((aligned(8))) giant_event_recv; qb_loop_t *cl = (qb_loop_t*)data; int32_t res; res = qb_ipcc_event_recv(conn, &giant_event_recv, - sizeof(struct giant_event), + sizeof(giant_event_recv), -1); if (res > 0) { events_received++; if ((events_received % 1000) == 0) { qb_log(LOG_DEBUG, "RECV: %d stress events processed", events_received); - if (res != sizeof(struct giant_event)) { + if (res != sizeof(giant_event_recv)) { qb_log(LOG_DEBUG, "Unexpected recv size, expected %d got %d", - res, sizeof(struct giant_event)); + (int32_t)sizeof(giant_event_recv), res); } else if (giant_event_recv.sent_msgs != events_received) { qb_log(LOG_DEBUG, "Server event mismatch. 
Server thinks we got %d msgs, but we only received %d", giant_event_recv.sent_msgs, events_received); } } } else if (res != -EAGAIN) { qb_perror(LOG_DEBUG, "count_stress_events"); qb_loop_stop(cl); return -1; } if (events_received >= num_stress_events) { qb_loop_stop(cl); return -1; } return 0; } static int32_t count_bulk_events(int32_t fd, int32_t revents, void *data) { qb_loop_t *cl = (qb_loop_t*)data; struct qb_ipc_response_header res_header; int32_t res; res = qb_ipcc_event_recv(conn, &res_header, sizeof(struct qb_ipc_response_header), -1); if (res > 0) { events_received++; } if (events_received >= num_bulk_events) { qb_loop_stop(cl); return -1; } return 0; } static void test_ipc_bulk_events(void) { struct qb_ipc_request_header req_header; struct qb_ipc_response_header res_header; struct iovec iov[1]; int32_t c = 0; int32_t j = 0; pid_t pid; int32_t res; qb_loop_t *cl; int32_t fd; pid = run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); events_received = 0; cl = qb_loop_create(); res = qb_ipcc_fd_get(conn, &fd); ck_assert_int_eq(res, 0); res = qb_loop_poll_add(cl, QB_LOOP_MED, fd, POLLIN, cl, count_bulk_events); ck_assert_int_eq(res, 0); res = send_and_check(IPC_MSG_REQ_BULK_EVENTS, 0, recv_timeout, QB_TRUE); ck_assert_int_eq(res, sizeof(struct qb_ipc_response_header)); qb_loop_run(cl); ck_assert_int_eq(events_received, num_bulk_events); req_header.id = IPC_MSG_REQ_SERVER_FAIL; req_header.size = sizeof(struct qb_ipc_request_header); iov[0].iov_len = req_header.size; iov[0].iov_base = &req_header; res = qb_ipcc_sendv_recv(conn, iov, 1, &res_header, sizeof(struct qb_ipc_response_header), -1); if (res != -ECONNRESET && res != -ENOTCONN) { qb_log(LOG_ERR, "id:%d size:%d", res_header.id, res_header.size); ck_assert_int_eq(res, -ENOTCONN); } 
qb_ipcc_disconnect(conn); stop_process(pid); } static void test_ipc_stress_test(void) { struct qb_ipc_request_header req_header; struct qb_ipc_response_header res_header; struct iovec iov[1]; int32_t c = 0; int32_t j = 0; pid_t pid; int32_t res; qb_loop_t *cl; int32_t fd; pid = run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); qb_log(LOG_DEBUG, "Testing %d iterations of EVENT msg passing.", num_stress_events); events_received = 0; cl = qb_loop_create(); res = qb_ipcc_fd_get(conn, &fd); ck_assert_int_eq(res, 0); res = qb_loop_poll_add(cl, QB_LOOP_MED, fd, POLLIN, cl, count_stress_events); ck_assert_int_eq(res, 0); res = send_and_check(IPC_MSG_REQ_STRESS_EVENT, 0, recv_timeout, QB_TRUE); qb_loop_run(cl); ck_assert_int_eq(events_received, num_stress_events); req_header.id = IPC_MSG_REQ_SERVER_FAIL; req_header.size = sizeof(struct qb_ipc_request_header); iov[0].iov_len = req_header.size; iov[0].iov_base = &req_header; res = qb_ipcc_sendv_recv(conn, iov, 1, &res_header, sizeof(struct qb_ipc_response_header), -1); if (res != -ECONNRESET && res != -ENOTCONN) { qb_log(LOG_ERR, "id:%d size:%d", res_header.id, res_header.size); ck_assert_int_eq(res, -ENOTCONN); } qb_ipcc_disconnect(conn); stop_process(pid); } START_TEST(test_ipc_stress_test_us) { qb_enter(); send_event_on_created = QB_FALSE; ipc_type = QB_IPC_SOCKET; ipc_name = __func__; test_ipc_stress_test(); qb_leave(); } END_TEST START_TEST(test_ipc_bulk_events_us) { qb_enter(); send_event_on_created = QB_FALSE; ipc_type = QB_IPC_SOCKET; ipc_name = __func__; test_ipc_bulk_events(); qb_leave(); } END_TEST static void test_ipc_event_on_created(void) { int32_t c = 0; int32_t j = 0; pid_t pid; int32_t res; qb_loop_t *cl; int32_t fd; num_bulk_events = 1; pid = 
run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); events_received = 0; cl = qb_loop_create(); res = qb_ipcc_fd_get(conn, &fd); ck_assert_int_eq(res, 0); res = qb_loop_poll_add(cl, QB_LOOP_MED, fd, POLLIN, cl, count_bulk_events); ck_assert_int_eq(res, 0); qb_loop_run(cl); ck_assert_int_eq(events_received, num_bulk_events); qb_ipcc_disconnect(conn); stop_process(pid); } START_TEST(test_ipc_event_on_created_us) { qb_enter(); send_event_on_created = QB_TRUE; ipc_type = QB_IPC_SOCKET; ipc_name = __func__; test_ipc_event_on_created(); qb_leave(); } END_TEST static void test_ipc_disconnect_after_created(void) { struct qb_ipc_request_header req_header; struct qb_ipc_response_header res_header; struct iovec iov[1]; int32_t c = 0; int32_t j = 0; pid_t pid; int32_t res; pid = run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); ck_assert_int_eq(QB_TRUE, qb_ipcc_is_connected(conn)); req_header.id = IPC_MSG_REQ_SERVER_DISCONNECT; req_header.size = sizeof(struct qb_ipc_request_header); iov[0].iov_len = req_header.size; iov[0].iov_base = &req_header; res = qb_ipcc_sendv_recv(conn, iov, 1, &res_header, sizeof(struct qb_ipc_response_header), -1); /* * confirm we get -ENOTCONN or -ECONNRESET */ if (res != -ECONNRESET && res != -ENOTCONN) { qb_log(LOG_ERR, "id:%d size:%d", res_header.id, res_header.size); ck_assert_int_eq(res, -ENOTCONN); } ck_assert_int_eq(QB_FALSE, qb_ipcc_is_connected(conn)); qb_ipcc_disconnect(conn); stop_process(pid); } START_TEST(test_ipc_disconnect_after_created_us) { qb_enter(); disconnect_after_created = QB_TRUE; 
ipc_type = QB_IPC_SOCKET; ipc_name = __func__; test_ipc_disconnect_after_created(); qb_leave(); } END_TEST static void test_ipc_server_fail(void) { struct qb_ipc_request_header req_header; struct qb_ipc_response_header res_header; struct iovec iov[1]; int32_t res; int32_t j; int32_t c = 0; pid_t pid; pid = run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); /* * tell the server to exit */ req_header.id = IPC_MSG_REQ_SERVER_FAIL; req_header.size = sizeof(struct qb_ipc_request_header); iov[0].iov_len = req_header.size; iov[0].iov_base = &req_header; ck_assert_int_eq(QB_TRUE, qb_ipcc_is_connected(conn)); res = qb_ipcc_sendv_recv(conn, iov, 1, &res_header, sizeof(struct qb_ipc_response_header), -1); /* * confirm we get -ENOTCONN or ECONNRESET */ if (res != -ECONNRESET && res != -ENOTCONN) { qb_log(LOG_ERR, "id:%d size:%d", res_header.id, res_header.size); ck_assert_int_eq(res, -ENOTCONN); } ck_assert_int_eq(QB_FALSE, qb_ipcc_is_connected(conn)); qb_ipcc_disconnect(conn); stop_process(pid); } START_TEST(test_ipc_server_fail_soc) { qb_enter(); ipc_type = QB_IPC_SOCKET; ipc_name = __func__; test_ipc_server_fail(); qb_leave(); } END_TEST START_TEST(test_ipc_disp_shm) { qb_enter(); ipc_type = QB_IPC_SHM; ipc_name = __func__; test_ipc_dispatch(); qb_leave(); } END_TEST START_TEST(test_ipc_stress_test_shm) { qb_enter(); send_event_on_created = QB_FALSE; ipc_type = QB_IPC_SHM; ipc_name = __func__; test_ipc_stress_test(); qb_leave(); } END_TEST START_TEST(test_ipc_bulk_events_shm) { qb_enter(); ipc_type = QB_IPC_SHM; ipc_name = __func__; test_ipc_bulk_events(); qb_leave(); } END_TEST START_TEST(test_ipc_event_on_created_shm) { qb_enter(); send_event_on_created = QB_TRUE; ipc_type = QB_IPC_SHM; ipc_name = __func__; test_ipc_event_on_created(); qb_leave(); } 
END_TEST START_TEST(test_ipc_server_fail_shm) { qb_enter(); ipc_type = QB_IPC_SHM; ipc_name = __func__; test_ipc_server_fail(); qb_leave(); } END_TEST static void test_ipc_service_ref_count(void) { int32_t c = 0; int32_t j = 0; pid_t pid; reference_count_test = QB_TRUE; pid = run_function_in_new_process(run_ipc_server); fail_if(pid == -1); sleep(1); do { conn = qb_ipcc_connect(ipc_name, MAX_MSG_SIZE); if (conn == NULL) { j = waitpid(pid, NULL, WNOHANG); ck_assert_int_eq(j, 0); sleep(1); c++; } } while (conn == NULL && c < 5); fail_if(conn == NULL); sleep(5); qb_ipcc_disconnect(conn); stop_process(pid); } START_TEST(test_ipc_service_ref_count_shm) { qb_enter(); ipc_type = QB_IPC_SHM; ipc_name = __func__; test_ipc_service_ref_count(); qb_leave(); } END_TEST START_TEST(test_ipc_service_ref_count_us) { qb_enter(); ipc_type = QB_IPC_SOCKET; ipc_name = __func__; test_ipc_service_ref_count(); qb_leave(); } END_TEST +static void test_max_dgram_size(void) +{ + /* most implementations will not let you set a dgram buffer + * of 1 million bytes. This test verifies that we can detect + * the max dgram buffer size regardless, and that the value we detect + * is consistent. 
*/ + int32_t init; + int32_t i; + + qb_log_filter_ctl(QB_LOG_STDERR, QB_LOG_FILTER_REMOVE, + QB_LOG_FILTER_FILE, "*", LOG_TRACE); + + init = qb_ipcc_verify_dgram_max_msg_size(1000000); + fail_if(init <= 0); + for (i = 0; i < 100; i++) { + int try = qb_ipcc_verify_dgram_max_msg_size(1000000); + ck_assert_int_eq(init, try); + } + + qb_log_filter_ctl(QB_LOG_STDERR, QB_LOG_FILTER_ADD, + QB_LOG_FILTER_FILE, "*", LOG_TRACE); +} + +START_TEST(test_ipc_max_dgram_size) +{ + qb_enter(); + test_max_dgram_size(); + qb_leave(); +} +END_TEST + static Suite * make_shm_suite(void) { TCase *tc; Suite *s = suite_create("shm"); tc = tcase_create("ipc_server_fail_shm"); tcase_add_test(tc, test_ipc_server_fail_shm); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_txrx_shm_block"); tcase_add_test(tc, test_ipc_txrx_shm_block); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_txrx_shm_tmo"); tcase_add_test(tc, test_ipc_txrx_shm_tmo); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_fc_shm"); tcase_add_test(tc, test_ipc_fc_shm); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_dispatch_shm"); tcase_add_test(tc, test_ipc_disp_shm); tcase_set_timeout(tc, 16); suite_add_tcase(s, tc); tc = tcase_create("ipc_stress_test_shm"); tcase_add_test(tc, test_ipc_stress_test_shm); tcase_set_timeout(tc, 16); suite_add_tcase(s, tc); tc = tcase_create("ipc_bulk_events_shm"); tcase_add_test(tc, test_ipc_bulk_events_shm); tcase_set_timeout(tc, 16); suite_add_tcase(s, tc); tc = tcase_create("ipc_exit_shm"); tcase_add_test(tc, test_ipc_exit_shm); tcase_set_timeout(tc, 3); suite_add_tcase(s, tc); tc = tcase_create("ipc_event_on_created_shm"); tcase_add_test(tc, test_ipc_event_on_created_shm); suite_add_tcase(s, tc); tc = tcase_create("ipc_service_ref_count_shm"); tcase_add_test(tc, test_ipc_service_ref_count_shm); tcase_set_timeout(tc, 10); suite_add_tcase(s, tc); return s; } static Suite * make_soc_suite(void) { 
Suite *s = suite_create("socket"); TCase *tc; + tc = tcase_create("ipc_max_dgram_size"); + tcase_add_test(tc, test_ipc_max_dgram_size); + tcase_set_timeout(tc, 30); + suite_add_tcase(s, tc); + tc = tcase_create("ipc_server_fail_soc"); tcase_add_test(tc, test_ipc_server_fail_soc); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_txrx_us_block"); tcase_add_test(tc, test_ipc_txrx_us_block); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_txrx_us_tmo"); tcase_add_test(tc, test_ipc_txrx_us_tmo); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_fc_us"); tcase_add_test(tc, test_ipc_fc_us); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_exit_us"); tcase_add_test(tc, test_ipc_exit_us); tcase_set_timeout(tc, 6); suite_add_tcase(s, tc); tc = tcase_create("ipc_dispatch_us"); tcase_add_test(tc, test_ipc_disp_us); tcase_set_timeout(tc, 16); suite_add_tcase(s, tc); tc = tcase_create("ipc_stress_test_us"); tcase_add_test(tc, test_ipc_stress_test_us); tcase_set_timeout(tc, 60); suite_add_tcase(s, tc); tc = tcase_create("ipc_bulk_events_us"); tcase_add_test(tc, test_ipc_bulk_events_us); tcase_set_timeout(tc, 16); suite_add_tcase(s, tc); tc = tcase_create("ipc_event_on_created_us"); tcase_add_test(tc, test_ipc_event_on_created_us); suite_add_tcase(s, tc); tc = tcase_create("ipc_disconnect_after_created_us"); tcase_add_test(tc, test_ipc_disconnect_after_created_us); suite_add_tcase(s, tc); tc = tcase_create("ipc_service_ref_count_us"); tcase_add_test(tc, test_ipc_service_ref_count_us); tcase_set_timeout(tc, 10); suite_add_tcase(s, tc); return s; } int32_t main(void) { int32_t number_failed; SRunner *sr; Suite *s; int32_t do_shm_tests = QB_TRUE; #ifdef DISABLE_IPC_SHM do_shm_tests = QB_FALSE; #endif /* DISABLE_IPC_SHM */ s = make_soc_suite(); sr = srunner_create(s); if (do_shm_tests) { srunner_add_suite(sr, make_shm_suite()); } qb_log_init("check", LOG_USER, LOG_EMERG); 
qb_log_ctl(QB_LOG_SYSLOG, QB_LOG_CONF_ENABLED, QB_FALSE); qb_log_filter_ctl(QB_LOG_STDERR, QB_LOG_FILTER_ADD, QB_LOG_FILTER_FILE, "*", LOG_TRACE); qb_log_ctl(QB_LOG_STDERR, QB_LOG_CONF_ENABLED, QB_TRUE); qb_log_format_set(QB_LOG_STDERR, "lib/%f|%l| %b"); srunner_run_all(sr, CK_VERBOSE); number_failed = srunner_ntests_failed(sr); srunner_free(sr); return (number_failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE; }