diff --git a/lib/ipc_shm.c b/lib/ipc_shm.c index 8cffb02..ac836bd 100644 --- a/lib/ipc_shm.c +++ b/lib/ipc_shm.c @@ -1,368 +1,365 @@ /* * Copyright (C) 2010 Red Hat, Inc. * * Author: Angus Salkeld * * This file is part of libqb. * * libqb is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 2.1 of the License, or * (at your option) any later version. * * libqb is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with libqb. If not, see . */ #include "os_base.h" #include "ipc_int.h" #include "util_int.h" #include "ringbuffer_int.h" #include #include #include #include /* * utility functions * -------------------------------------------------------- */ /* * client functions * -------------------------------------------------------- */ static void qb_ipcc_shm_disconnect(struct qb_ipcc_connection *c) { qb_ipcc_us_sock_close(c->setup.u.us.sock); if (c->is_connected) { qb_rb_close(c->request.u.shm.rb); qb_rb_close(c->response.u.shm.rb); qb_rb_close(c->event.u.shm.rb); } else { qb_rb_force_close(c->request.u.shm.rb); qb_rb_force_close(c->response.u.shm.rb); qb_rb_force_close(c->event.u.shm.rb); } } static ssize_t qb_ipc_shm_send(struct qb_ipc_one_way *one_way, const void *msg_ptr, size_t msg_len) { return qb_rb_chunk_write(one_way->u.shm.rb, msg_ptr, msg_len); } static ssize_t qb_ipc_shm_sendv(struct qb_ipc_one_way *one_way, const struct iovec *iov, size_t iov_len) { char *dest; int32_t res = 0; int32_t total_size = 0; int32_t i; char *pt = NULL; if (one_way->u.shm.rb == NULL) { return -ENOTCONN; } for (i = 0; i < iov_len; i++) { total_size += iov[i].iov_len; } dest = qb_rb_chunk_alloc(one_way->u.shm.rb, total_size); if (dest == NULL) { return -errno; } pt = dest; for (i = 0; i < iov_len; i++) { memcpy(pt, iov[i].iov_base, iov[i].iov_len); pt += iov[i].iov_len; } res = qb_rb_chunk_commit(one_way->u.shm.rb, total_size); if (res < 0) { return res; } return total_size; } static ssize_t qb_ipc_shm_recv(struct qb_ipc_one_way *one_way, void *msg_ptr, size_t msg_len, int32_t ms_timeout) { if (one_way->u.shm.rb == NULL) { return -ENOTCONN; } return qb_rb_chunk_read(one_way->u.shm.rb, (void *)msg_ptr, msg_len, ms_timeout); } static ssize_t qb_ipc_shm_peek(struct qb_ipc_one_way *one_way, void **data_out, int32_t ms_timeout) { ssize_t rc; if (one_way->u.shm.rb == NULL) { return -ENOTCONN; } rc = qb_rb_chunk_peek(one_way->u.shm.rb, data_out, ms_timeout); if (rc == 0) { return -EAGAIN; } return rc; } static void qb_ipc_shm_reclaim(struct qb_ipc_one_way *one_way) { if (one_way->u.shm.rb != NULL) { qb_rb_chunk_reclaim(one_way->u.shm.rb); } } static void qb_ipc_shm_fc_set(struct qb_ipc_one_way *one_way, int32_t fc_enable) { int32_t *fc; fc = qb_rb_shared_user_data_get(one_way->u.shm.rb); qb_util_log(LOG_TRACE, "setting fc to %d", fc_enable); qb_atomic_int_set(fc, fc_enable); } static int32_t qb_ipc_shm_fc_get(struct qb_ipc_one_way *one_way) { int32_t *fc; int32_t rc = qb_rb_refcount_get(one_way->u.shm.rb); if (rc != 2) { return -ENOTCONN; } fc = qb_rb_shared_user_data_get(one_way->u.shm.rb); return qb_atomic_int_get(fc); } static ssize_t qb_ipc_shm_q_len_get(struct qb_ipc_one_way *one_way) { if (one_way->u.shm.rb == NULL) { 
return -ENOTCONN; } return qb_rb_chunks_used(one_way->u.shm.rb); } int32_t qb_ipcc_shm_connect(struct qb_ipcc_connection * c, struct qb_ipc_connection_response * response) { int32_t res = 0; c->funcs.send = qb_ipc_shm_send; c->funcs.sendv = qb_ipc_shm_sendv; c->funcs.recv = qb_ipc_shm_recv; c->funcs.fc_get = qb_ipc_shm_fc_get; c->funcs.disconnect = qb_ipcc_shm_disconnect; c->needs_sock_for_poll = QB_TRUE; if (strlen(c->name) > (NAME_MAX - 20)) { errno = EINVAL; return -errno; } c->request.u.shm.rb = qb_rb_open(response->request, c->request.max_msg_size, QB_RB_FLAG_SHARED_PROCESS, sizeof(int32_t)); if (c->request.u.shm.rb == NULL) { res = -errno; qb_util_perror(LOG_ERR, "qb_rb_open:REQUEST"); goto return_error; } c->response.u.shm.rb = qb_rb_open(response->response, c->response.max_msg_size, QB_RB_FLAG_SHARED_PROCESS, 0); if (c->response.u.shm.rb == NULL) { res = -errno; qb_util_perror(LOG_ERR, "qb_rb_open:RESPONSE"); goto cleanup_request; } c->event.u.shm.rb = qb_rb_open(response->event, c->response.max_msg_size, QB_RB_FLAG_SHARED_PROCESS, 0); if (c->event.u.shm.rb == NULL) { res = -errno; qb_util_perror(LOG_ERR, "qb_rb_open:EVENT"); goto cleanup_request_response; } return 0; cleanup_request_response: qb_rb_close(c->response.u.shm.rb); cleanup_request: qb_rb_close(c->request.u.shm.rb); return_error: errno = -res; qb_util_perror(LOG_ERR, "connection failed"); return res; } /* * service functions * -------------------------------------------------------- */ static void qb_ipcs_shm_disconnect(struct qb_ipcs_connection *c) { if (c->state == QB_IPCS_CONNECTION_ESTABLISHED || c->state == QB_IPCS_CONNECTION_ACTIVE) { if (c->setup.u.us.sock > 0) { qb_ipcc_us_sock_close(c->setup.u.us.sock); (void)c->service->poll_fns.dispatch_del(c->setup.u.us.sock); c->setup.u.us.sock = -1; - qb_ipcs_connection_unref(c); } } if (c->state == QB_IPCS_CONNECTION_SHUTTING_DOWN || c->state == QB_IPCS_CONNECTION_ACTIVE) { if (c->response.u.shm.rb) { qb_rb_close(c->response.u.shm.rb); c->response.u.shm.rb = NULL; } if (c->event.u.shm.rb) { qb_rb_close(c->event.u.shm.rb); c->event.u.shm.rb = NULL; } if (c->request.u.shm.rb) { qb_rb_close(c->request.u.shm.rb); c->request.u.shm.rb = NULL; } } } static int32_t qb_ipcs_shm_rb_open(struct qb_ipcs_connection *c, struct qb_ipc_one_way *ow, const char *rb_name) { int32_t res = 0; ow->u.shm.rb = qb_rb_open(rb_name, ow->max_msg_size, QB_RB_FLAG_CREATE | QB_RB_FLAG_SHARED_PROCESS, sizeof(int32_t)); if (ow->u.shm.rb == NULL) { res = -errno; qb_util_perror(LOG_ERR, "qb_rb_open:%s", rb_name); return res; } res = qb_rb_chown(ow->u.shm.rb, c->auth.uid, c->auth.gid); if (res != 0) { qb_util_perror(LOG_ERR, "qb_rb_chown:%s", rb_name); goto cleanup; } res = qb_rb_chmod(ow->u.shm.rb, c->auth.mode); if (res != 0) { qb_util_perror(LOG_ERR, "qb_rb_chmod:%s", rb_name); goto cleanup; } return res; cleanup: qb_rb_close(ow->u.shm.rb); return res; } static int32_t qb_ipcs_shm_connect(struct qb_ipcs_service *s, struct qb_ipcs_connection *c, struct qb_ipc_connection_response *r) { int32_t res; qb_util_log(LOG_DEBUG, "connecting to client [%d]", c->pid); snprintf(r->request, NAME_MAX, "%s-request-%s", s->name, c->description); snprintf(r->response, NAME_MAX, "%s-response-%s", s->name, c->description); snprintf(r->event, NAME_MAX, "%s-event-%s", s->name, c->description); res = qb_ipcs_shm_rb_open(c, &c->request, r->request); if (res != 0) { goto cleanup; } res = qb_ipcs_shm_rb_open(c, &c->response, r->response); if (res != 0) { goto cleanup_request; } res = qb_ipcs_shm_rb_open(c, &c->event, r->event); 
if (res != 0) { goto cleanup_request_response; } res = s->poll_fns.dispatch_add(s->poll_priority, c->setup.u.us.sock, POLLIN | POLLPRI | POLLNVAL, c, qb_ipcs_dispatch_connection_request); - if (res == 0) { - qb_ipcs_connection_ref(c); - } else { + if (res != 0) { qb_util_log(LOG_ERR, "Error adding socket to mainloop (%s).", c->description); goto cleanup_request_response; } r->hdr.error = 0; return 0; cleanup_request_response: qb_rb_close(c->request.u.shm.rb); cleanup_request: qb_rb_close(c->response.u.shm.rb); cleanup: r->hdr.error = res; errno = -res; qb_util_perror(LOG_ERR, "shm connection FAILED"); return res; } void qb_ipcs_shm_init(struct qb_ipcs_service *s) { s->funcs.connect = qb_ipcs_shm_connect; s->funcs.disconnect = qb_ipcs_shm_disconnect; s->funcs.recv = qb_ipc_shm_recv; s->funcs.peek = qb_ipc_shm_peek; s->funcs.reclaim = qb_ipc_shm_reclaim; s->funcs.send = qb_ipc_shm_send; s->funcs.sendv = qb_ipc_shm_sendv; s->funcs.fc_set = qb_ipc_shm_fc_set; s->funcs.q_len_get = qb_ipc_shm_q_len_get; s->needs_sock_for_poll = QB_TRUE; } diff --git a/lib/ipc_socket.c b/lib/ipc_socket.c index 492bea8..9a5962d 100644 --- a/lib/ipc_socket.c +++ b/lib/ipc_socket.c @@ -1,760 +1,755 @@ /* * Copyright (C) 2010,2013 Red Hat, Inc. * * Author: Angus Salkeld * * This file is part of libqb. * * libqb is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 2.1 of the License, or * (at your option) any later version. * * libqb is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with libqb. If not, see . 
*/ #include "os_base.h" #ifdef HAVE_SYS_UN_H #include #endif /* HAVE_SYS_UN_H */ #ifdef HAVE_SYS_MMAN_H #include #endif #include #include #include #include #include "util_int.h" #include "ipc_int.h" struct ipc_us_control { int32_t sent; int32_t flow_control; }; #define SHM_CONTROL_SIZE (3 * sizeof(struct ipc_us_control)) static void set_sock_addr(struct sockaddr_un *address, const char *socket_name) { memset(address, 0, sizeof(struct sockaddr_un)); address->sun_family = AF_UNIX; #ifdef HAVE_STRUCT_SOCKADDR_UN_SUN_LEN address->sun_len = QB_SUN_LEN(address); #endif #if defined(QB_LINUX) || defined(QB_CYGWIN) snprintf(address->sun_path + 1, UNIX_PATH_MAX - 1, "%s", socket_name); #else snprintf(address->sun_path, sizeof(address->sun_path), "%s/%s", SOCKETDIR, socket_name); #endif } static int32_t qb_ipc_dgram_sock_setup(const char *base_name, const char *service_name, int32_t * sock_pt) { int32_t request_fd; struct sockaddr_un local_address; int32_t res = 0; char sock_path[PATH_MAX]; request_fd = socket(PF_UNIX, SOCK_DGRAM, 0); if (request_fd == -1) { return -errno; } qb_socket_nosigpipe(request_fd); res = qb_sys_fd_nonblock_cloexec_set(request_fd); if (res < 0) { goto error_connect; } snprintf(sock_path, PATH_MAX, "%s-%s", base_name, service_name); set_sock_addr(&local_address, sock_path); res = bind(request_fd, (struct sockaddr *)&local_address, sizeof(local_address)); if (res < 0) { goto error_connect; } *sock_pt = request_fd; return 0; error_connect: close(request_fd); *sock_pt = -1; return res; } static int32_t set_sock_size(int sockfd, size_t max_msg_size) { int32_t rc; unsigned int optval; socklen_t optlen = sizeof(optval); rc = getsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, &optval, &optlen); qb_util_log(LOG_TRACE, "%d: getsockopt(%d, needed:%d) actual:%d", rc, sockfd, max_msg_size, optval); /* The optvat <= max_msg_size check is weird... * during testing it was discovered in some instances if the * default optval is exactly equal to our max_msg_size, we couldn't * actually send a message that large unless we explicilty set * it using setsockopt... there is no good explaination for this. Most * likely this is hitting some sort of "off by one" error in the kernel. 
*/ if (rc == 0 && optval <= max_msg_size) { optval = max_msg_size; optlen = sizeof(optval); rc = setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, &optval, optlen); } return rc; } static int32_t dgram_verify_msg_size(size_t max_msg_size) { int32_t rc = -1; int32_t sockets[2]; int32_t tries = 0; int32_t write_passed = 0; int32_t read_passed = 0; char buf[max_msg_size]; if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sockets) < 0) { goto cleanup_socks; } if (set_sock_size(sockets[0], max_msg_size) != 0) { goto cleanup_socks; } if (set_sock_size(sockets[1], max_msg_size) != 0) { goto cleanup_socks; } for (tries = 0; tries < 3; tries++) { if (write_passed == 0) { rc = write(sockets[1], buf, max_msg_size); if (rc < 0 && (errno == EAGAIN || errno == EINTR)) { continue; } else if (rc == max_msg_size) { write_passed = 1; } else { break; } } if (read_passed == 0) { rc = read(sockets[0], buf, max_msg_size); if (rc < 0 && (errno == EAGAIN || errno == EINTR)) { continue; } else if (rc == max_msg_size) { read_passed = 1; } else { break; } } if (read_passed && write_passed) { rc = 0; break; } } cleanup_socks: close(sockets[0]); close(sockets[1]); return rc; } int32_t qb_ipcc_verify_dgram_max_msg_size(size_t max_msg_size) { int32_t i; int32_t last = -1; if (dgram_verify_msg_size(max_msg_size) == 0) { return max_msg_size; } for (i = 1024; i < max_msg_size; i+=1024) { if (dgram_verify_msg_size(i) != 0) { break; } last = i; } return last; } /* * bind to "base_name-local_name" * connect to "base_name-remote_name" * output sock_pt */ static int32_t qb_ipc_dgram_sock_connect(const char *base_name, const char *local_name, const char *remote_name, int32_t max_msg_size, int32_t * sock_pt) { char sock_path[PATH_MAX]; struct sockaddr_un remote_address; int32_t res = qb_ipc_dgram_sock_setup(base_name, local_name, sock_pt); if (res < 0) { return res; } snprintf(sock_path, PATH_MAX, "%s-%s", base_name, remote_name); set_sock_addr(&remote_address, sock_path); if (connect(*sock_pt, (struct sockaddr *)&remote_address, QB_SUN_LEN(&remote_address)) == -1) { res = -errno; goto error_connect; } return set_sock_size(*sock_pt, max_msg_size); error_connect: close(*sock_pt); *sock_pt = -1; return res; } static int32_t _finish_connecting(struct qb_ipc_one_way *one_way) { struct sockaddr_un remote_address; int res; int error; int retry = 0; set_sock_addr(&remote_address, one_way->u.us.sock_name); /* this retry loop is here to help connecting when trying to send * an event right after connection setup. 
*/ do { errno = 0; res = connect(one_way->u.us.sock, (struct sockaddr *)&remote_address, QB_SUN_LEN(&remote_address)); if (res == -1) { error = -errno; qb_util_perror(LOG_DEBUG, "error calling connect()"); retry++; usleep(100000); } } while (res == -1 && retry < 10); if (res == -1) { return error; } free(one_way->u.us.sock_name); one_way->u.us.sock_name = NULL; return set_sock_size(one_way->u.us.sock, one_way->max_msg_size); } /* * client functions * -------------------------------------------------------- */ static void qb_ipcc_us_disconnect(struct qb_ipcc_connection *c) { munmap(c->request.u.us.shared_data, SHM_CONTROL_SIZE); unlink(c->request.u.us.shared_file_name); qb_ipcc_us_sock_close(c->event.u.us.sock); qb_ipcc_us_sock_close(c->request.u.us.sock); qb_ipcc_us_sock_close(c->setup.u.us.sock); } static ssize_t qb_ipc_socket_send(struct qb_ipc_one_way *one_way, const void *msg_ptr, size_t msg_len) { ssize_t rc = 0; struct ipc_us_control *ctl; ctl = (struct ipc_us_control *)one_way->u.us.shared_data; if (one_way->u.us.sock_name) { rc = _finish_connecting(one_way); if (rc < 0) { qb_util_log(LOG_ERR, "socket connect-on-send"); return rc; } } qb_sigpipe_ctl(QB_SIGPIPE_IGNORE); rc = send(one_way->u.us.sock, msg_ptr, msg_len, MSG_NOSIGNAL); if (rc == -1) { rc = -errno; if (errno != EAGAIN && errno != ENOBUFS) { qb_util_perror(LOG_DEBUG, "socket_send:send"); } } qb_sigpipe_ctl(QB_SIGPIPE_DEFAULT); if (ctl && rc == msg_len) { qb_atomic_int_inc(&ctl->sent); } return rc; } static ssize_t qb_ipc_socket_sendv(struct qb_ipc_one_way *one_way, const struct iovec *iov, size_t iov_len) { int32_t rc; struct ipc_us_control *ctl; ctl = (struct ipc_us_control *)one_way->u.us.shared_data; qb_sigpipe_ctl(QB_SIGPIPE_IGNORE); if (one_way->u.us.sock_name) { rc = _finish_connecting(one_way); if (rc < 0) { qb_util_perror(LOG_ERR, "socket connect-on-sendv"); return rc; } } rc = writev(one_way->u.us.sock, iov, iov_len); if (rc == -1) { rc = -errno; if (errno != EAGAIN && errno != ENOBUFS) { qb_util_perror(LOG_DEBUG, "socket_sendv:writev %d", one_way->u.us.sock); } } qb_sigpipe_ctl(QB_SIGPIPE_DEFAULT); if (ctl && rc > 0) { qb_atomic_int_inc(&ctl->sent); } return rc; } /* * recv a message of unknown size. 
*/ static ssize_t qb_ipc_us_recv_at_most(struct qb_ipc_one_way *one_way, void *msg, size_t len, int32_t timeout) { int32_t result; int32_t final_rc = 0; int32_t to_recv = 0; char *data = msg; struct ipc_us_control *ctl = NULL; int32_t time_waited = 0; int32_t time_to_wait = timeout; if (timeout == -1) { time_to_wait = 1000; } qb_sigpipe_ctl(QB_SIGPIPE_IGNORE); retry_peek: result = recv(one_way->u.us.sock, data, sizeof(struct qb_ipc_request_header), MSG_NOSIGNAL | MSG_PEEK); if (result == -1) { if (errno == EAGAIN && (time_waited < timeout || timeout == -1)) { result = qb_ipc_us_ready(one_way, NULL, time_to_wait, POLLIN); time_waited += time_to_wait; goto retry_peek; } else { return -errno; } } if (result >= sizeof(struct qb_ipc_request_header)) { struct qb_ipc_request_header *hdr = NULL; hdr = (struct qb_ipc_request_header *)msg; to_recv = hdr->size; } result = recv(one_way->u.us.sock, data, to_recv, MSG_NOSIGNAL | MSG_WAITALL); if (result == -1) { final_rc = -errno; goto cleanup_sigpipe; } else if (result == 0) { qb_util_log(LOG_DEBUG, "recv == 0 -> ENOTCONN"); final_rc = -ENOTCONN; goto cleanup_sigpipe; } final_rc = result; ctl = (struct ipc_us_control *)one_way->u.us.shared_data; if (ctl) { (void)qb_atomic_int_dec_and_test(&ctl->sent); } cleanup_sigpipe: qb_sigpipe_ctl(QB_SIGPIPE_DEFAULT); return final_rc; } static void qb_ipc_us_fc_set(struct qb_ipc_one_way *one_way, int32_t fc_enable) { struct ipc_us_control *ctl = (struct ipc_us_control *)one_way->u.us.shared_data; qb_util_log(LOG_TRACE, "setting fc to %d", fc_enable); qb_atomic_int_set(&ctl->flow_control, fc_enable); } static int32_t qb_ipc_us_fc_get(struct qb_ipc_one_way *one_way) { struct ipc_us_control *ctl = (struct ipc_us_control *)one_way->u.us.shared_data; return qb_atomic_int_get(&ctl->flow_control); } static ssize_t qb_ipc_us_q_len_get(struct qb_ipc_one_way *one_way) { struct ipc_us_control *ctl = (struct ipc_us_control *)one_way->u.us.shared_data; return qb_atomic_int_get(&ctl->sent); } int32_t qb_ipcc_us_connect(struct qb_ipcc_connection * c, struct qb_ipc_connection_response * r) { int32_t res; char path[PATH_MAX]; int32_t fd_hdr; char *shm_ptr; qb_atomic_init(); c->needs_sock_for_poll = QB_FALSE; c->funcs.send = qb_ipc_socket_send; c->funcs.sendv = qb_ipc_socket_sendv; c->funcs.recv = qb_ipc_us_recv_at_most; c->funcs.fc_get = qb_ipc_us_fc_get; c->funcs.disconnect = qb_ipcc_us_disconnect; fd_hdr = qb_sys_mmap_file_open(path, r->request, SHM_CONTROL_SIZE, O_RDWR); if (fd_hdr < 0) { res = fd_hdr; errno = -fd_hdr; qb_util_perror(LOG_ERR, "couldn't open file for mmap"); return res; } (void)strlcpy(c->request.u.us.shared_file_name, r->request, NAME_MAX); shm_ptr = mmap(0, SHM_CONTROL_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd_hdr, 0); if (shm_ptr == MAP_FAILED) { res = -errno; qb_util_perror(LOG_ERR, "couldn't create mmap for header"); goto cleanup_hdr; } c->request.u.us.shared_data = shm_ptr; c->response.u.us.shared_data = shm_ptr + sizeof(struct ipc_us_control); c->event.u.us.shared_data = shm_ptr + (2 * sizeof(struct ipc_us_control)); close(fd_hdr); fd_hdr = -1; res = qb_ipc_dgram_sock_connect(r->response, "response", "request", r->max_msg_size, &c->request.u.us.sock); if (res != 0) { goto cleanup_hdr; } c->response.u.us.sock = c->request.u.us.sock; res = qb_ipc_dgram_sock_connect(r->response, "event", "event-tx", r->max_msg_size, &c->event.u.us.sock); if (res != 0) { goto cleanup_hdr; } return 0; cleanup_hdr: if (fd_hdr >= 0) { close(fd_hdr); } close(c->event.u.us.sock); close(c->request.u.us.sock); unlink(r->request); 
munmap(c->request.u.us.shared_data, SHM_CONTROL_SIZE); return res; } /* * service functions * -------------------------------------------------------- */ static int32_t _sock_connection_liveliness(int32_t fd, int32_t revents, void *data) { struct qb_ipcs_connection *c = (struct qb_ipcs_connection *)data; qb_util_log(LOG_DEBUG, "LIVENESS: fd %d event %d conn (%s)", fd, revents, c->description); if (revents & POLLNVAL) { qb_util_log(LOG_DEBUG, "NVAL conn (%s)", c->description); qb_ipcs_disconnect(c); return -EINVAL; } if (revents & POLLHUP) { qb_util_log(LOG_DEBUG, "HUP conn (%s)", c->description); qb_ipcs_disconnect(c); return -ESHUTDOWN; } /* If we actually get POLLIN for some reason here, it most * certainly means EOF. Do a recv on the fd to detect eof and * then disconnect */ if (revents & POLLIN) { char buf[10]; int res; res = recv(fd, buf, sizeof(buf), MSG_DONTWAIT); if (res < 0 && errno != EAGAIN && errno != EWOULDBLOCK) { res = -errno; } else if (res == 0) { qb_util_log(LOG_DEBUG, "EOF conn (%s)", c->description); res = -ESHUTDOWN; } if (res < 0) { qb_ipcs_disconnect(c); return res; } } return 0; } static int32_t _sock_add_to_mainloop(struct qb_ipcs_connection *c) { int res; res = c->service->poll_fns.dispatch_add(c->service->poll_priority, c->request.u.us.sock, POLLIN | POLLPRI | POLLNVAL, c, qb_ipcs_dispatch_connection_request); if (res < 0) { qb_util_log(LOG_ERR, "Error adding socket to mainloop (%s).", c->description); return res; } - qb_ipcs_connection_ref(c); res = c->service->poll_fns.dispatch_add(c->service->poll_priority, c->setup.u.us.sock, POLLIN | POLLPRI | POLLNVAL, c, _sock_connection_liveliness); qb_util_log(LOG_DEBUG, "added %d to poll loop (liveness)", c->setup.u.us.sock); if (res < 0) { qb_util_perror(LOG_ERR, "Error adding setupfd to mainloop"); (void)c->service->poll_fns.dispatch_del(c->request.u.us.sock); return res; } - qb_ipcs_connection_ref(c); return res; } static void _sock_rm_from_mainloop(struct qb_ipcs_connection *c) { (void)c->service->poll_fns.dispatch_del(c->request.u.us.sock); - qb_ipcs_connection_unref(c); - (void)c->service->poll_fns.dispatch_del(c->setup.u.us.sock); - qb_ipcs_connection_unref(c); } static void qb_ipcs_us_disconnect(struct qb_ipcs_connection *c) { qb_enter(); if (c->state == QB_IPCS_CONNECTION_ESTABLISHED || c->state == QB_IPCS_CONNECTION_ACTIVE) { _sock_rm_from_mainloop(c); qb_ipcc_us_sock_close(c->setup.u.us.sock); qb_ipcc_us_sock_close(c->request.u.us.sock); qb_ipcc_us_sock_close(c->event.u.us.sock); } if (c->state == QB_IPCS_CONNECTION_SHUTTING_DOWN || c->state == QB_IPCS_CONNECTION_ACTIVE) { munmap(c->request.u.us.shared_data, SHM_CONTROL_SIZE); unlink(c->request.u.us.shared_file_name); } } static int32_t qb_ipcs_us_connect(struct qb_ipcs_service *s, struct qb_ipcs_connection *c, struct qb_ipc_connection_response *r) { char path[PATH_MAX]; int32_t fd_hdr; int32_t res = 0; struct ipc_us_control *ctl; char *shm_ptr; qb_util_log(LOG_DEBUG, "connecting to client (%s)", c->description); c->request.u.us.sock = c->setup.u.us.sock; c->response.u.us.sock = c->setup.u.us.sock; snprintf(r->request, NAME_MAX, "qb-%s-control-%s", s->name, c->description); snprintf(r->response, NAME_MAX, "qb-%s-%s", s->name, c->description); fd_hdr = qb_sys_mmap_file_open(path, r->request, SHM_CONTROL_SIZE, O_CREAT | O_TRUNC | O_RDWR); if (fd_hdr < 0) { res = fd_hdr; errno = -fd_hdr; qb_util_perror(LOG_ERR, "couldn't create file for mmap (%s)", c->description); return res; } (void)strlcpy(r->request, path, PATH_MAX); 
(void)strlcpy(c->request.u.us.shared_file_name, r->request, NAME_MAX); res = chown(r->request, c->auth.uid, c->auth.gid); if (res != 0) { /* ignore res, this is just for the compiler warnings. */ res = 0; } res = chmod(r->request, c->auth.mode); if (res != 0) { /* ignore res, this is just for the compiler warnings. */ res = 0; } shm_ptr = mmap(0, SHM_CONTROL_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd_hdr, 0); if (shm_ptr == MAP_FAILED) { res = -errno; qb_util_perror(LOG_ERR, "couldn't create mmap for header (%s)", c->description); goto cleanup_hdr; } c->request.u.us.shared_data = shm_ptr; c->response.u.us.shared_data = shm_ptr + sizeof(struct ipc_us_control); c->event.u.us.shared_data = shm_ptr + (2 * sizeof(struct ipc_us_control)); ctl = (struct ipc_us_control *)c->request.u.us.shared_data; ctl->sent = 0; ctl->flow_control = 0; ctl = (struct ipc_us_control *)c->response.u.us.shared_data; ctl->sent = 0; ctl->flow_control = 0; ctl = (struct ipc_us_control *)c->event.u.us.shared_data; ctl->sent = 0; ctl->flow_control = 0; close(fd_hdr); fd_hdr = -1; /* request channel */ res = qb_ipc_dgram_sock_setup(r->response, "request", &c->request.u.us.sock); if (res < 0) { goto cleanup_hdr; } c->setup.u.us.sock_name = NULL; c->request.u.us.sock_name = NULL; /* response channel */ c->response.u.us.sock = c->request.u.us.sock; snprintf(path, PATH_MAX, "%s-%s", r->response, "response"); c->response.u.us.sock_name = strdup(path); /* event channel */ res = qb_ipc_dgram_sock_setup(r->response, "event-tx", &c->event.u.us.sock); if (res < 0) { goto cleanup_hdr; } snprintf(path, PATH_MAX, "%s-%s", r->response, "event"); c->event.u.us.sock_name = strdup(path); res = _sock_add_to_mainloop(c); if (res < 0) { goto cleanup_hdr; } return res; cleanup_hdr: free(c->response.u.us.sock_name); free(c->event.u.us.sock_name); if (fd_hdr >= 0) { close(fd_hdr); } unlink(r->request); munmap(c->request.u.us.shared_data, SHM_CONTROL_SIZE); return res; } void qb_ipcs_us_init(struct qb_ipcs_service *s) { s->funcs.connect = qb_ipcs_us_connect; s->funcs.disconnect = qb_ipcs_us_disconnect; s->funcs.recv = qb_ipc_us_recv_at_most; s->funcs.peek = NULL; s->funcs.reclaim = NULL; s->funcs.send = qb_ipc_socket_send; s->funcs.sendv = qb_ipc_socket_sendv; s->funcs.fc_set = qb_ipc_us_fc_set; s->funcs.q_len_get = qb_ipc_us_q_len_get; s->needs_sock_for_poll = QB_FALSE; qb_atomic_init(); } diff --git a/lib/ipcs.c b/lib/ipcs.c index f21d093..96e04bc 100644 --- a/lib/ipcs.c +++ b/lib/ipcs.c @@ -1,964 +1,968 @@ /* * Copyright (C) 2010 Red Hat, Inc. * * Author: Angus Salkeld * * This file is part of libqb. * * libqb is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 2.1 of the License, or * (at your option) any later version. * * libqb is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with libqb. If not, see . 
*/ #include "os_base.h" #include "util_int.h" #include "ipc_int.h" #include #include #include static void qb_ipcs_flowcontrol_set(struct qb_ipcs_connection *c, int32_t fc_enable); static int32_t new_event_notification(struct qb_ipcs_connection * c); static QB_LIST_DECLARE(qb_ipc_services); qb_ipcs_service_t * qb_ipcs_create(const char *name, int32_t service_id, enum qb_ipc_type type, struct qb_ipcs_service_handlers *handlers) { struct qb_ipcs_service *s; s = calloc(1, sizeof(struct qb_ipcs_service)); if (s == NULL) { return NULL; } if (type == QB_IPC_NATIVE) { #ifdef DISABLE_IPC_SHM s->type = QB_IPC_SOCKET; #else s->type = QB_IPC_SHM; #endif /* DISABLE_IPC_SHM */ } else { s->type = type; } s->pid = getpid(); s->needs_sock_for_poll = QB_FALSE; s->poll_priority = QB_LOOP_MED; /* Initial alloc ref */ qb_ipcs_ref(s); s->service_id = service_id; (void)strlcpy(s->name, name, NAME_MAX); s->serv_fns.connection_accept = handlers->connection_accept; s->serv_fns.connection_created = handlers->connection_created; s->serv_fns.msg_process = handlers->msg_process; s->serv_fns.connection_closed = handlers->connection_closed; s->serv_fns.connection_destroyed = handlers->connection_destroyed; qb_list_init(&s->connections); qb_list_init(&s->list); qb_list_add(&s->list, &qb_ipc_services); return s; } void qb_ipcs_poll_handlers_set(struct qb_ipcs_service *s, struct qb_ipcs_poll_handlers *handlers) { s->poll_fns.job_add = handlers->job_add; s->poll_fns.dispatch_add = handlers->dispatch_add; s->poll_fns.dispatch_mod = handlers->dispatch_mod; s->poll_fns.dispatch_del = handlers->dispatch_del; } void qb_ipcs_service_context_set(qb_ipcs_service_t* s, void *context) { s->context = context; } void * qb_ipcs_service_context_get(qb_ipcs_service_t* s) { return s->context; } int32_t qb_ipcs_run(struct qb_ipcs_service *s) { int32_t res = 0; if (s->poll_fns.dispatch_add == NULL || s->poll_fns.dispatch_mod == NULL || s->poll_fns.dispatch_del == NULL) { res = -EINVAL; goto run_cleanup; } switch (s->type) { case QB_IPC_SOCKET: qb_ipcs_us_init((struct qb_ipcs_service *)s); break; case QB_IPC_SHM: #ifdef DISABLE_IPC_SHM res = -ENOTSUP; #else qb_ipcs_shm_init((struct qb_ipcs_service *)s); #endif /* DISABLE_IPC_SHM */ break; case QB_IPC_POSIX_MQ: case QB_IPC_SYSV_MQ: res = -ENOTSUP; break; default: res = -EINVAL; break; } if (res == 0) { res = qb_ipcs_us_publish(s); if (res < 0) { (void)qb_ipcs_us_withdraw(s); goto run_cleanup; } } run_cleanup: if (res < 0) { /* Failed to run services, removing initial alloc reference. 
*/ qb_ipcs_unref(s); } return res; } static int32_t _modify_dispatch_descriptor_(struct qb_ipcs_connection *c) { qb_ipcs_dispatch_mod_fn disp_mod = c->service->poll_fns.dispatch_mod; if (c->service->type == QB_IPC_SOCKET) { return disp_mod(c->service->poll_priority, c->event.u.us.sock, c->poll_events, c, qb_ipcs_dispatch_connection_request); } else { return disp_mod(c->service->poll_priority, c->setup.u.us.sock, c->poll_events, c, qb_ipcs_dispatch_connection_request); } return -EINVAL; } void qb_ipcs_request_rate_limit(struct qb_ipcs_service *s, enum qb_ipcs_rate_limit rl) { struct qb_ipcs_connection *c; enum qb_loop_priority old_p = s->poll_priority; struct qb_list_head *pos; struct qb_list_head *n; switch (rl) { case QB_IPCS_RATE_FAST: s->poll_priority = QB_LOOP_HIGH; break; case QB_IPCS_RATE_SLOW: case QB_IPCS_RATE_OFF: case QB_IPCS_RATE_OFF_2: s->poll_priority = QB_LOOP_LOW; break; default: case QB_IPCS_RATE_NORMAL: s->poll_priority = QB_LOOP_MED; break; } qb_list_for_each_safe(pos, n, &s->connections) { c = qb_list_entry(pos, struct qb_ipcs_connection, list); qb_ipcs_connection_ref(c); if (rl == QB_IPCS_RATE_OFF) { qb_ipcs_flowcontrol_set(c, 1); } else if (rl == QB_IPCS_RATE_OFF_2) { qb_ipcs_flowcontrol_set(c, 2); } else { qb_ipcs_flowcontrol_set(c, QB_FALSE); } if (old_p != s->poll_priority) { (void)_modify_dispatch_descriptor_(c); } qb_ipcs_connection_unref(c); } } void qb_ipcs_ref(struct qb_ipcs_service *s) { qb_atomic_int_inc(&s->ref_count); } void qb_ipcs_unref(struct qb_ipcs_service *s) { int32_t free_it; assert(s->ref_count > 0); free_it = qb_atomic_int_dec_and_test(&s->ref_count); if (free_it) { qb_util_log(LOG_DEBUG, "%s() - destroying", __func__); free(s); } } void qb_ipcs_destroy(struct qb_ipcs_service *s) { struct qb_ipcs_connection *c = NULL; struct qb_list_head *pos; struct qb_list_head *n; if (s == NULL) { return; } qb_list_for_each_safe(pos, n, &s->connections) { c = qb_list_entry(pos, struct qb_ipcs_connection, list); if (c == NULL) { continue; } qb_ipcs_disconnect(c); } (void)qb_ipcs_us_withdraw(s); /* service destroyed, remove initial alloc ref */ qb_ipcs_unref(s); } /* * connection API */ static struct qb_ipc_one_way * _event_sock_one_way_get(struct qb_ipcs_connection * c) { if (c->service->needs_sock_for_poll) { return &c->setup; } if (c->event.type == QB_IPC_SOCKET) { return &c->event; } return NULL; } static struct qb_ipc_one_way * _response_sock_one_way_get(struct qb_ipcs_connection * c) { if (c->service->needs_sock_for_poll) { return &c->setup; } if (c->response.type == QB_IPC_SOCKET) { return &c->response; } return NULL; } ssize_t qb_ipcs_response_send(struct qb_ipcs_connection *c, const void *data, size_t size) { ssize_t res; if (c == NULL) { return -EINVAL; } qb_ipcs_connection_ref(c); res = c->service->funcs.send(&c->response, data, size); if (res == size) { c->stats.responses++; } else if (res == -EAGAIN || res == -ETIMEDOUT) { struct qb_ipc_one_way *ow = _response_sock_one_way_get(c); if (ow) { ssize_t res2 = qb_ipc_us_ready(ow, &c->setup, 0, POLLOUT); if (res2 < 0) { res = res2; } } c->stats.send_retries++; } qb_ipcs_connection_unref(c); return res; } ssize_t qb_ipcs_response_sendv(struct qb_ipcs_connection * c, const struct iovec * iov, size_t iov_len) { ssize_t res; if (c == NULL) { return -EINVAL; } qb_ipcs_connection_ref(c); res = c->service->funcs.sendv(&c->response, iov, iov_len); if (res > 0) { c->stats.responses++; } else if (res == -EAGAIN || res == -ETIMEDOUT) { struct qb_ipc_one_way *ow = _response_sock_one_way_get(c); if (ow) { ssize_t res2 
= qb_ipc_us_ready(ow, &c->setup, 0, POLLOUT); if (res2 < 0) { res = res2; } } c->stats.send_retries++; } qb_ipcs_connection_unref(c); return res; } static int32_t resend_event_notifications(struct qb_ipcs_connection *c) { ssize_t res = 0; if (!c->service->needs_sock_for_poll) { return res; } if (c->outstanding_notifiers > 0) { res = qb_ipc_us_send(&c->setup, c->receive_buf, c->outstanding_notifiers); } if (res > 0) { c->outstanding_notifiers -= res; } assert(c->outstanding_notifiers >= 0); if (c->outstanding_notifiers == 0) { c->poll_events = POLLIN | POLLPRI | POLLNVAL; (void)_modify_dispatch_descriptor_(c); } return res; } static int32_t new_event_notification(struct qb_ipcs_connection * c) { ssize_t res = 0; if (!c->service->needs_sock_for_poll) { return res; } assert(c->outstanding_notifiers >= 0); if (c->outstanding_notifiers > 0) { c->outstanding_notifiers++; res = resend_event_notifications(c); } else { res = qb_ipc_us_send(&c->setup, &c->outstanding_notifiers, 1); if (res == -EAGAIN) { /* * notify the client later, when we can. */ c->outstanding_notifiers++; c->poll_events = POLLOUT | POLLIN | POLLPRI | POLLNVAL; (void)_modify_dispatch_descriptor_(c); } } return res; } ssize_t qb_ipcs_event_send(struct qb_ipcs_connection * c, const void *data, size_t size) { ssize_t res; ssize_t resn; if (c == NULL) { return -EINVAL; } else if (size > c->event.max_msg_size) { return -EMSGSIZE; } qb_ipcs_connection_ref(c); res = c->service->funcs.send(&c->event, data, size); if (res == size) { c->stats.events++; resn = new_event_notification(c); if (resn < 0 && resn != -EAGAIN && resn != -ENOBUFS) { errno = -resn; qb_util_perror(LOG_WARNING, "new_event_notification (%s)", c->description); res = resn; } } else if (res == -EAGAIN || res == -ETIMEDOUT) { struct qb_ipc_one_way *ow = _event_sock_one_way_get(c); if (c->outstanding_notifiers > 0) { resn = resend_event_notifications(c); } if (ow) { resn = qb_ipc_us_ready(ow, &c->setup, 0, POLLOUT); if (resn < 0) { res = resn; } } c->stats.send_retries++; } qb_ipcs_connection_unref(c); return res; } ssize_t qb_ipcs_event_sendv(struct qb_ipcs_connection * c, const struct iovec * iov, size_t iov_len) { ssize_t res; ssize_t resn; if (c == NULL) { return -EINVAL; } qb_ipcs_connection_ref(c); res = c->service->funcs.sendv(&c->event, iov, iov_len); if (res > 0) { c->stats.events++; resn = new_event_notification(c); if (resn < 0 && resn != -EAGAIN) { errno = -resn; qb_util_perror(LOG_WARNING, "new_event_notification (%s)", c->description); res = resn; } } else if (res == -EAGAIN || res == -ETIMEDOUT) { struct qb_ipc_one_way *ow = _event_sock_one_way_get(c); if (c->outstanding_notifiers > 0) { resn = resend_event_notifications(c); } if (ow) { resn = qb_ipc_us_ready(ow, &c->setup, 0, POLLOUT); if (resn < 0) { res = resn; } } c->stats.send_retries++; } qb_ipcs_connection_unref(c); return res; } qb_ipcs_connection_t * qb_ipcs_connection_first_get(struct qb_ipcs_service * s) { struct qb_ipcs_connection *c; if (qb_list_empty(&s->connections)) { return NULL; } c = qb_list_first_entry(&s->connections, struct qb_ipcs_connection, list); qb_ipcs_connection_ref(c); return c; } qb_ipcs_connection_t * qb_ipcs_connection_next_get(struct qb_ipcs_service * s, struct qb_ipcs_connection * current) { struct qb_ipcs_connection *c; if (current == NULL || qb_list_is_last(&current->list, &s->connections)) { return NULL; } c = qb_list_first_entry(&current->list, struct qb_ipcs_connection, list); qb_ipcs_connection_ref(c); return c; } int32_t qb_ipcs_service_id_get(struct qb_ipcs_connection * c) { if
(c == NULL) { return -EINVAL; } return c->service->service_id; } struct qb_ipcs_connection * qb_ipcs_connection_alloc(struct qb_ipcs_service *s) { struct qb_ipcs_connection *c = calloc(1, sizeof(struct qb_ipcs_connection)); if (c == NULL) { return NULL; } c->pid = 0; c->euid = -1; c->egid = -1; c->receive_buf = NULL; c->context = NULL; c->fc_enabled = QB_FALSE; c->state = QB_IPCS_CONNECTION_INACTIVE; c->poll_events = POLLIN | POLLPRI | POLLNVAL; c->setup.type = s->type; c->request.type = s->type; c->response.type = s->type; c->event.type = s->type; (void)strlcpy(c->description, "not set yet", CONNECTION_DESCRIPTION); /* initial alloc ref */ qb_ipcs_connection_ref(c); /* * The connection makes use of the service object. Give the connection * a reference to the service so we know the service can never be destroyed * until the connection is done with it. */ qb_ipcs_ref(s); c->service = s; qb_list_init(&c->list); return c; } void qb_ipcs_connection_ref(struct qb_ipcs_connection *c) { if (c) { qb_atomic_int_inc(&c->refcount); } } void qb_ipcs_connection_unref(struct qb_ipcs_connection *c) { int32_t free_it; if (c == NULL) { return; } if (c->refcount < 1) { qb_util_log(LOG_ERR, "ref:%d state:%d (%s)", c->refcount, c->state, c->description); assert(0); } free_it = qb_atomic_int_dec_and_test(&c->refcount); if (free_it) { qb_list_del(&c->list); if (c->service->serv_fns.connection_destroyed) { c->service->serv_fns.connection_destroyed(c); } c->service->funcs.disconnect(c); /* Let go of the connection's reference to the service */ qb_ipcs_unref(c->service); free(c->receive_buf); free(c); } } void qb_ipcs_disconnect(struct qb_ipcs_connection *c) { int32_t res = 0; qb_loop_job_dispatch_fn rerun_job; if (c == NULL) { return; } qb_util_log(LOG_DEBUG, "%s(%s) state:%d", __func__, c->description, c->state); if (c->state == QB_IPCS_CONNECTION_ACTIVE) { c->service->funcs.disconnect(c); c->state = QB_IPCS_CONNECTION_INACTIVE; c->service->stats.closed_connections++; + + /* This removes the initial alloc ref */ + qb_ipcs_connection_unref(c); + /* return early as it's an incomplete connection. */ return; } if (c->state == QB_IPCS_CONNECTION_ESTABLISHED) { c->service->funcs.disconnect(c); c->state = QB_IPCS_CONNECTION_SHUTTING_DOWN; c->service->stats.active_connections--; c->service->stats.closed_connections++; } if (c->state == QB_IPCS_CONNECTION_SHUTTING_DOWN) { int scheduled_retry = 0; res = 0; if (c->service->serv_fns.connection_closed) { res = c->service->serv_fns.connection_closed(c); } if (res != 0) { /* OK, so they want the connection_closed * function re-run */ rerun_job = (qb_loop_job_dispatch_fn) qb_ipcs_disconnect; res = c->service->poll_fns.job_add(QB_LOOP_LOW, c, rerun_job); if (res == 0) { /* this function is going to be called again. 
* so hold off on the unref */ scheduled_retry = 1; } } if (scheduled_retry == 0) { /* This removes the initial alloc ref */ qb_ipcs_connection_unref(c); } } } static void qb_ipcs_flowcontrol_set(struct qb_ipcs_connection *c, int32_t fc_enable) { if (c == NULL) { return; } if (c->fc_enabled != fc_enable) { c->service->funcs.fc_set(&c->request, fc_enable); c->fc_enabled = fc_enable; c->stats.flow_control_state = fc_enable; c->stats.flow_control_count++; } } static int32_t _process_request_(struct qb_ipcs_connection *c, int32_t ms_timeout) { int32_t res = 0; ssize_t size; struct qb_ipc_request_header *hdr; qb_ipcs_connection_ref(c); if (c->service->funcs.peek && c->service->funcs.reclaim) { size = c->service->funcs.peek(&c->request, (void **)&hdr, ms_timeout); } else { hdr = c->receive_buf; size = c->service->funcs.recv(&c->request, hdr, c->request.max_msg_size, ms_timeout); } if (size < 0) { if (size != -EAGAIN && size != -ETIMEDOUT) { qb_util_perror(LOG_DEBUG, "recv from client connection failed (%s)", c->description); } else { c->stats.recv_retries++; } res = size; goto cleanup; } else if (size == 0 || hdr->id == QB_IPC_MSG_DISCONNECT) { qb_util_log(LOG_DEBUG, "client requesting a disconnect (%s)", c->description); res = -ESHUTDOWN; goto cleanup; } else { c->stats.requests++; res = c->service->serv_fns.msg_process(c, hdr, hdr->size); /* 0 == good, negative == backoff */ if (res < 0) { res = -ENOBUFS; } else { res = size; } } if (c && c->service->funcs.peek && c->service->funcs.reclaim) { c->service->funcs.reclaim(&c->request); } cleanup: qb_ipcs_connection_unref(c); return res; } #define IPC_REQUEST_TIMEOUT 10 #define MAX_RECV_MSGS 50 static ssize_t _request_q_len_get(struct qb_ipcs_connection *c) { ssize_t q_len; if (c->service->funcs.q_len_get) { q_len = c->service->funcs.q_len_get(&c->request); if (q_len <= 0) { return q_len; } if (c->service->poll_priority == QB_LOOP_MED) { q_len = QB_MIN(q_len, 5); } else if (c->service->poll_priority == QB_LOOP_LOW) { q_len = 1; } else { q_len = QB_MIN(q_len, MAX_RECV_MSGS); } } else { q_len = 1; } return q_len; } int32_t qb_ipcs_dispatch_connection_request(int32_t fd, int32_t revents, void *data) { struct qb_ipcs_connection *c = (struct qb_ipcs_connection *)data; char bytes[MAX_RECV_MSGS]; int32_t res = 0; int32_t res2; int32_t recvd = 0; ssize_t avail; if (revents & POLLNVAL) { qb_util_log(LOG_DEBUG, "NVAL conn (%s)", c->description); res = -EINVAL; goto dispatch_cleanup; } if (revents & POLLHUP) { qb_util_log(LOG_DEBUG, "HUP conn (%s)", c->description); res = -ESHUTDOWN; goto dispatch_cleanup; } if (revents & POLLOUT) { /* try resend events now that fd can write */ res = resend_event_notifications(c); if (res < 0 && res != -EAGAIN) { errno = -res; qb_util_perror(LOG_WARNING, "resend_event_notifications (%s)", c->description); } /* nothing to read */ if ((revents & POLLIN) == 0) { res = 0; goto dispatch_cleanup; } } if (c->fc_enabled) { res = 0; goto dispatch_cleanup; } avail = _request_q_len_get(c); if (c->service->needs_sock_for_poll && avail == 0) { res2 = qb_ipc_us_recv(&c->setup, bytes, 1, 0); if (qb_ipc_us_sock_error_is_disconnected(res2)) { errno = -res2; qb_util_perror(LOG_WARNING, "conn (%s) disconnected", c->description); res = -ESHUTDOWN; goto dispatch_cleanup; } else { qb_util_log(LOG_WARNING, "conn (%s) Nothing in q but got POLLIN on fd:%d (res2:%d)", c->description, fd, res2); res = 0; goto dispatch_cleanup; } } do { res = _process_request_(c, IPC_REQUEST_TIMEOUT); if (res == -ESHUTDOWN) { goto dispatch_cleanup; } if (res > 0 || res 
== -ENOBUFS || res == -EINVAL) { recvd++; } if (res > 0) { avail--; } } while (avail > 0 && res > 0 && !c->fc_enabled); if (c->service->needs_sock_for_poll && recvd > 0) { res2 = qb_ipc_us_recv(&c->setup, bytes, recvd, -1); if (qb_ipc_us_sock_error_is_disconnected(res2)) { errno = -res2; qb_util_perror(LOG_ERR, "error receiving from setup sock (%s)", c->description); res = -ESHUTDOWN; goto dispatch_cleanup; } } res = QB_MIN(0, res); if (res == -EAGAIN || res == -ETIMEDOUT || res == -ENOBUFS) { res = 0; } if (res != 0) { if (res != -ENOTCONN) { /* * Abnormal state (ENOTCONN is normal shutdown). */ errno = -res; qb_util_perror(LOG_ERR, "request returned error (%s)", c->description); } } dispatch_cleanup: if (res != 0) { qb_ipcs_disconnect(c); } return res; } void qb_ipcs_context_set(struct qb_ipcs_connection *c, void *context) { if (c == NULL) { return; } c->context = context; } void * qb_ipcs_context_get(struct qb_ipcs_connection *c) { if (c == NULL) { return NULL; } return c->context; } void * qb_ipcs_connection_service_context_get(qb_ipcs_connection_t *c) { if (c == NULL || c->service == NULL) { return NULL; } return c->service->context; } int32_t qb_ipcs_connection_stats_get(qb_ipcs_connection_t * c, struct qb_ipcs_connection_stats * stats, int32_t clear_after_read) { if (c == NULL) { return -EINVAL; } memcpy(stats, &c->stats, sizeof(struct qb_ipcs_connection_stats)); if (clear_after_read) { memset(&c->stats, 0, sizeof(struct qb_ipcs_connection_stats_2)); c->stats.client_pid = c->pid; } return 0; } struct qb_ipcs_connection_stats_2* qb_ipcs_connection_stats_get_2(qb_ipcs_connection_t *c, int32_t clear_after_read) { struct qb_ipcs_connection_stats_2 * stats; if (c == NULL) { errno = EINVAL; return NULL; } stats = calloc(1, sizeof(struct qb_ipcs_connection_stats_2)); if (stats == NULL) { return NULL; } memcpy(stats, &c->stats, sizeof(struct qb_ipcs_connection_stats_2)); if (c->service->funcs.q_len_get) { stats->event_q_length = c->service->funcs.q_len_get(&c->event); } else { stats->event_q_length = 0; } if (clear_after_read) { memset(&c->stats, 0, sizeof(struct qb_ipcs_connection_stats_2)); c->stats.client_pid = c->pid; } return stats; } int32_t qb_ipcs_stats_get(struct qb_ipcs_service * s, struct qb_ipcs_stats * stats, int32_t clear_after_read) { if (s == NULL) { return -EINVAL; } memcpy(stats, &s->stats, sizeof(struct qb_ipcs_stats)); if (clear_after_read) { memset(&s->stats, 0, sizeof(struct qb_ipcs_stats)); } return 0; } void qb_ipcs_connection_auth_set(qb_ipcs_connection_t *c, uid_t uid, gid_t gid, mode_t mode) { if (c) { c->auth.uid = uid; c->auth.gid = gid; c->auth.mode = mode; } } int32_t qb_ipcs_connection_get_buffer_size(qb_ipcs_connection_t *c) { if (c == NULL) { return -EINVAL; } /* request, response, and event shoud all have the same * buffer size allocated. It doesn't matter which we return * here. */ return c->response.max_msg_size; } void qb_ipcs_enforce_buffer_size(qb_ipcs_service_t *s, uint32_t buf_size) { if (s == NULL) { return; } s->max_buffer_size = buf_size; }
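
Reviewer note (not part of the patch): for context on the server-side API these files implement, here is a minimal usage sketch. It is illustrative only, assuming the qb_loop API from <qb/qbloop.h>; the service name "example", the service id 0, the s_* globals and wrapper names, and the echo behaviour are made up for the example. The dispatch_add/dispatch_mod/dispatch_del hooks shown here are the points where a connection is tied to the application's main loop, which is the code path the reference-counting changes above touch.

/* Illustrative server sketch (hypothetical, for review context only). */
#include <sys/types.h>
#include <qb/qbloop.h>
#include <qb/qbipcs.h>

static qb_loop_t *s_loop;
static qb_ipcs_service_t *s_service;

/* Glue: forward libqb's ipcs poll hooks to a qb_loop instance. */
static int32_t s_job_add(enum qb_loop_priority p, void *data,
			 qb_loop_job_dispatch_fn fn)
{
	return qb_loop_job_add(s_loop, p, data, fn);
}

static int32_t s_dispatch_add(enum qb_loop_priority p, int32_t fd, int32_t evts,
			      void *data, qb_ipcs_dispatch_fn_t fn)
{
	return qb_loop_poll_add(s_loop, p, fd, evts, data, fn);
}

static int32_t s_dispatch_mod(enum qb_loop_priority p, int32_t fd, int32_t evts,
			      void *data, qb_ipcs_dispatch_fn_t fn)
{
	return qb_loop_poll_mod(s_loop, p, fd, evts, data, fn);
}

static int32_t s_dispatch_del(int32_t fd)
{
	return qb_loop_poll_del(s_loop, fd);
}

static int32_t s_connection_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
	(void)c; (void)uid; (void)gid;
	return 0;	/* allow any local user in this sketch */
}

static int32_t s_msg_process(qb_ipcs_connection_t *c, void *data, size_t size)
{
	/* Echo the request back as the response; a real service would
	 * inspect the qb_ipc_request_header and build a proper reply. */
	ssize_t rc = qb_ipcs_response_send(c, data, size);
	return (rc < 0) ? (int32_t)rc : 0;	/* negative asks ipcs to back off */
}

int main(void)
{
	struct qb_ipcs_service_handlers sh = {
		.connection_accept = s_connection_accept,
		.connection_created = NULL,
		.msg_process = s_msg_process,
		.connection_closed = NULL,
		.connection_destroyed = NULL,
	};
	struct qb_ipcs_poll_handlers ph = {
		.job_add = s_job_add,
		.dispatch_add = s_dispatch_add,
		.dispatch_mod = s_dispatch_mod,
		.dispatch_del = s_dispatch_del,
	};

	s_loop = qb_loop_create();
	s_service = qb_ipcs_create("example", 0, QB_IPC_NATIVE, &sh);
	if (s_loop == NULL || s_service == NULL) {
		return 1;
	}
	qb_ipcs_poll_handlers_set(s_service, &ph);
	if (qb_ipcs_run(s_service) != 0) {
		return 1;
	}
	return qb_loop_run(s_loop);
}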