diff --git a/libknet/compat.c b/libknet/compat.c
index e808f332..2e73c9fc 100644
--- a/libknet/compat.c
+++ b/libknet/compat.c
@@ -1,114 +1,114 @@
 /*
  * Copyright (C) 2016-2019 Red Hat, Inc. All rights reserved.
  *
  * Author: Jan Friesse
  *
  * This software licensed under LGPL-2.0+
  */

 #include "config.h"

 #include
 #include
 #include

 #include "compat.h"

 #ifndef HAVE_SYS_EPOLL_H
 #ifdef HAVE_KEVENT
 /* for FreeBSD which has kevent instead of epoll */
 #include
 #include
 #include
-#include
+#include

 static int32_t _poll_to_filter_(int32_t event)
 {
 	int32_t out = 0;

 	if (event & POLLIN)
 		out |= EVFILT_READ;
 	if (event & POLLOUT)
 		out |= EVFILT_WRITE;

 	return out;
 }

 int epoll_create(int size)
 {
 	return kqueue();
 }

 int epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
 {
 	int ret = 0;
 	struct kevent ke;
 	short filters = _poll_to_filter_(event->events);

 	switch (op) {
 		/* The kevent man page says that EV_ADD also does MOD */
 		case EPOLL_CTL_ADD:
 		case EPOLL_CTL_MOD:
 			EV_SET(&ke, fd, filters, EV_ADD | EV_ENABLE, 0, 0, event->data.ptr);
 			break;
 		case EPOLL_CTL_DEL:
 			EV_SET(&ke, fd, filters, EV_DELETE, 0, 0, event->data.ptr);
 			break;
 		default:
 			errno = EINVAL;
 			return -1;
 	}

 	ret = kevent(epfd, &ke, 1, NULL, 0, NULL);
 	return ret;
 }

 int epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout_ms)
 {
 	struct kevent kevents[maxevents];
 	struct timespec timeout = { 0, 0 };
 	struct timespec *timeout_ptr = &timeout;
 	uint32_t revents;
 	int event_count;
 	int i;
 	int returned_events;

 	if (timeout_ms != -1) {
 		timeout.tv_sec = timeout_ms/1000;
 		timeout.tv_nsec += (timeout_ms % 1000) * 1000000ULL;
 	} else {
 		timeout_ptr = NULL;
 	}

 	event_count = kevent(epfd, NULL, 0, kevents, maxevents, timeout_ptr);
 	if (event_count == -1) {
 		return -1;
 	}

 	returned_events = 0;
 	for (i = 0; i < event_count; i++) {
 		revents = 0;
 		if (kevents[i].flags & EV_ERROR) {
 			revents |= POLLERR;
 		}
 		if (kevents[i].flags & EV_EOF) {
 			revents |= POLLHUP;
 		}
 		if (kevents[i].filter == EVFILT_READ) {
 			revents |= POLLIN;
 		}
 		if (kevents[i].filter == EVFILT_WRITE) {
 			revents |= POLLOUT;
 		}

 		events[returned_events].events = revents;
 		events[returned_events].data.ptr = kevents[i].udata;
 		returned_events++;
 	}

 	return returned_events;
 }

 #endif /* HAVE_KEVENT */
 #endif /* HAVE_SYS_EPOLL_H */
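For context: on FreeBSD the three wrappers above emulate just enough of the Linux epoll API on top of kqueue/kevent for libknet's internal use (one opaque data pointer per fd, poll-style event flags). The sketch below is a hypothetical standalone caller, not part of this patch; it assumes a Linux-style sys/epoll.h (on FreeBSD, libknet's compat.h is what provides these symbols instead) and an illustrative one-second timeout.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/epoll.h>	/* hypothetical Linux caller; the shim above stands in for this on FreeBSD */

int main(void)
{
	struct epoll_event ev, events[8];
	int epfd, i, nfds;

	epfd = epoll_create(8);		/* the size hint is ignored by the kqueue-backed wrapper */
	if (epfd < 0) {
		return 1;
	}

	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;		/* read readiness (POLLIN); the wrapper maps it to EVFILT_READ */
	ev.data.ptr = (void *)"stdin";	/* opaque cookie, carried via kevent udata on FreeBSD */

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0) {
		close(epfd);
		return 1;
	}

	nfds = epoll_wait(epfd, events, 8, 1000);	/* 1000ms, converted to a timespec by the wrapper */
	for (i = 0; i < nfds; i++) {
		printf("events 0x%x on %s\n", (unsigned)events[i].events, (const char *)events[i].data.ptr);
	}

	close(epfd);
	return 0;
}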
diff --git a/libknet/crypto.c b/libknet/crypto.c
index afa4f88c..2c4d5f5c 100644
--- a/libknet/crypto.c
+++ b/libknet/crypto.c
@@ -1,235 +1,235 @@
 /*
  * Copyright (C) 2012-2019 Red Hat, Inc. All rights reserved.
  *
  * Author: Fabio M. Di Nitto
  *
  * This software licensed under LGPL-2.0+
  */

 #include "config.h"

-#include
+#include
 #include
 #include
 #include
 #include

 #include "crypto.h"
 #include "crypto_model.h"
 #include "internals.h"
 #include "logging.h"
 #include "common.h"

 /*
  * internal module switch data
  */

 static crypto_model_t crypto_modules_cmds[] = {
 	{ "nss", WITH_CRYPTO_NSS, 0, NULL },
 	{ "openssl", WITH_CRYPTO_OPENSSL, 0, NULL },
 	{ NULL, 0, 0, NULL }
 };

 static int crypto_get_model(const char *model)
 {
 	int idx = 0;

 	while (crypto_modules_cmds[idx].model_name != NULL) {
 		if (!strcmp(crypto_modules_cmds[idx].model_name, model))
 			return idx;
 		idx++;
 	}
 	return -1;
 }

 /*
  * exported API
  */

 int crypto_encrypt_and_sign (
 	knet_handle_t knet_h,
 	const unsigned char *buf_in,
 	const ssize_t buf_in_len,
 	unsigned char *buf_out,
 	ssize_t *buf_out_len)
 {
 	return crypto_modules_cmds[knet_h->crypto_instance->model].ops->crypt(knet_h, buf_in, buf_in_len, buf_out, buf_out_len);
 }

 int crypto_encrypt_and_signv (
 	knet_handle_t knet_h,
 	const struct iovec *iov_in,
 	int iovcnt_in,
 	unsigned char *buf_out,
 	ssize_t *buf_out_len)
 {
 	return crypto_modules_cmds[knet_h->crypto_instance->model].ops->cryptv(knet_h, iov_in, iovcnt_in, buf_out, buf_out_len);
 }

 int crypto_authenticate_and_decrypt (
 	knet_handle_t knet_h,
 	const unsigned char *buf_in,
 	const ssize_t buf_in_len,
 	unsigned char *buf_out,
 	ssize_t *buf_out_len)
 {
 	return crypto_modules_cmds[knet_h->crypto_instance->model].ops->decrypt(knet_h, buf_in, buf_in_len, buf_out, buf_out_len);
 }

 int crypto_init(
 	knet_handle_t knet_h,
 	struct knet_handle_crypto_cfg *knet_handle_crypto_cfg)
 {
 	int err = 0, savederrno = 0;
 	int model = 0;
 	struct crypto_instance *current = NULL, *new = NULL;

 	current = knet_h->crypto_instance;

 	model = crypto_get_model(knet_handle_crypto_cfg->crypto_model);
 	if (model < 0) {
 		log_err(knet_h, KNET_SUB_CRYPTO, "model %s not supported", knet_handle_crypto_cfg->crypto_model);
 		return -1;
 	}

 	if (crypto_modules_cmds[model].built_in == 0) {
 		log_err(knet_h, KNET_SUB_CRYPTO, "this version of libknet was built without %s support. Please contact your vendor or fix the build.", knet_handle_crypto_cfg->crypto_model);
 		return -1;
 	}

 	savederrno = pthread_rwlock_wrlock(&shlib_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_CRYPTO, "Unable to get write lock: %s", strerror(savederrno));
 		return -1;
 	}

 	if (!crypto_modules_cmds[model].loaded) {
 		crypto_modules_cmds[model].ops = load_module (knet_h, "crypto", crypto_modules_cmds[model].model_name);
 		if (!crypto_modules_cmds[model].ops) {
 			savederrno = errno;
 			err = -1;
 			log_err(knet_h, KNET_SUB_CRYPTO, "Unable to load %s lib", crypto_modules_cmds[model].model_name);
 			goto out;
 		}
 		if (crypto_modules_cmds[model].ops->abi_ver != KNET_CRYPTO_MODEL_ABI) {
 			savederrno = EINVAL;
 			err = -1;
 			log_err(knet_h, KNET_SUB_CRYPTO,
 				"ABI mismatch loading module %s. knet ver: %d, module ver: %d",
 				crypto_modules_cmds[model].model_name, KNET_CRYPTO_MODEL_ABI,
 				crypto_modules_cmds[model].ops->abi_ver);
 			goto out;
 		}
 		crypto_modules_cmds[model].loaded = 1;
 	}

 	log_debug(knet_h, KNET_SUB_CRYPTO,
 		  "Initizializing crypto module [%s/%s/%s]",
 		  knet_handle_crypto_cfg->crypto_model,
 		  knet_handle_crypto_cfg->crypto_cipher_type,
 		  knet_handle_crypto_cfg->crypto_hash_type);

 	new = malloc(sizeof(struct crypto_instance));
 	if (!new) {
 		savederrno = ENOMEM;
 		err = -1;
 		log_err(knet_h, KNET_SUB_CRYPTO, "Unable to allocate memory for crypto instance");
 		goto out;
 	}

 	/*
 	 * if crypto_modules_cmds.ops->init fails, it is expected that
 	 * it will clean everything by itself.
 	 * crypto_modules_cmds.ops->fini is not invoked on error.
 	 */
 	new->model = model;
 	if (crypto_modules_cmds[model].ops->init(knet_h, new, knet_handle_crypto_cfg)) {
 		savederrno = errno;
 		err = -1;
 		goto out;
 	}

 out:
 	if (!err) {
 		knet_h->crypto_instance = new;
 		knet_h->sec_block_size = new->sec_block_size;
 		knet_h->sec_hash_size = new->sec_hash_size;
 		knet_h->sec_salt_size = new->sec_salt_size;

 		log_debug(knet_h, KNET_SUB_CRYPTO, "Hash size: %zu salt size: %zu block size: %zu",
 			  knet_h->sec_hash_size,
 			  knet_h->sec_salt_size,
 			  knet_h->sec_block_size);

 		if (current) {
 			if (crypto_modules_cmds[current->model].ops->fini != NULL) {
 				crypto_modules_cmds[current->model].ops->fini(knet_h, current);
 			}
 			free(current);
 		}
 	} else {
 		if (new) {
 			free(new);
 		}
 	}

 	pthread_rwlock_unlock(&shlib_rwlock);

 	errno = err ? savederrno : 0;
 	return err;
 }

 void crypto_fini(
 	knet_handle_t knet_h)
 {
 	int savederrno = 0;

 	savederrno = pthread_rwlock_wrlock(&shlib_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_CRYPTO, "Unable to get write lock: %s", strerror(savederrno));
 		return;
 	}

 	if (knet_h->crypto_instance) {
 		if (crypto_modules_cmds[knet_h->crypto_instance->model].ops->fini != NULL) {
 			crypto_modules_cmds[knet_h->crypto_instance->model].ops->fini(knet_h, knet_h->crypto_instance);
 		}
 		free(knet_h->crypto_instance);
 		knet_h->sec_block_size = 0;
 		knet_h->sec_hash_size = 0;
 		knet_h->sec_salt_size = 0;
 		knet_h->crypto_instance = NULL;
 	}

 	pthread_rwlock_unlock(&shlib_rwlock);

 	return;
 }

 int knet_get_crypto_list(struct knet_crypto_info *crypto_list, size_t *crypto_list_entries)
 {
 	int err = 0;
 	int idx = 0;
 	int outidx = 0;

 	if (!crypto_list_entries) {
 		errno = EINVAL;
 		return -1;
 	}

 	while (crypto_modules_cmds[idx].model_name != NULL) {
 		if (crypto_modules_cmds[idx].built_in) {
 			if (crypto_list) {
 				crypto_list[outidx].name = crypto_modules_cmds[idx].model_name;
 			}
 			outidx++;
 		}
 		idx++;
 	}
 	*crypto_list_entries = outidx;

 	if (!err)
 		errno = 0;
 	return err;
 }
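knet_get_crypto_list() follows the two-call pattern visible in the loop above: with a NULL crypto_list it only counts the built-in models, and with a caller-supplied array it also fills in the model names. Below is a minimal sketch of a hypothetical caller, not part of this patch; it assumes the public libknet.h header and uses only the .name member of struct knet_crypto_info, which is the only field touched by the code above.

#include <stdio.h>
#include <stdlib.h>
#include <libknet.h>

int main(void)
{
	struct knet_crypto_info *list;
	size_t i, n = 0;

	/* first pass: NULL list, just learn how many models were built in */
	if (knet_get_crypto_list(NULL, &n) < 0) {
		perror("knet_get_crypto_list");
		return 1;
	}

	list = calloc(n, sizeof(*list));
	if (!list && n > 0) {
		return 1;
	}

	/* second pass: same call, this time with a buffer large enough for n entries */
	if (knet_get_crypto_list(list, &n) < 0) {
		perror("knet_get_crypto_list");
		free(list);
		return 1;
	}

	for (i = 0; i < n; i++) {
		printf("available crypto model: %s\n", list[i].name);
	}

	free(list);
	return 0;
}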
diff --git a/libknet/onwire.c b/libknet/onwire.c
index 143ac4b7..e3fd293b 100644
--- a/libknet/onwire.c
+++ b/libknet/onwire.c
@@ -1,127 +1,127 @@
 /*
  * Copyright (C) 2019 Red Hat, Inc. All rights reserved.
  *
  * Author: Fabio M. Di Nitto
  *
  * This software licensed under LGPL-2.0+
  */

 #include "config.h"

-#include
+#include
 #include
 #include

 #include "crypto.h"
 #include "internals.h"
 #include "logging.h"
 #include "common.h"
 #include "transport_udp.h"
 #include "transport_sctp.h"

 /*
  * unencrypted packet looks like:
  *
  * | ip | protocol | knet_header | unencrypted data |
  * | onwire_len |
  * | proto_overhead |
  * | data_len |
  * | app MTU |
  *
  * encrypted packet looks like (not to scale):
  *
  * | ip | protocol | salt | crypto(knet_header | data) | crypto_data_pad | hash |
  * | onwire_len |
  * | proto_overhead |
  * | data_len |
  * | app MTU |
  *
  * knet_h->sec_block_size is >= 0 if encryption will pad the data
  * knet_h->sec_salt_size is >= 0 if encryption is enabled
  * knet_h->sec_hash_size is >= 0 if signing is enabled
  */

 /*
  * this function takes in the data that we would like to send
  * and tells us the outgoing onwire data size with crypto and
  * all the headers adjustment.
  * calling thread needs to account for protocol overhead.
  */

 size_t calc_data_outlen(knet_handle_t knet_h, size_t inlen)
 {
 	size_t outlen = inlen, pad_len = 0;

 	if (knet_h->sec_block_size) {
 		/*
 		 * if the crypto mechanism requires padding, calculate the padding
 		 * and add it back to outlen because that's what the crypto layer
 		 * would do.
 		 */
 		pad_len = knet_h->sec_block_size - (outlen % knet_h->sec_block_size);

 		outlen = outlen + pad_len;
 	}

 	return outlen + knet_h->sec_salt_size + knet_h->sec_hash_size;
 }

 /*
  * this function takes in the data that we would like to send
  * and tells us what is the real maximum data we can send
  * accounting for headers and crypto
  * calling thread needs to account for protocol overhead.
  */

 size_t calc_max_data_outlen(knet_handle_t knet_h, size_t inlen)
 {
 	size_t outlen = inlen, pad_len = 0;

 	if (knet_h->sec_block_size) {
 		/*
 		 * drop both salt and hash, that leaves only the crypto data and padding
 		 * we need to calculate the padding based on the real encrypted data
 		 * that includes the knet_header.
 		 */
 		outlen = outlen - (knet_h->sec_salt_size + knet_h->sec_hash_size);

 		/*
 		 * if the crypto mechanism requires padding, calculate the padding
 		 * and remove it, to align the data.
 		 * NOTE: we need to remove pad_len + 1 because, based on testing,
 		 * if we send data that are already aligned to block_size, the
 		 * crypto implementations will add another block_size!
 		 * so we want to make sure that our data won't add an unnecessary
 		 * block_size that we need to remove later.
 		 */
 		pad_len = outlen % knet_h->sec_block_size;

 		outlen = outlen - (pad_len + 1);

 		/*
 		 * add both hash and salt size back, similar to padding above,
 		 * the crypto layer will add them to the outlen
 		 */
 		outlen = outlen + (knet_h->sec_salt_size + knet_h->sec_hash_size);
 	}

 	/*
 	 * drop KNET_HEADER_ALL_SIZE to provide a clean application MTU
 	 * and various crypto headers
 	 */
 	outlen = outlen - (KNET_HEADER_ALL_SIZE + knet_h->sec_salt_size + knet_h->sec_hash_size);

 	return outlen;
 }

 /*
  * set the lowest possible value as failsafe for all links.
  * KNET_PMTUD_MIN_MTU_V4 < KNET_PMTUD_MIN_MTU_V6
  * KNET_PMTUD_OVERHEAD_V6 > KNET_PMTUD_OVERHEAD_V4
  * KNET_PMTUD_SCTP_OVERHEAD > KNET_PMTUD_UDP_OVERHEAD
  */

 size_t calc_min_mtu(knet_handle_t knet_h)
 {
 	return calc_max_data_outlen(knet_h, KNET_PMTUD_MIN_MTU_V4 - (KNET_PMTUD_OVERHEAD_V6 + KNET_PMTUD_SCTP_OVERHEAD));
 }
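To make the arithmetic above concrete, here is a standalone sketch that re-states the two formulas with purely illustrative sizes: a 16-byte block, 16-byte salt and 20-byte hash (roughly what an aes128/sha1 setup would produce) and a 32-byte placeholder for KNET_HEADER_ALL_SIZE. None of these values come from a real knet handle, and only the encrypted case (sec_block_size != 0) is modelled.

#include <stdio.h>
#include <stddef.h>

#define EX_BLOCK_SIZE	16	/* illustrative cipher block size */
#define EX_SALT_SIZE	16	/* illustrative salt size */
#define EX_HASH_SIZE	20	/* illustrative hash size */
#define EX_HEADER_SIZE	32	/* placeholder for KNET_HEADER_ALL_SIZE, not the real value */

/* mirrors calc_data_outlen() for the encrypted case */
static size_t ex_data_outlen(size_t inlen)
{
	size_t outlen = inlen;
	size_t pad_len = EX_BLOCK_SIZE - (outlen % EX_BLOCK_SIZE);

	outlen = outlen + pad_len;
	return outlen + EX_SALT_SIZE + EX_HASH_SIZE;
}

/* mirrors calc_max_data_outlen() for the encrypted case */
static size_t ex_max_data_outlen(size_t inlen)
{
	size_t outlen = inlen - (EX_SALT_SIZE + EX_HASH_SIZE);
	size_t pad_len = outlen % EX_BLOCK_SIZE;

	outlen = outlen - (pad_len + 1);	/* +1: a block-aligned input would gain one extra block */
	outlen = outlen + (EX_SALT_SIZE + EX_HASH_SIZE);
	return outlen - (EX_HEADER_SIZE + EX_SALT_SIZE + EX_HASH_SIZE);
}

int main(void)
{
	printf("onwire size for 1000 bytes of header+data: %zu\n", ex_data_outlen(1000));	/* 1044 */
	printf("app MTU for a 1500 byte onwire budget:     %zu\n", ex_max_data_outlen(1500));	/* 1423 */
	return 0;
}

With these numbers, 1000 bytes of knet_header plus data pad to 1008 bytes and grow to 1044 bytes on the wire, while a 1500-byte onwire budget leaves 1423 bytes of clean application MTU; feeding 1423 + 32 back through the first formula gives 1492 bytes, safely within the 1500-byte budget.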