diff --git a/libknet/crypto.c b/libknet/crypto.c
index 93d54450..ca1f3234 100644
--- a/libknet/crypto.c
+++ b/libknet/crypto.c
@@ -1,457 +1,457 @@
 /*
  * Copyright (C) 2012-2022 Red Hat, Inc.  All rights reserved.
  *
  * Author: Fabio M. Di Nitto <fabbione@kronosnet.org>
  *
  * This software licensed under LGPL-2.0+
  */
 
 #include "config.h"
 
 #include <errno.h>
 #include <stdlib.h>
 #include <string.h>
 #include <pthread.h>
 #include <time.h>
 
 #include "crypto.h"
 #include "crypto_model.h"
 #include "internals.h"
 #include "logging.h"
 #include "common.h"
 
 /*
  * internal module switch data
  */
 
 static crypto_model_t crypto_modules_cmds[] = {
 	{ "nss", WITH_CRYPTO_NSS, 0, NULL },
 	{ "openssl", WITH_CRYPTO_OPENSSL, 0, NULL },
 	{ "gcrypt", WITH_CRYPTO_GCRYPT, 0, NULL },
 	{ NULL, 0, 0, NULL }
 };
 
 static int crypto_get_model(const char *model)
 {
 	int idx = 0;
 
 	while (crypto_modules_cmds[idx].model_name != NULL) {
 		if (!strcmp(crypto_modules_cmds[idx].model_name, model))
 			return idx;
 		idx++;
 	}
 	return -1;
 }
 
 /*
  * exported API
  */
 
 int crypto_encrypt_and_sign (
 	knet_handle_t knet_h,
 	const unsigned char *buf_in,
 	const ssize_t buf_in_len,
 	unsigned char *buf_out,
 	ssize_t *buf_out_len)
 {
 	return crypto_modules_cmds[knet_h->crypto_instance[knet_h->crypto_in_use_config]->model].ops->crypt(knet_h, knet_h->crypto_instance[knet_h->crypto_in_use_config], buf_in, buf_in_len, buf_out, buf_out_len);
 }
 
 int crypto_encrypt_and_signv (
 	knet_handle_t knet_h,
 	const struct iovec *iov_in,
 	int iovcnt_in,
 	unsigned char *buf_out,
 	ssize_t *buf_out_len)
 {
 	return crypto_modules_cmds[knet_h->crypto_instance[knet_h->crypto_in_use_config]->model].ops->cryptv(knet_h, knet_h->crypto_instance[knet_h->crypto_in_use_config], iov_in, iovcnt_in, buf_out, buf_out_len);
 }
 
 int crypto_authenticate_and_decrypt (
 	knet_handle_t knet_h,
 	const unsigned char *buf_in,
 	const ssize_t buf_in_len,
 	unsigned char *buf_out,
 	ssize_t *buf_out_len)
 {
 	int i, err = 0;
 	int multiple_configs = 0;
 	uint8_t log_level = KNET_LOG_ERR;
 
 	for (i = 1; i <= KNET_MAX_CRYPTO_INSTANCES; i++) {
 		if (knet_h->crypto_instance[i]) {
 			multiple_configs++;
 		}
 	}
 
 	/*
 	 * attempt to decrypt first with the in-use config
 	 * to avoid excessive performance hit.
 	 */
 
 	if (multiple_configs > 1) {
 		log_level = KNET_LOG_DEBUG;
 	}
 
 	if (knet_h->crypto_in_use_config) {
 		err = crypto_modules_cmds[knet_h->crypto_instance[knet_h->crypto_in_use_config]->model].ops->decrypt(knet_h, knet_h->crypto_instance[knet_h->crypto_in_use_config], buf_in, buf_in_len, buf_out, buf_out_len, log_level);
 	} else {
 		err = -1;
 	}
 
 	/*
 	 * if we fail, try to use the other configurations
 	 */
 	if (err) {
 		for (i = 1; i <= KNET_MAX_CRYPTO_INSTANCES; i++) {
 			/*
 			 * in-use config was already attempted
 			 */
 			if (i == knet_h->crypto_in_use_config) {
 				continue;
 			}
 			if (knet_h->crypto_instance[i]) {
 				log_debug(knet_h, KNET_SUB_CRYPTO, "Alternative crypto configuration found, attempting to decrypt with config %u", i);
 				err = crypto_modules_cmds[knet_h->crypto_instance[i]->model].ops->decrypt(knet_h, knet_h->crypto_instance[i], buf_in, buf_in_len, buf_out, buf_out_len, KNET_LOG_ERR);
 				if (!err) {
 					errno = 0; /* clear errno from previous failures */
 					return err;
 				}
 				log_debug(knet_h, KNET_SUB_CRYPTO, "Packet failed to decrypt with crypto config %u", i);
 			}
 		}
 	}
 	return err;
 }
 
 static int crypto_use_config(
 	knet_handle_t knet_h,
 	uint8_t config_num)
 {
 	if ((config_num) && (!knet_h->crypto_instance[config_num])) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	knet_h->crypto_in_use_config = config_num;
 
 	if (config_num) {
 		knet_h->sec_block_size = knet_h->crypto_instance[config_num]->sec_block_size;
 		knet_h->sec_hash_size = knet_h->crypto_instance[config_num]->sec_hash_size;
 		knet_h->sec_salt_size = knet_h->crypto_instance[config_num]->sec_salt_size;
 	} else {
 		knet_h->sec_block_size = 0;
 		knet_h->sec_hash_size = 0;
 		knet_h->sec_salt_size = 0;
 	}
 
-	force_pmtud_run(knet_h, KNET_SUB_CRYPTO, 1);
+	force_pmtud_run(knet_h, KNET_SUB_CRYPTO, 1, 0);
 
 	return 0;
 }
 
 static int crypto_init(
 	knet_handle_t knet_h,
 	struct knet_handle_crypto_cfg *knet_handle_crypto_cfg,
 	uint8_t config_num)
 {
 	int err = 0, savederrno = 0;
 	int model = 0;
 	struct crypto_instance *current = NULL, *new = NULL;
 
 	current = knet_h->crypto_instance[config_num];
 
 	model = crypto_get_model(knet_handle_crypto_cfg->crypto_model);
 	if (model < 0) {
 		log_err(knet_h, KNET_SUB_CRYPTO, "model %s not supported", knet_handle_crypto_cfg->crypto_model);
 		return -1;
 	}
 
 	if (crypto_modules_cmds[model].built_in == 0) {
 		log_err(knet_h, KNET_SUB_CRYPTO, "this version of libknet was built without %s support. Please contact your vendor or fix the build.", knet_handle_crypto_cfg->crypto_model);
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_wrlock(&shlib_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_CRYPTO, "Unable to get write lock: %s",
 			strerror(savederrno));
 		return -1;
 	}
 
 	if (!crypto_modules_cmds[model].loaded) {
 		crypto_modules_cmds[model].ops = load_module (knet_h, "crypto", crypto_modules_cmds[model].model_name);
 		if (!crypto_modules_cmds[model].ops) {
 			savederrno = errno;
 			err = -1;
 			log_err(knet_h, KNET_SUB_CRYPTO, "Unable to load %s lib", crypto_modules_cmds[model].model_name);
 			goto out;
 		}
 		if (crypto_modules_cmds[model].ops->abi_ver != KNET_CRYPTO_MODEL_ABI) {
 			savederrno = EINVAL;
 			err = -1;
 			log_err(knet_h, KNET_SUB_CRYPTO,
 				"ABI mismatch loading module %s. knet ver: %d, module ver: %d",
 				crypto_modules_cmds[model].model_name, KNET_CRYPTO_MODEL_ABI,
 				crypto_modules_cmds[model].ops->abi_ver);
 			goto out;
 		}
 		crypto_modules_cmds[model].loaded = 1;
 	}
 
 	log_debug(knet_h, KNET_SUB_CRYPTO,
 		  "Initializing crypto module [%s/%s/%s]",
 		  knet_handle_crypto_cfg->crypto_model,
 		  knet_handle_crypto_cfg->crypto_cipher_type,
 		  knet_handle_crypto_cfg->crypto_hash_type);
 
 	new = malloc(sizeof(struct crypto_instance));
 
 	if (!new) {
 		savederrno = ENOMEM;
 		err = -1;
 		log_err(knet_h, KNET_SUB_CRYPTO, "Unable to allocate memory for crypto instance");
 		goto out;
 	}
 
 	/*
 	 * if crypto_modules_cmds.ops->init fails, it is expected that
 	 * it will clean everything by itself.
 	 * crypto_modules_cmds.ops->fini is not invoked on error.
 	 */
 	new->model = model;
 	if (crypto_modules_cmds[model].ops->init(knet_h, new, knet_handle_crypto_cfg)) {
 		savederrno = errno;
 		err = -1;
 		goto out;
 	}
 
 out:
 	if (!err) {
 		knet_h->crypto_instance[config_num] = new;
 
 		if (current) {
 			/*
 			 * if we are replacing the current config, we need to enable it right away
 			 */
 			if (knet_h->crypto_in_use_config == config_num) {
 				crypto_use_config(knet_h, config_num);
 			}
 
 			if (crypto_modules_cmds[current->model].ops->fini != NULL) {
 				crypto_modules_cmds[current->model].ops->fini(knet_h, current);
 			}
 			free(current);
 		}
 	} else {
 		if (new) {
 			free(new);
 		}
 	}
 
 	pthread_rwlock_unlock(&shlib_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
 static void crypto_fini_config(
 	knet_handle_t knet_h,
 	uint8_t config_num)
 {
 	if (knet_h->crypto_instance[config_num]) {
 		if (crypto_modules_cmds[knet_h->crypto_instance[config_num]->model].ops->fini != NULL) {
 			crypto_modules_cmds[knet_h->crypto_instance[config_num]->model].ops->fini(knet_h, knet_h->crypto_instance[config_num]);
 		}
 		free(knet_h->crypto_instance[config_num]);
 		knet_h->crypto_instance[config_num] = NULL;
 	}
 }
 
 void crypto_fini(
 	knet_handle_t knet_h,
 	uint8_t config_num)
 {
 	int savederrno = 0, i;
 
 	savederrno = pthread_rwlock_wrlock(&shlib_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_CRYPTO, "Unable to get write lock: %s",
 			strerror(savederrno));
 		return;
 	}
 
 	if (config_num > KNET_MAX_CRYPTO_INSTANCES) {
 		for (i = 1; i <= KNET_MAX_CRYPTO_INSTANCES; i++) {
 			crypto_fini_config(knet_h, i);
 		}
 	} else {
 		crypto_fini_config(knet_h, config_num);
 	}
 
 	pthread_rwlock_unlock(&shlib_rwlock);
 	return;
 }
 
 int knet_handle_crypto_set_config(knet_handle_t knet_h,
 				  struct knet_handle_crypto_cfg *knet_handle_crypto_cfg,
 				  uint8_t config_num)
 {
 	int savederrno = 0;
 	int err = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (!knet_handle_crypto_cfg) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((config_num < 1) || (config_num > KNET_MAX_CRYPTO_INSTANCES)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	if (knet_h->crypto_in_use_config == config_num) {
 		savederrno = EBUSY;
 		err = -1;
 		goto exit_unlock;
 	}
 
 	if ((!strncmp("none", knet_handle_crypto_cfg->crypto_model, 4)) ||
 	    ((!strncmp("none", knet_handle_crypto_cfg->crypto_cipher_type, 4)) &&
 	     (!strncmp("none", knet_handle_crypto_cfg->crypto_hash_type, 4)))) {
 		crypto_fini(knet_h, config_num);
 		log_debug(knet_h, KNET_SUB_CRYPTO, "crypto config %u is not enabled", config_num);
 		err = 0;
 		goto exit_unlock;
 	}
 
 	if (knet_handle_crypto_cfg->private_key_len < KNET_MIN_KEY_LEN) {
 		log_debug(knet_h, KNET_SUB_CRYPTO, "private key len too short for config %u (min %d): %u",
 			  config_num, KNET_MIN_KEY_LEN, knet_handle_crypto_cfg->private_key_len);
 		savederrno = EINVAL;
 		err = -1;
 		goto exit_unlock;
 	}
 
 	if (knet_handle_crypto_cfg->private_key_len > KNET_MAX_KEY_LEN) {
 		log_debug(knet_h, KNET_SUB_CRYPTO, "private key len too long for config %u (max %d): %u",
 			  config_num, KNET_MAX_KEY_LEN, knet_handle_crypto_cfg->private_key_len);
 		savederrno = EINVAL;
 		err = -1;
 		goto exit_unlock;
 	}
 
 	err = crypto_init(knet_h, knet_handle_crypto_cfg, config_num);
 
 	if (err) {
 		err = -2;
 		savederrno = errno;
 	}
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
 int knet_handle_crypto_rx_clear_traffic(knet_handle_t knet_h,
 					uint8_t value)
 {
 	int savederrno = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (value > KNET_CRYPTO_RX_DISALLOW_CLEAR_TRAFFIC) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	knet_h->crypto_only = value;
 	if (knet_h->crypto_only) {
 		log_debug(knet_h, KNET_SUB_CRYPTO, "Only crypto traffic allowed for RX");
 	} else {
 		log_debug(knet_h, KNET_SUB_CRYPTO, "Both crypto and clear traffic allowed for RX");
 	}
 
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	return 0;
 }
 
 int knet_handle_crypto_use_config(knet_handle_t knet_h,
 				  uint8_t config_num)
 {
 	int savederrno = 0;
 	int err = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (config_num > KNET_MAX_CRYPTO_INSTANCES) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	err = crypto_use_config(knet_h, config_num);
 	savederrno = errno;
 
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
 int knet_get_crypto_list(struct knet_crypto_info *crypto_list, size_t *crypto_list_entries)
 {
 	int err = 0;
 	int idx = 0;
 	int outidx = 0;
 
 	if (!crypto_list_entries) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	while (crypto_modules_cmds[idx].model_name != NULL) {
 		if (crypto_modules_cmds[idx].built_in) {
 			if (crypto_list) {
 				crypto_list[outidx].name = crypto_modules_cmds[idx].model_name;
 			}
 			outidx++;
 		}
 		idx++;
 	}
 	*crypto_list_entries = outidx;
 
 	if (!err)
 		errno = 0;
 	return err;
 }
diff --git a/libknet/links.c b/libknet/links.c
index 88471132..bad2e057 100644
--- a/libknet/links.c
+++ b/libknet/links.c
@@ -1,1568 +1,1572 @@
 /*
  * Copyright (C) 2012-2022 Red Hat, Inc.  All rights reserved.
  *
  * Authors: Fabio M. Di Nitto <fabbione@kronosnet.org>
  *          Federico Simoncelli <fsimon@kronosnet.org>
  *
  * This software licensed under LGPL-2.0+
  */
 
 #include "config.h"
 
 #include <errno.h>
 #include <netdb.h>
 #include <string.h>
 #include <pthread.h>
 
 #include "netutils.h"
 #include "internals.h"
 #include "logging.h"
 #include "links.h"
 #include "transports.h"
 #include "host.h"
 #include "threads_common.h"
 #include "links_acl.h"
 
 int _link_updown(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 		 unsigned int enabled, unsigned int connected, unsigned int lock_stats)
 {
 	struct knet_host *host = knet_h->host_index[host_id];
 	struct knet_link *link = &host->link[link_id];
 	int notify_status = link->status.connected;
 	int savederrno = 0;
 
 	if ((link->status.enabled == enabled) &&
 	    (link->status.connected == connected))
 		return 0;
 
 	if ((link->status.enabled) &&
 	    (knet_h->link_status_change_notify_fn)) {
 		if (link->status.connected != connected) {
 			notify_status = connected; /* connection state */
 		}
 		if (!enabled) {
 			notify_status = 0; /* disable == disconnected */
 		}
 		knet_h->link_status_change_notify_fn(
 					knet_h->link_status_change_notify_fn_private_data,
 					host_id,
 					link_id,
 					notify_status,
 					host->status.remote,
 					host->status.external);
 	}
 
 	link->status.enabled = enabled;
 	link->status.connected = connected;
 
 	_host_dstcache_update_async(knet_h, host);
 
 	if ((link->status.dynconnected) &&
 	    (!link->status.connected)) {
 		link->status.dynconnected = 0;
 	}
 
 	if (!connected) {
 		transport_link_is_down(knet_h, link);
+	} else {
+		/* Reset MTU in case new link can't use full line MTU */
+		log_info(knet_h, KNET_SUB_LINK, "Resetting MTU for link %u because host %u joined", link_id, host_id);
+		force_pmtud_run(knet_h, KNET_SUB_LINK, 1, 1);
 	}
 
 	if (lock_stats) {
 		savederrno = pthread_mutex_lock(&link->link_stats_mutex);
 		if (savederrno) {
 			log_err(knet_h, KNET_SUB_LINK, "Unable to get stats mutex lock for host %u link %u: %s",
 				host_id, link_id, strerror(savederrno));
 			errno = savederrno;
 			return -1;
 		}
 	}
 
 	if (connected) {
 		time(&link->status.stats.last_up_times[link->status.stats.last_up_time_index]);
 		link->status.stats.up_count++;
 		if (++link->status.stats.last_up_time_index >= MAX_LINK_EVENTS) {
 			link->status.stats.last_up_time_index = 0;
 		}
 	} else {
 		time(&link->status.stats.last_down_times[link->status.stats.last_down_time_index]);
 		link->status.stats.down_count++;
 		if (++link->status.stats.last_down_time_index >= MAX_LINK_EVENTS) {
 			link->status.stats.last_down_time_index = 0;
 		}
 	}
 
 	if (lock_stats) {
 		pthread_mutex_unlock(&link->link_stats_mutex);
 	}
 	return 0;
 }
 
 void _link_clear_stats(knet_handle_t knet_h)
 {
 	struct knet_host *host;
 	struct knet_link *link;
 	uint32_t host_id;
 	uint8_t link_id;
 
 	for (host_id = 0; host_id < KNET_MAX_HOST; host_id++) {
 		host = knet_h->host_index[host_id];
 		if (!host) {
 			continue;
 		}
 		for (link_id = 0; link_id < KNET_MAX_LINK; link_id++) {
 			link = &host->link[link_id];
 			memset(&link->status.stats, 0, sizeof(struct knet_link_stats));
 		}
 	}
 }
 
 int knet_link_set_config(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 			 uint8_t transport,
 			 struct sockaddr_storage *src_addr,
 			 struct sockaddr_storage *dst_addr,
 			 uint64_t flags)
 {
 	int savederrno = 0, err = 0, i, wipelink = 0, link_idx;
 	struct knet_host *host, *tmp_host;
 	struct knet_link *link = NULL;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (!src_addr) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (dst_addr && (src_addr->ss_family != dst_addr->ss_family)) {
 		log_err(knet_h, KNET_SUB_LINK, "Source address family does not match destination address family");
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (transport >= KNET_MAX_TRANSPORTS) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	if (transport == KNET_TRANSPORT_LOOPBACK && knet_h->host_id != host_id) {
 		log_err(knet_h, KNET_SUB_LINK, "Cannot create loopback link to remote node");
 		err = -1;
 		savederrno = EINVAL;
 		goto exit_unlock;
 	}
 
 	if (knet_h->host_id == host_id && knet_h->has_loop_link) {
 		log_err(knet_h, KNET_SUB_LINK, "Cannot create more than 1 link when loopback is active");
 		err = -1;
 		savederrno = EINVAL;
 		goto exit_unlock;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	if (transport == KNET_TRANSPORT_LOOPBACK && knet_h->host_id == host_id) {
 		for (i=0; i<KNET_MAX_LINK; i++) {
 			if (host->link[i].configured) {
 				log_err(knet_h, KNET_SUB_LINK, "Cannot add loopback link when other links are already configured.");
 				err = -1;
 				savederrno = EINVAL;
 				goto exit_unlock;
 			}
 		}
 	}
 
 	link = &host->link[link_id];
 
 	if (link->configured != 0) {
 		err =-1;
 		savederrno = EBUSY;
 		log_err(knet_h, KNET_SUB_LINK, "Host %u link %u is currently configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	if (link->status.enabled != 0) {
 		err =-1;
 		savederrno = EBUSY;
 		log_err(knet_h, KNET_SUB_LINK, "Host %u link %u is currently in use: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	/*
 	 * errors happening after this point should trigger
 	 * a memset of the link
 	 */
 	wipelink = 1;
 
 	copy_sockaddr(&link->src_addr, src_addr);
 
 	err = knet_addrtostr(src_addr, sizeof(struct sockaddr_storage),
 			     link->status.src_ipaddr, KNET_MAX_HOST_LEN,
 			     link->status.src_port, KNET_MAX_PORT_LEN);
 	if (err) {
 		if (err == EAI_SYSTEM) {
 			savederrno = errno;
 			log_warn(knet_h, KNET_SUB_LINK,
 				 "Unable to resolve host: %u link: %u source addr/port: %s",
 				 host_id, link_id, strerror(savederrno));
 		} else {
 			savederrno = EINVAL;
 			log_warn(knet_h, KNET_SUB_LINK,
 				 "Unable to resolve host: %u link: %u source addr/port: %s",
 				 host_id, link_id, gai_strerror(err));
 		}
 		err = -1;
 		goto exit_unlock;
 	}
 
 	if (!dst_addr) {
 		link->dynamic = KNET_LINK_DYNIP;
 	} else {
 
 		link->dynamic = KNET_LINK_STATIC;
 
 		copy_sockaddr(&link->dst_addr, dst_addr);
 		err = knet_addrtostr(dst_addr, sizeof(struct sockaddr_storage),
 				     link->status.dst_ipaddr, KNET_MAX_HOST_LEN,
 				     link->status.dst_port, KNET_MAX_PORT_LEN);
 		if (err) {
 			if (err == EAI_SYSTEM) {
 				savederrno = errno;
 				log_warn(knet_h, KNET_SUB_LINK,
 					 "Unable to resolve host: %u link: %u destination addr/port: %s",
 					 host_id, link_id, strerror(savederrno));
 			} else {
 				savederrno = EINVAL;
 				log_warn(knet_h, KNET_SUB_LINK,
 					 "Unable to resolve host: %u link: %u destination addr/port: %s",
 					 host_id, link_id, gai_strerror(err));
 			}
 			err = -1;
 			goto exit_unlock;
 		}
 	}
 
 	link->pmtud_crypto_timeout_multiplier = KNET_LINK_PMTUD_CRYPTO_TIMEOUT_MULTIPLIER_MIN;
 	link->pong_count = KNET_LINK_DEFAULT_PONG_COUNT;
 	link->has_valid_mtu = 0;
 	link->ping_interval = KNET_LINK_DEFAULT_PING_INTERVAL * 1000; /* microseconds */
 	link->pong_timeout = KNET_LINK_DEFAULT_PING_TIMEOUT * 1000; /* microseconds */
 	link->pong_timeout_backoff = KNET_LINK_PONG_TIMEOUT_BACKOFF;
 	link->pong_timeout_adj = link->pong_timeout * link->pong_timeout_backoff; /* microseconds */
 	link->latency_max_samples = KNET_LINK_DEFAULT_PING_PRECISION;
 	link->status.stats.latency_samples = 0;
 	link->flags = flags;
 
 	/*
 	 * check for DYNIP vs STATIC collisions.
 	 * example: link0 is static, user attempts to configure link1 as dynamic with the same source
 	 * address/port.
 	 * This configuration is invalid and would cause ACL collisions.
 	 */
 	for (tmp_host = knet_h->host_head; tmp_host != NULL; tmp_host = tmp_host->next) {
 		for (link_idx = 0; link_idx < KNET_MAX_LINK; link_idx++) {
 			if (&tmp_host->link[link_idx] == link)
 				continue;
 
 			if ((!memcmp(&tmp_host->link[link_idx].src_addr, &link->src_addr, sizeof(struct sockaddr_storage))) &&
 			    (tmp_host->link[link_idx].dynamic != link->dynamic)) {
 				savederrno = EINVAL;
 				err = -1;
 				log_err(knet_h, KNET_SUB_LINK, "Failed to configure host %u link %u dyn %u. Conflicts with host %u link %u dyn %u: %s",
 					host_id, link_id, link->dynamic, tmp_host->host_id, link_idx, tmp_host->link[link_idx].dynamic, strerror(savederrno));
 				goto exit_unlock;
 			}
 		}
 	}
 
 	savederrno = pthread_mutex_init(&link->link_stats_mutex, NULL);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to initialize link stats mutex: %s", strerror(savederrno));
 		err = -1;
 		goto exit_unlock;
 	}
 
 	if (transport_link_set_config(knet_h, link, transport) < 0) {
 		savederrno = errno;
 		err = -1;
 		goto exit_transport_err;
 	}
 
 	/*
 	 * we can only configure default access lists if we know both endpoints
 	 * and the protocol uses GENERIC_ACL, otherwise the protocol has
 	 * to setup their own access lists above in transport_link_set_config.
 	 */
 	if ((transport_get_acl_type(knet_h, transport) == USE_GENERIC_ACL) &&
 	    (link->dynamic == KNET_LINK_STATIC)) {
 		log_debug(knet_h, KNET_SUB_LINK, "Configuring default access lists for host: %u link: %u socket: %d",
 			  host_id, link_id, link->outsock);
 		if ((check_add(knet_h, link, -1,
 			       &link->dst_addr, &link->dst_addr,
 			       CHECK_TYPE_ADDRESS, CHECK_ACCEPT) < 0) && (errno != EEXIST)) {
 			log_warn(knet_h, KNET_SUB_LINK, "Failed to configure default access lists for host: %u link: %u", host_id, link_id);
 			savederrno = errno;
 			err = -1;
 			goto exit_acl_error;
 		}
 	}
 
 	/*
 	 * no errors should happen after link is configured
 	 */
 	link->configured = 1;
 	log_debug(knet_h, KNET_SUB_LINK, "host: %u link: %u is configured",
 		  host_id, link_id);
 
 	if (transport == KNET_TRANSPORT_LOOPBACK) {
 		knet_h->has_loop_link = 1;
 		knet_h->loop_link = link_id;
 		host->status.reachable = 1;
 		link->status.mtu = KNET_PMTUD_SIZE_V6;
 	} else {
 		/*
 		 * calculate the minimum MTU that is safe to use,
 		 * based on RFCs and that each network device should
 		 * be able to support without any troubles
 		 */
 		if (link->dynamic == KNET_LINK_STATIC) {
 			/*
 			 * with static link we can be more precise than using
 			 * the generic calc_min_mtu()
 			 */
 			switch (link->dst_addr.ss_family) {
 				case AF_INET6:
 					link->status.mtu =  calc_max_data_outlen(knet_h, KNET_PMTUD_MIN_MTU_V6 - (KNET_PMTUD_OVERHEAD_V6 + link->proto_overhead));
 					break;
 				case AF_INET:
 					link->status.mtu =  calc_max_data_outlen(knet_h, KNET_PMTUD_MIN_MTU_V4 - (KNET_PMTUD_OVERHEAD_V4 + link->proto_overhead));
 					break;
 			}
 		} else {
 			/*
 			 * for dynamic links we start with the minimum MTU
 			 * possible and PMTUd will kick in immediately
 			 * after connection status is 1
 			 */
 			link->status.mtu =  calc_min_mtu(knet_h);
 		}
 		link->has_valid_mtu = 1;
 	}
 
 exit_acl_error:
 	/*
 	 * if creating access lists has error, we only need to clean
 	 * the transport and the stuff below.
 	 */
 	if (err < 0) {
 		if ((transport_link_clear_config(knet_h, link) < 0)  &&
 		    (errno != EBUSY)) {
 			log_warn(knet_h, KNET_SUB_LINK, "Failed to deconfigure transport for host %u link %u: %s", host_id, link_id, strerror(errno));
 		}
 	}
 exit_transport_err:
 	/*
 	 * if transport has errors, transport will clean after itself
 	 * and we only need to clean the mutex
 	 */
 	if (err < 0) {
 		pthread_mutex_destroy(&link->link_stats_mutex);
 	}
 exit_unlock:
 	/*
 	 * re-init the link on error
 	 */
 	if ((err < 0) && (wipelink)) {
 		memset(link, 0, sizeof(struct knet_link));
 		link->link_id = link_id;
 	}
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
 int knet_link_get_config(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 			 uint8_t *transport,
 			 struct sockaddr_storage *src_addr,
 			 struct sockaddr_storage *dst_addr,
 			 uint8_t *dynamic,
 			 uint64_t *flags)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (!src_addr) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (!dynamic) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (!transport) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (!flags) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	if ((link->dynamic == KNET_LINK_STATIC) && (!dst_addr)) {
 		savederrno = EINVAL;
 		err = -1;
 		goto exit_unlock;
 	}
 
 	memmove(src_addr, &link->src_addr, sockaddr_len(&link->src_addr));
 
 	*transport = link->transport;
 	*flags = link->flags;
 
 	if (link->dynamic == KNET_LINK_STATIC) {
 		*dynamic = 0;
 		memmove(dst_addr, &link->dst_addr, sockaddr_len(&link->dst_addr));
 	} else {
 		*dynamic = 1;
 	}
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
 int knet_link_clear_config(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 	int sock;
 	uint8_t transport;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (link->configured != 1) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	if (link->status.enabled != 0) {
 		err = -1;
 		savederrno = EBUSY;
 		log_err(knet_h, KNET_SUB_LINK, "Host %u link %u is currently in use: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	/*
 	 * remove well known access lists here.
 	 * After the transport has done clearing the config,
 	 * then we can remove any leftover access lists if the link
 	 * is no longer in use.
 	 */
 	if ((transport_get_acl_type(knet_h, link->transport) == USE_GENERIC_ACL) &&
 	    (link->dynamic == KNET_LINK_STATIC)) {
 		if ((check_rm(knet_h, link,
 			      &link->dst_addr, &link->dst_addr,
 			      CHECK_TYPE_ADDRESS, CHECK_ACCEPT) < 0) && (errno != ENOENT)) {
 			err = -1;
 			savederrno = errno;
 			log_err(knet_h, KNET_SUB_LINK, "Host %u link %u: unable to remove default access list",
 				host_id, link_id);
 			goto exit_unlock;
 		}
 	}
 
 	/*
 	 * cache it for later as we don't know if the transport
 	 * will clear link info during clear_config.
 	 */
 	sock = link->outsock;
 	transport = link->transport;
 
 	if ((transport_link_clear_config(knet_h, link) < 0)  &&
 	    (errno != EBUSY)) {
 		savederrno = errno;
 		err = -1;
 		goto exit_unlock;
 	}
 
 	/*
 	 * remove any other access lists when the socket is no
 	 * longer in use by the transport.
 	 */
 	if ((transport_get_acl_type(knet_h, transport) == USE_GENERIC_ACL) &&
 	    (knet_h->knet_transport_fd_tracker[sock].transport == KNET_MAX_TRANSPORTS)) {
 		check_rmall(knet_h, link);
 	}
 
 	pthread_mutex_destroy(&link->link_stats_mutex);
 
 	memset(link, 0, sizeof(struct knet_link));
 	link->link_id = link_id;
 
 	if (knet_h->has_loop_link && host_id == knet_h->host_id && link_id == knet_h->loop_link) {
 		knet_h->has_loop_link = 0;
 		if (host->active_link_entries == 0) {
 			host->status.reachable = 0;
 		}
 	}
 
 	log_debug(knet_h, KNET_SUB_LINK, "host: %u link: %u config has been wiped",
 		  host_id, link_id);
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
 int knet_link_set_enable(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 			 unsigned int enabled)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (enabled > 1) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	if (link->status.enabled == enabled) {
 		err = 0;
 		goto exit_unlock;
 	}
 
 	err = _link_updown(knet_h, host_id, link_id, enabled, link->status.connected, 0);
 	savederrno = errno;
 
 	if (enabled) {
 		goto exit_unlock;
 	}
 
 	log_debug(knet_h, KNET_SUB_LINK, "host: %u link: %u is disabled",
 		  host_id, link_id);
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
 int knet_link_get_enable(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 			 unsigned int *enabled)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (!enabled) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	*enabled = link->status.enabled;
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
 int knet_link_set_pong_count(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 			     uint8_t pong_count)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (pong_count < 1) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link->pong_count = pong_count;
 
 	log_debug(knet_h, KNET_SUB_LINK,
 		  "host: %u link: %u pong count update: %u",
 		  host_id, link_id, link->pong_count);
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
/*
 * Fetch the configured pong count for host_id/link_id into *pong_count.
 *
 * Returns 0 on success, -1 on failure with errno set (EINVAL for bad
 * arguments, unknown host or unconfigured link).
 */
int knet_link_get_pong_count(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
			     uint8_t *pong_count)
{
	int savederrno = 0, err = 0;
	struct knet_host *host;
	struct knet_link *link;

	if (!_is_valid_handle(knet_h)) {
		return -1;
	}

	if (link_id >= KNET_MAX_LINK) {
		errno = EINVAL;
		return -1;
	}

	if (!pong_count) {
		errno = EINVAL;
		return -1;
	}

	/* read-only access: a shared (read) lock is enough */
	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
	if (savederrno) {
		log_err(knet_h, KNET_SUB_LINK, "Unable to get read lock: %s",
			strerror(savederrno));
		errno = savederrno;
		return -1;
	}

	host = knet_h->host_index[host_id];
	if (!host) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
			host_id, strerror(savederrno));
		goto exit_unlock;
	}

	link = &host->link[link_id];

	if (!link->configured) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
			host_id, link_id, strerror(savederrno));
		goto exit_unlock;
	}

	*pong_count = link->pong_count;

exit_unlock:
	pthread_rwlock_unlock(&knet_h->global_rwlock);
	/* errno contract: 0 on success, saved error code on failure */
	errno = err ? savederrno : 0;
	return err;
}
 
/*
 * Configure the heartbeat timers for host_id/link_id:
 *  - interval:  how often to ping (caller units; stored * 1000, so the
 *               stored value is in microseconds — callers presumably pass
 *               milliseconds, TODO confirm against the public API docs)
 *  - timeout:   how long to wait for a pong before declaring the link down
 *  - precision: how many latency samples to keep for averaging
 *
 * Returns 0 on success, -1 on failure with errno set.
 */
int knet_link_set_ping_timers(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
			      time_t interval, time_t timeout, unsigned int precision)
{
	int savederrno = 0, err = 0;
	struct knet_host *host;
	struct knet_link *link;

	if (!_is_valid_handle(knet_h)) {
		return -1;
	}

	if (link_id >= KNET_MAX_LINK) {
		errno = EINVAL;
		return -1;
	}

	if (!interval) {
		errno = EINVAL;
		return -1;
	}

	/*
	 * NOTE(review): timeout == 0 is rejected with ENOSYS rather than
	 * EINVAL — presumably a placeholder for a not-yet-implemented
	 * "automatic timeout" mode; confirm before changing.
	 */
	if (!timeout) {
		errno = ENOSYS;
		return -1;
	}

	if (!precision) {
		errno = EINVAL;
		return -1;
	}

	savederrno = get_global_wrlock(knet_h);
	if (savederrno) {
		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
			strerror(savederrno));
		errno = savederrno;
		return -1;
	}

	host = knet_h->host_index[host_id];
	if (!host) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
			host_id, strerror(savederrno));
		goto exit_unlock;
	}

	link = &host->link[link_id];

	if (!link->configured) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
			host_id, link_id, strerror(savederrno));
		goto exit_unlock;
	}

	link->ping_interval = interval * 1000; /* microseconds */
	link->pong_timeout = timeout * 1000; /* microseconds */
	link->latency_max_samples = precision;

	/* NOTE(review): %llu assumes the interval/timeout fields are
	 * unsigned long long — confirm against internals.h */
	log_debug(knet_h, KNET_SUB_LINK,
		  "host: %u link: %u timeout update - interval: %llu timeout: %llu precision: %u",
		  host_id, link_id, link->ping_interval, link->pong_timeout, precision);

exit_unlock:
	pthread_rwlock_unlock(&knet_h->global_rwlock);
	errno = err ? savederrno : 0;
	return err;
}
 
/*
 * Read back the heartbeat timers for host_id/link_id.  The stored values
 * are in microseconds (see knet_link_set_ping_timers); they are divided
 * by 1000 before being returned to the caller.
 *
 * Returns 0 on success, -1 on failure with errno set.
 */
int knet_link_get_ping_timers(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
			      time_t *interval, time_t *timeout, unsigned int *precision)
{
	int savederrno = 0, err = 0;
	struct knet_host *host;
	struct knet_link *link;

	if (!_is_valid_handle(knet_h)) {
		return -1;
	}

	if (link_id >= KNET_MAX_LINK) {
		errno = EINVAL;
		return -1;
	}

	if (!interval) {
		errno = EINVAL;
		return -1;
	}

	if (!timeout) {
		errno = EINVAL;
		return -1;
	}

	if (!precision) {
		errno = EINVAL;
		return -1;
	}

	/* read-only access: shared lock */
	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
	if (savederrno) {
		log_err(knet_h, KNET_SUB_LINK, "Unable to get read lock: %s",
			strerror(savederrno));
		errno = savederrno;
		return -1;
	}

	host = knet_h->host_index[host_id];
	if (!host) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
			host_id, strerror(savederrno));
		goto exit_unlock;
	}

	link = &host->link[link_id];

	if (!link->configured) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
			host_id, link_id, strerror(savederrno));
		goto exit_unlock;
	}

	*interval = link->ping_interval / 1000; /* stored in microseconds, reported /1000 */
	*timeout = link->pong_timeout / 1000; /* same conversion as interval */
	*precision = link->latency_max_samples;

exit_unlock:
	pthread_rwlock_unlock(&knet_h->global_rwlock);
	errno = err ? savederrno : 0;
	return err;
}
 
/*
 * Set the switching priority of host_id/link_id.  Changing the priority
 * requires a synchronous rebuild of the host destination cache; on
 * failure the old priority is restored so the cache and the link stay
 * consistent.
 *
 * Returns 0 on success, -1 on failure with errno set.
 */
int knet_link_set_priority(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
			   uint8_t priority)
{
	int savederrno = 0, err = 0;
	struct knet_host *host;
	struct knet_link *link;
	uint8_t old_priority;

	if (!_is_valid_handle(knet_h)) {
		return -1;
	}

	if (link_id >= KNET_MAX_LINK) {
		errno = EINVAL;
		return -1;
	}

	savederrno = get_global_wrlock(knet_h);
	if (savederrno) {
		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
			strerror(savederrno));
		errno = savederrno;
		return -1;
	}

	host = knet_h->host_index[host_id];
	if (!host) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
			host_id, strerror(savederrno));
		goto exit_unlock;
	}

	link = &host->link[link_id];

	if (!link->configured) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
			host_id, link_id, strerror(savederrno));
		goto exit_unlock;
	}

	/* remember the current value so we can roll back on cache failure */
	old_priority = link->priority;

	if (link->priority == priority) {
		err = 0;
		goto exit_unlock;
	}

	link->priority = priority;

	if (_host_dstcache_update_sync(knet_h, host)) {
		savederrno = errno;
		log_debug(knet_h, KNET_SUB_LINK,
			  "Unable to update link priority (host: %u link: %u priority: %u): %s",
			  host_id, link_id, link->priority, strerror(savederrno));
		/* roll back: the dst cache was not rebuilt with the new value */
		link->priority = old_priority;
		err = -1;
		goto exit_unlock;
	}

	log_debug(knet_h, KNET_SUB_LINK,
		  "host: %u link: %u priority set to: %u",
		  host_id, link_id, link->priority);

exit_unlock:
	pthread_rwlock_unlock(&knet_h->global_rwlock);
	errno = err ? savederrno : 0;
	return err;
}
 
 int knet_link_get_priority(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 			   uint8_t *priority)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (!priority) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	*priority = link->priority;
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
/*
 * List the configured link ids for host_id.  The ids are written to
 * link_ids and the number written to *link_ids_entries.
 *
 * NOTE(review): up to KNET_MAX_LINK entries can be written — the caller
 * must provide an array at least that large; confirm against the public
 * API documentation.
 *
 * Returns 0 on success, -1 on failure with errno set.
 */
int knet_link_get_link_list(knet_handle_t knet_h, knet_node_id_t host_id,
			    uint8_t *link_ids, size_t *link_ids_entries)
{
	int savederrno = 0, err = 0, i, count = 0;
	struct knet_host *host;
	struct knet_link *link;

	if (!_is_valid_handle(knet_h)) {
		return -1;
	}

	if (!link_ids) {
		errno = EINVAL;
		return -1;
	}

	if (!link_ids_entries) {
		errno = EINVAL;
		return -1;
	}

	/* read-only access: shared lock */
	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
	if (savederrno) {
		log_err(knet_h, KNET_SUB_LINK, "Unable to get read lock: %s",
			strerror(savederrno));
		errno = savederrno;
		return -1;
	}

	host = knet_h->host_index[host_id];
	if (!host) {
		err = -1;
		savederrno = EINVAL;
		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
			host_id, strerror(savederrno));
		goto exit_unlock;
	}

	/* collect only links that have been configured */
	for (i = 0; i < KNET_MAX_LINK; i++) {
		link = &host->link[i];
		if (!link->configured) {
			continue;
		}
		link_ids[count] = i;
		count++;
	}

	*link_ids_entries = count;

exit_unlock:
	pthread_rwlock_unlock(&knet_h->global_rwlock);
	errno = err ? savederrno : 0;
	return err;
}
 
 int knet_link_get_status(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 			 struct knet_link_status *status, size_t struct_size)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if (!status) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	savederrno = pthread_mutex_lock(&link->link_stats_mutex);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get stats mutex lock for host %u link %u: %s",
 			host_id, link_id, strerror(savederrno));
 		err = -1;
 		goto exit_unlock;
 	}
 
 	memmove(status, &link->status, struct_size);
 
 	pthread_mutex_unlock(&link->link_stats_mutex);
 
 	/* Calculate totals - no point in doing this on-the-fly */
 	status->stats.rx_total_packets =
 		status->stats.rx_data_packets +
 		status->stats.rx_ping_packets +
 		status->stats.rx_pong_packets +
 		status->stats.rx_pmtu_packets;
 	status->stats.tx_total_packets =
 		status->stats.tx_data_packets +
 		status->stats.tx_ping_packets +
 		status->stats.tx_pong_packets +
 		status->stats.tx_pmtu_packets;
 	status->stats.rx_total_bytes =
 		status->stats.rx_data_bytes +
 		status->stats.rx_ping_bytes +
 		status->stats.rx_pong_bytes +
 		status->stats.rx_pmtu_bytes;
 	status->stats.tx_total_bytes =
 		status->stats.tx_data_bytes +
 		status->stats.tx_ping_bytes +
 		status->stats.tx_pong_bytes +
 		status->stats.tx_pmtu_bytes;
 	status->stats.tx_total_errors =
 		status->stats.tx_data_errors +
 		status->stats.tx_ping_errors +
 		status->stats.tx_pong_errors +
 		status->stats.tx_pmtu_errors;
 	status->stats.tx_total_retries =
 		status->stats.tx_data_retries +
 		status->stats.tx_ping_retries +
 		status->stats.tx_pong_retries +
 		status->stats.tx_pmtu_retries;
 
 	/* Tell the caller our full size in case they have an old version */
 	status->size = sizeof(struct knet_link_status);
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	errno = err ? savederrno : 0;
 	return err;
 }
 
/*
 * Register (or clear, by passing NULL) the callback invoked when a link
 * changes status.  The callback pointer and its private data are stored
 * under the global write lock.
 *
 * Returns 0 on success, -1 on failure with errno set.
 */
int knet_link_enable_status_change_notify(knet_handle_t knet_h,
					  void *link_status_change_notify_fn_private_data,
					  void (*link_status_change_notify_fn) (
						void *private_data,
						knet_node_id_t host_id,
						uint8_t link_id,
						uint8_t connected,
						uint8_t remote,
						uint8_t external))
{
	int savederrno = 0;

	if (!_is_valid_handle(knet_h)) {
		return -1;
	}

	savederrno = get_global_wrlock(knet_h);
	if (savederrno) {
		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
			strerror(savederrno));
		errno = savederrno;
		return -1;
	}

	knet_h->link_status_change_notify_fn_private_data = link_status_change_notify_fn_private_data;
	knet_h->link_status_change_notify_fn = link_status_change_notify_fn;
	if (knet_h->link_status_change_notify_fn) {
		log_debug(knet_h, KNET_SUB_LINK, "link_status_change_notify_fn enabled");
	} else {
		log_debug(knet_h, KNET_SUB_LINK, "link_status_change_notify_fn disabled");
	}

	pthread_rwlock_unlock(&knet_h->global_rwlock);

	errno = 0;
	return 0;
}
 
 int knet_link_insert_acl(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 			 int index,
 			 struct sockaddr_storage *ss1,
 			 struct sockaddr_storage *ss2,
 			 check_type_t type, check_acceptreject_t acceptreject)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (!ss1) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((type != CHECK_TYPE_ADDRESS) &&
 	    (type != CHECK_TYPE_MASK) &&
 	    (type != CHECK_TYPE_RANGE)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((acceptreject != CHECK_ACCEPT) &&
 	    (acceptreject != CHECK_REJECT)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((type != CHECK_TYPE_ADDRESS) && (!ss2)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((type == CHECK_TYPE_RANGE) &&
 	    (ss1->ss_family != ss2->ss_family)) {
 			errno = EINVAL;
 			return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	if (link->dynamic != KNET_LINK_DYNIP) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is a point to point connection: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	err = check_add(knet_h, link, index,
 			ss1, ss2, type, acceptreject);
 	savederrno = errno;
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 
 	errno = savederrno;
 	return err;
 }
 
/*
 * Convenience wrapper: append an ACL entry at the end of the list
 * (index -1) via knet_link_insert_acl.  Same arguments, same errno
 * semantics.
 */
int knet_link_add_acl(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
		      struct sockaddr_storage *ss1,
		      struct sockaddr_storage *ss2,
		      check_type_t type, check_acceptreject_t acceptreject)
{
	return knet_link_insert_acl(knet_h, host_id, link_id, -1, ss1, ss2, type, acceptreject);
}
 
 int knet_link_rm_acl(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id,
 		     struct sockaddr_storage *ss1,
 		     struct sockaddr_storage *ss2,
 		     check_type_t type, check_acceptreject_t acceptreject)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (!ss1) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((type != CHECK_TYPE_ADDRESS) &&
 	    (type != CHECK_TYPE_MASK) &&
 	    (type != CHECK_TYPE_RANGE)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((acceptreject != CHECK_ACCEPT) &&
 	    (acceptreject != CHECK_REJECT)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((type != CHECK_TYPE_ADDRESS) && (!ss2)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	if ((type == CHECK_TYPE_RANGE) &&
 	    (ss1->ss_family != ss2->ss_family)) {
 			errno = EINVAL;
 			return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	if (link->dynamic != KNET_LINK_DYNIP) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is a point to point connection: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	err = check_rm(knet_h, link,
 		       ss1, ss2, type, acceptreject);
 	savederrno = errno;
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 
 	errno = savederrno;
 	return err;
 }
 
 int knet_link_clear_acl(knet_handle_t knet_h, knet_node_id_t host_id, uint8_t link_id)
 {
 	int savederrno = 0, err = 0;
 	struct knet_host *host;
 	struct knet_link *link;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (link_id >= KNET_MAX_LINK) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_LINK, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	host = knet_h->host_index[host_id];
 	if (!host) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "Unable to find host %u: %s",
 			host_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	link = &host->link[link_id];
 
 	if (!link->configured) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is not configured: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	if (link->dynamic != KNET_LINK_DYNIP) {
 		err = -1;
 		savederrno = EINVAL;
 		log_err(knet_h, KNET_SUB_LINK, "host %u link %u is a point to point connection: %s",
 			host_id, link_id, strerror(savederrno));
 		goto exit_unlock;
 	}
 
 	check_rmall(knet_h, link);
 
 exit_unlock:
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 
 	errno = savederrno;
 	return err;
 }
diff --git a/libknet/threads_common.c b/libknet/threads_common.c
index d1398801..f026bf5f 100644
--- a/libknet/threads_common.c
+++ b/libknet/threads_common.c
@@ -1,341 +1,353 @@
 /*
  * Copyright (C) 2016-2022 Red Hat, Inc.  All rights reserved.
  *
  * Authors: Fabio M. Di Nitto <fabbione@kronosnet.org>
  *          Federico Simoncelli <fsimon@kronosnet.org>
  *
  * This software licensed under LGPL-2.0+
  */
 
 #include "config.h"
 
 #include <pthread.h>
 #include <errno.h>
 #include <string.h>
 #include <zlib.h>
 
 #include "internals.h"
 #include "logging.h"
 #include "threads_common.h"
 
/*
 * Report whether the handle is being torn down (fini_in_progress),
 * read under the global read lock.
 *
 * Returns the flag value, or -1 with errno set if the lock cannot be
 * taken.  NOTE(review): -1 is also truthy — callers that only test for
 * non-zero will treat a lock failure as "shutdown in progress"; this
 * looks intentional (fail safe) but confirm.
 */
int shutdown_in_progress(knet_handle_t knet_h)
{
	int savederrno = 0;
	int ret;

	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
	if (savederrno) {
		log_err(knet_h, KNET_SUB_COMMON, "Unable to get read lock: %s",
			strerror(savederrno));
		errno = savederrno;
		return -1;
	}

	ret = knet_h->fini_in_progress;

	pthread_rwlock_unlock(&knet_h->global_rwlock);

	return ret;
}
 
-static int pmtud_reschedule(knet_handle_t knet_h)
+static int _pmtud_reschedule(knet_handle_t knet_h)
 {
-	if (pthread_mutex_lock(&knet_h->pmtud_mutex) != 0) {
-		log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
-		return -1;
-	}
-
 	if (knet_h->pmtud_running) {
 		knet_h->pmtud_abort = 1;
 
 		if (knet_h->pmtud_waiting) {
 			pthread_cond_signal(&knet_h->pmtud_cond);
 		}
 	}
+	return 0;
+}
 
+static int pmtud_reschedule(knet_handle_t knet_h)
+{
+	int res;
+
+	if (pthread_mutex_lock(&knet_h->pmtud_mutex) != 0) {
+		log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
+		return -1;
+	}
+	res = _pmtud_reschedule(knet_h);
 	pthread_mutex_unlock(&knet_h->pmtud_mutex);
-	return 0;
+	return res;
 }
 
/*
 * Take the global write lock.  PMTUd holds the read lock for long
 * stretches, so before blocking we ask it to abort/reschedule its
 * current run; a failure to notify only delays the lock acquisition.
 *
 * Returns the pthread_rwlock_wrlock() result (0 on success).
 */
int get_global_wrlock(knet_handle_t knet_h)
{
	if (pmtud_reschedule(knet_h) < 0) {
		log_info(knet_h, KNET_SUB_PMTUD, "Unable to notify PMTUd to reschedule. Expect delays in executing API calls");
	}
	return pthread_rwlock_wrlock(&knet_h->global_rwlock);
}
 
 static struct pretty_names thread_names[KNET_THREAD_MAX] =
 {
 	{ "TX", KNET_THREAD_TX },
 	{ "RX", KNET_THREAD_RX },
 	{ "HB", KNET_THREAD_HB },
 	{ "PMTUD", KNET_THREAD_PMTUD },
 #ifdef HAVE_NETINET_SCTP_H
 	{ "SCTP_LISTEN", KNET_THREAD_SCTP_LISTEN },
 	{ "SCTP_CONN", KNET_THREAD_SCTP_CONN },
 #endif
 	{ "DST_LINK", KNET_THREAD_DST_LINK }
 };
 
 static struct pretty_names thread_status[] =
 {
 	{ "unregistered", KNET_THREAD_UNREGISTERED },
 	{ "registered", KNET_THREAD_REGISTERED },
 	{ "started", KNET_THREAD_STARTED },
 	{ "stopped", KNET_THREAD_STOPPED }
 };
 
 static const char *get_thread_status_name(uint8_t status)
 {
 	unsigned int i;
 
 	for (i = 0; i < KNET_THREAD_STATUS_MAX; i++) {
 		if (thread_status[i].val == status) {
 			return thread_status[i].name;
 		}
 	}
 	return "unknown";
 }
 
 static const char *get_thread_name(uint8_t thread_id)
 {
 	unsigned int i;
 
 	for (i = 0; i < KNET_THREAD_MAX; i++) {
 		if (thread_names[i].val == thread_id) {
 			return thread_names[i].name;
 		}
 	}
 	return "unknown";
 }
 
/*
 * Read the flush-queue request flag for thread_id under the status
 * mutex.
 *
 * Returns the flag (KNET_THREAD_QUEUE_FLUSHED/FLUSH) or -1 if the mutex
 * cannot be taken; the flag values are 0/1, so -1 is unambiguous.
 */
int get_thread_flush_queue(knet_handle_t knet_h, uint8_t thread_id)
{
	uint8_t flush;

	if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
		log_debug(knet_h, KNET_SUB_HANDLE, "Unable to get mutex lock");
		return -1;
	}

	flush = knet_h->threads_flush_queue[thread_id];

	pthread_mutex_unlock(&knet_h->threads_status_mutex);
	return flush;
}
 
 int set_thread_flush_queue(knet_handle_t knet_h, uint8_t thread_id, uint8_t status)
 {
 	if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
 		log_debug(knet_h, KNET_SUB_HANDLE, "Unable to get mutex lock");
 		return -1;
 	}
 
 	knet_h->threads_flush_queue[thread_id] = status;
 
 	log_debug(knet_h, KNET_SUB_HANDLE, "Updated flush queue request for thread %s to %u",
 		  get_thread_name(thread_id), status);
 
 	pthread_mutex_unlock(&knet_h->threads_status_mutex);
 	return 0;
 }
 
 int wait_all_threads_flush_queue(knet_handle_t knet_h)
 {
 	uint8_t i = 0, found = 0;
 
 	while (!found) {
 		usleep(knet_h->threads_timer_res);
 
 		if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
 			continue;
 		}
 
 		found = 1;
 
 		for (i = 0; i < KNET_THREAD_MAX; i++) {
 			if (knet_h->threads_flush_queue[i] == KNET_THREAD_QUEUE_FLUSHED) {
 				continue;
 			}
 			log_debug(knet_h, KNET_SUB_HANDLE, "Checking thread: %s queue: %u",
 					get_thread_name(i),
 					knet_h->threads_flush_queue[i]);
 			if (knet_h->threads_flush_queue[i] != KNET_THREAD_QUEUE_FLUSHED) {
 				found = 0;
 			}
 		}
 
 		pthread_mutex_unlock(&knet_h->threads_status_mutex);
 	}
 
 	return 0;
 }
 
 int set_thread_status(knet_handle_t knet_h, uint8_t thread_id, uint8_t status)
 {
 	if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
 		log_debug(knet_h, KNET_SUB_HANDLE, "Unable to get mutex lock");
 		return -1;
 	}
 
 	knet_h->threads_status[thread_id] = status;
 
 	log_debug(knet_h, KNET_SUB_HANDLE, "Updated status for thread %s to %s",
 		  get_thread_name(thread_id), get_thread_status_name(status));
 
 	pthread_mutex_unlock(&knet_h->threads_status_mutex);
 	return 0;
 }
 
/*
 * Busy-wait (polling every threads_timer_res usecs) until every
 * registered thread has reached the requested lifecycle status.
 * Unregistered slots are skipped.  Always returns 0.
 */
int wait_all_threads_status(knet_handle_t knet_h, uint8_t status)
{
	uint8_t i = 0, found = 0;

	while (!found) {
		usleep(knet_h->threads_timer_res);

		if (pthread_mutex_lock(&knet_h->threads_status_mutex) != 0) {
			continue;
		}

		found = 1;

		for (i = 0; i < KNET_THREAD_MAX; i++) {
			/* threads that were never registered don't count */
			if (knet_h->threads_status[i] == KNET_THREAD_UNREGISTERED) {
				continue;
			}
			log_debug(knet_h, KNET_SUB_HANDLE, "Checking thread: %s status: %s req: %s",
					get_thread_name(i),
					get_thread_status_name(knet_h->threads_status[i]),
					get_thread_status_name(status));
			/* any registered thread not yet at `status` keeps us polling */
			if (knet_h->threads_status[i] != status) {
				found = 0;
			}
		}

		pthread_mutex_unlock(&knet_h->threads_status_mutex);
	}

	return 0;
}
 
-void force_pmtud_run(knet_handle_t knet_h, uint8_t subsystem, uint8_t reset_mtu)
+void force_pmtud_run(knet_handle_t knet_h, uint8_t subsystem, uint8_t reset_mtu, uint8_t force_restart)
 {
 	if (reset_mtu) {
 		log_debug(knet_h, subsystem, "PMTUd has been reset to default");
 		knet_h->data_mtu = calc_min_mtu(knet_h);
 		if (knet_h->pmtud_notify_fn) {
 			knet_h->pmtud_notify_fn(knet_h->pmtud_notify_fn_private_data,
 						knet_h->data_mtu);
 		}
 	}
 
 	/*
 	 * we can only try to take a lock here. This part of the code
 	 * can be invoked by any thread, including PMTUd that is already
 	 * holding a lock at that stage.
 	 * If PMTUd is holding the lock, most likely it is already running
 	 * and we don't need to notify it back.
 	 */
 	if (!pthread_mutex_trylock(&knet_h->pmtud_mutex)) {
 		if (!knet_h->pmtud_running) {
 			if (!knet_h->pmtud_forcerun) {
 				log_debug(knet_h, subsystem, "Notifying PMTUd to rerun");
 				knet_h->pmtud_forcerun = 1;
 			}
+		} else {
+			if (force_restart) {
+				if (_pmtud_reschedule(knet_h) < 0) {
+					log_info(knet_h, KNET_SUB_PMTUD, "Unable to notify PMTUd to reschedule. A joining node may struggle to connect properly");
+				}
+			}
 		}
 		pthread_mutex_unlock(&knet_h->pmtud_mutex);
 	}
 }
 
/*
 * Set the polling resolution used by the worker threads, in
 * microseconds.  0 restores the compile-time default
 * (KNET_THREADS_TIMER_RES).
 *
 * Returns 0 on success, -1 on failure with errno set.
 * NOTE(review): errno is not cleared on success, unlike most setters in
 * this library — confirm whether callers rely on that.
 */
int knet_handle_set_threads_timer_res(knet_handle_t knet_h,
				      useconds_t timeres)
{
	int savederrno = 0;

	if (!_is_valid_handle(knet_h)) {
		return -1;
	}

	/*
	 * most threads use timeres / 1000 as timeout on epoll.
	 * anything below 1000 would generate a result of 0, making
	 * the threads spin at 100% cpu
	 */
	if ((timeres > 0) && (timeres < 1000)) {
		errno = EINVAL;
		return -1;
	}

	savederrno = get_global_wrlock(knet_h);
	if (savederrno) {
		log_err(knet_h, KNET_SUB_HANDLE, "Unable to get write lock: %s",
			strerror(savederrno));
		errno = savederrno;
		return -1;
	}

	if (timeres) {
		knet_h->threads_timer_res = timeres;
		log_debug(knet_h, KNET_SUB_HANDLE, "Setting new threads timer resolution to %u usecs", knet_h->threads_timer_res);
	} else {
		knet_h->threads_timer_res = KNET_THREADS_TIMER_RES;
		log_debug(knet_h, KNET_SUB_HANDLE, "Setting new threads timer resolution to default %u usecs", knet_h->threads_timer_res);
	}

	pthread_rwlock_unlock(&knet_h->global_rwlock);
	return 0;
}
 
 int knet_handle_get_threads_timer_res(knet_handle_t knet_h,
 				      useconds_t *timeres)
 {
 	int savederrno = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (!timeres) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_HANDLE, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	*timeres = knet_h->threads_timer_res;
 
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 	return 0;
 }
 
 uint32_t compute_chksum(const unsigned char *data, uint32_t data_len)
 {
 	uLong crc;
 
 	crc = crc32(0, NULL, 0);
 	crc = crc32(crc, (Bytef*)data, data_len);
 
 	return crc;
 }
 
 uint32_t compute_chksumv(const struct iovec *iov_in, int iovcnt_in)
 {
 	uLong crc;
 	int i;
 
 	crc = crc32(0, NULL, 0);
 
 	for (i = 0; i < iovcnt_in; i++) {
 		crc = crc32(crc, (Bytef*)iov_in[i].iov_base, iov_in[i].iov_len);
 	}
 
 	return crc;
 }
diff --git a/libknet/threads_common.h b/libknet/threads_common.h
index b79faf37..f38dc283 100644
--- a/libknet/threads_common.h
+++ b/libknet/threads_common.h
@@ -1,56 +1,56 @@
 /*
  * Copyright (C) 2012-2022 Red Hat, Inc.  All rights reserved.
  *
  * Authors: Fabio M. Di Nitto <fabbione@kronosnet.org>
  *          Federico Simoncelli <fsimon@kronosnet.org>
  *
  * This software licensed under LGPL-2.0+
  */
 
 #ifndef __KNET_THREADS_COMMON_H__
 #define __KNET_THREADS_COMMON_H__
 
 #include "internals.h"
 
 #define KNET_THREAD_UNREGISTERED	0 /* thread does not exist */
 #define KNET_THREAD_REGISTERED		1 /* thread has been registered before  pthread_create invocation.
 					     make sure threads are registered before calling wait_all_thread_status */
 #define KNET_THREAD_STARTED		2 /* thread has reported to be running */
 #define KNET_THREAD_STOPPED		3 /* thread has returned */
 #define KNET_THREAD_STATUS_MAX	KNET_THREAD_STOPPED + 1
 
 #define KNET_THREAD_TX		0
 #define KNET_THREAD_RX		1
 #define KNET_THREAD_HB		2
 #define KNET_THREAD_PMTUD	3
 #define KNET_THREAD_DST_LINK	4
 #ifdef HAVE_NETINET_SCTP_H
 #define KNET_THREAD_SCTP_LISTEN	5
 #define KNET_THREAD_SCTP_CONN	6
 #endif
 #define KNET_THREAD_MAX		32
 
 #define KNET_THREAD_QUEUE_FLUSHED 0
 #define KNET_THREAD_QUEUE_FLUSH   1
 
 #define timespec_diff(start, end, diff) \
 do { \
 	if (end.tv_sec > start.tv_sec) \
 		*(diff) = ((end.tv_sec - start.tv_sec) * 1000000000llu) \
 					+ end.tv_nsec - start.tv_nsec; \
 	else \
 		*(diff) = end.tv_nsec - start.tv_nsec; \
 } while (0);
 
 int shutdown_in_progress(knet_handle_t knet_h);
 int get_global_wrlock(knet_handle_t knet_h);
 int get_thread_flush_queue(knet_handle_t knet_h, uint8_t thread_id);
 int set_thread_flush_queue(knet_handle_t knet_h, uint8_t thread_id, uint8_t status);
 int wait_all_threads_flush_queue(knet_handle_t knet_h);
 int set_thread_status(knet_handle_t knet_h, uint8_t thread_id, uint8_t status);
 int wait_all_threads_status(knet_handle_t knet_h, uint8_t status);
-void force_pmtud_run(knet_handle_t knet_h, uint8_t subsystem, uint8_t reset_mtu);
+void force_pmtud_run(knet_handle_t knet_h, uint8_t subsystem, uint8_t reset_mtu, uint8_t force_restart);
 uint32_t compute_chksum(const unsigned char *data, uint32_t data_len);
 uint32_t compute_chksumv(const struct iovec *iov_in, int iovcnt_in);
 
 #endif
diff --git a/libknet/threads_pmtud.c b/libknet/threads_pmtud.c
index 179536b8..1db4cab4 100644
--- a/libknet/threads_pmtud.c
+++ b/libknet/threads_pmtud.c
@@ -1,938 +1,938 @@
 /*
  * Copyright (C) 2015-2022 Red Hat, Inc.  All rights reserved.
  *
  * Authors: Fabio M. Di Nitto <fabbione@kronosnet.org>
  *          Federico Simoncelli <fsimon@kronosnet.org>
  *
  * This software licensed under LGPL-2.0+
  */
 
 #include "config.h"
 
 #include <unistd.h>
 #include <string.h>
 #include <errno.h>
 #include <pthread.h>
 
 #include "crypto.h"
 #include "links.h"
 #include "host.h"
 #include "logging.h"
 #include "transports.h"
 #include "threads_common.h"
 #include "threads_pmtud.h"
 #include "onwire_v1.h"
 
 /*
  * Derive a link MTU from the administratively configured handle MTU
  * (knet_h->manual_mtu) instead of running path discovery.
  *
  * Returns 1 and updates dst_link->status.mtu on success,
  * 0 when the destination address family is not AF_INET/AF_INET6.
  */
 static int _calculate_manual_mtu(knet_handle_t knet_h, struct knet_link *dst_link)
 {
 	size_t ipproto_overhead_len;	/* onwire packet overhead (protocol based) */
 
 	switch (dst_link->dst_addr.ss_family) {
 		case AF_INET6:
 			ipproto_overhead_len = KNET_PMTUD_OVERHEAD_V6 + dst_link->proto_overhead;
 			break;
 		case AF_INET:
 			ipproto_overhead_len = KNET_PMTUD_OVERHEAD_V4 + dst_link->proto_overhead;
 			break;
 		default:
 			log_debug(knet_h, KNET_SUB_PMTUD, "unknown protocol");
 			return 0;
 			break;
 	}
 
 	/* usable app payload = manual mtu minus IP/transport overhead (crypto handled by calc_max_data_outlen) */
 	dst_link->status.mtu = calc_max_data_outlen(knet_h, knet_h->manual_mtu - ipproto_overhead_len);
 
 	return 1;
 }
 
 /*
  * Run PMTU discovery for a single link: starting from the protocol
  * maximum, send PMTUd probe packets and binary-search (via
  * last_good_mtu/last_bad_mtu) the largest onwire size the path
  * accepts. Replies are delivered by the RX path, which signals
  * knet_h->pmtud_cond while this function waits on pmtud_mutex.
  *
  * Returns 0 on success (dst_link->status.mtu updated), -1 on failure;
  * sets errno = EDEADLK when the run was aborted and should be
  * rescheduled by the caller.
  */
 static int _handle_check_link_pmtud(knet_handle_t knet_h, struct knet_host *dst_host, struct knet_link *dst_link)
 {
 	int err, ret, savederrno, mutex_retry_limit, failsafe, use_kernel_mtu, warn_once;
 	uint32_t kernel_mtu;		/* record kernel_mtu from EMSGSIZE */
 	size_t onwire_len;   		/* current packet onwire size */
 	size_t ipproto_overhead_len;	/* onwire packet overhead (protocol based) */
 	size_t max_mtu_len;		/* max mtu for protocol */
 	size_t data_len;		/* how much data we can send in the packet
 					 * generally would be onwire_len - ipproto_overhead_len
 					 * needs to be adjusted for crypto
 					 */
 	size_t app_mtu_len;		/* real data that we can send onwire */
 	ssize_t len;			/* len of what we were able to sendto onwire */
 	uint8_t onwire_ver;
 
 	struct timespec ts, pmtud_crypto_start_ts, pmtud_crypto_stop_ts;
 	unsigned long long pong_timeout_adj_tmp, timediff;
 	int pmtud_crypto_reduce = 1;
 	unsigned char *outbuf = (unsigned char *)knet_h->pmtudbuf;
 
 	warn_once = 0;
 
 	mutex_retry_limit = 0;
 	failsafe = 0;
 
 	switch (dst_link->dst_addr.ss_family) {
 		case AF_INET6:
 			max_mtu_len = KNET_PMTUD_SIZE_V6;
 			ipproto_overhead_len = KNET_PMTUD_OVERHEAD_V6 + dst_link->proto_overhead;
 			break;
 		case AF_INET:
 			max_mtu_len = KNET_PMTUD_SIZE_V4;
 			ipproto_overhead_len = KNET_PMTUD_OVERHEAD_V4 + dst_link->proto_overhead;
 			break;
 		default:
 			log_debug(knet_h, KNET_SUB_PMTUD, "PMTUD aborted, unknown protocol");
 			return -1;
 			break;
 	}
 
 	/*
 	 * bisection bounds: lower bound starts at the last known-good ping
 	 * size (plus overhead), upper bound is discovered as we probe.
 	 */
 	dst_link->last_bad_mtu = 0;
 	dst_link->last_good_mtu = dst_link->last_ping_size + ipproto_overhead_len;
 
 	/*
 	 * discovery starts from the top because kernel will
 	 * refuse to send packets > current iface mtu.
 	 * this saves us some time and network bw.
 	 */ 
 	onwire_len = max_mtu_len;
 
 	/*
 	 * cache onwire version for this link / run
 	 */
 	if (pthread_mutex_lock(&knet_h->onwire_mutex)) {
 		log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get onwire mutex lock");
 		return -1;
 	}
 	onwire_ver = knet_h->onwire_ver;
 	pthread_mutex_unlock(&knet_h->onwire_mutex);
 
 restart:
 	/*
 	 * prevent a race when interface mtu is changed _exactly_ during
 	 * the discovery process and it's complex to detect. Easier
 	 * to wait the next loop.
 	 * 30 is not an arbitrary value. To bisect from 576 to 128000 doesn't
 	 * take more than 18/19 steps.
 	 */
 
 	if (failsafe == 30) {
 		log_err(knet_h, KNET_SUB_PMTUD,
 			"Aborting PMTUD process: Too many attempts. MTU might have changed during discovery.");
 		return -1;
 	} else {
 		failsafe++;
 	}
 
 	/*
 	 * common to all packets
 	 */
 
 	/*
 	 * calculate the application MTU based on current onwire_len minus ipproto_overhead_len
 	 */
 
 	app_mtu_len = calc_max_data_outlen(knet_h, onwire_len - ipproto_overhead_len);
 
 	/*
 	 * recalculate onwire len back that might be different based
 	 * on data padding from crypto layer.
 	 */
 
 	onwire_len = calc_data_outlen(knet_h, app_mtu_len + KNET_HEADER_ALL_SIZE) + ipproto_overhead_len;
 
 	/*
 	 * calculate the size of what we need to send to sendto(2).
 	 * see also onwire.c for packet format explanation.
 	 */
 	data_len = app_mtu_len + knet_h->sec_hash_size + knet_h->sec_salt_size + KNET_HEADER_ALL_SIZE;
 
 	if (knet_h->onwire_ver_remap) {
 		prep_pmtud_v1(knet_h, dst_link, onwire_ver, onwire_len, app_mtu_len + KNET_HEADER_ALL_SIZE);
 	} else {
 		switch (onwire_ver) {
 			case 1:
 				prep_pmtud_v1(knet_h, dst_link, onwire_ver, onwire_len, app_mtu_len + KNET_HEADER_ALL_SIZE);
 				break;
 			default:
 				log_warn(knet_h, KNET_SUB_PMTUD, "preparing PMTUD onwire version %u not supported", onwire_ver);
 				return -1;
 				break;
 		}
 	}
 
 	if (knet_h->crypto_in_use_config) {
 		if (data_len < (knet_h->sec_hash_size + knet_h->sec_salt_size) + 1) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Aborting PMTUD process: link mtu smaller than crypto header detected (link might have been disconnected)");
 			return -1;
 		}
 
 		if (crypto_encrypt_and_sign(knet_h,
 					    (const unsigned char *)knet_h->pmtudbuf,
 					    data_len - (knet_h->sec_hash_size + knet_h->sec_salt_size),
 					    knet_h->pmtudbuf_crypt,
 					    (ssize_t *)&data_len) < 0) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to crypto pmtud packet");
 			return -1;
 		}
 
 		outbuf = knet_h->pmtudbuf_crypt;
 		if (pthread_mutex_lock(&knet_h->handle_stats_mutex) < 0) {
 			log_err(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
 			return -1;
 		}
 		knet_h->stats_extra.tx_crypt_pmtu_packets++;
 		pthread_mutex_unlock(&knet_h->handle_stats_mutex);
 	}
 
 	/* link has gone down, aborting pmtud */
 	if (dst_link->status.connected != 1) {
 		log_debug(knet_h, KNET_SUB_PMTUD, "PMTUD detected host (%u) link (%u) has been disconnected", dst_host->host_id, dst_link->link_id);
 		return -1;
 	}
 
 	if (dst_link->transport_connected != 1) {
 		log_debug(knet_h, KNET_SUB_PMTUD, "PMTUD detected host (%u) link (%u) has been disconnected", dst_host->host_id, dst_link->link_id);
 		return -1;
 	}
 
 	if (pthread_mutex_lock(&knet_h->pmtud_mutex) != 0) {
 		log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
 		return -1;
 	}
 
 	if (knet_h->pmtud_abort) {
 		pthread_mutex_unlock(&knet_h->pmtud_mutex);
 		errno = EDEADLK;
 		return -1;
 	}
 
 	savederrno = pthread_mutex_lock(&knet_h->tx_mutex);
 	if (savederrno) {
 		pthread_mutex_unlock(&knet_h->pmtud_mutex);
 		log_err(knet_h, KNET_SUB_PMTUD, "Unable to get TX mutex lock: %s", strerror(savederrno));
 		return -1;
 	}
 
 	savederrno = pthread_mutex_lock(&dst_link->link_stats_mutex);
 	if (savederrno) {
 		pthread_mutex_unlock(&knet_h->pmtud_mutex);
 		pthread_mutex_unlock(&knet_h->tx_mutex);
 		log_err(knet_h, KNET_SUB_PMTUD, "Unable to get stats mutex lock for host %u link %u: %s",
 			dst_host->host_id, dst_link->link_id, strerror(savederrno));
 		return -1;
 	}
 
 retry:
 	if (transport_get_connection_oriented(knet_h, dst_link->transport) == TRANSPORT_PROTO_NOT_CONNECTION_ORIENTED) {
 		len = sendto(dst_link->outsock, outbuf, data_len, MSG_DONTWAIT | MSG_NOSIGNAL,
 			     (struct sockaddr *) &dst_link->dst_addr,
 			     knet_h->knet_transport_fd_tracker[dst_link->outsock].sockaddr_len);
 	} else {
 		len = sendto(dst_link->outsock, outbuf, data_len, MSG_DONTWAIT | MSG_NOSIGNAL, NULL, 0);
 	}
 	savederrno = errno;
 
 	/*
 	 * we cannot hold a lock on kmtu_mutex between resetting
 	 * knet_h->kernel_mtu here and below where it's used.
 	 * use_kernel_mtu tells us if the knet_h->kernel_mtu was
 	 * set to 0 and we can trust its value later.
 	 */
 	use_kernel_mtu = 0;
 
 	if (pthread_mutex_lock(&knet_h->kmtu_mutex) == 0) {
 		use_kernel_mtu = 1;
 		knet_h->kernel_mtu = 0;
 		pthread_mutex_unlock(&knet_h->kmtu_mutex);
 	}
 
 	kernel_mtu = 0;
 
 	err = transport_tx_sock_error(knet_h, dst_link->transport, dst_link->outsock, len, savederrno);
 	switch(err) {
 		case KNET_TRANSPORT_SOCK_ERROR_INTERNAL:
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to send pmtu packet (sendto): %d %s", savederrno, strerror(savederrno));
 			pthread_mutex_unlock(&knet_h->tx_mutex);
 			pthread_mutex_unlock(&knet_h->pmtud_mutex);
 			dst_link->status.stats.tx_pmtu_errors++;
 			pthread_mutex_unlock(&dst_link->link_stats_mutex);
 			return -1;
 			break;
 		case KNET_TRANSPORT_SOCK_ERROR_IGNORE:
 			break;
 		case KNET_TRANSPORT_SOCK_ERROR_RETRY:
 			dst_link->status.stats.tx_pmtu_retries++;
 			goto retry;
 			break;
 	}
 
 	pthread_mutex_unlock(&knet_h->tx_mutex);
 
 	/*
 	 * short send: either the kernel rejected the size (EMSGSIZE, which
 	 * gives us a new upper bound) or the send failed for another reason
 	 * and we simply bisect again on the old bounds.
 	 */
 	if (len != (ssize_t )data_len) {
 		pthread_mutex_unlock(&dst_link->link_stats_mutex);
 		if (savederrno == EMSGSIZE) {
 			/*
 			 * we cannot hold a lock on kmtu_mutex between resetting
 			 * knet_h->kernel_mtu and here.
 			 * use_kernel_mtu tells us if the knet_h->kernel_mtu was
 			 * set to 0 previously and we can trust its value now.
 			 */
 			if (use_kernel_mtu) {
 				use_kernel_mtu = 0;
 				if (pthread_mutex_lock(&knet_h->kmtu_mutex) == 0) {
 					kernel_mtu = knet_h->kernel_mtu;
 					pthread_mutex_unlock(&knet_h->kmtu_mutex);
 				}
 			}
 			if (kernel_mtu > 0) {
 				dst_link->last_bad_mtu = kernel_mtu + 1;
 			} else {
 				dst_link->last_bad_mtu = onwire_len;
 			}
 		} else {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to send pmtu packet len: %zu err: %s", onwire_len, strerror(savederrno));
 		}
 
 	} else {
 		dst_link->last_sent_mtu = onwire_len;
 		dst_link->last_recv_mtu = 0;
 		dst_link->status.stats.tx_pmtu_packets++;
 		dst_link->status.stats.tx_pmtu_bytes += data_len;
 		pthread_mutex_unlock(&dst_link->link_stats_mutex);
 
 		if (clock_gettime(CLOCK_REALTIME, &ts) < 0) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get current time: %s", strerror(errno));
 			pthread_mutex_unlock(&knet_h->pmtud_mutex);
 			return -1;
 		}
 
 		/*
 		 * non fatal, we can wait the next round to reduce the
 		 * multiplier
 		 */
 		if (clock_gettime(CLOCK_MONOTONIC, &pmtud_crypto_start_ts) < 0) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get current time: %s", strerror(errno));
 			pmtud_crypto_reduce = 0;
 		}
 
 		/*
 		 * set PMTUd reply timeout to match pong_timeout on a given link
 		 *
 		 * math: internally pong_timeout is expressed in microseconds, while
 		 *       the public API exports milliseconds. So careful with the 0's here.
 		 * the loop is necessary because we are grabbing the current time just above
 		 * and add values to it that could overflow into seconds.
 		 */ 
 
 		if (pthread_mutex_lock(&knet_h->backoff_mutex)) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get backoff_mutex");
 			pthread_mutex_unlock(&knet_h->pmtud_mutex);
 			return -1;
 		}
 
 		if (knet_h->crypto_in_use_config) {
 			/*
 			 * crypto, under pressure, is a royal PITA
 			 */
 			pong_timeout_adj_tmp = dst_link->pong_timeout_adj * dst_link->pmtud_crypto_timeout_multiplier;
 		} else {
 			pong_timeout_adj_tmp = dst_link->pong_timeout_adj;
 		}
 
 		ts.tv_sec += pong_timeout_adj_tmp / 1000000;
 		ts.tv_nsec += (((pong_timeout_adj_tmp) % 1000000) * 1000);
 		while (ts.tv_nsec > 1000000000) {
 			ts.tv_sec += 1;
 			ts.tv_nsec -= 1000000000;
 		}
 
 		pthread_mutex_unlock(&knet_h->backoff_mutex);
 
 		knet_h->pmtud_waiting = 1;
 
 		/* wait for process_pmtud_reply() to signal us, or time out */
 		ret = pthread_cond_timedwait(&knet_h->pmtud_cond, &knet_h->pmtud_mutex, &ts);
 
 		knet_h->pmtud_waiting = 0;
 
 		if (knet_h->pmtud_abort) {
 			pthread_mutex_unlock(&knet_h->pmtud_mutex);
 			errno = EDEADLK;
 			return -1;
 		}
 
 		/*
 		 * we cannot use shutdown_in_progress in here because
 		 * we already hold the read lock
 		 */
 		if (knet_h->fini_in_progress) {
 			pthread_mutex_unlock(&knet_h->pmtud_mutex);
 			log_debug(knet_h, KNET_SUB_PMTUD, "PMTUD aborted. shutdown in progress");
 			return -1;
 		}
 
 		if (ret) {
 			if (ret == ETIMEDOUT) {
 				if ((knet_h->crypto_in_use_config) && (dst_link->pmtud_crypto_timeout_multiplier < KNET_LINK_PMTUD_CRYPTO_TIMEOUT_MULTIPLIER_MAX)) {
 					dst_link->pmtud_crypto_timeout_multiplier = dst_link->pmtud_crypto_timeout_multiplier * 2;
 					pmtud_crypto_reduce = 0;
 					log_debug(knet_h, KNET_SUB_PMTUD,
 							"Increasing PMTUd response timeout multiplier to (%u) for host %u link: %u",
 							dst_link->pmtud_crypto_timeout_multiplier,
 							dst_host->host_id,
 							dst_link->link_id);
 					pthread_mutex_unlock(&knet_h->pmtud_mutex);
 					goto restart;
 				}
 				if (!warn_once) {
 					log_warn(knet_h, KNET_SUB_PMTUD,
 							"possible MTU misconfiguration detected. "
 							"kernel is reporting MTU: %u bytes for "
 							"host %u link %u but the other node is "
 							"not acknowledging packets of this size. ",
 							dst_link->last_sent_mtu,
 							dst_host->host_id,
 							dst_link->link_id);
 					log_warn(knet_h, KNET_SUB_PMTUD,
 							"This can be caused by this node interface MTU "
 							"too big or a network device that does not "
 							"support or has been misconfigured to manage MTU "
 							"of this size, or packet loss. knet will continue "
 							"to run but performances might be affected.");
 					warn_once = 1;
 				}
 			} else {
 				pthread_mutex_unlock(&knet_h->pmtud_mutex);
 				if (mutex_retry_limit == 3) {
 					log_debug(knet_h, KNET_SUB_PMTUD, "PMTUD aborted, unable to get mutex lock");
 					return -1;
 				}
 				mutex_retry_limit++;
 				goto restart;
 			}
 		}
 
 		/*
 		 * reply came back fast enough: shrink the crypto timeout
 		 * multiplier again (it was doubled on earlier timeouts)
 		 */
 		if ((knet_h->crypto_in_use_config) && (pmtud_crypto_reduce == 1) &&
 		    (dst_link->pmtud_crypto_timeout_multiplier > KNET_LINK_PMTUD_CRYPTO_TIMEOUT_MULTIPLIER_MIN)) {
 			if (!clock_gettime(CLOCK_MONOTONIC, &pmtud_crypto_stop_ts)) {
 				timespec_diff(pmtud_crypto_start_ts, pmtud_crypto_stop_ts, &timediff);
 				if (((pong_timeout_adj_tmp * 1000) / 2) > timediff) {
 					dst_link->pmtud_crypto_timeout_multiplier = dst_link->pmtud_crypto_timeout_multiplier / 2;
 					log_debug(knet_h, KNET_SUB_PMTUD,
 							"Decreasing PMTUd response timeout multiplier to (%u) for host %u link: %u",
 							dst_link->pmtud_crypto_timeout_multiplier,
 							dst_host->host_id,
 							dst_link->link_id);
 				}
 			} else {
 				log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get current time: %s", strerror(errno));
 			}
 		}
 
 		if ((dst_link->last_recv_mtu != onwire_len) || (ret)) {
 			dst_link->last_bad_mtu = onwire_len;
 		} else {
 			int found_mtu = 0;
 
 			if (knet_h->sec_block_size) {
 				if ((onwire_len + knet_h->sec_block_size >= max_mtu_len) ||
 				   ((dst_link->last_bad_mtu) && (dst_link->last_bad_mtu <= (onwire_len + knet_h->sec_block_size)))) {
 					found_mtu = 1;
 				}
 			} else {
 				if ((onwire_len == max_mtu_len) ||
 				    ((dst_link->last_bad_mtu) && (dst_link->last_bad_mtu == (onwire_len + 1))) ||
 				     (dst_link->last_bad_mtu == dst_link->last_good_mtu)) {
 					found_mtu = 1;
 				}
 			}
 
 			if (found_mtu) {
 				/*
 				 * account for IP overhead, knet headers and crypto in PMTU calculation
 				 */
 				dst_link->status.mtu = calc_max_data_outlen(knet_h, onwire_len - ipproto_overhead_len);
 				pthread_mutex_unlock(&knet_h->pmtud_mutex);
 				return 0;
 			}
 
 			dst_link->last_good_mtu = onwire_len;
 		}
 	}
 
 	/* next probe size: kernel hint if we got one, otherwise bisect */
 	if (kernel_mtu) {
 		onwire_len = kernel_mtu;
 	} else {
 		onwire_len = (dst_link->last_good_mtu + dst_link->last_bad_mtu) / 2;
 	}
 
 	pthread_mutex_unlock(&knet_h->pmtud_mutex);
 
 	goto restart;
 }
 
 /*
  * Decide whether PMTU discovery should (re)run for this link and run it.
  * The run is skipped when pmtud_interval has not yet elapsed, unless
  * force_run is set. On EDEADLK from the discovery loop the previously
  * known MTU is restored and the run is left to be rescheduled.
  *
  * Returns the link's has_valid_mtu flag (1 = usable MTU known).
  */
 static int _handle_check_pmtud(knet_handle_t knet_h, struct knet_host *dst_host, struct knet_link *dst_link, int force_run)
 {
 	uint8_t saved_valid_pmtud;
 	unsigned int saved_pmtud;
 	struct timespec clock_now;
 	unsigned long long diff_pmtud, interval;
 
 	if (clock_gettime(CLOCK_MONOTONIC, &clock_now) != 0) {
 		log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get monotonic clock");
 		return 0;
 	}
 
 	if (!force_run) {
 		interval = knet_h->pmtud_interval * 1000000000llu; /* nanoseconds */
 
 		timespec_diff(dst_link->pmtud_last, clock_now, &diff_pmtud);
 
 		/* not due yet: keep the current answer */
 		if (diff_pmtud < interval) {
 			return dst_link->has_valid_mtu;
 		}
 	}
 
 	/*
 	 * status.proto_overhead should include all IP/(UDP|SCTP)/knet headers
 	 *
 	 * please note that it is not the same as link->proto_overhead that
 	 * includes only either UDP or SCTP (at the moment) overhead.
 	 */
 	switch (dst_link->dst_addr.ss_family) {
 		case AF_INET6:
 			dst_link->status.proto_overhead = KNET_PMTUD_OVERHEAD_V6 + dst_link->proto_overhead + KNET_HEADER_ALL_SIZE + knet_h->sec_hash_size + knet_h->sec_salt_size;
 			break;
 		case AF_INET:
 			dst_link->status.proto_overhead = KNET_PMTUD_OVERHEAD_V4 + dst_link->proto_overhead + KNET_HEADER_ALL_SIZE + knet_h->sec_hash_size + knet_h->sec_salt_size;
 			break;
 	}
 
 	/* snapshot so a rescheduled run can restore the previous state */
 	saved_pmtud = dst_link->status.mtu;
 	saved_valid_pmtud = dst_link->has_valid_mtu;
 
 	log_debug(knet_h, KNET_SUB_PMTUD, "Starting PMTUD for host: %u link: %u", dst_host->host_id, dst_link->link_id);
 
 	errno = 0;
 	if (_handle_check_link_pmtud(knet_h, dst_host, dst_link) < 0) {
 		if (errno == EDEADLK) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "PMTUD for host: %u link: %u has been rescheduled", dst_host->host_id, dst_link->link_id);
 			dst_link->status.mtu = saved_pmtud;
 			dst_link->has_valid_mtu = saved_valid_pmtud;
 			errno = EDEADLK;
 			return dst_link->has_valid_mtu;
 		}
 		dst_link->has_valid_mtu = 0;
 	} else {
 		if (dst_link->status.mtu < calc_min_mtu(knet_h)) {
 			log_info(knet_h, KNET_SUB_PMTUD,
 				 "Invalid MTU detected for host: %u link: %u mtu: %u",
 				 dst_host->host_id, dst_link->link_id, dst_link->status.mtu);
 			dst_link->has_valid_mtu = 0;
 		} else {
 			dst_link->has_valid_mtu = 1;
 		}
 		if (dst_link->has_valid_mtu) {
 			if ((saved_pmtud) && (saved_pmtud != dst_link->status.mtu)) {
 				log_info(knet_h, KNET_SUB_PMTUD, "PMTUD link change for host: %u link: %u from %u to %u",
 					 dst_host->host_id, dst_link->link_id, saved_pmtud, dst_link->status.mtu);
 			}
 			log_debug(knet_h, KNET_SUB_PMTUD, "PMTUD completed for host: %u link: %u current link mtu: %u",
 				  dst_host->host_id, dst_link->link_id, dst_link->status.mtu);
 
 			/*
 			 * set pmtud_last, if we can, after we are done with the PMTUd process
 			 * because it can take a very long time.
 			 */
 			dst_link->pmtud_last = clock_now;
 			if (!clock_gettime(CLOCK_MONOTONIC, &clock_now)) {
 				dst_link->pmtud_last = clock_now;
 			}
 		}
 	}
 
 	/* MTU validity flipped: let the dst cache pick a new best link */
 	if (saved_valid_pmtud != dst_link->has_valid_mtu) {
 		_host_dstcache_update_async(knet_h, dst_host);
 	}
 
 	return dst_link->has_valid_mtu;
 }
 
 /*
  * PMTUd thread entry point. Periodically walks every enabled,
  * connected (non-loopback) link, runs per-link MTU discovery (or the
  * manual calculation when knet_h->manual_mtu is set), and publishes
  * the lowest discovered value as the handle-wide data_mtu, invoking
  * pmtud_notify_fn when it changes.
  */
 void *_handle_pmtud_link_thread(void *data)
 {
 	knet_handle_t knet_h = (knet_handle_t) data;
 	struct knet_host *dst_host;
 	struct knet_link *dst_link;
 	int link_idx;
 	unsigned int have_mtu;
 	unsigned int lower_mtu;
 	int link_has_mtu;
 	int force_run = 0;
 
 	set_thread_status(knet_h, KNET_THREAD_PMTUD, KNET_THREAD_STARTED);
 
 	knet_h->data_mtu = calc_min_mtu(knet_h);
 
 	while (!shutdown_in_progress(knet_h)) {
 		usleep(knet_h->threads_timer_res);
 
 		/* consume any pending force-run request atomically */
 		if (pthread_mutex_lock(&knet_h->pmtud_mutex) != 0) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
 			continue;
 		}
 		knet_h->pmtud_abort = 0;
 		knet_h->pmtud_running = 1;
 		force_run = knet_h->pmtud_forcerun;
 		knet_h->pmtud_forcerun = 0;
 		pthread_mutex_unlock(&knet_h->pmtud_mutex);
 
 		if (force_run) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "PMTUd request to rerun has been received");
 		}
 
 		if (pthread_rwlock_rdlock(&knet_h->global_rwlock) != 0) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get read lock");
 			continue;
 		}
 
 		lower_mtu = KNET_PMTUD_SIZE_V4;
 		have_mtu = 0;
 
 		for (dst_host = knet_h->host_head; dst_host != NULL; dst_host = dst_host->next) {
 			for (link_idx = 0; link_idx < KNET_MAX_LINK; link_idx++) {
 				dst_link = &dst_host->link[link_idx];
 
 				/* skip links that cannot carry a PMTUd probe right now */
 				if ((dst_link->status.enabled != 1) ||
 				    (dst_link->status.connected != 1) ||
 				    (dst_host->link[link_idx].transport == KNET_TRANSPORT_LOOPBACK) ||
 				    (!dst_link->last_ping_size) ||
 				    ((dst_link->dynamic == KNET_LINK_DYNIP) &&
 				     (dst_link->status.dynconnected != 1)))
 					continue;
 
 				if (!knet_h->manual_mtu) {
 					link_has_mtu = _handle_check_pmtud(knet_h, dst_host, dst_link, force_run);
 					if (errno == EDEADLK) {
 						goto out_unlock;
 					}
 					if (link_has_mtu) {
 						have_mtu = 1;
 						if (dst_link->status.mtu < lower_mtu) {
 							lower_mtu = dst_link->status.mtu;
 						}
 					}
 				} else {
 					link_has_mtu = _calculate_manual_mtu(knet_h, dst_link);
 					if (link_has_mtu) {
 						have_mtu = 1;
 						if (dst_link->status.mtu < lower_mtu) {
 							lower_mtu = dst_link->status.mtu;
 						}
 					}
 				}
 			}
 		}
 
 		/* publish the global (lowest) data MTU and notify callers */
 		if (have_mtu) {
 			if (knet_h->data_mtu != lower_mtu) {
 				knet_h->data_mtu = lower_mtu;
 				log_info(knet_h, KNET_SUB_PMTUD, "Global data MTU changed to: %u", knet_h->data_mtu);
 
 				if (knet_h->pmtud_notify_fn) {
 					knet_h->pmtud_notify_fn(knet_h->pmtud_notify_fn_private_data,
 								knet_h->data_mtu);
 				}
 			}
 		}
 out_unlock:
 		pthread_rwlock_unlock(&knet_h->global_rwlock);
 		if (pthread_mutex_lock(&knet_h->pmtud_mutex) != 0) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
 		} else {
 			knet_h->pmtud_running = 0;
 			pthread_mutex_unlock(&knet_h->pmtud_mutex);
 		}
 	}
 
 	set_thread_status(knet_h, KNET_THREAD_PMTUD, KNET_THREAD_STOPPED);
 
 	return NULL;
 }
 
 /*
  * Echo a PMTUd probe back to the sender as a reply packet,
  * rewriting the header in place (prep_pmtud_reply_v1) and encrypting
  * when crypto is configured. Errors are logged; nothing is returned.
  */
 static void send_pmtud_reply(knet_handle_t knet_h, struct knet_link *src_link, struct knet_header *inbuf)
 {
 	int err = 0, savederrno = 0, stats_err = 0;
 	unsigned char *outbuf = (unsigned char *)inbuf;
 	ssize_t len, outlen;
 
 	if (knet_h->onwire_ver_remap) {
 		prep_pmtud_reply_v1(knet_h, inbuf, &outlen);
 	} else {
 		switch (inbuf->kh_version) {
 			case 1:
 				prep_pmtud_reply_v1(knet_h, inbuf, &outlen);
 				break;
 			default:
 				log_warn(knet_h, KNET_SUB_PMTUD, "preparing PMTUD reply onwire version %u not supported", inbuf->kh_version);
 				return;
 				break;
 		}
 	}
 
 	if (knet_h->crypto_in_use_config) {
 		if (crypto_encrypt_and_sign(knet_h,
 					    (const unsigned char *)inbuf,
 					    outlen,
 					    knet_h->recv_from_links_buf_crypt,
 					    &outlen) < 0) {
 			log_debug(knet_h, KNET_SUB_PMTUD, "Unable to encrypt PMTUd reply packet");
 			return;
 		}
 		outbuf = knet_h->recv_from_links_buf_crypt;
 		stats_err = pthread_mutex_lock(&knet_h->handle_stats_mutex);
 		/*
 		 * NOTE(review): pthread_mutex_lock returns a positive error
 		 * number on failure, so this < 0 check looks unreachable —
 		 * confirm and consider testing for != 0 instead.
 		 */
 		if (stats_err < 0) {
 			log_err(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock: %s", strerror(stats_err));
 			return;
 		}
 		knet_h->stats_extra.tx_crypt_pmtu_reply_packets++;
 		pthread_mutex_unlock(&knet_h->handle_stats_mutex);
 	}
 
 	savederrno = pthread_mutex_lock(&knet_h->tx_mutex);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_PMTUD, "Unable to get TX mutex lock: %s", strerror(savederrno));
 		return;
 	}
 
 retry:
 	if (src_link->transport_connected) {
 		if (transport_get_connection_oriented(knet_h, src_link->transport) == TRANSPORT_PROTO_NOT_CONNECTION_ORIENTED) {
 			len = sendto(src_link->outsock, outbuf, outlen, MSG_DONTWAIT | MSG_NOSIGNAL,
 				     (struct sockaddr *) &src_link->dst_addr,
 				     knet_h->knet_transport_fd_tracker[src_link->outsock].sockaddr_len);
 		} else {
 			len = sendto(src_link->outsock, outbuf, outlen, MSG_DONTWAIT | MSG_NOSIGNAL, NULL, 0);
 		}
 		savederrno = errno;
 		if (len != outlen) {
 			err = transport_tx_sock_error(knet_h, src_link->transport, src_link->outsock, len, savederrno);
 			stats_err = pthread_mutex_lock(&src_link->link_stats_mutex);
 			/*
 			 * NOTE(review): same unreachable < 0 check as above —
 			 * also note the TX mutex is not released on this early
 			 * return; confirm intended behavior.
 			 */
 			if (stats_err < 0) {
 				log_err(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock: %s", strerror(stats_err));
 				return;
 			}
 			switch(err) {
 				case KNET_TRANSPORT_SOCK_ERROR_INTERNAL:
 					log_debug(knet_h, KNET_SUB_PMTUD,
 						  "Unable to send PMTUd reply (sock: %d) packet (sendto): %d %s. recorded src ip: %s src port: %s dst ip: %s dst port: %s",
 						  src_link->outsock, errno, strerror(errno),
 						  src_link->status.src_ipaddr, src_link->status.src_port,
 						  src_link->status.dst_ipaddr, src_link->status.dst_port);
 
 					src_link->status.stats.tx_pmtu_errors++;
 					break;
 				case KNET_TRANSPORT_SOCK_ERROR_IGNORE:
 					src_link->status.stats.tx_pmtu_errors++;
 					break;
 				case KNET_TRANSPORT_SOCK_ERROR_RETRY:
 					src_link->status.stats.tx_pmtu_retries++;
 					pthread_mutex_unlock(&src_link->link_stats_mutex);
 					goto retry;
 					break;
 			}
 			pthread_mutex_unlock(&src_link->link_stats_mutex);
 		}
 	}
 	pthread_mutex_unlock(&knet_h->tx_mutex);
 }
 
 /*
  * RX-path handler for an incoming PMTUd probe packet.
  */
 void process_pmtud(knet_handle_t knet_h, struct knet_link *src_link, struct knet_header *inbuf)
 {
 	/*
 	 * at the moment we don't need to take any extra
 	 * actions when processing a PMTUd packet, except
 	 * sending a reply
 	 */
 	send_pmtud_reply(knet_h, src_link, inbuf);
 }
 
 /*
  * RX-path handler for an incoming PMTUd reply: record the received
  * size (process_pmtud_reply_v1) and wake the discovery loop waiting
  * on pmtud_cond in _handle_check_link_pmtud().
  */
 void process_pmtud_reply(knet_handle_t knet_h, struct knet_link *src_link, struct knet_header *inbuf)
 {
 	if (pthread_mutex_lock(&knet_h->pmtud_mutex) != 0) {
 		log_debug(knet_h, KNET_SUB_PMTUD, "Unable to get mutex lock");
 		return;
 	}
 
 	if (knet_h->onwire_ver_remap) {
 		process_pmtud_reply_v1(knet_h, src_link, inbuf);
 	} else {
 		switch (inbuf->kh_version) {
 			case 1:
 				process_pmtud_reply_v1(knet_h, src_link, inbuf);
 				break;
 			default:
 				log_warn(knet_h, KNET_SUB_PMTUD, "preparing PMTUD reply onwire version %u not supported", inbuf->kh_version);
 				goto out_unlock;
 				break;
 		}
 	}
 
 	pthread_cond_signal(&knet_h->pmtud_cond);
 out_unlock:
 	pthread_mutex_unlock(&knet_h->pmtud_mutex);
 }
 
 /*
  * Public API: read the PMTUd rerun interval (seconds) into *interval.
  * Returns 0 on success, -1 with errno set on failure.
  */
 int knet_handle_pmtud_getfreq(knet_handle_t knet_h, unsigned int *interval)
 {
 	int savederrno = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (!interval) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_PMTUD, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	*interval = knet_h->pmtud_interval;
 
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 
 	errno = 0;
 	return 0;
 }
 
 /*
  * Public API: set the PMTUd rerun interval in seconds
  * (valid range: 1..86400). Returns 0 on success, -1 with errno set.
  */
 int knet_handle_pmtud_setfreq(knet_handle_t knet_h, unsigned int interval)
 {
 	int savederrno = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if ((!interval) || (interval > 86400)) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_PMTUD, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	knet_h->pmtud_interval = interval;
 	log_debug(knet_h, KNET_SUB_PMTUD, "PMTUd interval set to: %u seconds", interval);
 
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 
 	errno = 0;
 	return 0;
 }
 
 /*
  * Public API: install (or clear, by passing NULL) the callback invoked
  * by the PMTUd thread when the global data MTU changes.
  * Returns 0 on success, -1 with errno set on failure.
  */
 int knet_handle_enable_pmtud_notify(knet_handle_t knet_h,
 				    void *pmtud_notify_fn_private_data,
 				    void (*pmtud_notify_fn) (
 						void *private_data,
 						unsigned int data_mtu))
 {
 	int savederrno = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	savederrno = get_global_wrlock(knet_h);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_PMTUD, "Unable to get write lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	knet_h->pmtud_notify_fn_private_data = pmtud_notify_fn_private_data;
 	knet_h->pmtud_notify_fn = pmtud_notify_fn;
 	if (knet_h->pmtud_notify_fn) {
 		log_debug(knet_h, KNET_SUB_PMTUD, "pmtud_notify_fn enabled");
 	} else {
 		log_debug(knet_h, KNET_SUB_PMTUD, "pmtud_notify_fn disabled");
 	}
 
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 
 	errno = 0;
 	return 0;
 }
 
 /*
  * Public API: manually pin the interface MTU (0 = return to automatic
  * discovery) and force a PMTUd rerun so the change takes effect.
  * Returns 0 on success, -1 with errno set on failure.
  */
 int knet_handle_pmtud_set(knet_handle_t knet_h,
 			  unsigned int iface_mtu)
 {
 	int savederrno = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (iface_mtu > KNET_PMTUD_SIZE_V4) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_PMTUD, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	log_info(knet_h, KNET_SUB_PMTUD, "MTU manually set to: %u", iface_mtu);
 
 	knet_h->manual_mtu = iface_mtu;
 
-	force_pmtud_run(knet_h, KNET_SUB_PMTUD, 0);
+	force_pmtud_run(knet_h, KNET_SUB_PMTUD, 0, 0);
 
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 
 	errno = 0;
 	return 0;
 }
 
 /*
  * Public API: read the current global data MTU into *data_mtu.
  * Returns 0 on success, -1 with errno set on failure.
  */
 int knet_handle_pmtud_get(knet_handle_t knet_h,
 			  unsigned int *data_mtu)
 {
 	int savederrno = 0;
 
 	if (!_is_valid_handle(knet_h)) {
 		return -1;
 	}
 
 	if (!data_mtu) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	savederrno = pthread_rwlock_rdlock(&knet_h->global_rwlock);
 	if (savederrno) {
 		log_err(knet_h, KNET_SUB_PMTUD, "Unable to get read lock: %s",
 			strerror(savederrno));
 		errno = savederrno;
 		return -1;
 	}
 
 	*data_mtu = knet_h->data_mtu;
 
 	pthread_rwlock_unlock(&knet_h->global_rwlock);
 
 	errno = 0;
 	return 0;
 }
diff --git a/libknet/transport_udp.c b/libknet/transport_udp.c
index ec5525fd..f6a94b4b 100644
--- a/libknet/transport_udp.c
+++ b/libknet/transport_udp.c
@@ -1,485 +1,485 @@
 /*
  * Copyright (C) 2016-2022 Red Hat, Inc.  All rights reserved.
  *
  * Author: Christine Caulfield <ccaulfie@redhat.com>
  *
  * This software licensed under LGPL-2.0+
  */
 
 #include "config.h"
 
 #include <string.h>
 #include <unistd.h>
 #include <errno.h>
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <stdlib.h>
 #include <netinet/in.h>
 #include <netinet/ip.h>
 #include <netinet/ip_icmp.h>
 #if defined (IP_RECVERR) || defined (IPV6_RECVERR)
 #include <linux/errqueue.h>
 #endif
 
 #include "libknet.h"
 #include "compat.h"
 #include "host.h"
 #include "link.h"
 #include "logging.h"
 #include "common.h"
 #include "netutils.h"
 #include "transport_common.h"
 #include "transport_udp.h"
 #include "transports.h"
 #include "threads_common.h"
 
 /* Per-handle UDP transport state: the list of open local sockets. */
 typedef struct udp_handle_info {
 	struct qb_list_head links_list;	/* list of udp_link_info_t, one per local address */
 } udp_handle_info_t;
 
 /* One UDP socket bound to a local address; shared by links with the same src. */
 typedef struct udp_link_info {
 	struct qb_list_head list;		/* membership in udp_handle_info_t.links_list */
 	struct sockaddr_storage local_address;	/* bind address, used for socket reuse lookups */
 	int socket_fd;				/* bound UDP socket */
 	int on_epoll;				/* 1 once added to recv_from_links_epollfd */
 } udp_link_info_t;
 
 /*
  * Configure the UDP side of a link: reuse an existing socket bound to the
  * same local address, or create/bind a new one, enable kernel error-queue
  * reporting (IP_RECVERR/IPV6_RECVERR) where available, register it with
  * the receive epoll and the fd tracker.
  * Returns 0 on success, -1 with errno set on failure.
  */
 int udp_transport_link_set_config(knet_handle_t knet_h, struct knet_link *kn_link)
 {
 	int err = 0, savederrno = 0;
 	int sock = -1;
 	struct epoll_event ev;
 	udp_link_info_t *info;
 	udp_handle_info_t *handle_info = knet_h->transports[KNET_TRANSPORT_UDP];
 #if defined (IP_RECVERR) || defined (IPV6_RECVERR)
 	int value;
 #endif
 
 	/*
 	 * Only allocate a new link if the local address is different
 	 */
 	/*
 	 * NOTE(review): whole-struct memcmp requires the unused tail of
 	 * sockaddr_storage to be identically zeroed on both sides; callers
 	 * appear to memset addresses, but worth confirming.
 	 */
 	qb_list_for_each_entry(info, &handle_info->links_list, list) {
 		if (memcmp(&info->local_address, &kn_link->src_addr, sizeof(struct sockaddr_storage)) == 0) {
 			log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Re-using existing UDP socket for new link");
 			kn_link->outsock = info->socket_fd;
 			kn_link->transport_link = info;
 			kn_link->transport_connected = 1;
 			return 0;
 		}
 	}
 
 	info = malloc(sizeof(udp_link_info_t));
 	if (!info) {
 		err = -1;
 		goto exit_error;
 	}
 	memset(info, 0, sizeof(udp_link_info_t));
 
 	sock = socket(kn_link->src_addr.ss_family, SOCK_DGRAM, 0);
 	if (sock < 0) {
 		savederrno = errno;
 		err = -1;
 		log_err(knet_h, KNET_SUB_TRANSP_UDP, "Unable to create listener socket: %s",
 			strerror(savederrno));
 		goto exit_error;
 	}
 
 	/* common socket setup (non-blocking, buffer sizes, etc.) */
 	if (_configure_transport_socket(knet_h, sock, &kn_link->src_addr, kn_link->flags, "UDP") < 0) {
 		savederrno = errno;
 		err = -1;
 		goto exit_error;
 	}
 
 	/*
 	 * Enable the kernel error queue so PMTU (EMSGSIZE) and ICMP errors
 	 * can be drained via recvmsg(MSG_ERRQUEUE) in read_errs_from_sock().
 	 */
 #ifdef IP_RECVERR
 	if (kn_link->src_addr.ss_family == AF_INET) {
 		value = 1;
 		if (setsockopt(sock, SOL_IP, IP_RECVERR, &value, sizeof(value)) <0) {
 			savederrno = errno;
 			err = -1;
 			log_err(knet_h, KNET_SUB_TRANSP_UDP, "Unable to set RECVERR on socket: %s",
 				strerror(savederrno));
 			goto exit_error;
 		}
 		log_debug(knet_h, KNET_SUB_TRANSP_UDP, "IP_RECVERR enabled on socket: %i", sock);
 	}
 #else
 	log_debug(knet_h, KNET_SUB_TRANSP_UDP, "IP_RECVERR not available in this build/platform");
 #endif
 #ifdef IPV6_RECVERR
 	if (kn_link->src_addr.ss_family == AF_INET6) {
 		value = 1;
 		if (setsockopt(sock, SOL_IPV6, IPV6_RECVERR, &value, sizeof(value)) <0) {
 			savederrno = errno;
 			err = -1;
 			log_err(knet_h, KNET_SUB_TRANSP_UDP, "Unable to set RECVERR on socket: %s",
 				strerror(savederrno));
 			goto exit_error;
 		}
 		log_debug(knet_h, KNET_SUB_TRANSP_UDP, "IPV6_RECVERR enabled on socket: %i", sock);
 	}
 #else
 	log_debug(knet_h, KNET_SUB_TRANSP_UDP, "IPV6_RECVERR not available in this build/platform");
 #endif
 
 	if (bind(sock, (struct sockaddr *)&kn_link->src_addr, sockaddr_len(&kn_link->src_addr))) {
 		savederrno = errno;
 		err = -1;
 		log_err(knet_h, KNET_SUB_TRANSP_UDP, "Unable to bind listener socket: %s",
 			strerror(savederrno));
 		goto exit_error;
 	}
 
 	memset(&ev, 0, sizeof(struct epoll_event));
 	ev.events = EPOLLIN;
 	ev.data.fd = sock;
 
 	if (epoll_ctl(knet_h->recv_from_links_epollfd, EPOLL_CTL_ADD, sock, &ev)) {
 		savederrno = errno;
 		err = -1;
 		log_err(knet_h, KNET_SUB_TRANSP_UDP, "Unable to add listener to epoll pool: %s",
 			strerror(savederrno));
 		goto exit_error;
 	}
 
 	info->on_epoll = 1;
 
 	if (_set_fd_tracker(knet_h, sock, KNET_TRANSPORT_UDP, 0, sockaddr_len(&kn_link->src_addr), info) < 0) {
 		savederrno = errno;
 		err = -1;
 		log_err(knet_h, KNET_SUB_TRANSP_UDP, "Unable to set fd tracker: %s",
 			strerror(savederrno));
 		goto exit_error;
 	}
 
 	/* publish the new socket so later links with the same src_addr reuse it */
 	memmove(&info->local_address, &kn_link->src_addr, sizeof(struct sockaddr_storage));
 	info->socket_fd = sock;
 	qb_list_add(&info->list, &handle_info->links_list);
 
 	kn_link->outsock = sock;
 	kn_link->transport_link = info;
 	kn_link->transport_connected = 1;
 
 exit_error:
 	/* unified cleanup: undo epoll registration, free info, close socket */
 	if (err) {
 		if (info) {
 			if (info->on_epoll) {
 				epoll_ctl(knet_h->recv_from_links_epollfd, EPOLL_CTL_DEL, sock, &ev);
 			}
 			free(info);
 		}
 		if (sock >= 0) {
 			close(sock);
 		}
 	}
 	errno = savederrno;
 	return err;
 }
 
 /*
  * Tear down the UDP side of a link. The socket may be shared by other
  * links (same local address), so it is only unregistered and closed when
  * no other link still references it; otherwise fails with EBUSY.
  * Returns 0 on success, -1 with errno set on failure.
  */
 int udp_transport_link_clear_config(knet_handle_t knet_h, struct knet_link *kn_link)
 {
 	int err = 0, savederrno = 0;
 	int found = 0;
 	struct knet_host *host;
 	int link_idx;
 	udp_link_info_t *info = kn_link->transport_link;
 	struct epoll_event ev;
 
 	/* scan every other link for a reference to the same socket info */
 	for (host = knet_h->host_head; host != NULL; host = host->next) {
 		for (link_idx = 0; link_idx < KNET_MAX_LINK; link_idx++) {
 			if (&host->link[link_idx] == kn_link)
 				continue;
 
 			if (host->link[link_idx].transport_link == info) {
 				found = 1;
 				break;
 			}
 		}
 	}
 
 	if (found) {
 		log_debug(knet_h, KNET_SUB_TRANSP_UDP, "UDP socket %d still in use", info->socket_fd);
 		savederrno = EBUSY;
 		err = -1;
 		goto exit_error;
 	}
 
 	if (info->on_epoll) {
 		memset(&ev, 0, sizeof(struct epoll_event));
 		ev.events = EPOLLIN;
 		ev.data.fd = info->socket_fd;
 
 		if (epoll_ctl(knet_h->recv_from_links_epollfd, EPOLL_CTL_DEL, info->socket_fd, &ev) < 0) {
 			savederrno = errno;
 			err = -1;
 			/* NOTE(review): uses strerror(errno) while the sibling paths
 			 * use strerror(savederrno); same value here, but inconsistent */
 			log_err(knet_h, KNET_SUB_TRANSP_UDP, "Unable to remove UDP socket from epoll poll: %s",
 				strerror(errno));
 			goto exit_error;
 		}
 		info->on_epoll = 0;
 	}
 
 	if (_set_fd_tracker(knet_h, info->socket_fd, KNET_MAX_TRANSPORTS, 0, sockaddr_len(&kn_link->src_addr), NULL) < 0) {
 		savederrno = errno;
 		err = -1;
 		log_err(knet_h, KNET_SUB_TRANSP_UDP, "Unable to set fd tracker: %s",
 			strerror(savederrno));
 		goto exit_error;
 	}
 
 	/* NOTE(review): kn_link->transport_link is left dangling after free;
 	 * presumably callers never touch it again — confirm in link.c */
 	close(info->socket_fd);
 	qb_list_del(&info->list);
 	free(kn_link->transport_link);
 
 exit_error:
 	errno = savederrno;
 	return err;
 }
 
 /*
  * Free the per-handle UDP transport state. All links must have been
  * cleared first (the links list must be empty).
  * Returns 0 on success, -1 on failure.
  */
 int udp_transport_free(knet_handle_t knet_h)
 {
 	udp_handle_info_t *handle_info;
 
 	if (!knet_h->transports[KNET_TRANSPORT_UDP]) {
 		errno = EINVAL;
 		return -1;
 	}
 
 	handle_info = knet_h->transports[KNET_TRANSPORT_UDP];
 
 	/*
 	 * keep it here while we debug list usage and such
 	 */
 	/* NOTE(review): this path returns -1 without setting errno */
 	if (!qb_list_empty(&handle_info->links_list)) {
 		log_err(knet_h, KNET_SUB_TRANSP_UDP, "Internal error. handle list is not empty");
 		return -1;
 	}
 
 	free(handle_info);
 
 	knet_h->transports[KNET_TRANSPORT_UDP] = NULL;
 
 	return 0;
 }
 
 /*
  * Allocate and register the per-handle UDP transport state.
  * Fails with EEXIST if already initialized, or with malloc's errno.
  * Returns 0 on success, -1 with errno set on failure.
  */
 int udp_transport_init(knet_handle_t knet_h)
 {
 	udp_handle_info_t *handle_info;
 
 	if (knet_h->transports[KNET_TRANSPORT_UDP]) {
 		errno = EEXIST;
 		return -1;
 	}
 
 	handle_info = malloc(sizeof(udp_handle_info_t));
 	if (!handle_info) {
 		return -1;
 	}
 
 	memset(handle_info, 0, sizeof(udp_handle_info_t));
 
 	knet_h->transports[KNET_TRANSPORT_UDP] = handle_info;
 
 	qb_list_init(&handle_info->links_list);
 
 	return 0;
 }
 
 #if defined (IP_RECVERR) || defined (IPV6_RECVERR)
 /*
  * Drain the kernel error queue (MSG_ERRQUEUE) of sockfd and act on what
  * we find:
  *  - local EMSGSIZE: record the kernel-reported MTU and kick a PMTUd re-run;
  *  - ICMP/ICMP6 unreachable-class errors: find the matching link by socket
  *    fd + remote address and flag it transport_connected = 0 so the
  *    heartbeat thread takes it down.
  * Loops until recvmsg() fails; returns 0 if at least one error was read,
  * otherwise -1 with errno from the first failed recvmsg().
  */
 static int read_errs_from_sock(knet_handle_t knet_h, int sockfd)
 {
 	int err = 0, savederrno = 0;
 	int got_err = 0;
 	char buffer[1024];
 	struct iovec iov;
 	struct msghdr msg;
 	struct cmsghdr *cmsg;
 	struct sock_extended_err *sock_err;
 	struct icmphdr icmph;
 	struct sockaddr_storage remote;
 	struct sockaddr_storage *origin;
 	char addr_str[KNET_MAX_HOST_LEN];
 	char port_str[KNET_MAX_PORT_LEN];
 	char addr_remote_str[KNET_MAX_HOST_LEN];
 	char port_remote_str[KNET_MAX_PORT_LEN];
 
 	iov.iov_base = &icmph;
 	iov.iov_len = sizeof(icmph);
 	msg.msg_name = (void*)&remote;
 	msg.msg_namelen = sizeof(remote);
 	msg.msg_iov = &iov;
 	msg.msg_iovlen = 1;
 	msg.msg_flags = 0;
 	msg.msg_control = buffer;
 	msg.msg_controllen = sizeof(buffer);
 
 	/*
 	 * NOTE(review): recvmsg() updates msg_namelen/msg_controllen on return
 	 * and they are not reset between iterations — presumably harmless since
 	 * the kernel-set values stay large enough; worth confirming.
 	 */
 	for (;;) {
 		err = recvmsg(sockfd, &msg, MSG_ERRQUEUE);
 		savederrno = errno;
 		if (err < 0) {
 			/* queue drained (or real error): success iff we read something */
 			if (!got_err) {
 				errno = savederrno;
 				return -1;
 			} else {
 				return 0;
 			}
 		}
 		got_err = 1;
 		for (cmsg = CMSG_FIRSTHDR(&msg);cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 			if (((cmsg->cmsg_level == SOL_IP) && (cmsg->cmsg_type == IP_RECVERR)) ||
 			    ((cmsg->cmsg_level == SOL_IPV6 && (cmsg->cmsg_type == IPV6_RECVERR)))) {
 				sock_err = (struct sock_extended_err*)(void *)CMSG_DATA(cmsg);
 				if (sock_err) {
 					switch (sock_err->ee_origin) {
 						case SO_EE_ORIGIN_NONE: /* no origin */
 						case SO_EE_ORIGIN_LOCAL: /* local source (EMSGSIZE) */
 							if (sock_err->ee_errno == EMSGSIZE) {
 								if (pthread_mutex_lock(&knet_h->kmtu_mutex) != 0) {
 									log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Unable to get mutex lock");
 									knet_h->kernel_mtu = 0;
 									break;
 								} else {
 									/* ee_info carries the path MTU the kernel discovered */
 									knet_h->kernel_mtu = sock_err->ee_info;
 									log_debug(knet_h, KNET_SUB_TRANSP_UDP, "detected kernel MTU: %u", knet_h->kernel_mtu);
 									pthread_mutex_unlock(&knet_h->kmtu_mutex);
 								}
 
 								/* NOTE(review): patch adds a 4th argument (0) here;
 								 * confirm its meaning against threads_common.h */
-								force_pmtud_run(knet_h, KNET_SUB_TRANSP_UDP, 0);
+								force_pmtud_run(knet_h, KNET_SUB_TRANSP_UDP, 0, 0);
 							}
 							/*
 							 * those errors are way too noisy
 							 */
 							break;
 						case SO_EE_ORIGIN_ICMP:  /* ICMP */
 						case SO_EE_ORIGIN_ICMP6: /* ICMP6 */
 							/* SO_EE_OFFENDER = the node that generated the ICMP error */
 							origin = (struct sockaddr_storage *)(void *)SO_EE_OFFENDER(sock_err);
 							if (knet_addrtostr(origin, sizeof(*origin),
 									   addr_str, KNET_MAX_HOST_LEN,
 									   port_str, KNET_MAX_PORT_LEN) < 0) {
 								log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Received ICMP error from unknown source: %s", strerror(sock_err->ee_errno));
 
 							} else {
 								if (knet_addrtostr(&remote, sizeof(remote),
 									       addr_remote_str, KNET_MAX_HOST_LEN,
 									       port_remote_str, KNET_MAX_PORT_LEN) < 0) {
 									log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Received ICMP error from %s: %s destination unknown", addr_str, strerror(sock_err->ee_errno));
 								} else {
 									log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Received ICMP error from %s: %s %s", addr_str, strerror(sock_err->ee_errno), addr_remote_str);
 									if ((sock_err->ee_errno == ECONNREFUSED) || /* knet is not running on the other node */
 									    (sock_err->ee_errno == ECONNABORTED) || /* local kernel closed the socket */
 									    (sock_err->ee_errno == ENONET)       || /* network does not exist */
 									    (sock_err->ee_errno == ENETUNREACH)  || /* network unreachable */
 									    (sock_err->ee_errno == EHOSTUNREACH) || /* host unreachable */
 									    (sock_err->ee_errno == EHOSTDOWN)    || /* host down (from kernel/net/ipv4/icmp.c */
 									    (sock_err->ee_errno == ENETDOWN)) {     /* network down */
 										struct knet_host *host = NULL;
 										struct knet_link *kn_link = NULL;
 										int link_idx, found = 0;
 
 										/* map (socket, remote addr) back to the knet link */
 										for (host = knet_h->host_head; host != NULL; host = host->next) {
 											for (link_idx = 0; link_idx < KNET_MAX_LINK; link_idx++) {
 												kn_link = &host->link[link_idx];
 												if (kn_link->outsock == sockfd) {
 													if (!cmpaddr(&remote, &kn_link->dst_addr)) {
 														found = 1;
 														break;
 													}
 												}
 											}
 											if (found) {
 												break;
 											}
 										}
 
 										if ((host) && (kn_link) &&
 										    (kn_link->status.connected)) {
 											log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Setting down host %u link %i", host->host_id, kn_link->link_id);
 											/*
 											 * setting transport_connected = 0 will trigger
 											 * thread_heartbeat link_down process.
 											 *
 											 * the process terminates calling into transport_link_down
 											 * below that will set transport_connected = 1
 											 */
 											kn_link->transport_connected = 0;
 										}
 
 									}
 								}
 							}
 							break;
 					}
 				} else {
 					log_debug(knet_h, KNET_SUB_TRANSP_UDP, "No data in MSG_ERRQUEUE");
 				}
 			}
 		}
 	}
 }
 #else
 /* Stub for platforms without IP_RECVERR/IPV6_RECVERR: nothing to drain. */
 static int read_errs_from_sock(knet_handle_t knet_h, int sockfd)
 {
 	return 0;
 }
 #endif
 
 /*
  * RX-side error hook: on EAGAIN (no data pending) take the chance to
  * drain any queued socket errors. Always returns 0 (non-fatal).
  */
 int udp_transport_rx_sock_error(knet_handle_t knet_h, int sockfd, int recv_err, int recv_errno)
 {
 	if (recv_errno == EAGAIN) {
 		read_errs_from_sock(knet_h, sockfd);
 	}
 	return 0;
 }
 
 /*
  * TX-side error hook: classify a failed send on sockfd.
  *  - EMSGSIZE: drain the error queue (captures kernel PMTU) and ignore;
  *  - EINVAL/EPERM/unreachable/down: report for internal handling;
  *  - ENOBUFS/EAGAIN: socket overloaded, throttle briefly then retry;
  *  - anything else: drain errors and retry.
  * Non-errors (recv_err >= 0) are ignored.
  */
 transport_sock_error_t udp_transport_tx_sock_error(knet_handle_t knet_h, int sockfd, int recv_err, int recv_errno)
 {
 	if (recv_err < 0) {
 		if (recv_errno == EMSGSIZE) {
 			read_errs_from_sock(knet_h, sockfd);
 			return KNET_TRANSPORT_SOCK_ERROR_IGNORE;
 		}
 		if ((recv_errno == EINVAL) || (recv_errno == EPERM) ||
 		    (recv_errno == ENETUNREACH) || (recv_errno == ENETDOWN) ||
 		    (recv_errno == EHOSTUNREACH)) {
 #ifdef DEBUG
 			if ((recv_errno == ENETUNREACH) || (recv_errno == ENETDOWN)) {
 				log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Sock: %d is unreachable.", sockfd);
 			}
 #endif
 			return KNET_TRANSPORT_SOCK_ERROR_INTERNAL;
 		}
 		if ((recv_errno == ENOBUFS) || (recv_errno == EAGAIN)) {
 #ifdef DEBUG
 			log_debug(knet_h, KNET_SUB_TRANSP_UDP, "Sock: %d is overloaded. Slowing TX down", sockfd);
 #endif
 			/* brief backoff proportional to the timer resolution */
 			usleep(knet_h->threads_timer_res / 16);
 		} else {
 			read_errs_from_sock(knet_h, sockfd);
 		}
 		return KNET_TRANSPORT_SOCK_ERROR_RETRY;
 	}
 
 	return KNET_TRANSPORT_SOCK_ERROR_IGNORE;
 }
 
 /*
  * Classify a received message: zero-length datagrams carry no data and
  * are skipped; everything else is payload for the knet RX path.
  */
 transport_rx_isdata_t udp_transport_rx_is_data(knet_handle_t knet_h, int sockfd, struct knet_mmsghdr *msg)
 {
 	if (msg->msg_len == 0)
 		return KNET_TRANSPORT_RX_NOT_DATA_CONTINUE;
 
 	return KNET_TRANSPORT_RX_IS_DATA;
 }
 
 /* Mark a dynamically-connected link as such; UDP needs no handshake. */
 int udp_transport_link_dyn_connect(knet_handle_t knet_h, int sockfd, struct knet_link *kn_link)
 {
 	kn_link->status.dynconnected = 1;
 	return 0;
 }
 
 /*
  * Called when the heartbeat thread declares the link down. Re-arm
  * transport_connected so the down/up cycle driven by read_errs_from_sock()
  * (which clears it on ICMP errors) can repeat.
  */
 int udp_transport_link_is_down(knet_handle_t knet_h, struct knet_link *kn_link)
 {
 	/*
 	 * see comments about handling ICMP error messages
 	 */
 	kn_link->transport_connected = 1;
 	return 0;
 }