diff --git a/configure.ac b/configure.ac
index b405294a..a5ee1b56 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,620 +1,622 @@
 #
 # Copyright (C) 2010-2025 Red Hat, Inc.  All rights reserved.
 #
 # Authors: Fabio M. Di Nitto <fabbione@kronosnet.org>
 #          Federico Simoncelli <fsimon@kronosnet.org>
 #          Jules <jules@google.com> (Google AI Agent)
 #
 # This software licensed under GPL-2.0+
 #
 
 #                                               -*- Autoconf -*-
 # Process this file with autoconf to produce a configure script.
 #
 
 AC_PREREQ([2.63])
 AC_INIT([kronosnet],
 	m4_esyscmd([build-aux/git-version-gen .tarball-version .gitarchivever]),
 	[devel@lists.kronosnet.org])
 # Don't let AC_PROG_CC (invoked by AC_USE_SYSTEM_EXTENSIONS) replace
 # undefined CFLAGS with -g -O2, overriding our special OPT_CFLAGS.
 : ${CFLAGS=""}
 AC_USE_SYSTEM_EXTENSIONS
 AM_INIT_AUTOMAKE([1.13 dist-bzip2 dist-xz color-tests -Wno-portability subdir-objects])
 
 LT_PREREQ([2.2.6])
 # --enable-new-dtags: Use RUNPATH instead of RPATH.
 # It is necessary to have this done before libtool does linker detection.
 # See also: https://github.com/kronosnet/kronosnet/issues/107
 # --as-needed: Modern systems have builtin ceil() making -lm superfluous but
 # AC_SEARCH_LIBS can't detect this because it tests with a false prototype
 AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags],
 		   [AM_LDFLAGS=-Wl,--enable-new-dtags],
 		   [AC_MSG_ERROR(["Linker support for --enable-new-dtags is required"])])
 AX_CHECK_LINK_FLAG([-Wl,--as-needed], [AM_LDFLAGS="$AM_LDFLAGS -Wl,--as-needed"])
 
 saved_LDFLAGS="$LDFLAGS"
 LDFLAGS="$AM_LDFLAGS $LDFLAGS"
 LT_INIT
 LDFLAGS="$saved_LDFLAGS"
 
 AC_CONFIG_MACRO_DIR([m4])
 AC_CONFIG_SRCDIR([libknet/handle.c])
 AC_CONFIG_HEADERS([config.h])
 
 AC_CANONICAL_HOST
 
 AC_LANG([C])
 
 if test "$prefix" = "NONE"; then
 	prefix="/usr"
 	if test "$localstatedir" = "\${prefix}/var"; then
 		localstatedir="/var"
 	fi
 	if test "$libdir" = "\${exec_prefix}/lib"; then
 		if test -e /usr/lib64; then
 			libdir="/usr/lib64"
 		else
 			libdir="/usr/lib"
 		fi
 	fi
 fi
 
 AC_PROG_AWK
 AC_PROG_GREP
 AC_PROG_SED
 AC_PROG_CPP
 AC_PROG_CC
 m4_version_prereq([2.70], [:], [AC_PROG_CC_C99])
 if test "x$ac_cv_prog_cc_c99" = "xno"; then
 	AC_MSG_ERROR(["C99 support is required"])
 fi
 AC_PROG_LN_S
 AC_PROG_INSTALL
 AC_PROG_MAKE_SET
 PKG_PROG_PKG_CONFIG
 
 AC_CHECK_PROGS([VALGRIND_EXEC], [valgrind])
 AM_CONDITIONAL([HAS_VALGRIND], [test x$VALGRIND_EXEC != "x"])
 
 AC_CHECK_PROGS([COVBUILD_EXEC], [cov-build])
 AM_CONDITIONAL([HAS_COVBUILD], [test x$COVBUILD_EXEC != "x"])
 
 AC_CHECK_PROGS([COVANALYZE_EXEC], [cov-analyze])
 AM_CONDITIONAL([HAS_COVANALYZE], [test x$COVANALYZE_EXEC != "x"])
 
 AC_CHECK_PROGS([COVFORMATERRORS_EXEC], [cov-format-errors])
 AM_CONDITIONAL([HAS_COVFORMATERRORS], [test x$COVFORMATERRORS_EXEC != "x"])
 
 # KNET_OPTION_DEFINES(stem,type,detection code)
 # stem: forms the name of the option, the Automake conditional and the preprocessor define
 # type: compress or crypto, determines where the default comes from
 AC_DEFUN([KNET_OPTION_DEFINES],[
 AC_ARG_ENABLE([$2-$1],[AS_HELP_STRING([--disable-$2-$1],[disable libknet $1 support])],,
 	[enable_$2_$1="$enable_$2_all"])
 AM_CONDITIONAL([BUILD_]m4_toupper([$2_$1]),[test "x$enable_$2_$1" = xyes])
 if test "x$enable_$2_$1" = xyes; then
 	$3
 fi
 AC_DEFINE_UNQUOTED([WITH_]m4_toupper([$2_$1]), [`test "x$enable_$2_$1" != xyes; echo $?`], $1 $2 [built in])
 ])
 
 AC_ARG_ENABLE([man],
 	[AS_HELP_STRING([--disable-man],[disable man page creation])],,
 	[ enable_man="yes" ])
 AM_CONDITIONAL([BUILD_MAN], [test x$enable_man = xyes])
 
 AC_ARG_ENABLE([libknet-sctp],
 	[AS_HELP_STRING([--disable-libknet-sctp],[disable libknet SCTP support])],,
 	[ enable_libknet_sctp="yes" ])
 AM_CONDITIONAL([BUILD_SCTP], [test x$enable_libknet_sctp = xyes])
 
 AC_ARG_ENABLE([functional-tests],
 	[AS_HELP_STRING([--disable-functional-tests],[disable execution of functional tests, useful for old and slow arches])],,
 	[ enable_functional_tests="yes" ])
 AM_CONDITIONAL([RUN_FUN_TESTS], [test x$enable_functional_tests = xyes])
 
 AC_ARG_ENABLE([crypto-all],
 	[AS_HELP_STRING([--disable-crypto-all],[disable libknet all crypto modules support])],,
 	[ enable_crypto_all="yes" ])
 
 KNET_OPTION_DEFINES([nss],[crypto],[PKG_CHECK_MODULES([nss], [nss])])
 KNET_OPTION_DEFINES([openssl],[crypto],[PKG_CHECK_MODULES([openssl], [libcrypto])])
 
 # use gcry_mac_open to detect if libgcrypt is new enough
 KNET_OPTION_DEFINES([gcrypt],[crypto],[
 	PKG_CHECK_MODULES([gcrypt], [libgcrypt >= 1.8.0],,
 		[AC_CHECK_HEADERS([gcrypt.h],
 			[AC_CHECK_LIB([gcrypt], [gcry_mac_open],
 				[AC_SUBST([gcrypt_LIBS], ["-lgcrypt -ldl -lgpg-error"])])],
 				[AC_MSG_ERROR(["missing required gcrypt.h"])])])
 ])
 
 AC_ARG_ENABLE([compress-all],
 	[AS_HELP_STRING([--disable-compress-all],[disable libknet all compress modules support])],,
 	[ enable_compress_all="yes" ])
 
 KNET_OPTION_DEFINES([zstd],[compress],[PKG_CHECK_MODULES([libzstd], [libzstd])])
 KNET_OPTION_DEFINES([zlib],[compress],[PKG_CHECK_MODULES([zlib], [zlib])])
 KNET_OPTION_DEFINES([lz4],[compress],[PKG_CHECK_MODULES([liblz4], [liblz4])])
 KNET_OPTION_DEFINES([lzo2],[compress],[
 	PKG_CHECK_MODULES([lzo2], [lzo2],
 		[# work around broken pkg-config file in v2.10
 		 AC_SUBST([lzo2_CFLAGS],[`echo $lzo2_CFLAGS | sed 's,/lzo *, ,'`])],
 		[AC_CHECK_HEADERS([lzo/lzo1x.h],
 			[AC_CHECK_LIB([lzo2], [lzo1x_decompress_safe],
 				[AC_SUBST([lzo2_LIBS], [-llzo2])])],
 				[AC_MSG_ERROR(["missing required lzo/lzo1x.h header"])])])
 ])
 KNET_OPTION_DEFINES([lzma],[compress],[PKG_CHECK_MODULES([liblzma], [liblzma])])
 KNET_OPTION_DEFINES([bzip2],[compress],[
 	PKG_CHECK_MODULES([bzip2], [bzip2],,
 		[AC_CHECK_HEADERS([bzlib.h],
 			[AC_CHECK_LIB([bz2], [BZ2_bzBuffToBuffCompress],
 				[AC_SUBST([bzip2_LIBS], [-lbz2])])],
 				[AC_MSG_ERROR(["missing required bzlib.h"])])])
 ])
 
 AC_ARG_ENABLE([install-tests],
 	[AS_HELP_STRING([--enable-install-tests],[install tests])],,
 	[ enable_install_tests="no" ])
 AM_CONDITIONAL([INSTALL_TESTS], [test x$enable_install_tests = xyes])
 
 AC_ARG_ENABLE([runautogen],
 	[AS_HELP_STRING([--enable-runautogen],[run autogen.sh])],,
 	[ enable_runautogen="no" ])
 AM_CONDITIONAL([BUILD_RUNAUTOGEN], [test x$enable_runautogen = xyes])
 
 override_rpm_debuginfo_option="yes"
 AC_ARG_ENABLE([rpm-debuginfo],
 	[AS_HELP_STRING([--enable-rpm-debuginfo],[build debuginfo packages])],,
 	[ enable_rpm_debuginfo="no", override_rpm_debuginfo_option="no" ])
 AM_CONDITIONAL([BUILD_RPM_DEBUGINFO], [test x$enable_rpm_debuginfo = xyes])
 AM_CONDITIONAL([OVERRIDE_RPM_DEBUGINFO], [test x$override_rpm_debuginfo_option = xyes])
 
 AC_ARG_ENABLE([libnozzle],
 	[AS_HELP_STRING([--enable-libnozzle],[libnozzle support])],,
 	[ enable_libnozzle="yes" ])
 AM_CONDITIONAL([BUILD_LIBNOZZLE], [test x$enable_libnozzle = xyes])
 
 AC_ARG_ENABLE([rust-bindings],
 	[AS_HELP_STRING([--enable-rust-bindings],[rust bindings support])],,
 	[ enable_rust_bindings="no" ])
 AM_CONDITIONAL([BUILD_RUST_BINDINGS], [test x$enable_rust_bindings = xyes])
 
 AC_ARG_ENABLE([python-bindings],
 	[AS_HELP_STRING([--enable-python-bindings],[python bindings support])],,
 	[ enable_python_bindings="no" ])
 AM_CONDITIONAL([BUILD_PYTHON_BINDINGS], [test x$enable_python_bindings = xyes])
 
 ## local helper functions
 # this function checks whether CC supports the options passed as
 # args. Global CPPFLAGS are ignored during this test.
 cc_supports_flag() {
 	saveCPPFLAGS="$CPPFLAGS"
 	CPPFLAGS="-Werror $@"
 	AC_MSG_CHECKING([whether $CC supports "$@"])
 	AC_COMPILE_IFELSE([AC_LANG_PROGRAM([#include <stdio.h>], [
 			   #ifdef __USE_FORTIFY_LEVEL
 			   printf("%d\n", __USE_FORTIFY_LEVEL)
 			   #else
 			   printf("hello world\n")
 			   #endif
 			  ])],
 			  [RC=0; AC_MSG_RESULT([yes])],
 			  [RC=1; AC_MSG_RESULT([no])])
 	CPPFLAGS="$saveCPPFLAGS"
 	return $RC
 }
 
 # Checks for libraries.
 AX_PTHREAD(,[AC_MSG_ERROR([POSIX threads support is required])])
 saved_LIBS="$LIBS"
 LIBS=
 AC_SEARCH_LIBS([ceil], [m], , [AC_MSG_ERROR([ceil not found])])
 AC_SUBST([m_LIBS], [$LIBS])
 LIBS=
 AC_SEARCH_LIBS([clock_gettime], [rt], , [AC_MSG_ERROR([clock_gettime not found])])
 AC_SUBST([rt_LIBS], [$LIBS])
 LIBS=
 AC_SEARCH_LIBS([dlopen], [dl dld], , [AC_MSG_ERROR([dlopen not found])])
 AC_SUBST([dl_LIBS], [$LIBS])
 LIBS="$saved_LIBS"
 
 # Check RTLD_DI_ORIGIN (not declared by musl; glibc has it as an enum, so we cannot use #ifdef)
 AC_CHECK_DECL([RTLD_DI_ORIGIN], [AC_DEFINE([HAVE_RTLD_DI_ORIGIN], 1,
     [define when RTLD_DI_ORIGIN is declared])], ,[[#include <dlfcn.h>]])
 
 # OS detection
 
 AC_MSG_CHECKING([for os in ${host_os}])
 case "$host_os" in
 	*linux*)
 		AC_DEFINE_UNQUOTED([KNET_LINUX], [1], [Compiling for Linux platform])
 		AC_MSG_RESULT([Linux])
 		;;
 	*bsd*)
 		AC_DEFINE_UNQUOTED([KNET_BSD], [1], [Compiling for BSD platform])
 		AC_MSG_RESULT([BSD])
 		;;
 	*)
 		AC_MSG_ERROR([Unsupported OS? hmmmm])
 		;;
 esac
 
 # Checks for header files.
 AC_CHECK_HEADERS([sys/epoll.h])
 AC_CHECK_FUNCS([kevent])
 # if neither sys/epoll.h nor kevent is present, we should fail.
 
 if test "x$ac_cv_header_sys_epoll_h" = xno && test "x$ac_cv_func_kevent" = xno; then
 	AC_MSG_ERROR([Both epoll and kevent unavailable on this OS])
 fi
 
 if test "x$ac_cv_header_sys_epoll_h" = xyes && test "x$ac_cv_func_kevent" = xyes; then
 	AC_MSG_ERROR([Both epoll and kevent available on this OS, please contact the maintainers to fix the code])
 fi
 
 if test "x$enable_libknet_sctp" = xyes; then
 	AC_CHECK_HEADERS([netinet/sctp.h],, [AC_MSG_ERROR(["missing required SCTP headers"])])
 fi
 
 # Checks for typedefs, structures, and compiler characteristics.
 AC_C_INLINE
 AC_TYPE_PID_T
 AC_TYPE_SIZE_T
 AC_TYPE_SSIZE_T
 AC_TYPE_UINT8_T
 AC_TYPE_UINT16_T
 AC_TYPE_UINT32_T
 AC_TYPE_UINT64_T
 AC_TYPE_INT8_T
 AC_TYPE_INT16_T
 AC_TYPE_INT32_T
 AC_TYPE_INT64_T
 
 PKG_CHECK_MODULES([libqb], [libqb])
 
 if test "x$enable_man" = "xyes"; then
 	AC_ARG_VAR([DOXYGEN], [override doxygen executable])
 	AC_CHECK_PROGS([DOXYGEN], [doxygen], [no])
 	if test "x$DOXYGEN" = xno; then
 		AC_MSG_ERROR(["Doxygen command not found"])
 	fi
 
 	AC_ARG_VAR([DOXYGEN2MAN], [override doxygen2man executable])
 
 	# required to detect doxygen2man when libqb is installed
 	# in non standard paths
 	saved_PKG_CONFIG="$PKG_CONFIG"
 	saved_ac_cv_path_PKG_CONFIG="$ac_cv_path_PKG_CONFIG"
 	unset PKG_CONFIG ac_cv_path_PKG_CONFIG
 	AC_PATH_PROG([PKG_CONFIG], [pkg-config])
 	PKG_CHECK_MODULES([libqb_BUILD], [libqb])
 	PKG_CHECK_VAR([libqb_BUILD_PREFIX], [libqb], [prefix])
 	AC_PATH_PROG([DOXYGEN2MAN], [doxygen2man], [no], [$libqb_BUILD_PREFIX/bin$PATH_SEPARATOR$PATH])
 	PKG_CONFIG="$saved_PKG_CONFIG"
 	ac_cv_path_PKG_CONFIG="$saved_ac_cv_path_PKG_CONFIG"
 
 	if test "x$DOXYGEN2MAN" = "xno"; then
 		AC_MSG_ERROR(["doxygen2man command not found"])
 	fi
 	AC_SUBST([DOXYGEN2MAN])
 fi
 
 # check for rust tools to build bindings
 if test "x$enable_rust_bindings" = "xyes"; then
 	AC_PATH_PROG([CARGO], [cargo], [no])
 	if test "x$CARGO" = xno; then
 		AC_MSG_ERROR(["cargo command not found"])
 	fi
 
 	AC_PATH_PROG([RUSTC], [rustc], [no])
 	if test "x$RUSTC" = xno; then
 		AC_MSG_ERROR(["rustc command not found"])
 	fi
 
 	AC_PATH_PROG([RUSTDOC], [rustdoc], [no])
 	if test "x$RUSTDOC" = xno; then
 		AC_MSG_ERROR(["rustdoc command not found"])
 	fi
 
 	AC_PATH_PROG([BINDGEN], [bindgen], [no])
 	if test "x$BINDGEN" = xno; then
 		AC_MSG_ERROR(["bindgen command not found"])
 	fi
 
 	AC_PATH_PROG([CLIPPY], [clippy-driver], [no])
 	if test "x$CLIPPY" = xno; then
 		AC_MSG_ERROR(["clippy-driver command not found"])
 	fi
 
 	AC_PATH_PROG([RUSTFMT], [rustfmt], [no])
 	if test "x$RUSTFMT" = xno; then
 		AC_MSG_ERROR(["rustfmt command not found (optional)"])
 	fi
 fi
 
 # check for python tools to build bindings
 if test "x$enable_python_bindings" = "xyes"; then
 	AC_PATH_PROG([PYTHON3_CONFIG], [python3-config], [no])
 	if test "x$PYTHON3_CONFIG" = xno; then
 		PKG_CHECK_MODULES([PYTHON3], [python3], [
 			PYTHON_CFLAGS="$PYTHON3_CFLAGS"
 			PYTHON_LIBS="$PYTHON3_LIBS"
 		], [AC_MSG_ERROR(["python3-config or pkg-config for python3 not found"])])
 	else
 		PYTHON_CFLAGS=$($PYTHON3_CONFIG --cflags)
 		PYTHON_LIBS=$($PYTHON3_CONFIG --ldflags --embed)
 	fi
 	AC_SUBST([PYTHON_CFLAGS])
 	AC_SUBST([PYTHON_LIBS])
 fi
 
 # checks for libnozzle
 if test "x$enable_libnozzle" = xyes; then
 	if `echo $host_os | grep -q linux`; then
 		PKG_CHECK_MODULES([libnl], [libnl-3.0])
 		PKG_CHECK_MODULES([libnlroute], [libnl-route-3.0 >= 3.3], [],
 			[PKG_CHECK_MODULES([libnlroute], [libnl-route-3.0 < 3.3],
 					   [AC_DEFINE_UNQUOTED([LIBNL3_WORKAROUND], [1], [Enable libnl < 3.3 build workaround])], [])])
 	fi
 fi
 
 # https://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
 knetcurrent="2"
 knetrevision="0"
 knetage="0"
 # c:r:a
 libknetversion="$knetcurrent:$knetrevision:$knetage"
 # soname derived from c:r:a
 # use $VERSION as build metadata (https://semver.org/); build metadata increments automatically
 knetalpha="-alpha1"
 libknetrustver="$(($knetcurrent - $knetage)).$knetage.$knetrevision$knetalpha+$VERSION"
 
 nozzlecurrent="1"
 nozzlerevision="0"
 nozzleage="0"
 libnozzleversion="$nozzlecurrent:$nozzlerevision:$nozzleage"
 # nozzle is stable for now
 nozzlealpha=""
 libnozzlerustver="$(($nozzlecurrent - $nozzleage)).$nozzleage.$nozzlerevision$nozzlealpha+$VERSION"
 
 AC_SUBST([libknetversion])
 AC_SUBST([libknetrustver])
 AC_SUBST([libnozzleversion])
 AC_SUBST([libnozzlerustver])
 
 # local options
 AC_ARG_ENABLE([debug],
 	[AS_HELP_STRING([--enable-debug],[enable debug build])])
 
 AC_ARG_ENABLE([onwire-v1-extra-debug],
 	[AS_HELP_STRING([--enable-onwire-v1-extra-debug],[enable onwire protocol v1 extra debug. WARNING: IT BREAKS ONWIRE COMPATIBILITY! DO NOT USE IN PRODUCTION!])])
 
 if test "x${enable_onwire_v1_extra_debug}" = xyes; then
 	AC_DEFINE_UNQUOTED([ONWIRE_V1_EXTRA_DEBUG], [1], [Enable crc32 checksum for data and packets])
 fi
 
 # for standard crc32 function (used in test suite)
 PKG_CHECK_MODULES([zlib], [zlib])
 
 AC_ARG_ENABLE([hardening],
 	[AS_HELP_STRING([--disable-hardening],[disable hardening build flags])],,
 	[ enable_hardening="yes" ])
 
 AC_ARG_WITH([sanitizers],
 	[AS_HELP_STRING([--with-sanitizers=...,...],
 			[enable SANitizer build, do *NOT* use for production. Only ASAN/UBSAN/TSAN are currently supported])],
 	[ SANITIZERS="$withval" ],
 	[ SANITIZERS="" ])
 
 AC_ARG_WITH([testdir],
 	[AS_HELP_STRING([--with-testdir=DIR],[path to /usr/lib../kronosnet/tests/ dir where to install the test suite])],
 	[ TESTDIR="$withval" ],
 	[ TESTDIR="$libdir/kronosnet/tests" ])
 
 ## do subst
 
 AC_SUBST([TESTDIR])
 
 # debug build stuff
 if test "x${enable_debug}" = xyes; then
 	AC_DEFINE_UNQUOTED([DEBUG], [1], [Compiling Debugging code])
 	OPT_CFLAGS="-O0"
 	RUST_FLAGS=""
 	RUST_TARGET_DIR="debug"
 else
 	OPT_CFLAGS="-O3"
 	RUST_FLAGS="--release"
 	RUST_TARGET_DIR="release"
 fi
 
 # Check for availability of hardening options
 
 annocheck=no
 
 if test "x${enable_hardening}" = xyes; then
 	# support only gcc for now
 	if echo $CC | grep -q gcc; then
 		ANNOPLUGIN="-fplugin=annobin"
 		annocheck=yes
 	fi
 
 	FORTIFY_CFLAGS=""
 	if test "x${enable_debug}" != xyes; then
 		for j in 3 2; do
 			FORTIFY_CFLAGS_TEMP="-D_FORTIFY_SOURCE=$j"
 			if cc_supports_flag "$OPT_CFLAGS $FORTIFY_CFLAGS_TEMP"; then
 				FORTIFY_CFLAGS="$FORTIFY_CFLAGS_TEMP"
 				break
 			fi
 		done
 	fi
 
 	HARDENING_CFLAGS_ANNOCHECK="$ANNOPLUGIN -fPIC -DPIC -pie -fstack-protector-strong -fexceptions -D_GLIBCXX_ASSERTIONS -Wl,-z,now"
 	HARDENING_CFLAGS="-fstack-clash-protection -fcf-protection=full -mcet -mstackrealign"
 	EXTRA_HARDENING_CFLAGS=""
 
 	# check for annobin required cflags/ldflags
 	for j in $HARDENING_CFLAGS_ANNOCHECK; do
 		if cc_supports_flag $j; then
 			EXTRA_HARDENING_CFLAGS="$EXTRA_HARDENING_CFLAGS $j"
 		else
 			annocheck=no
 		fi
 	done
 
 	# check for other hardening cflags/ldflags
 	for j in $HARDENING_CFLAGS; do
 		if cc_supports_flag $j; then
 			EXTRA_HARDENING_CFLAGS="$EXTRA_HARDENING_CFLAGS $j"
 		fi
 	done
 
 	EXTRA_HARDENING_CFLAGS="$EXTRA_HARDENING_CFLAGS $FORTIFY_CFLAGS"
 
 	# check if annocheck binary is available
 	if test "x${annocheck}" = xyes; then
 		AC_CHECK_PROGS([ANNOCHECK_EXEC], [annocheck])
 		if test "x${ANNOCHECK_EXEC}" = x; then
 			annocheck=no
 		fi
 	fi
 
 	AM_LDFLAGS="$AM_LDFLAGS $EXTRA_HARDENING_CFLAGS"
 fi
 
 if test "x${enable_debug}" = xyes; then
 	annocheck=no
 fi
 
 AM_CONDITIONAL([HAS_ANNOCHECK], [test "x$annocheck" = "xyes"])
 
 # gdb flags
 if test "x${GCC}" = xyes; then
 	GDB_CFLAGS="-ggdb3"
 else
 	GDB_CFLAGS="-g"
 fi
 
 # --- ASAN/UBSAN/TSAN (see man gcc) ---
 # when using SANitizers, we need to pass the -fsanitize options
 # to both CFLAGS and LDFLAGS, and they must come first in the
 # list or there will be runtime issues (for example the user
 # would have to LD_PRELOAD asan for it to work properly).
 
 if test -n "${SANITIZERS}"; then
 	SANITIZERS=$(echo $SANITIZERS | sed -e 's/,/ /g')
 	for SANITIZER in $SANITIZERS; do
 		case $SANITIZER in
 			asan|ASAN)
 				SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=address"
 				SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=address -lasan"
 				AC_CHECK_LIB([asan],[main],,AC_MSG_ERROR([Unable to find libasan]))
 				;;
 			ubsan|UBSAN)
 				SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=undefined"
 				SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=undefined -lubsan"
 				AC_CHECK_LIB([ubsan],[main],,AC_MSG_ERROR([Unable to find libubsan]))
 				;;
 			tsan|TSAN)
 				SANITIZERS_CFLAGS="$SANITIZERS_CFLAGS -fsanitize=thread"
 				SANITIZERS_LDFLAGS="$SANITIZERS_LDFLAGS -fsanitize=thread -ltsan"
 				AC_CHECK_LIB([tsan],[main],,AC_MSG_ERROR([Unable to find libtsan]))
 				;;
 		esac
 	done
 fi
 
 DEFAULT_CFLAGS="-Werror -Wall -Wextra -Wno-gnu-folding-constant"
 
 # manual overrides
 # generates too much noise for stub APIs
 UNWANTED_CFLAGS="-Wno-unused-parameter"
 
 AC_SUBST([AM_CFLAGS],["$SANITIZERS_CFLAGS $OPT_CFLAGS $GDB_CFLAGS $DEFAULT_CFLAGS $EXTRA_HARDENING_CFLAGS $UNWANTED_CFLAGS"])
 LDFLAGS="$SANITIZERS_LDFLAGS $LDFLAGS"
 AC_SUBST([AM_LDFLAGS])
 AC_SUBST([RUST_FLAGS])
 AC_SUBST([RUST_TARGET_DIR])
 
 AX_PROG_DATE
 AS_IF([test "$ax_cv_prog_date_gnu_date:$ax_cv_prog_date_gnu_utc" = yes:yes],
 	[UTC_DATE_AT="date -u -d@"],
 	[AS_IF([test "x$ax_cv_prog_date_bsd_date" = xyes],
 		[UTC_DATE_AT="date -u -r"],
 		[AC_MSG_ERROR([date utility unable to convert epoch to UTC])])])
 AC_SUBST([UTC_DATE_AT])
 
 AC_ARG_VAR([SOURCE_EPOCH],[last modification date of the source])
 AC_MSG_NOTICE([trying to determine source epoch])
 AC_MSG_CHECKING([for source epoch in \$SOURCE_EPOCH])
 AS_IF([test -n "$SOURCE_EPOCH"],
 	[AC_MSG_RESULT([yes])],
 	[AC_MSG_RESULT([no])
 	 AC_MSG_CHECKING([for source epoch in source_epoch file])
 	 AS_IF([test -e "$srcdir/source_epoch"],
 		[read SOURCE_EPOCH <"$srcdir/source_epoch"
 		 AC_MSG_RESULT([yes])],
 		[AC_MSG_RESULT([no])
 		 AC_MSG_CHECKING([for source epoch baked in by gitattributes export-subst])
 		 SOURCE_EPOCH='$Format:%at$' # template for rewriting by git-archive
 		 AS_CASE([$SOURCE_EPOCH],
 			[?Format:*], # was not rewritten
 				[AC_MSG_RESULT([no])
 				 AC_MSG_CHECKING([for source epoch in \$SOURCE_DATE_EPOCH])
 				 AS_IF([test "x$SOURCE_DATE_EPOCH" != x],
 					[SOURCE_EPOCH="$SOURCE_DATE_EPOCH"
 					 AC_MSG_RESULT([yes])],
 					[AC_MSG_RESULT([no])
 					 AC_MSG_CHECKING([whether git log can provide a source epoch])
 					 SOURCE_EPOCH=f${SOURCE_EPOCH#\$F} # convert into git log --pretty format
 					 SOURCE_EPOCH=$(cd "$srcdir" && git log -1 --pretty=${SOURCE_EPOCH%$} 2>/dev/null)
 					 AS_IF([test -n "$SOURCE_EPOCH"],
 						[AC_MSG_RESULT([yes])],
 						[AC_MSG_RESULT([no, using current time and breaking reproducibility])
 						 SOURCE_EPOCH=$(date +%s)])])],
 			[AC_MSG_RESULT([yes])]
 		 )])
 	])
 AC_MSG_NOTICE([using source epoch $($UTC_DATE_AT$SOURCE_EPOCH +'%F %T %Z')])
 
 AC_CONFIG_FILES([
 		Makefile
 		libnozzle/Makefile
 		libnozzle/libnozzle.pc
 		libnozzle/tests/Makefile
 		libnozzle/bindings/Makefile
+		libnozzle/bindings/python/Makefile
 		libnozzle/bindings/rust/Makefile
 		libnozzle/bindings/rust/Cargo.toml
 		libnozzle/bindings/rust/tests/Makefile
 		libnozzle/bindings/rust/tests/Cargo.toml
 		libknet/Makefile
 		libknet/libknet.pc
 		libknet/tests/Makefile
 		libknet/bindings/Makefile
+		libknet/bindings/python/Makefile
 		libknet/bindings/rust/Makefile
 		libknet/bindings/rust/Cargo.toml
 		libknet/bindings/rust/tests/Makefile
 		libknet/bindings/rust/tests/Cargo.toml
 		man/Makefile
 		man/Doxyfile-knet
 		man/Doxyfile-nozzle
 		])
 
 if test "x$VERSION" = "xUNKNOWN"; then
 	AC_MSG_ERROR([m4_text_wrap([
   configure was unable to determine the source tree's current version. This
   generally happens when using git archive (or the github download button)
   generated tarball/zip file. In order to workaround this issue, either use git
   clone https://github.com/kronosnet/kronosnet.git or use an official release
   tarball, available at https://kronosnet.org/releases/.  Alternatively you
   can add a compatible version in a .tarball-version file at the top of the
   source tree, wipe your autom4te.cache dir and generated configure, and rerun
   autogen.sh.
   ], [  ], [   ], [76])])
 fi
 
 AC_OUTPUT
diff --git a/libknet/bindings/python/Makefile.am b/libknet/bindings/python/Makefile.am
index 174bf060..be6a68fa 100644
--- a/libknet/bindings/python/Makefile.am
+++ b/libknet/bindings/python/Makefile.am
@@ -1,51 +1,52 @@
 #
 # Copyright (C) 2025 Red Hat, Inc.  All rights reserved.
 #
 # Author: Jules <jules@google.com> (Google AI Agent)
 #
 # This software licensed under GPL-2.0+
 #
 
 MAINTAINERCLEANFILES = Makefile.in
 
 # Get CFLAGS and LIBS from python3-config or pkg-config (already handled in configure.ac)
-AM_CFLAGS = $(PYTHON_CFLAGS)
+AM_CFLAGS = $(PYTHON_CFLAGS) -I$(top_srcdir)/libknet
 AM_LDFLAGS = $(PYTHON_LIBS)
 
 # Define the Python extension module
 # The library will be named _knet.so
 # It is built from _knet.c
 pkglib_LTLIBRARIES = _knet.la
 _knet_la_SOURCES = _knet.c
 _knet_la_LDFLAGS = -module -avoid-version $(AM_LDFLAGS)
 _knet_la_CFLAGS = $(AM_CFLAGS)
 
 # Ensure the .so file is installed in a way that Python can find it,
 # typically within the package's directory structure or a site-packages directory.
 # For now, we'll install it into pkglibdir, which might need adjustment later
 # depending on the overall Python package structure.
 # pkglibdir = $(libdir)/kronosnet/python (adjust as necessary)
 
 # To make it discoverable by python, we might need a setup.py or similar,
 # or ensure it's installed in a standard Python module path.
 # This Makefile.am is just for building the .so.
 
 # Files to be cleaned
 CLEANFILES = *.lo *.la .libs/* _knet.so
 
 # Tests
 # Tell make where to find the tests and that they are python scripts
 # The path is relative to this Makefile.am
 TESTS = tests/test_knet.py
 
 # Set up the environment for running the tests
 # We need to ensure that the compiled Python extension (_knet.so) is findable.
 # It's typically in .libs after compilation by libtool, or in the current directory.
 AM_TESTS_ENVIRONMENT = \
-    PYTHONPATH="$(abs_builddir)/.libs:$(abs_builddir):$(PYTHONPATH)" \
-    srcdir="$(abs_srcdir)" \
-    PYTHON="$(PYTHON3)" # Ensure python3 is used
+	LD_PRELOAD="$(top_builddir)/libknet/.libs/libknet.so" \
+	PYTHONPATH="$(abs_builddir)/.libs:$(abs_builddir):$(PYTHONPATH)" \
+	srcdir="$(abs_srcdir)" \
+	PYTHON="$(PYTHON3)" # Ensure python3 is used
 
 # test_knet.py needs to be executable and have the correct shebang.
 # We list test files as EXTRA_DIST to ensure they are included in source distributions.
 EXTRA_DIST = tests/__init__.py tests/test_knet.py
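
Annotation (illustrative only): AM_TESTS_ENVIRONMENT above only exports variables; the test script started by the harness is what consumes them. A minimal Python sketch of what a test process relies on, assuming the module built here is importable as _knet:

# Sketch: how a test process sees the environment exported above.
# PYTHONPATH lets "import _knet" find the libtool-built module in .libs;
# LD_PRELOAD forces the in-tree libknet.so ahead of any installed copy.
import os
import sys

print("PYTHONPATH:", os.environ.get("PYTHONPATH", "<unset>"))
print("LD_PRELOAD:", os.environ.get("LD_PRELOAD", "<unset>"))

import _knet  # raises ImportError if the build directory is not on PYTHONPATH
print("loaded", _knet.__name__, "from", getattr(_knet, "__file__", "<unknown>"))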
diff --git a/libknet/bindings/python/_knet.c b/libknet/bindings/python/_knet.c
index 32bf6bc0..fbd650c9 100644
--- a/libknet/bindings/python/_knet.c
+++ b/libknet/bindings/python/_knet.c
@@ -1,114 +1,114 @@
 /*
  * Copyright (C) 2025 Red Hat, Inc.  All rights reserved.
  *
  * Author: Jules <jules@google.com> (Google AI Agent)
  *
  * This software licensed under GPL-2.0+
  */
 
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
-#include "libknet/libknet.h" // Adjust path if necessary based on include paths
+#include "libknet.h"
 
 // Capsule name for knet_handle_t
 #define KNET_HANDLE_T_CAPSULE_NAME "_knet_handle_t"
 
 // Wrapper for knet_handle_new
 static PyObject *
 py_knet_handle_new(PyObject *self, PyObject *args)
 {
     unsigned short host_id; // knet_node_id_t is uint16_t
     int log_fd;
     unsigned char default_log_level; // uint8_t
     unsigned long long flags; // uint64_t
     knet_handle_t handle;
 
     if (!PyArg_ParseTuple(args, "HibK", &host_id, &log_fd, &default_log_level, &flags)) {
         return NULL;
     }
 
     handle = knet_handle_new((knet_node_id_t)host_id, log_fd, (uint8_t)default_log_level, (uint64_t)flags);
 
     if (handle == NULL) {
         PyErr_SetFromErrno(PyExc_OSError); // Or a custom KnetException
         return NULL;
     }
 
     PyObject *capsule = PyCapsule_New(handle, KNET_HANDLE_T_CAPSULE_NAME, NULL); // No custom destructor for now
     if (capsule == NULL) {
         knet_handle_free(handle); // Clean up if capsule creation fails
         return NULL;
     }
     return capsule;
 }
 
 // Wrapper for knet_handle_free
 static PyObject *
 py_knet_handle_free(PyObject *self, PyObject *args)
 {
     PyObject *capsule;
     knet_handle_t handle;
 
     if (!PyArg_ParseTuple(args, "O", &capsule)) {
         return NULL;
     }
 
     if (!PyCapsule_CheckExact(capsule)) {
         PyErr_SetString(PyExc_TypeError, "Argument must be a knet handle capsule.");
         return NULL;
     }
 
     handle = (knet_handle_t)PyCapsule_GetPointer(capsule, KNET_HANDLE_T_CAPSULE_NAME);
     if (handle == NULL) {
         // PyCapsule_GetPointer already set an error (e.g., wrong capsule name)
         return NULL;
     }
 
     if (knet_handle_free(handle) == -1) {
         PyErr_SetFromErrno(PyExc_OSError); // Or a custom KnetException
         return NULL;
     }
 
     // It's good practice to invalidate the capsule after freeing the underlying resource,
     // though Python doesn't enforce it. One way is to set its pointer to NULL.
     // PyCapsule_SetPointer(capsule, NULL); // Requires a non-NULL name if destructor is NULL.
     // Or, more simply, just rely on the user not to use a freed handle.
     // If a destructor was provided to PyCapsule_New, it would be called when the capsule is GC'd.
     // Since we don't have one, make sure the user calls free explicitly.
 
     Py_RETURN_NONE;
 }
 
 // Method definitions
 static PyMethodDef KnetMethods[] = {
     {"handle_new", py_knet_handle_new, METH_VARARGS, "Create a new knet handle. Args: (host_id, log_fd, default_log_level, flags)"},
     {"handle_free", py_knet_handle_free, METH_VARARGS, "Free a knet handle. Args: (handle_capsule)"},
     {NULL, NULL, 0, NULL}        /* Sentinel */
 };
 
 // Module definition
 static struct PyModuleDef knetmodule = {
     PyModuleDef_HEAD_INIT,
     "_knet",   /* name of module */
     "Python bindings for libknet", /* module documentation, may be NULL */
     -1,       /* size of per-interpreter state of the module,
                  or -1 if the module keeps state in global variables. */
     KnetMethods
 };
 
 // Module initialization function
 PyMODINIT_FUNC
 PyInit__knet(void)
 {
     PyObject *m;
 
     m = PyModule_Create(&knetmodule);
     if (m == NULL)
         return NULL;
 
     // Optional: Add custom exceptions like KnetError = PyErr_NewException("_knet.Error", NULL, NULL);
     // Py_XINCREF(KnetError);
     // if (PyModule_AddObject(m, "Error", KnetError) < 0) { ... }
 
     return m;
 }
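
Annotation (illustrative only): the wrappers above map one-to-one onto knet_handle_new()/knet_handle_free(). A minimal usage sketch, assuming _knet.so is importable (for example via the PYTHONPATH set in the Makefile above):

# Usage sketch for the _knet module defined above.
import _knet

# Arguments mirror knet_handle_new(): host_id, log_fd (0 disables logging),
# default_log_level (0 == KNET_LOG_ERR), flags.
handle = _knet.handle_new(1, 0, 0, 0)
try:
    print("knet handle capsule:", handle)
finally:
    _knet.handle_free(handle)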
diff --git a/libknet/bindings/python/tests/test_knet.py b/libknet/bindings/python/tests/test_knet.py
old mode 100644
new mode 100755
index e8938e37..f6b83842
--- a/libknet/bindings/python/tests/test_knet.py
+++ b/libknet/bindings/python/tests/test_knet.py
@@ -1,116 +1,116 @@
 #!/usr/bin/env python3
 
 import unittest
 import os
 import sys
 
 # Attempt to import the compiled C extension _knet.
 # This follows the same logic as test_nozzle.py for locating the .so file.
 try:
     import _knet
 except ImportError as e:
     build_dir_guess1 = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '_build', 'libknet', 'bindings', 'python'))
     build_dir_guess2 = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # For non-out-of-tree or specific setups
 
     added_to_path = False
     if os.path.exists(os.path.join(build_dir_guess1, '_knet.so')):
         sys.path.insert(0, build_dir_guess1)
         added_to_path = True
     elif os.path.exists(os.path.join(build_dir_guess2, '_knet.so')):
         sys.path.insert(0, build_dir_guess2)
         added_to_path = True
 
     if added_to_path:
         try:
             import _knet
         except ImportError:
             print(f"Failed to import _knet even after adding potential build directory to sys.path.")
             print(f"Original error: {e}")
             print(f"Attempted directories: {build_dir_guess1}, {build_dir_guess2}")
             print(f"Current sys.path: {sys.path}")
             raise
     else:
         print(f"Failed to import _knet. Could not find _knet.so in guessed paths.")
         print(f"Original error: {e}")
         print(f"Attempted directories: {build_dir_guess1}, {build_dir_guess2}")
         # To help diagnose, list contents of attempted build directories if they exist
         if os.path.exists(build_dir_guess1):
             print(f"Contents of {build_dir_guess1}: {os.listdir(build_dir_guess1)}")
         else:
             print(f"{build_dir_guess1} does not exist.")
         if os.path.exists(build_dir_guess2):
             print(f"Contents of {build_dir_guess2}: {os.listdir(build_dir_guess2)}")
         else:
             print(f"{build_dir_guess2} does not exist.")
         raise
 
 
 # KNET_HANDLE_T_CAPSULE_NAME defined in _knet.c
 KNET_HANDLE_T_CAPSULE_NAME = "_knet_handle_t"
 
 class TestKnet(unittest.TestCase):
 
     def test_01_handle_new_and_free(self):
         """Test creating and freeing a knet handle."""
         host_id = 1
-        log_fd = -1  # Typically 0 for stdout/stderr, or -1 to disable, or a real fd.
-                     # Using -1 for basic test to avoid actual logging output.
+        log_fd = 0   # 0 disables logging; a value > 0 is used as a write fd for log messages.
+                     # Use 0 here to avoid log output during the test.
         default_log_level = 0 # KNET_LOG_ERR
         flags = 0 # No special flags for basic test
 
         handle_capsule = None
         try:
             handle_capsule = _knet.handle_new(host_id, log_fd, default_log_level, flags)
             self.assertIsNotNone(handle_capsule, "knet_handle_new should return a handle (capsule).")
 
             # Check if it's a capsule and has the correct name
             self.assertTrue(hasattr(handle_capsule, '__class__'), "Returned handle does not look like an object.")
             # PyCapsule_CheckExact is not directly available in Python,
             # but we can check the type name if it's a well-behaved capsule.
             # For now, just ensuring it's not None and doesn't immediately crash.
             # A more robust check would be to try using it with another function
             # that expects this capsule type, or checking its type name string if accessible.
             # print(type(handle_capsule)) # Expected: <class 'PyCapsule'>
 
             # Attempt to get the pointer to verify it's a valid capsule of our type
             # This is more of an internal check, not typically done in Python tests,
             # but useful here to ensure the C extension is behaving.
             # PyCapsule_GetPointer would be the C equivalent. Python doesn't directly expose this.
             # We rely on handle_free to validate the capsule type.
 
         except Exception as e:
             self.fail(f"knet_handle_new raised an exception: {e}")
         finally:
             if handle_capsule:
                 try:
                     _knet.handle_free(handle_capsule)
                 except Exception as e:
                     self.fail(f"knet_handle_free raised an exception: {e}")
 
     def test_02_handle_free_invalid_capsule(self):
         """Test knet_handle_free with an invalid capsule type."""
         # Create a dummy capsule with a different name
         dummy_capsule = None
         try:
             # The C API PyCapsule_New takes (pointer, name, destructor).
             # We can't easily create a PyCapsule from Python side with a specific C pointer or name.
             # So, we'll pass a non-capsule type or a capsule of a different C type if we had one.
             # For now, let's pass a simple Python object.
             with self.assertRaises(TypeError): # Expecting a TypeError from the C extension
                 _knet.handle_free(object())
 
             # Test with None
             with self.assertRaises(TypeError): # The "O" format accepts None, but the C code's
                                                # PyCapsule_CheckExact() check rejects it with a TypeError.
                 _knet.handle_free(None)
 
         except _knet.Error as e: # Assuming a custom _knet.Error for knet specific errors
             self.skipTest(f"Skipping invalid capsule test, _knet.Error not fully set up for this: {e}")
         except Exception as e:
             # The exact error might vary based on Python version and how PyArg_ParseTuple handles it.
             # We are checking that it *does* error out.
             pass
 
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/libnozzle/bindings/python/Makefile.am b/libnozzle/bindings/python/Makefile.am
index 2b99cf9a..0d98de89 100644
--- a/libnozzle/bindings/python/Makefile.am
+++ b/libnozzle/bindings/python/Makefile.am
@@ -1,58 +1,59 @@
 #
 # Copyright (C) 2025 Red Hat, Inc.  All rights reserved.
 #
 # Author: Jules <jules@google.com> (Google AI Agent)
 #
 # This software licensed under GPL-2.0+
 #
 
 MAINTAINERCLEANFILES = Makefile.in
 
 # Get CFLAGS and LIBS from python3-config or pkg-config (already handled in configure.ac)
-AM_CFLAGS = $(PYTHON_CFLAGS)
+AM_CFLAGS = $(PYTHON_CFLAGS) -I$(top_srcdir)/libnozzle
 AM_LDFLAGS = $(PYTHON_LIBS)
 
 # Define the Python extension module
 # The library will be named _nozzle.so
 # It is built from _nozzle.c
 pkglib_LTLIBRARIES = _nozzle.la
 _nozzle_la_SOURCES = _nozzle.c
 _nozzle_la_LDFLAGS = -module -avoid-version $(AM_LDFLAGS)
 _nozzle_la_CFLAGS = $(AM_CFLAGS)
 
 # Ensure the .so file is installed in a way that Python can find it,
 # typically within the package's directory structure or a site-packages directory.
 # For now, we'll install it into pkglibdir, which might need adjustment later
 # depending on the overall Python package structure.
 # pkglibdir = $(libdir)/kronosnet/python (adjust as necessary)
 
 # To make it discoverable by python, we might need a setup.py or similar,
 # or ensure it's installed in a standard Python module path.
 # This Makefile.am is just for building the .so.
 
 # Files to be cleaned
 CLEANFILES = *.lo *.la .libs/* _nozzle.so
 
 # Tests
 # Tell make where to find the tests and that they are python scripts
 # The path is relative to this Makefile.am
 TESTS = tests/test_nozzle.py
 
 # Set up the environment for running the tests
 # We need to ensure that the compiled Python extension (_nozzle.so) is findable.
 # It's typically in .libs after compilation by libtool, or in the current directory.
 # The $(top_builddir) variable points to the root of the build directory.
 # The $(abs_top_builddir) gives an absolute path.
 # The $(builddir) is the current directory where Makefile runs.
 # The $(abs_builddir) is its absolute path.
 # Our _nozzle.so will be in $(abs_builddir)/.libs or $(abs_builddir) after linking.
 # The test script itself is in $(srcdir)/tests/test_nozzle.py
 # We also need access to the source directory for the test script itself.
 AM_TESTS_ENVIRONMENT = \
-    PYTHONPATH="$(abs_builddir)/.libs:$(abs_builddir):$(PYTHONPATH)" \
-    srcdir="$(abs_srcdir)" \
-    PYTHON="$(PYTHON3)" # Ensure python3 is used if system python is older
+	LD_PRELOAD="$(top_builddir)/libnozzle/.libs/libnozzle.so" \
+	PYTHONPATH="$(abs_builddir)/.libs:$(abs_builddir):$(PYTHONPATH)" \
+	srcdir="$(abs_srcdir)" \
+	PYTHON="$(PYTHON3)" # Ensure python3 is used if system python is older
 
 # test_nozzle.py needs to be executable and have the correct shebang.
 # We list it as a source to ensure it's part of the distribution.
 EXTRA_DIST = tests/__init__.py tests/test_nozzle.py
diff --git a/libnozzle/bindings/python/_nozzle.c b/libnozzle/bindings/python/_nozzle.c
index 336c7b44..4fac076f 100644
--- a/libnozzle/bindings/python/_nozzle.c
+++ b/libnozzle/bindings/python/_nozzle.c
@@ -1,165 +1,165 @@
 /*
  * Copyright (C) 2025 Red Hat, Inc.  All rights reserved.
  *
  * Author: Jules <jules@google.com> (Google AI Agent)
  *
  * This software licensed under GPL-2.0+
  */
 
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
-#include "libnozzle/libnozzle.h" // Adjust path if necessary based on include paths
+#include "libnozzle.h"
 #include <net/if.h> // For IFNAMSIZ
 
 // Capsule name for nozzle_t
 #define NOZZLE_T_CAPSULE_NAME "_nozzle_t"
 
 // Wrapper for nozzle_open
 static PyObject *
 py_nozzle_open(PyObject *self, PyObject *args)
 {
     const char *devname_in;
     const char *updownpath;
     char devname_out[IFNAMSIZ];
     nozzle_t handle;
 
     if (!PyArg_ParseTuple(args, "ss", &devname_in, &updownpath)) {
         return NULL;
     }
 
     // Initialize devname_out, copy devname_in if it's not empty
     memset(devname_out, 0, IFNAMSIZ);
     if (strlen(devname_in) > 0) {
         strncpy(devname_out, devname_in, IFNAMSIZ - 1);
     }
 
     handle = nozzle_open(devname_out, IFNAMSIZ, updownpath);
 
     if (handle == NULL) {
         PyErr_SetFromErrno(PyExc_OSError); // Or a custom exception
         return NULL;
     }
 
     // Return a tuple: (capsule_containing_handle, actual_devname_string)
     PyObject *capsule = PyCapsule_New(handle, NOZZLE_T_CAPSULE_NAME, NULL);
     if (capsule == NULL) {
         // If capsule creation fails, we should close the handle we just opened
         nozzle_close(handle);
         return NULL;
     }
     return Py_BuildValue("Ns", capsule, devname_out);
 }
 
 // Wrapper for nozzle_close
 static PyObject *
 py_nozzle_close(PyObject *self, PyObject *args)
 {
     PyObject *capsule;
     nozzle_t handle;
 
     if (!PyArg_ParseTuple(args, "O", &capsule)) {
         return NULL;
     }
 
     handle = (nozzle_t)PyCapsule_GetPointer(capsule, NOZZLE_T_CAPSULE_NAME);
     if (handle == NULL) {
         return NULL; // PyCapsule_GetPointer already set an error
     }
 
     if (nozzle_close(handle) == -1) {
         PyErr_SetFromErrno(PyExc_OSError); // Or a custom exception
         return NULL;
     }
 
     Py_RETURN_NONE;
 }
 
 // Wrapper for nozzle_get_name_by_handle
 static PyObject *
 py_nozzle_get_name_by_handle(PyObject *self, PyObject *args)
 {
     PyObject *capsule;
     nozzle_t handle;
     const char *name;
 
     if (!PyArg_ParseTuple(args, "O", &capsule)) {
         return NULL;
     }
 
     handle = (nozzle_t)PyCapsule_GetPointer(capsule, NOZZLE_T_CAPSULE_NAME);
     if (handle == NULL) {
         return NULL;
     }
 
     name = nozzle_get_name_by_handle(handle);
     if (name == NULL) {
         // nozzle_get_name_by_handle sets errno on error, but might also return NULL if handle is invalid
         // without necessarily setting errno (though docs say it does).
         // For safety, set a generic error if name is NULL.
         PyErr_SetString(PyExc_ValueError, "Failed to get name for nozzle handle or handle invalid");
         return NULL;
     }
 
     return PyUnicode_FromString(name);
 }
 
 // Wrapper for nozzle_get_fd
 static PyObject *
 py_nozzle_get_fd(PyObject *self, PyObject *args)
 {
     PyObject *capsule;
     nozzle_t handle;
     int fd;
 
     if (!PyArg_ParseTuple(args, "O", &capsule)) {
         return NULL;
     }
 
     handle = (nozzle_t)PyCapsule_GetPointer(capsule, NOZZLE_T_CAPSULE_NAME);
     if (handle == NULL) {
         return NULL;
     }
 
     fd = nozzle_get_fd(handle);
     if (fd == -1) {
         PyErr_SetFromErrno(PyExc_OSError);
         return NULL;
     }
     return PyLong_FromLong(fd);
 }
 
 
 // Method definitions
 static PyMethodDef NozzleMethods[] = {
     {"open", py_nozzle_open, METH_VARARGS, "Open a nozzle (tap) interface. Args: (devname_requested, updownpath_script_dir). Returns (handle, actual_devname)"},
     {"close", py_nozzle_close, METH_VARARGS, "Close a nozzle interface. Args: (handle)"},
     {"get_name", py_nozzle_get_name_by_handle, METH_VARARGS, "Get interface name from handle. Args: (handle)"},
     {"get_fd", py_nozzle_get_fd, METH_VARARGS, "Get file descriptor from handle. Args: (handle)"},
     {NULL, NULL, 0, NULL}        /* Sentinel */
 };
 
 // Module definition
 static struct PyModuleDef nozzlemodule = {
     PyModuleDef_HEAD_INIT,
     "_nozzle",   /* name of module */
     "Python bindings for libnozzle", /* module documentation, may be NULL */
     -1,       /* size of per-interpreter state of the module,
                  or -1 if the module keeps state in global variables. */
     NozzleMethods
 };
 
 // Module initialization function
 PyMODINIT_FUNC
 PyInit__nozzle(void)
 {
     PyObject *m;
 
     m = PyModule_Create(&nozzlemodule);
     if (m == NULL)
         return NULL;
 
     // Optional: Add custom exceptions or constants here
     // e.g., PyModule_AddStringConstant(m, "NOZZLE_CONSTANT", "value");
 
     return m;
 }
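
Annotation (illustrative only): a minimal usage sketch for the wrappers above, assuming _nozzle.so is importable and the process has privileges to create a tap device (the device name and updown path below are illustrative):

# Usage sketch for the _nozzle module defined above.
import _nozzle

handle, devname = _nozzle.open("nozpytst9", "/tmp/knet_nozzle_test_scripts")
try:
    print("device:", devname)
    print("name from handle:", _nozzle.get_name(handle))
    print("fd:", _nozzle.get_fd(handle))
finally:
    _nozzle.close(handle)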
diff --git a/libnozzle/bindings/python/tests/test_nozzle.py b/libnozzle/bindings/python/tests/test_nozzle.py
old mode 100644
new mode 100755
index c8d045e3..99392cc1
--- a/libnozzle/bindings/python/tests/test_nozzle.py
+++ b/libnozzle/bindings/python/tests/test_nozzle.py
@@ -1,156 +1,157 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2025 Red Hat, Inc.  All rights reserved.
 #
 # Author: Jules <jules@google.com> (Google AI Agent)
 #
 # This software licensed under GPL-2.0+
 #
 
 import unittest
 import os
 import sys
 
 # Attempt to import the compiled C extension.
 # This assumes that when the test is run, either:
 # 1. The _nozzle.so is in the same directory (e.g., copied by Makefile)
 # 2. PYTHONPATH is set to find it in the build directory (e.g., ../../build/libnozzle/bindings/python or similar)
 # 3. The module is installed.
 try:
     import _nozzle
 except ImportError as e:
     # A common location for the built .so file if running tests from `libnozzle/bindings/python/tests`
     # and the build dir is parallel to srcdir, e.g. `_build/libnozzle/bindings/python/`
     # This is a guess; a more robust solution involves build system support (e.g. via PYTHONPATH)
     build_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '_build', 'libnozzle', 'bindings', 'python'))
     # Another common pattern for non-out-of-tree builds or specific setups.
     # Define it unconditionally so the diagnostics below can always reference it.
     build_dir_alt = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
     if os.path.exists(os.path.join(build_dir, '_nozzle.so')): # Check typical automake build dir
         sys.path.insert(0, build_dir)
     elif os.path.exists(os.path.join(build_dir_alt, '_nozzle.so')):
         sys.path.insert(0, build_dir_alt)
 
     try:
         import _nozzle
     except ImportError:
         print(f"Failed to import _nozzle. Original error: {e}")
         print(f"Attempted to add build directories to sys.path: {build_dir}, {build_dir_alt}")
         print(f"Current sys.path: {sys.path}")
         # To help diagnose, list contents of attempted build directories
         if os.path.exists(build_dir):
             print(f"Contents of {build_dir}: {os.listdir(build_dir)}")
         if os.path.exists(build_dir_alt):
             print(f"Contents of {build_dir_alt}: {os.listdir(build_dir_alt)}")
         raise
 
 # Define a unique prefix for test interface names to avoid clashes
 # and make it easy to clean up if tests fail midway.
 TEST_IFACE_PREFIX = "nozpytst"
 UPDOWN_PATH_SCRIPTS = "/tmp/knet_nozzle_test_scripts" # Dummy path, ensure it exists or is not needed by basic open/close
 
+@unittest.skipUnless(os.geteuid() == 0, "requires root privileges")
 class TestNozzle(unittest.TestCase):
 
     def setUp(self):
         # For nozzle_open, updownpath is required. Create a dummy structure if it doesn't exist.
         # This is a simplification. Real tests might need more elaborate setup for updown scripts.
         os.makedirs(os.path.join(UPDOWN_PATH_SCRIPTS, "up.d"), exist_ok=True)
         os.makedirs(os.path.join(UPDOWN_PATH_SCRIPTS, "down.d"), exist_ok=True)
         # In a real CI environment, we might need to clean up interfaces
         # that were not closed due to previous test failures.
         # For now, we assume a clean state or manual cleanup.
         pass
 
     def tearDown(self):
         # pass # Individual tests will close their handles.
         # Clean up any interfaces that might have been left open by tests.
         # This is a bit complex as it requires listing system interfaces.
         # For now, we'll rely on tests to clean up after themselves.
         # Example of how one might attempt to clean up:
         # for i in range(5): # Try a few interface numbers
         #     try:
         #         # This is pseudo-code; actual cleanup needs to interact with the system
         #         # or use nozzle_get_handle_by_name if available and then close.
         #         iface_name_to_check = f"{TEST_IFACE_PREFIX}{i}"
         #         # handle = _nozzle.get_handle_by_name(iface_name_to_check) # If we had this
         #         # if handle: _nozzle.close(handle)
         #     except Exception:
         #         pass
         pass
 
 
     def test_01_open_and_close_interface(self):
         """Test opening and closing a nozzle interface with a specific name."""
         dev_name_req = TEST_IFACE_PREFIX + "0"
         try:
             handle, actual_dev_name = _nozzle.open(dev_name_req, UPDOWN_PATH_SCRIPTS)
             self.assertIsNotNone(handle, "Nozzle handle should not be None")
             self.assertTrue(actual_dev_name.startswith(TEST_IFACE_PREFIX), f"Actual device name {actual_dev_name} does not start with {TEST_IFACE_PREFIX}")
             self.assertEqual(actual_dev_name, dev_name_req, "Actual device name should match requested if specific name is given")
 
             # Test get_name
             name_from_handle = _nozzle.get_name(handle)
             self.assertEqual(name_from_handle, actual_dev_name, "Name from handle should match actual device name")
 
             # Test get_fd
             fd = _nozzle.get_fd(handle)
             self.assertIsInstance(fd, int, "File descriptor should be an integer")
             self.assertGreaterEqual(fd, 0, "File descriptor should be non-negative")
 
         finally:
             if 'handle' in locals() and handle:
                 _nozzle.close(handle)
 
         # After closing, operations on the handle should ideally fail.
         # PyCapsule does not automatically invalidate, so the C code would need to handle this,
         # or we accept that behavior is undefined after close for a stale handle.
         # For example, trying to get_name on a closed handle:
         # with self.assertRaises(Exception): # Expect some error
         #    _nozzle.get_name(handle)
 
 
     def test_02_open_interface_system_assigned_name(self):
         """Test opening a nozzle interface allowing system to assign name."""
         try:
             handle, actual_dev_name = _nozzle.open("", UPDOWN_PATH_SCRIPTS) # Empty string for system-assigned
             self.assertIsNotNone(handle, "Nozzle handle should not be None for system-assigned name")
             # System-assigned names usually start with 'tap' on Linux, or could be 'noz' if kernel/udev rules are set.
             # For this library, it's often 'tapX' or similar if not forced.
             # Given it's nozzle, it might try to create 'nozzleX' or 'nozX'
             self.assertTrue(len(actual_dev_name) > 0, "Actual device name should not be empty for system-assigned")
             # We cannot predict the exact name, but we can check its properties via other calls.
 
             name_from_handle = _nozzle.get_name(handle)
             self.assertEqual(name_from_handle, actual_dev_name, "Name from handle should match actual device name (system-assigned)")
 
             fd = _nozzle.get_fd(handle)
             self.assertIsInstance(fd, int, "File descriptor should be an integer (system-assigned)")
             self.assertGreaterEqual(fd, 0, "File descriptor should be non-negative (system-assigned)")
 
         finally:
             if 'handle' in locals() and handle:
                 _nozzle.close(handle)
 
     def test_03_open_non_existent_updownpath(self):
         """Test opening with a non-existent updownpath. Should still open device."""
         # nozzle_open itself doesn't fail if updownpath is invalid,
         # errors occur when nozzle_run_updown is called.
         dev_name_req = TEST_IFACE_PREFIX + "1"
         non_existent_path = "/tmp/nonexistent_path_for_nozzle_test"
         try:
             handle, actual_dev_name = _nozzle.open(dev_name_req, non_existent_path)
             self.assertIsNotNone(handle, "Nozzle handle should not be None even with non-existent updownpath")
             self.assertEqual(actual_dev_name, dev_name_req)
         finally:
             if 'handle' in locals() and handle:
                 _nozzle.close(handle)
 
     # Potential future tests:
     # - Error conditions for open (e.g., invalid devname format if enforced, permission issues)
     # - Error conditions for close (e.g., invalid handle)
     # - Thread safety if applicable
     # - Multiple open/close operations
 
 if __name__ == '__main__':
     # This allows running the test script directly.
     # For 'make check', the Makefile will typically invoke it.
     unittest.main()
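
Annotation (illustrative only): the tearDown() comments above sketch, in pseudo-code, cleaning up interfaces left behind by failed runs. Since the bindings expose no lookup-by-name call, a best-effort out-of-band cleanup could use iproute2 directly; a sketch, assuming the nozpytst prefix used above:

# Best-effort cleanup of leftover test taps via iproute2 (requires root).
import subprocess

for i in range(5):
    subprocess.run(["ip", "link", "delete", f"nozpytst{i}"],
                   stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                   check=False)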