diff --git a/.gitignore b/.gitignore index 0a6d45e65..8dd29db29 100644 --- a/.gitignore +++ b/.gitignore @@ -1,159 +1,160 @@ *.swp Makefile.in aclocal.m4 autoconf autoheader autom4te.cache automake autoscan.log compile configure configure.scan config.guess config.log config.sub config.status Makefile depcomp install-sh libtoolize ltmain.sh libtool make/stamp-h1 make/clusterautoconfig.h* missing resource-agents.spec *.pc .deps .libs *.o *.la *.lo *.loT rgmanager/src/resources/* !rgmanager/src/resources/*.in rgmanager/src/resources/Makefile.in rgmanager/src/resources/utils/config-utils.sh resource-agents-* .version # generated by ./autogen.sh && ./configure doc/man/*.7 doc/man/*.xml heartbeat/ocf-binaries heartbeat/ocf-directories heartbeat/ocf-shellfuncs heartbeat/send_ua heartbeat/shellfuncs heartbeat/*.pyc heartbeat/AoEtarget heartbeat/CTDB heartbeat/ManageRAID heartbeat/ManageVE heartbeat/Squid heartbeat/SysInfo heartbeat/aws-vpc-route53 heartbeat/azure-events heartbeat/azure-events-az heartbeat/clvm heartbeat/conntrackd heartbeat/dnsupdate heartbeat/dummypy heartbeat/eDir88 heartbeat/fio heartbeat/galera heartbeat/gcp-pd-move heartbeat/gcp-vpc-move-ip heartbeat/gcp-vpc-move-route heartbeat/gcp-vpc-move-vip heartbeat/ibm-cloud-vpc-cr-vip heartbeat/ibm-cloud-vpc-move-fip heartbeat/iSCSILogicalUnit heartbeat/iSCSITarget heartbeat/jira heartbeat/kamailio heartbeat/lxc heartbeat/lxd-info heartbeat/machine-info heartbeat/mariadb heartbeat/mpathpersist heartbeat/nfsnotify heartbeat/openstack-info +heartbeat/powervs-move-ip heartbeat/powervs-subnet heartbeat/rabbitmq-cluster heartbeat/redis heartbeat/rsyslog heartbeat/sg_persist heartbeat/slapd heartbeat/smb-share heartbeat/storage-mon heartbeat/sybaseASE heartbeat/syslog-ng heartbeat/vsftpd include/agent_config.h include/config.h include/config.h.in include/stamp-h1 include/stamp-h2 ldirectord/ldirectord ldirectord/ldirectord.8 ldirectord/OCF/ldirectord ldirectord/init.d/ldirectord ldirectord/init.d/ldirectord.debian ldirectord/init.d/ldirectord.debian.default ldirectord/systemd/ldirectord.service systemd/resource-agents.conf tools/findif tools/nfsconvert tools/ocf-tester tools/send_arp tools/storage_mon tools/tickle_tcp tools/ocft/README tools/ocft/README.zh_CN tools/ocft/caselib tools/ocft/ocft *.cache *.upgrade.xml py-compile ylwrap __pycache__ # BEAM Entries *.beam parser-messages MISC_ERRORS cscope.files cscope.out patches updates logs # OS and Editor Artifacts .DS_Store .bomb *.rej *.bz2 *.gz *.xz *.sed *.diff *.patch *.gres *~ # Misc HTML TAGS GPATH GRTAGS GSYMS GTAGS .gres.* *.orig .gdb_history *~ \#* .changes pacemaker.tar.gz diff --git a/configure.ac b/configure.ac index 8a74e6684..3765ac858 100644 --- a/configure.ac +++ b/configure.ac @@ -1,1118 +1,1126 @@ dnl dnl autoconf for Agents dnl dnl License: GNU General Public License (GPL) dnl =============================================== dnl Bootstrap dnl =============================================== AC_PREREQ(2.63) dnl Suggested structure: dnl information on the package dnl checks for programs dnl checks for libraries dnl checks for header files dnl checks for types dnl checks for structures dnl checks for compiler characteristics dnl checks for library functions dnl checks for system services AC_INIT([resource-agents], m4_esyscmd([make/git-version-gen .tarball-version]), [developers@clusterlabs.org]) AC_USE_SYSTEM_EXTENSIONS CRM_DTD_VERSION="1.0" AC_CONFIG_AUX_DIR(.) AC_CONFIG_MACRO_DIR([m4]) AC_CANONICAL_HOST dnl Where #defines go (e.g. 
`AC_CHECK_HEADERS' below) dnl dnl Internal header: include/config.h dnl - Contains ALL defines dnl - include/config.h.in is generated automatically by autoheader dnl - NOT to be included in any header files except lha_internal.h dnl (which is also not to be included in any other header files) dnl dnl External header: include/agent_config.h dnl - Contains a subset of defines checked here dnl - Manually edit include/agent_config.h.in to have configure include new defines dnl - Should not include HAVE_* defines dnl - Safe to include anywhere AM_CONFIG_HEADER(include/config.h include/agent_config.h) ALL_LINGUAS="en fr" AC_ARG_WITH(version, [ --with-version=version Override package version (if you're a packager needing to pretend) ], [ PACKAGE_VERSION="$withval" ]) AC_ARG_WITH(pkg-name, [ --with-pkg-name=name Override package name (if you're a packager needing to pretend) ], [ PACKAGE_NAME="$withval" ]) dnl dnl AM_INIT_AUTOMAKE([1.11.1 foreign dist-bzip2 dist-xz]) dnl AM_INIT_AUTOMAKE([1.10.1 foreign dist-bzip2]) AC_DEFINE_UNQUOTED(AGENTS_VERSION, "$PACKAGE_VERSION", Current agents version) CC_IN_CONFIGURE=yes export CC_IN_CONFIGURE LDD=ldd dnl ======================================================================== dnl Compiler characteristics dnl ======================================================================== # check stolen from gnulib/m4/gnu-make.m4 if ! ${MAKE-make} --version /cannot/make/this >/dev/null 2>&1; then AC_MSG_ERROR([you don't seem to have GNU make; it is required]) fi AC_PROG_CC dnl Can force other with environment variable "CC". AM_PROG_CC_C_O AC_PROG_CC_STDC AC_PROG_CPP AC_PROG_AWK AC_PROG_LN_S AC_PROG_INSTALL AC_PROG_MAKE_SET AC_C_STRINGIZE AC_C_INLINE AC_TYPE_SIZE_T AC_TYPE_SSIZE_T AC_TYPE_UID_T AC_TYPE_UINT16_T AC_TYPE_UINT8_T AC_TYPE_UINT32_T AC_CHECK_SIZEOF(char) AC_CHECK_SIZEOF(short) AC_CHECK_SIZEOF(int) AC_CHECK_SIZEOF(long) AC_CHECK_SIZEOF(long long) AC_STRUCT_TIMEZONE dnl =============================================== dnl Helpers dnl =============================================== cc_supports_flag() { local CPPFLAGS="$@" AC_MSG_CHECKING(whether $CC supports "$@") AC_PREPROC_IFELSE([AC_LANG_PROGRAM([])], [RC=0; AC_MSG_RESULT([yes])], [RC=1; AC_MSG_RESULT([no])]) return $RC } extract_header_define() { AC_MSG_CHECKING(for $2 in $1) Cfile=$srcdir/extract_define.$2.${$} printf "#include \n" > ${Cfile}.c printf "#include <%s>\n" $1 >> ${Cfile}.c printf "int main(int argc, char **argv) { printf(\"%%s\", %s); return 0; }\n" $2 >> ${Cfile}.c $CC $CFLAGS ${Cfile}.c -o ${Cfile} value=`${Cfile}` AC_MSG_RESULT($value) printf $value rm -f ${Cfile}.c ${Cfile} } AC_MSG_NOTICE(Sanitizing prefix: ${prefix}) case $prefix in NONE) prefix=/usr dnl Fix default variables - "prefix" variable if not specified if test "$localstatedir" = "\${prefix}/var"; then localstatedir="/var" fi if test "$sysconfdir" = "\${prefix}/etc"; then sysconfdir="/etc" fi ;; esac # ordering is important, PKG_PROG_PKG_CONFIG is to be invoked before any other PKG_* related stuff PKG_PROG_PKG_CONFIG(0.18) # PKG_CHECK_MODULES will fail if systemd is not found by default, so make sure # we set the proper vars and deal with it PKG_CHECK_MODULES([systemd], [systemd], [HAS_SYSTEMD=yes], [HAS_SYSTEMD=no]) if test "x$HAS_SYSTEMD" = "xyes"; then PKG_CHECK_VAR([SYSTEMD_UNIT_DIR], [systemd], [systemdsystemunitdir]) if test "x$SYSTEMD_UNIT_DIR" = "x"; then AC_MSG_ERROR([Unable to detect systemd unit dir automatically]) fi PKG_CHECK_VAR([SYSTEMD_TMPFILES_DIR], [systemd], [tmpfilesdir]) if test "x$SYSTEMD_TMPFILES_DIR" 
= "x"; then AC_MSG_ERROR([Unable to detect systemd tmpfiles directory automatically]) fi # sanitize systed vars when using non standard prefix if test "$prefix" != "/usr"; then SYSTEMD_UNIT_DIR="$prefix/$SYSTEMD_UNIT_DIR" AC_SUBST([SYSTEMD_UNIT_DIR]) SYSTEMD_TMPFILES_DIR="$prefix/$SYSTEMD_TMPFILES_DIR" AC_SUBST([SYSTEMD_TMPFILES_DIR]) fi fi AM_CONDITIONAL(HAVE_SYSTEMD, [test "x$HAS_SYSTEMD" = xyes ]) dnl =============================================== dnl Configure Options dnl =============================================== dnl Some systems, like Solaris require a custom package name AC_ARG_WITH(pkgname, [ --with-pkgname=name name for pkg (typically for Solaris) ], [ PKGNAME="$withval" ], [ PKGNAME="LXHAhb" ], ) AC_SUBST(PKGNAME) AC_ARG_ENABLE([ansi], [ --enable-ansi force GCC to compile to ANSI/ANSI standard for older compilers. [default=no]]) AC_ARG_ENABLE([fatal-warnings], [ --enable-fatal-warnings very pedantic and fatal warnings for gcc [default=yes]]) INITDIR="" AC_ARG_WITH(initdir, [ --with-initdir=DIR directory for init (rc) scripts [${INITDIR}]], [ INITDIR="$withval" ]) OCF_ROOT_DIR="${prefix}/lib/ocf" AC_ARG_WITH(ocf-root, [ --with-ocf-root=DIR directory for OCF scripts [${OCF_ROOT_DIR}]], [ OCF_ROOT_DIR="$withval" ]) HA_RSCTMPDIR=${localstatedir}/run/resource-agents AC_ARG_WITH(rsctmpdir, [ --with-rsctmpdir=DIR directory for resource agents state files [${HA_RSCTMPDIR}]], [ HA_RSCTMPDIR="$withval" ]) AC_ARG_ENABLE([libnet], [ --enable-libnet Use libnet for ARP based functionality, [default=try]], [enable_libnet="$enableval"], [enable_libnet=try]) BUILD_RGMANAGER=0 BUILD_LINUX_HA=0 RASSET=linux-ha AC_ARG_WITH(ras-set, [ --with-ras-set=SET build/install only linux-ha, rgmanager or all resource-agents [default: linux-ha]], [ RASSET="$withval" ]) if test x$RASSET = xyes || test x$RASSET = xall ; then BUILD_RGMANAGER=1 BUILD_LINUX_HA=1 fi if test x$RASSET = xlinux-ha; then BUILD_LINUX_HA=1 fi if test x$RASSET = xrgmanager; then BUILD_RGMANAGER=1 fi if test $BUILD_LINUX_HA -eq 0 && test $BUILD_RGMANAGER -eq 0; then AC_MSG_ERROR([Are you really sure you want this package?]) exit 1 fi AM_CONDITIONAL(BUILD_LINUX_HA, test $BUILD_LINUX_HA -eq 1) AM_CONDITIONAL(BUILD_RGMANAGER, test $BUILD_RGMANAGER -eq 1) AC_ARG_WITH(compat-habindir, [ --with-compat-habindir use HA_BIN directory with compatibility for the Heartbeat stack [${libexecdir}]], [], [with_compat_habindir=no]) AM_CONDITIONAL(WITH_COMPAT_HABINDIR, test "x$with_compat_habindir" != "xno") dnl =============================================== dnl General Processing dnl =============================================== echo Our Host OS: $host_os/$host AC_MSG_NOTICE(Sanitizing exec_prefix: ${exec_prefix}) case $exec_prefix in dnl For consistency with Heartbeat, map NONE->$prefix NONE) exec_prefix=$prefix;; prefix) exec_prefix=$prefix;; esac AC_MSG_NOTICE(Sanitizing INITDIR: ${INITDIR}) case $INITDIR in prefix) INITDIR=$prefix;; "") AC_MSG_CHECKING(which init (rc) directory to use) for initdir in /etc/init.d /etc/rc.d/init.d /sbin/init.d \ /usr/local/etc/rc.d /etc/rc.d do if test -d $initdir then INITDIR=$initdir break fi done if test -z $INITDIR then INITDIR=${sysconfdir}/init.d fi AC_MSG_RESULT($INITDIR);; esac AC_SUBST(INITDIR) if test "${prefix}" = "/usr"; then INITDIRPREFIX="$INITDIR" else INITDIRPREFIX="${prefix}/$INITDIR" fi AC_SUBST(INITDIRPREFIX) AC_MSG_NOTICE(Sanitizing libdir: ${libdir}) case $libdir in dnl For consistency with Heartbeat, map NONE->$prefix *prefix*|NONE) AC_MSG_CHECKING(which lib directory to use) for aDir in 
lib64 lib do trydir="${exec_prefix}/${aDir}" if test -d ${trydir} then libdir=${trydir} break fi done AC_MSG_RESULT($libdir); ;; esac if test "x$with_compat_habindir" != "xno" ; then libexecdir=${libdir} fi dnl Expand autoconf variables so that we dont end up with '${prefix}' dnl in #defines and python scripts dnl NOTE: Autoconf deliberately leaves them unexpanded to allow dnl make exec_prefix=/foo install dnl No longer being able to do this seems like no great loss to me... eval prefix="`eval echo ${prefix}`" eval exec_prefix="`eval echo ${exec_prefix}`" eval bindir="`eval echo ${bindir}`" eval sbindir="`eval echo ${sbindir}`" eval libexecdir="`eval echo ${libexecdir}`" eval datadir="`eval echo ${datadir}`" eval sysconfdir="`eval echo ${sysconfdir}`" eval sharedstatedir="`eval echo ${sharedstatedir}`" eval localstatedir="`eval echo ${localstatedir}`" eval libdir="`eval echo ${libdir}`" eval includedir="`eval echo ${includedir}`" eval oldincludedir="`eval echo ${oldincludedir}`" eval infodir="`eval echo ${infodir}`" eval mandir="`eval echo ${mandir}`" dnl docdir is a recent addition to autotools eval docdir="`eval echo ${docdir}`" if test "x$docdir" = "x"; then docdir="`eval echo ${datadir}/doc`" fi AC_SUBST(docdir) dnl Home-grown variables eval INITDIR="${INITDIR}" for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \ sharedstatedir localstatedir libdir includedir oldincludedir infodir \ mandir INITDIR docdir do dirname=`eval echo '${'${j}'}'` if test ! -d "$dirname" then AC_MSG_WARN([$j directory ($dirname) does not exist!]) fi done dnl This OS-based decision-making is poor autotools practice; dnl feature-based mechanisms are strongly preferred. dnl dnl So keep this section to a bare minimum; regard as a "necessary evil". REBOOT_OPTIONS="-f" POWEROFF_OPTIONS="-f" case "$host_os" in *bsd*) LIBS="-L/usr/local/lib" CPPFLAGS="$CPPFLAGS -I/usr/local/include" ;; *solaris*) REBOOT_OPTIONS="-n" POWEROFF_OPTIONS="-n" LDFLAGS+=" -lssp -lssp_nonshared" ;; *linux*) AC_DEFINE_UNQUOTED(ON_LINUX, 1, Compiling for Linux platform) POWEROFF_OPTIONS="-nf" REBOOT_OPTIONS="-nf" ;; darwin*) AC_DEFINE_UNQUOTED(ON_DARWIN, 1, Compiling for Darwin platform) LIBS="$LIBS -L${prefix}/lib" CFLAGS="$CFLAGS -I${prefix}/include" ;; esac AC_DEFINE_UNQUOTED(HA_LOG_FACILITY, LOG_DAEMON, Default logging facility) AC_MSG_NOTICE(Host CPU: $host_cpu) case "$host_cpu" in ppc64|powerpc64) case $CFLAGS in *powerpc64*) ;; *) if test "$GCC" = yes; then CFLAGS="$CFLAGS -m64" fi ;; esac esac AC_MSG_CHECKING(which format is needed to print uint64_t) case "$host_cpu" in s390x)U64T="%lu";; *64*) U64T="%lu";; *) U64T="%llu";; esac AC_MSG_RESULT($U64T) AC_DEFINE_UNQUOTED(U64T, "$U64T", Correct printf format for logging uint64_t) dnl Variables needed for substitution AC_CHECK_HEADERS(heartbeat/glue_config.h) if test "$ac_cv_header_heartbeat_glue_config_h" != "yes"; then enable_libnet=no fi AC_DEFINE_UNQUOTED(OCF_ROOT_DIR,"$OCF_ROOT_DIR", OCF root directory - specified by the OCF standard) AC_SUBST(OCF_ROOT_DIR) GLUE_STATE_DIR=${localstatedir}/run AC_DEFINE_UNQUOTED(GLUE_STATE_DIR,"$GLUE_STATE_DIR", Where to keep state files and sockets) AC_SUBST(GLUE_STATE_DIR) AC_DEFINE_UNQUOTED(HA_VARRUNDIR,"$GLUE_STATE_DIR", Where Heartbeat keeps state files and sockets - old name) HA_VARRUNDIR="$GLUE_STATE_DIR" AC_SUBST(HA_VARRUNDIR) # Expand $prefix eval HA_RSCTMPDIR="`eval echo ${HA_RSCTMPDIR}`" AC_DEFINE_UNQUOTED(HA_RSCTMPDIR,"$HA_RSCTMPDIR", Where Resource agents keep state files) AC_SUBST(HA_RSCTMPDIR) dnl Eventually move out 
of the heartbeat dir tree and create symlinks when needed HA_VARLIBHBDIR=${localstatedir}/lib/heartbeat AC_DEFINE_UNQUOTED(HA_VARLIBHBDIR,"$HA_VARLIBHBDIR", Whatever this used to mean) AC_SUBST(HA_VARLIBHBDIR) OCF_RA_DIR="${OCF_ROOT_DIR}/resource.d" AC_DEFINE_UNQUOTED(OCF_RA_DIR,"$OCF_RA_DIR", Location for OCF RAs) AC_SUBST(OCF_RA_DIR) OCF_RA_DIR_PREFIX="$OCF_RA_DIR" AC_SUBST(OCF_RA_DIR_PREFIX) OCF_LIB_DIR="${OCF_ROOT_DIR}/lib" AC_DEFINE_UNQUOTED(OCF_LIB_DIR,"$OCF_LIB_DIR", Location for shared code for OCF RAs) AC_SUBST(OCF_LIB_DIR) OCF_LIB_DIR_PREFIX="$OCF_LIB_DIR" AC_SUBST(OCF_LIB_DIR_PREFIX) dnl =============================================== dnl rgmanager ras bits dnl =============================================== LOGDIR=${localstatedir}/log/cluster CLUSTERDATA=${datadir}/cluster AC_SUBST([LOGDIR]) AC_SUBST([CLUSTERDATA]) dnl =============================================== dnl Program Paths dnl =============================================== PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin" export PATH AC_CHECK_PROGS(MAKE, gmake make) AC_CHECK_PROGS(SHELLCHECK, shellcheck) AM_CONDITIONAL(CI_CHECKS, test "x$SHELLCHECK" != "x" ) AC_PATH_PROGS(BASH_SHELL, bash) if test x"${BASH_SHELL}" = x""; then AC_MSG_ERROR(You need bash installed in order to build ${PACKAGE}) fi AC_PATH_PROGS(XSLTPROC, xsltproc) AM_CONDITIONAL(BUILD_DOC, test "x$XSLTPROC" != "x" ) if test "x$XSLTPROC" = "x"; then AC_MSG_WARN([xsltproc not installed, unable to (re-)build manual pages]) fi AC_SUBST(XSLTPROC) AC_PATH_PROGS(XMLCATALOG, xmlcatalog) AC_PATH_PROGS(SSH, ssh, /usr/bin/ssh) AC_PATH_PROGS(SCP, scp, /usr/bin/scp) AC_PATH_PROGS(TAR, tar) AC_PATH_PROGS(MD5, md5) AC_PATH_PROGS(TEST, test) AC_PATH_PROGS(PING, ping, /bin/ping) AC_PATH_PROGS(IFCONFIG, ifconfig, /sbin/ifconfig) AC_PATH_PROGS(MAILCMD, mailx mail, mail) AC_PATH_PROGS(RM, rm) AC_PROG_EGREP AC_PROG_FGREP AC_SUBST(BASH_SHELL) AC_SUBST(MAILCMD) AC_SUBST(SHELL) AC_SUBST(PING) AC_SUBST(RM) AC_SUBST(TEST) AM_PATH_PYTHON([3.6]) if test -z "$PYTHON"; then echo "*** Essential program python not found" 1>&2 exit 1 fi dnl Ensure PYTHON is an absolute path AC_PATH_PROG([PYTHON], [$PYTHON]) AM_PATH_PYTHON if test -z "$PYTHON"; then echo "*** Essential program python not found" 1>&2 fi AC_PYTHON_MODULE(json) AC_PYTHON_MODULE(pyroute2) AC_PYTHON_MODULE(requests) AC_PYTHON_MODULE(urllib3) AC_PYTHON_MODULE(ibm_cloud_fail_over) AS_VERSION_COMPARE([$PYTHON_VERSION], [3.6], [BUILD_OCF_PY=0], [BUILD_OCF_PY=1], [BUILD_OCF_PY=1]) BUILD_AZURE_EVENTS=1 if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then BUILD_AZURE_EVENTS=0 AC_MSG_WARN("Not building azure-events") fi AM_CONDITIONAL(BUILD_AZURE_EVENTS, test $BUILD_AZURE_EVENTS -eq 1) BUILD_AZURE_EVENTS_AZ=1 if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then BUILD_AZURE_EVENTS_AZ=0 AC_MSG_WARN("Not building azure-events-az") fi AM_CONDITIONAL(BUILD_AZURE_EVENTS_AZ, test $BUILD_AZURE_EVENTS_AZ -eq 1) BUILD_GCP_PD_MOVE=1 if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then BUILD_GCP_PD_MOVE=0 AC_MSG_WARN("Not building gcp-pd-move") fi AM_CONDITIONAL(BUILD_GCP_PD_MOVE, test $BUILD_GCP_PD_MOVE -eq 1) BUILD_GCP_VPC_MOVE_ROUTE=1 if test -z "$PYTHON" || test "x${HAVE_PYMOD_PYROUTE2}" != xyes || test $BUILD_OCF_PY -eq 0; then BUILD_GCP_VPC_MOVE_ROUTE=0 AC_MSG_WARN("Not building gcp-vpc-move-route") fi AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_ROUTE, test $BUILD_GCP_VPC_MOVE_ROUTE -eq 1) BUILD_GCP_VPC_MOVE_VIP=1 if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0; then BUILD_GCP_VPC_MOVE_VIP=0 AC_MSG_WARN("Not building 
gcp-vpc-move-vip") fi AM_CONDITIONAL(BUILD_GCP_VPC_MOVE_VIP, test $BUILD_GCP_VPC_MOVE_VIP -eq 1) +BUILD_POWERVS_MOVE_IP=1 +if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0 || test "x${HAVE_PYMOD_REQUESTS}" != xyes || test "x${HAVE_PYMOD_URLLIB3}" != xyes; then + BUILD_POWERVS_MOVE_IP=0 + AC_MSG_WARN("Not building powervs-move-ip") +fi +AM_CONDITIONAL(BUILD_POWERVS_MOVE_IP, test $BUILD_POWERVS_MOVE_IP -eq 1) + BUILD_POWERVS_SUBNET=1 if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0 || test "x${HAVE_PYMOD_REQUESTS}" != xyes || test "x${HAVE_PYMOD_URLLIB3}" != xyes; then BUILD_POWERVS_SUBNET=0 AC_MSG_WARN("Not building powervs-subnet") fi AM_CONDITIONAL(BUILD_POWERVS_SUBNET, test $BUILD_POWERVS_SUBNET -eq 1) BUILD_IBM_CLOUD_VPC_MOVE_ROUTE=1 if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0 || test "x${HAVE_PYMOD_IBM_CLOUD_FAIL_OVER}" != xyes; then BUILD_IBM_CLOUD_VPC_MOVE_ROUTE=0 AC_MSG_WARN("Not building ibm-cloud-vpc-cr-vip") fi AM_CONDITIONAL(BUILD_IBM_CLOUD_VPC_MOVE_ROUTE, test $BUILD_IBM_CLOUD_VPC_MOVE_ROUTE -eq 1) BUILD_IBM_CLOUD_VPC_MOVE_FIP=1 if test -z "$PYTHON" || test $BUILD_OCF_PY -eq 0 || test "x${HAVE_PYMOD_IBM_CLOUD_FAIL_OVER}" != xyes; then BUILD_IBM_CLOUD_VPC_MOVE_FIP=0 AC_MSG_WARN("Not building ibm-cloud-vpc-move-fip") fi AM_CONDITIONAL(BUILD_IBM_CLOUD_VPC_MOVE_FIP, test $BUILD_IBM_CLOUD_VPC_MOVE_FIP -eq 1) AC_PATH_PROGS(ROUTE, route) AC_DEFINE_UNQUOTED(ROUTE, "$ROUTE", path to route command) AC_MSG_CHECKING(ifconfig option to list interfaces) for IFCONFIG_A_OPT in "-A" "-a" "" do $IFCONFIG $IFCONFIG_A_OPT > /dev/null 2>&1 if test "$?" = 0 then AC_DEFINE_UNQUOTED(IFCONFIG_A_OPT, "$IFCONFIG_A_OPT", option for ifconfig command) AC_MSG_RESULT($IFCONFIG_A_OPT) break fi done AC_SUBST(IFCONFIG_A_OPT) if test x"${MAKE}" = x""; then AC_MSG_ERROR(You need (g)make installed in order to build ${PACKAGE}) fi STYLESHEET_PREFIX="" if test x"${XSLTPROC}" != x""; then AC_MSG_CHECKING(docbook to manpage transform) # first try to figure out correct template using xmlcatalog query, # resort to extensive (semi-deterministic) file search if that fails DOCBOOK_XSL_URI='http://docbook.sourceforge.net/release/xsl/current' DOCBOOK_XSL_PATH='manpages/docbook.xsl' STYLESHEET_PREFIX=$(${XMLCATALOG} "" ${DOCBOOK_XSL_URI} \ | sed -n 's|^file://||p;q') if test x"${STYLESHEET_PREFIX}" = x""; then DIRS=$(find "${datadir}" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) if test x"${DIRS}" = x""; then # when datadir is not standard OS path, we cannot find docbook.xsl # use standard OS path as backup DIRS=$(find "/usr/share" "/usr/local/share" -name $(basename $(dirname ${DOCBOOK_XSL_PATH})) \ -type d | LC_ALL=C sort) fi XSLT=$(basename ${DOCBOOK_XSL_PATH}) for d in ${DIRS}; do if test -f "${d}/${XSLT}"; then STYLESHEET_PREFIX=$(echo "${d}" | sed 's/\/manpages//') break fi done fi if test x"${STYLESHEET_PREFIX}" = x""; then AC_MSG_ERROR(You need docbook-style-xsl installed in order to build ${PACKAGE}) fi fi AC_MSG_RESULT($STYLESHEET_PREFIX) AC_SUBST(STYLESHEET_PREFIX) dnl =============================================== dnl Libraries dnl =============================================== AC_CHECK_LIB(socket, socket) AC_CHECK_LIB(gnugetopt, getopt_long) dnl if available if test "x${enable_thread_safe}" = "xyes"; then GPKGNAME="gthread-2.0" else GPKGNAME="glib-2.0" fi PKG_CHECK_MODULES([GLIB], [$GPKGNAME]) CPPFLAGS="$CPPFLAGS $GLIB_CFLAGS" LIBS="$LIBS $GLIB_LIBS" PKG_CHECK_MODULES([LIBQB], "libqb") dnl ======================================================================== dnl Headers dnl 
======================================================================== AC_HEADER_STDC AC_CHECK_HEADERS(sys/socket.h) AC_CHECK_HEADERS(sys/sockio.h) AC_CHECK_HEADERS([arpa/inet.h]) AC_CHECK_HEADERS([fcntl.h]) AC_CHECK_HEADERS([limits.h]) AC_CHECK_HEADERS([malloc.h]) AC_CHECK_HEADERS([netdb.h]) AC_CHECK_HEADERS([netinet/in.h]) AC_CHECK_HEADERS([sys/file.h]) AC_CHECK_HEADERS([sys/ioctl.h]) AC_CHECK_HEADERS([sys/param.h]) AC_CHECK_HEADERS([sys/time.h]) AC_CHECK_HEADERS([syslog.h]) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_FUNC_FORK AC_FUNC_STRNLEN AC_CHECK_FUNCS([alarm gettimeofday inet_ntoa memset mkdir socket uname]) AC_CHECK_FUNCS([strcasecmp strchr strdup strerror strrchr strspn strstr strtol strtoul]) AC_PATH_PROGS(REBOOT, reboot, /sbin/reboot) AC_SUBST(REBOOT) AC_SUBST(REBOOT_OPTIONS) AC_DEFINE_UNQUOTED(REBOOT, "$REBOOT", path to the reboot command) AC_DEFINE_UNQUOTED(REBOOT_OPTIONS, "$REBOOT_OPTIONS", reboot options) AC_PATH_PROGS(POWEROFF_CMD, poweroff, /sbin/poweroff) AC_SUBST(POWEROFF_CMD) AC_SUBST(POWEROFF_OPTIONS) AC_DEFINE_UNQUOTED(POWEROFF_CMD, "$POWEROFF_CMD", path to the poweroff command) AC_DEFINE_UNQUOTED(POWEROFF_OPTIONS, "$POWEROFF_OPTIONS", poweroff options) AC_PATH_PROGS(POD2MAN, pod2man) AM_CONDITIONAL(BUILD_POD_DOC, test "x$POD2MAN" != "x" ) if test "x$POD2MAN" = "x"; then AC_MSG_WARN([pod2man not installed, unable to (re-)build ldirector manual page]) fi AC_SUBST(POD2MAN) dnl ======================================================================== dnl Functions dnl ======================================================================== AC_CHECK_FUNCS(getopt, AC_DEFINE(HAVE_DECL_GETOPT, 1, [Have getopt function])) dnl ======================================================================== dnl sfex dnl ======================================================================== build_sfex=no case $host_os in *Linux*|*linux*) if test "$ac_cv_header_heartbeat_glue_config_h" = "yes"; then build_sfex=yes fi ;; esac AM_CONDITIONAL(BUILD_SFEX, test "$build_sfex" = "yes" ) dnl ======================================================================== dnl tickle (needs port to BSD platforms) dnl ======================================================================== AC_CHECK_MEMBERS([struct iphdr.saddr],,,[[#include ]]) AM_CONDITIONAL(BUILD_TICKLE, test "$ac_cv_member_struct_iphdr_saddr" = "yes" ) dnl ======================================================================== dnl libnet dnl ======================================================================== libnet="" libnet_version="none" LIBNETLIBS="" LIBNETDEFINES="" AC_MSG_CHECKING(if libnet is required) libnet_fatal=$enable_libnet case $enable_libnet in no) ;; yes|libnet10|libnet11|10|11) libnet_fatal=yes;; try) case $host_os in *Linux*|*linux*) libnet_fatal=no;; *) libnet_fatal=yes;; dnl legacy behavior esac ;; *) libnet_fatal=yes; enable_libnet=try;; esac AC_MSG_RESULT($libnet_fatal) if test "x$enable_libnet" != "xno"; then AC_PATH_PROGS(LIBNETCONFIG, libnet-config) AC_CHECK_LIB(nsl, t_open) dnl -lnsl AC_CHECK_LIB(socket, socket) dnl -lsocket AC_CHECK_LIB(net, libnet_get_hwaddr, LIBNETLIBS=" -lnet", []) fi AC_MSG_CHECKING(for libnet) if test "x$LIBNETLIBS" != "x" -o "x$enable_libnet" = "xlibnet11"; then LIBNETDEFINES="" if test "$ac_cv_lib_nsl_t_open" = yes; then LIBNETLIBS="-lnsl $LIBNETLIBS" fi if test "$ac_cv_lib_socket_socket" = yes; then LIBNETLIBS="-lsocket $LIBNETLIBS" fi libnet=net 
libnet_version="libnet1.1" fi if test "x$enable_libnet" = "xtry" -o "x$enable_libnet" = "xlibnet10"; then if test "x$LIBNETLIBS" = x -a "x${LIBNETCONFIG}" != "x" ; then LIBNETDEFINES="`$LIBNETCONFIG --defines` `$LIBNETCONFIG --cflags`"; LIBNETLIBS="`$LIBNETCONFIG --libs`"; libnet_version="libnet1.0 (old)" case $LIBNETLIBS in *-l*) libnet=`echo $LIBNETLIBS | sed 's%.*-l%%'`;; *) libnet_version=none;; esac CPPFLAGS="$CPPFLAGS $LIBNETDEFINES" AC_CHECK_HEADERS(libnet.h) if test "$ac_cv_header_libnet_h" = no; then libnet_version=none fi fi fi AC_MSG_RESULT(found $libnet_version) if test "$libnet_version" = none; then LIBNETLIBS="" LIBNETDEFINES="" if test $libnet_fatal = yes; then AC_MSG_ERROR(libnet not found) fi else AC_CHECK_LIB($libnet,libnet_init, [new_libnet=yes; AC_DEFINE(HAVE_LIBNET_1_1_API, 1, Libnet 1.1 API)], [new_libnet=no; AC_DEFINE(HAVE_LIBNET_1_0_API, 1, Libnet 1.0 API)],$LIBNETLIBS) AC_SUBST(LIBNETLIBS) fi if test "$new_libnet" = yes; then AC_MSG_CHECKING(for libnet API 1.1.4: ) save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS -fgnu89-inline -Wall -Werror" AC_COMPILE_IFELSE([ AC_LANG_SOURCE(#include int main(){libnet_t *l=NULL; libnet_pblock_record_ip_offset(l, l->total_size); return(0); })], [AC_MSG_RESULT(no)], [AC_DEFINE(HAVE_LIBNET_1_1_4_API, 1, Libnet 1.1.4 API) AC_MSG_RESULT(yes)]) CFLAGS="$save_CFLAGS" fi sendarp_linux=0 case $host_os in *Linux*|*linux*) sendarp_linux=1;; esac redhat_based=0 AC_CHECK_FILE(/etc/redhat-release, [redhat_based=1]) AC_SUBST(LIBNETLIBS) AC_SUBST(LIBNETDEFINES) AM_CONDITIONAL(SENDARP_LINUX, test $sendarp_linux = 1 ) AM_CONDITIONAL(USE_LIBNET, test "x$libnet_version" != "xnone" ) AM_CONDITIONAL(NFSCONVERT, test $redhat_based = 1 ) dnl ************************************************************************ dnl * Check for netinet/icmp6.h to enable the IPv6addr resource agent AC_CHECK_HEADERS(netinet/icmp6.h,[],[],[#include ]) AM_CONDITIONAL(USE_IPV6ADDR_AGENT, test "$ac_cv_header_netinet_icmp6_h" = yes && test "$ac_cv_header_heartbeat_glue_config_h" = yes) AM_CONDITIONAL(IPV6ADDR_COMPATIBLE, test "$ac_cv_header_netinet_icmp6_h" = yes) dnl ======================================================================== dnl Compiler flags dnl ======================================================================== dnl Make sure that CFLAGS is not exported. If the user did dnl not have CFLAGS in their environment then this should have dnl no effect. However if CFLAGS was exported from the user's dnl environment, then the new CFLAGS will also be exported dnl to sub processes. 
CC_ERRORS="" CC_EXTRAS="" if export -p | fgrep " CFLAGS=" > /dev/null; then SAVED_CFLAGS="$CFLAGS" unset CFLAGS CFLAGS="$SAVED_CFLAGS" unset SAVED_CFLAGS fi if test "$GCC" != yes; then CFLAGS="$CFLAGS -g" enable_fatal_warnings=no else CFLAGS="$CFLAGS -ggdb3" # We had to eliminate -Wnested-externs because of libtool changes # Also remove -Waggregate-return because we use one libnet # call which returns a struct EXTRA_FLAGS="-fgnu89-inline -fstack-protector-all -Wall -Wbad-function-cast -Wcast-qual -Wdeclaration-after-statement -Wendif-labels -Wfloat-equal -Wformat=2 -Wformat-security -Wformat-nonliteral -Winline -Wmissing-prototypes -Wmissing-declarations -Wmissing-format-attribute -Wnested-externs -Wno-long-long -Wno-strict-aliasing -Wpointer-arith -Wstrict-prototypes -Wunsigned-char -Wwrite-strings -Wno-maybe-uninitialized" # Additional warnings it might be nice to enable one day # -Wshadow # -Wunreachable-code for j in $EXTRA_FLAGS do if cc_supports_flag $j then CC_EXTRAS="$CC_EXTRAS $j" fi done dnl In lib/ais/Makefile.am there's a gcc option available as of v4.x GCC_MAJOR=`gcc -v 2>&1 | awk 'END{print $3}' | sed 's/[.].*//'` AM_CONDITIONAL(GCC_4, test "${GCC_MAJOR}" = 4) dnl System specific options case "$host_os" in *linux*|*bsd*) if test "${enable_fatal_warnings}" = "unknown"; then enable_fatal_warnings=yes fi ;; esac if test "x${enable_fatal_warnings}" != xno && cc_supports_flag -Werror ; then enable_fatal_warnings=yes else enable_fatal_warnings=no fi if test "x${enable_ansi}" = xyes && cc_supports_flag -std=iso9899:199409; then AC_MSG_NOTICE(Enabling ANSI Compatibility) CC_EXTRAS="$CC_EXTRAS -ansi -D_GNU_SOURCE -DANSI_ONLY" fi AC_MSG_NOTICE(Activated additional gcc flags: ${CC_EXTRAS}) fi CFLAGS="$CFLAGS $CC_EXTRAS" NON_FATAL_CFLAGS="$CFLAGS" AC_SUBST(NON_FATAL_CFLAGS) dnl dnl We reset CFLAGS to include our warnings *after* all function dnl checking goes on, so that our warning flags don't keep the dnl AC_*FUNCS() calls above from working. In particular, -Werror will dnl *always* cause us troubles if we set it before here. 
dnl dnl if test "x${enable_fatal_warnings}" = xyes ; then AC_MSG_NOTICE(Enabling Fatal Warnings) CFLAGS="$CFLAGS -Werror" fi AC_SUBST(CFLAGS) dnl This is useful for use in Makefiles that need to remove one specific flag CFLAGS_COPY="$CFLAGS" AC_SUBST(CFLAGS_COPY) AC_SUBST(LOCALE) AC_SUBST(CC) AC_SUBST(MAKE) dnl The Makefiles and shell scripts we output AC_CONFIG_FILES(Makefile \ resource-agents.pc \ include/Makefile \ heartbeat/Makefile \ heartbeat/ocf-binaries \ heartbeat/ocf-directories \ heartbeat/ocf-shellfuncs \ heartbeat/shellfuncs \ systemd/Makefile \ systemd/resource-agents.conf \ tools/Makefile \ tools/nfsconvert \ tools/ocf-tester \ tools/ocft/Makefile \ tools/ocft/ocft \ tools/ocft/caselib \ tools/ocft/README \ tools/ocft/README.zh_CN \ ldirectord/Makefile \ ldirectord/ldirectord \ ldirectord/init.d/Makefile \ ldirectord/init.d/ldirectord \ ldirectord/init.d/ldirectord.debian \ ldirectord/init.d/ldirectord.debian.default \ ldirectord/systemd/Makefile \ ldirectord/systemd/ldirectord.service \ ldirectord/logrotate.d/Makefile \ ldirectord/OCF/Makefile \ ldirectord/OCF/ldirectord \ doc/Makefile \ doc/man/Makefile \ rgmanager/Makefile \ rgmanager/src/Makefile \ rgmanager/src/resources/Makefile \ rgmanager/src/resources/ocf-shellfuncs \ rgmanager/src/resources/svclib_nfslock \ rgmanager/src/resources/lvm_by_lv.sh \ rgmanager/src/resources/lvm_by_vg.sh \ rgmanager/src/resources/utils/Makefile \ rgmanager/src/resources/utils/fs-lib.sh \ rgmanager/src/resources/utils/messages.sh \ rgmanager/src/resources/utils/config-utils.sh \ rgmanager/src/resources/utils/member_util.sh \ rgmanager/src/resources/utils/ra-skelet.sh \ ) dnl Files we output that need to be executable AC_CONFIG_FILES([heartbeat/azure-events], [chmod +x heartbeat/azure-events]) AC_CONFIG_FILES([heartbeat/azure-events-az], [chmod +x heartbeat/azure-events-az]) AC_CONFIG_FILES([heartbeat/AoEtarget], [chmod +x heartbeat/AoEtarget]) AC_CONFIG_FILES([heartbeat/ManageRAID], [chmod +x heartbeat/ManageRAID]) AC_CONFIG_FILES([heartbeat/ManageVE], [chmod +x heartbeat/ManageVE]) AC_CONFIG_FILES([heartbeat/Squid], [chmod +x heartbeat/Squid]) AC_CONFIG_FILES([heartbeat/SysInfo], [chmod +x heartbeat/SysInfo]) AC_CONFIG_FILES([heartbeat/aws-vpc-route53], [chmod +x heartbeat/aws-vpc-route53]) AC_CONFIG_FILES([heartbeat/clvm], [chmod +x heartbeat/clvm]) AC_CONFIG_FILES([heartbeat/conntrackd], [chmod +x heartbeat/conntrackd]) AC_CONFIG_FILES([heartbeat/dnsupdate], [chmod +x heartbeat/dnsupdate]) AC_CONFIG_FILES([heartbeat/dummypy], [chmod +x heartbeat/dummypy]) AC_CONFIG_FILES([heartbeat/eDir88], [chmod +x heartbeat/eDir88]) AC_CONFIG_FILES([heartbeat/fio], [chmod +x heartbeat/fio]) AC_CONFIG_FILES([heartbeat/galera], [chmod +x heartbeat/galera]) AC_CONFIG_FILES([heartbeat/gcp-pd-move], [chmod +x heartbeat/gcp-pd-move]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-ip], [chmod +x heartbeat/gcp-vpc-move-ip]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-vip], [chmod +x heartbeat/gcp-vpc-move-vip]) AC_CONFIG_FILES([heartbeat/gcp-vpc-move-route], [chmod +x heartbeat/gcp-vpc-move-route]) AC_CONFIG_FILES([heartbeat/ibm-cloud-vpc-cr-vip], [chmod +x heartbeat/ibm-cloud-vpc-cr-vip]) AC_CONFIG_FILES([heartbeat/ibm-cloud-vpc-move-fip], [chmod +x heartbeat/ibm-cloud-vpc-move-fip]) AC_CONFIG_FILES([heartbeat/iSCSILogicalUnit], [chmod +x heartbeat/iSCSILogicalUnit]) AC_CONFIG_FILES([heartbeat/iSCSITarget], [chmod +x heartbeat/iSCSITarget]) AC_CONFIG_FILES([heartbeat/jira], [chmod +x heartbeat/jira]) AC_CONFIG_FILES([heartbeat/kamailio], [chmod +x 
heartbeat/kamailio]) AC_CONFIG_FILES([heartbeat/lxc], [chmod +x heartbeat/lxc]) AC_CONFIG_FILES([heartbeat/lxd-info], [chmod +x heartbeat/lxd-info]) AC_CONFIG_FILES([heartbeat/machine-info], [chmod +x heartbeat/machine-info]) AC_CONFIG_FILES([heartbeat/mariadb], [chmod +x heartbeat/mariadb]) AC_CONFIG_FILES([heartbeat/mpathpersist], [chmod +x heartbeat/mpathpersist]) AC_CONFIG_FILES([heartbeat/nfsnotify], [chmod +x heartbeat/nfsnotify]) AC_CONFIG_FILES([heartbeat/openstack-info], [chmod +x heartbeat/openstack-info]) +AC_CONFIG_FILES([heartbeat/powervs-move-ip], [chmod +x heartbeat/powervs-move-ip]) AC_CONFIG_FILES([heartbeat/powervs-subnet], [chmod +x heartbeat/powervs-subnet]) AC_CONFIG_FILES([heartbeat/rabbitmq-cluster], [chmod +x heartbeat/rabbitmq-cluster]) AC_CONFIG_FILES([heartbeat/redis], [chmod +x heartbeat/redis]) AC_CONFIG_FILES([heartbeat/rsyslog], [chmod +x heartbeat/rsyslog]) AC_CONFIG_FILES([heartbeat/smb-share], [chmod +x heartbeat/smb-share]) AC_CONFIG_FILES([heartbeat/sg_persist], [chmod +x heartbeat/sg_persist]) AC_CONFIG_FILES([heartbeat/slapd], [chmod +x heartbeat/slapd]) AC_CONFIG_FILES([heartbeat/storage-mon], [chmod +x heartbeat/storage-mon]) AC_CONFIG_FILES([heartbeat/sybaseASE], [chmod +x heartbeat/sybaseASE]) AC_CONFIG_FILES([heartbeat/syslog-ng], [chmod +x heartbeat/syslog-ng]) AC_CONFIG_FILES([heartbeat/vsftpd], [chmod +x heartbeat/vsftpd]) AC_CONFIG_FILES([heartbeat/CTDB], [chmod +x heartbeat/CTDB]) AC_CONFIG_FILES([rgmanager/src/resources/ASEHAagent.sh], [chmod +x rgmanager/src/resources/ASEHAagent.sh]) AC_CONFIG_FILES([rgmanager/src/resources/apache.sh], [chmod +x rgmanager/src/resources/apache.sh]) AC_CONFIG_FILES([rgmanager/src/resources/bind-mount.sh], [chmod +x rgmanager/src/resources/bind-mount.sh]) AC_CONFIG_FILES([rgmanager/src/resources/clusterfs.sh], [chmod +x rgmanager/src/resources/clusterfs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/db2.sh], [chmod +x rgmanager/src/resources/db2.sh]) AC_CONFIG_FILES([rgmanager/src/resources/drbd.sh], [chmod +x rgmanager/src/resources/drbd.sh]) AC_CONFIG_FILES([rgmanager/src/resources/fs.sh], [chmod +x rgmanager/src/resources/fs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/ip.sh], [chmod +x rgmanager/src/resources/ip.sh]) AC_CONFIG_FILES([rgmanager/src/resources/lvm.sh], [chmod +x rgmanager/src/resources/lvm.sh]) AC_CONFIG_FILES([rgmanager/src/resources/mysql.sh], [chmod +x rgmanager/src/resources/mysql.sh]) AC_CONFIG_FILES([rgmanager/src/resources/named.sh], [chmod +x rgmanager/src/resources/named.sh]) AC_CONFIG_FILES([rgmanager/src/resources/netfs.sh], [chmod +x rgmanager/src/resources/netfs.sh]) AC_CONFIG_FILES([rgmanager/src/resources/nfsclient.sh], [chmod +x rgmanager/src/resources/nfsclient.sh]) AC_CONFIG_FILES([rgmanager/src/resources/nfsexport.sh], [chmod +x rgmanager/src/resources/nfsexport.sh]) AC_CONFIG_FILES([rgmanager/src/resources/nfsserver.sh], [chmod +x rgmanager/src/resources/nfsserver.sh]) AC_CONFIG_FILES([rgmanager/src/resources/openldap.sh], [chmod +x rgmanager/src/resources/openldap.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oracledb.sh], [chmod +x rgmanager/src/resources/oracledb.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oradg.sh], [chmod +x rgmanager/src/resources/oradg.sh]) AC_CONFIG_FILES([rgmanager/src/resources/orainstance.sh], [chmod +x rgmanager/src/resources/orainstance.sh]) AC_CONFIG_FILES([rgmanager/src/resources/oralistener.sh], [chmod +x rgmanager/src/resources/oralistener.sh]) AC_CONFIG_FILES([rgmanager/src/resources/postgres-8.sh], [chmod +x 
rgmanager/src/resources/postgres-8.sh]) AC_CONFIG_FILES([rgmanager/src/resources/samba.sh], [chmod +x rgmanager/src/resources/samba.sh]) AC_CONFIG_FILES([rgmanager/src/resources/script.sh], [chmod +x rgmanager/src/resources/script.sh]) AC_CONFIG_FILES([rgmanager/src/resources/service.sh], [chmod +x rgmanager/src/resources/service.sh]) AC_CONFIG_FILES([rgmanager/src/resources/smb.sh], [chmod +x rgmanager/src/resources/smb.sh]) AC_CONFIG_FILES([rgmanager/src/resources/tomcat-5.sh], [chmod +x rgmanager/src/resources/tomcat-5.sh]) AC_CONFIG_FILES([rgmanager/src/resources/tomcat-6.sh], [chmod +x rgmanager/src/resources/tomcat-6.sh]) AC_CONFIG_FILES([rgmanager/src/resources/vm.sh], [chmod +x rgmanager/src/resources/vm.sh]) dnl Now process the entire list of files added by previous dnl calls to AC_CONFIG_FILES() AC_OUTPUT() dnl ***************** dnl Configure summary dnl ***************** AC_MSG_RESULT([]) AC_MSG_RESULT([$PACKAGE configuration:]) AC_MSG_RESULT([ Version = ${VERSION}]) AC_MSG_RESULT([ Build Version = $Format:%H$]) AC_MSG_RESULT([]) AC_MSG_RESULT([ Prefix = ${prefix}]) AC_MSG_RESULT([ Executables = ${sbindir}]) AC_MSG_RESULT([ Man pages = ${mandir}]) AC_MSG_RESULT([ Libraries = ${libdir}]) AC_MSG_RESULT([ Header files = ${includedir}]) AC_MSG_RESULT([ Arch-independent files = ${datadir}]) AC_MSG_RESULT([ Documentation = ${docdir}]) AC_MSG_RESULT([ State information = ${localstatedir}]) AC_MSG_RESULT([ System configuration = ${sysconfdir}]) AC_MSG_RESULT([ HA_BIN directory prefix = ${libexecdir}]) AC_MSG_RESULT([ RA state files = ${HA_RSCTMPDIR}]) AC_MSG_RESULT([ AIS Plugins = ${LCRSODIR}]) AC_MSG_RESULT([]) AC_MSG_RESULT([ CPPFLAGS = ${CPPFLAGS}]) AC_MSG_RESULT([ CFLAGS = ${CFLAGS}]) AC_MSG_RESULT([ Libraries = ${LIBS}]) AC_MSG_RESULT([ Stack Libraries = ${CLUSTERLIBS}]) diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am index 0d34c7c65..0dee5e9e1 100644 --- a/doc/man/Makefile.am +++ b/doc/man/Makefile.am @@ -1,277 +1,281 @@ # # doc: Linux-HA resource agents # # Copyright (C) 2009 Florian Haas # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# MAINTAINERCLEANFILES = Makefile.in EXTRA_DIST = $(doc_DATA) $(REFENTRY_STYLESHEET) \ mkappendix.sh ralist.sh CLEANFILES = $(man_MANS) $(xmlfiles) metadata-*.xml STYLESHEET_PREFIX ?= http://docbook.sourceforge.net/release/xsl/current MANPAGES_STYLESHEET ?= $(STYLESHEET_PREFIX)/manpages/docbook.xsl HTML_STYLESHEET ?= $(STYLESHEET_PREFIX)/xhtml/docbook.xsl FO_STYLESHEET ?= $(STYLESHEET_PREFIX)/fo/docbook.xsl REFENTRY_STYLESHEET ?= ra2refentry.xsl XSLTPROC_OPTIONS ?= --xinclude XSLTPROC_MANPAGES_OPTIONS ?= $(XSLTPROC_OPTIONS) XSLTPROC_HTML_OPTIONS ?= $(XSLTPROC_OPTIONS) XSLTPROC_FO_OPTIONS ?= $(XSLTPROC_OPTIONS) radir = $(abs_top_builddir)/heartbeat # required for out-of-tree build symlinkstargets = \ ocf-distro ocf.py ocf-rarun ocf-returncodes \ findif.sh apache-conf.sh aws.sh http-mon.sh mysql-common.sh \ nfsserver-redhat.sh openstack-common.sh ora-common.sh preptree: for i in $(symlinkstargets); do \ if [ ! -f $(radir)/$$i ]; then \ rm -rf $(radir)/$$i; \ ln -sf $(abs_top_srcdir)/heartbeat/$$i $(radir)/$$i; \ fi; \ done $(radir)/%: $(abs_top_srcdir)/heartbeat/% if [ ! -f $@ ]; then \ ln -sf $< $@; \ fi # OCF_ROOT=. is necessary due to a sanity check in ocf-shellfuncs # (which tests whether $OCF_ROOT points to a directory metadata-%.xml: $(radir)/% preptree OCF_ROOT=. OCF_FUNCTIONS_DIR=$(radir) $< meta-data > $@ metadata-IPv6addr.xml: $(radir)/IPv6addr OCF_ROOT=. OCF_FUNCTIONS_DIR=$(radir) $< meta-data > $@ clean-local: find $(radir) -type l -exec rm -rf {} \; # Please note: we can't name the man pages # ocf:heartbeat:. Believe me, I've tried. It looks like it # works, but then it doesn't. While make can deal correctly with # colons in target names (when properly escaped), it royally messes up # when it is deals with _dependencies_ that contain colons. See Bug # 12126 on savannah.gnu.org. But, maybe it gets fixed soon, it was # first reported in 1995 and added to Savannah in in 2005... 
if BUILD_DOC man_MANS = ocf_heartbeat_AoEtarget.7 \ ocf_heartbeat_AudibleAlarm.7 \ ocf_heartbeat_ClusterMon.7 \ ocf_heartbeat_CTDB.7 \ ocf_heartbeat_Delay.7 \ ocf_heartbeat_Dummy.7 \ ocf_heartbeat_EvmsSCC.7 \ ocf_heartbeat_Evmsd.7 \ ocf_heartbeat_Filesystem.7 \ ocf_heartbeat_ICP.7 \ ocf_heartbeat_IPaddr.7 \ ocf_heartbeat_IPaddr2.7 \ ocf_heartbeat_IPsrcaddr.7 \ ocf_heartbeat_LVM.7 \ ocf_heartbeat_LVM-activate.7 \ ocf_heartbeat_LinuxSCSI.7 \ ocf_heartbeat_MailTo.7 \ ocf_heartbeat_ManageRAID.7 \ ocf_heartbeat_ManageVE.7 \ ocf_heartbeat_NodeUtilization.7 \ ocf_heartbeat_Pure-FTPd.7 \ ocf_heartbeat_Raid1.7 \ ocf_heartbeat_Route.7 \ ocf_heartbeat_SAPDatabase.7 \ ocf_heartbeat_SAPInstance.7 \ ocf_heartbeat_SendArp.7 \ ocf_heartbeat_ServeRAID.7 \ ocf_heartbeat_SphinxSearchDaemon.7 \ ocf_heartbeat_Squid.7 \ ocf_heartbeat_Stateful.7 \ ocf_heartbeat_SysInfo.7 \ ocf_heartbeat_VIPArip.7 \ ocf_heartbeat_VirtualDomain.7 \ ocf_heartbeat_WAS.7 \ ocf_heartbeat_WAS6.7 \ ocf_heartbeat_WinPopup.7 \ ocf_heartbeat_Xen.7 \ ocf_heartbeat_Xinetd.7 \ ocf_heartbeat_ZFS.7 \ ocf_heartbeat_aliyun-vpc-move-ip.7 \ ocf_heartbeat_anything.7 \ ocf_heartbeat_apache.7 \ ocf_heartbeat_asterisk.7 \ ocf_heartbeat_aws-vpc-move-ip.7 \ ocf_heartbeat_aws-vpc-route53.7 \ ocf_heartbeat_awseip.7 \ ocf_heartbeat_awsvip.7 \ ocf_heartbeat_azure-lb.7 \ ocf_heartbeat_clvm.7 \ ocf_heartbeat_conntrackd.7 \ ocf_heartbeat_corosync-qnetd.7 \ ocf_heartbeat_crypt.7 \ ocf_heartbeat_db2.7 \ ocf_heartbeat_dhcpd.7 \ ocf_heartbeat_docker.7 \ ocf_heartbeat_docker-compose.7 \ ocf_heartbeat_dovecot.7 \ ocf_heartbeat_dnsupdate.7 \ ocf_heartbeat_dummypy.7 \ ocf_heartbeat_eDir88.7 \ ocf_heartbeat_ethmonitor.7 \ ocf_heartbeat_exportfs.7 \ ocf_heartbeat_fio.7 \ ocf_heartbeat_galera.7 \ ocf_heartbeat_garbd.7 \ ocf_heartbeat_gcp-ilb.7 \ ocf_heartbeat_gcp-vpc-move-ip.7 \ ocf_heartbeat_iSCSILogicalUnit.7 \ ocf_heartbeat_iSCSITarget.7 \ ocf_heartbeat_iface-bridge.7 \ ocf_heartbeat_iface-macvlan.7 \ ocf_heartbeat_iface-vlan.7 \ ocf_heartbeat_ipsec.7 \ ocf_heartbeat_ids.7 \ ocf_heartbeat_iscsi.7 \ ocf_heartbeat_jboss.7 \ ocf_heartbeat_jira.7 \ ocf_heartbeat_kamailio.7 \ ocf_heartbeat_lvmlockd.7 \ ocf_heartbeat_lxc.7 \ ocf_heartbeat_lxd-info.7 \ ocf_heartbeat_machine-info.7 \ ocf_heartbeat_mariadb.7 \ ocf_heartbeat_mdraid.7 \ ocf_heartbeat_minio.7 \ ocf_heartbeat_mpathpersist.7 \ ocf_heartbeat_mysql.7 \ ocf_heartbeat_mysql-proxy.7 \ ocf_heartbeat_nagios.7 \ ocf_heartbeat_named.7 \ ocf_heartbeat_nfsnotify.7 \ ocf_heartbeat_nfsserver.7 \ ocf_heartbeat_nginx.7 \ ocf_heartbeat_nvmet-subsystem.7 \ ocf_heartbeat_nvmet-namespace.7 \ ocf_heartbeat_nvmet-port.7 \ ocf_heartbeat_openstack-info.7 \ ocf_heartbeat_ocivip.7 \ ocf_heartbeat_openstack-cinder-volume.7 \ ocf_heartbeat_openstack-floating-ip.7 \ ocf_heartbeat_openstack-virtual-ip.7 \ ocf_heartbeat_oraasm.7 \ ocf_heartbeat_oracle.7 \ ocf_heartbeat_oralsnr.7 \ ocf_heartbeat_osceip.7 \ ocf_heartbeat_ovsmonitor.7 \ ocf_heartbeat_pgagent.7 \ ocf_heartbeat_pgsql.7 \ ocf_heartbeat_pingd.7 \ ocf_heartbeat_podman.7 \ ocf_heartbeat_podman-etcd.7 \ ocf_heartbeat_portblock.7 \ ocf_heartbeat_postfix.7 \ ocf_heartbeat_pound.7 \ ocf_heartbeat_proftpd.7 \ ocf_heartbeat_rabbitmq-cluster.7 \ ocf_heartbeat_rabbitmq-server-ha.7 \ ocf_heartbeat_redis.7 \ ocf_heartbeat_rkt.7 \ ocf_heartbeat_rsyncd.7 \ ocf_heartbeat_rsyslog.7 \ ocf_heartbeat_scsi2reservation.7 \ ocf_heartbeat_sfex.7 \ ocf_heartbeat_slapd.7 \ ocf_heartbeat_smb-share.7 \ ocf_heartbeat_sybaseASE.7 \ ocf_heartbeat_sg_persist.7 \ ocf_heartbeat_storage-mon.7 \ ocf_heartbeat_symlink.7 
\ ocf_heartbeat_syslog-ng.7 \ ocf_heartbeat_tomcat.7 \ ocf_heartbeat_varnish.7 \ ocf_heartbeat_vdo-vol.7 \ ocf_heartbeat_vmware.7 \ ocf_heartbeat_vsftpd.7 \ ocf_heartbeat_zabbixserver.7 if USE_IPV6ADDR_AGENT man_MANS += ocf_heartbeat_IPv6addr.7 endif if BUILD_AZURE_EVENTS man_MANS += ocf_heartbeat_azure-events.7 endif if BUILD_AZURE_EVENTS_AZ man_MANS += ocf_heartbeat_azure-events-az.7 endif if BUILD_GCP_PD_MOVE man_MANS += ocf_heartbeat_gcp-pd-move.7 endif if BUILD_GCP_VPC_MOVE_ROUTE man_MANS += ocf_heartbeat_gcp-vpc-move-route.7 endif if BUILD_GCP_VPC_MOVE_VIP man_MANS += ocf_heartbeat_gcp-vpc-move-vip.7 endif +if BUILD_POWERVS_MOVE_IP +man_MANS += ocf_heartbeat_powervs-move-ip.7 +endif + if BUILD_POWERVS_SUBNET man_MANS += ocf_heartbeat_powervs-subnet.7 endif if BUILD_IBM_CLOUD_VPC_MOVE_ROUTE man_MANS += ocf_heartbeat_ibm-cloud-vpc-cr-vip.7 endif if BUILD_IBM_CLOUD_VPC_MOVE_FIP man_MANS += ocf_heartbeat_ibm-cloud-vpc-move-fip.7 endif xmlfiles = $(man_MANS:.7=.xml) %.1 %.5 %.7 %.8: %.xml $(XSLTPROC) \ $(XSLTPROC_MANPAGES_OPTIONS) \ $(MANPAGES_STYLESHEET) $< ocf_heartbeat_%.xml: metadata-%.xml $(srcdir)/$(REFENTRY_STYLESHEET) $(XSLTPROC) --novalid \ --stringparam package $(PACKAGE_NAME) \ --stringparam version $(VERSION) \ --output $@ \ $(srcdir)/$(REFENTRY_STYLESHEET) $< ocf_resource_agents.xml: $(xmlfiles) mkappendix.sh ./mkappendix.sh $(xmlfiles) > $@ %.html: %.xml $(XSLTPROC) \ $(XSLTPROC_HTML_OPTIONS) \ --output $@ \ $(HTML_STYLESHEET) $< xml: ocf_resource_agents.xml endif diff --git a/heartbeat/Makefile.am b/heartbeat/Makefile.am index 839505af9..b5374163d 100644 --- a/heartbeat/Makefile.am +++ b/heartbeat/Makefile.am @@ -1,264 +1,268 @@ # Makefile.am for OCF RAs # # Author: Sun Jing Dong # Copyright (C) 2004 IBM # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
# MAINTAINERCLEANFILES = Makefile.in EXTRA_DIST = $(ocf_SCRIPTS) $(ocfcommon_DATA) \ $(common_DATA) $(hb_DATA) $(dtd_DATA) \ README README.galera AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/linux-ha halibdir = $(libexecdir)/heartbeat ocfdir = $(OCF_RA_DIR_PREFIX)/heartbeat dtddir = $(datadir)/$(PACKAGE_NAME) dtd_DATA = ra-api-1.dtd metadata.rng ocf_PROGRAMS = if USE_IPV6ADDR_AGENT ocf_PROGRAMS += IPv6addr endif halib_PROGRAMS = if IPV6ADDR_COMPATIBLE halib_PROGRAMS += send_ua endif IPv6addr_SOURCES = IPv6addr.c IPv6addr_utils.c IPv6addr_LDADD = -lplumb $(LIBNETLIBS) send_ua_SOURCES = send_ua.c IPv6addr_utils.c send_ua_LDADD = $(LIBNETLIBS) ocf_SCRIPTS = AoEtarget \ AudibleAlarm \ ClusterMon \ CTDB \ Delay \ Dummy \ EvmsSCC \ Evmsd \ Filesystem \ ICP \ IPaddr \ IPaddr2 \ IPsrcaddr \ LVM \ LinuxSCSI \ lvmlockd \ LVM-activate \ MailTo \ ManageRAID \ ManageVE \ NodeUtilization \ Pure-FTPd \ Raid1 \ Route \ SAPDatabase \ SAPInstance \ SendArp \ ServeRAID \ SphinxSearchDaemon \ Squid \ Stateful \ SysInfo \ VIPArip \ VirtualDomain \ WAS \ WAS6 \ WinPopup \ Xen \ Xinetd \ ZFS \ aliyun-vpc-move-ip \ anything \ apache \ asterisk \ aws-vpc-move-ip \ aws-vpc-route53 \ awseip \ awsvip \ azure-lb \ clvm \ conntrackd \ corosync-qnetd \ crypt \ db2 \ dhcpd \ dnsupdate \ dummypy \ docker \ docker-compose \ dovecot \ eDir88 \ ethmonitor \ exportfs \ fio \ galera \ garbd \ gcp-ilb \ gcp-vpc-move-ip \ iSCSILogicalUnit \ iSCSITarget \ ids \ iface-bridge \ iface-macvlan \ iface-vlan \ ipsec \ iscsi \ jboss \ jira \ kamailio \ lxc \ lxd-info \ machine-info \ mariadb \ mdraid \ minio \ mysql \ mysql-proxy \ nagios \ named \ nfsnotify \ nfsserver \ nginx \ nvmet-subsystem \ nvmet-namespace \ nvmet-port \ ocivip \ openstack-cinder-volume \ openstack-floating-ip \ openstack-info \ openstack-virtual-ip \ oraasm \ oracle \ oralsnr \ osceip \ ovsmonitor \ pgagent \ pgsql \ pingd \ podman \ podman-etcd \ portblock \ postfix \ pound \ proftpd \ rabbitmq-cluster \ rabbitmq-server-ha \ redis \ rkt \ rsyncd \ rsyslog \ scsi2reservation \ sfex \ sg_persist \ mpathpersist \ slapd \ smb-share \ storage-mon \ sybaseASE \ symlink \ syslog-ng \ tomcat \ varnish \ vdo-vol \ vmware \ vsftpd \ zabbixserver if BUILD_AZURE_EVENTS ocf_SCRIPTS += azure-events endif if BUILD_AZURE_EVENTS_AZ ocf_SCRIPTS += azure-events-az endif if BUILD_GCP_PD_MOVE ocf_SCRIPTS += gcp-pd-move endif if BUILD_GCP_VPC_MOVE_ROUTE ocf_SCRIPTS += gcp-vpc-move-route endif if BUILD_GCP_VPC_MOVE_VIP ocf_SCRIPTS += gcp-vpc-move-vip endif +if BUILD_POWERVS_MOVE_IP +ocf_SCRIPTS += powervs-move-ip +endif + if BUILD_POWERVS_SUBNET ocf_SCRIPTS += powervs-subnet endif if BUILD_IBM_CLOUD_VPC_MOVE_ROUTE ocf_SCRIPTS += ibm-cloud-vpc-cr-vip endif if BUILD_IBM_CLOUD_VPC_MOVE_FIP ocf_SCRIPTS += ibm-cloud-vpc-move-fip endif ocfcommondir = $(OCF_LIB_DIR_PREFIX)/heartbeat ocfcommon_DATA = ocf-shellfuncs \ ocf-binaries \ ocf-directories \ ocf-returncodes \ ocf-rarun \ ocf-distro \ apache-conf.sh \ aws.sh \ http-mon.sh \ sapdb-nosha.sh \ sapdb.sh \ lvm-clvm.sh \ lvm-plain.sh \ lvm-tag.sh \ openstack-common.sh \ ora-common.sh \ mysql-common.sh \ nfsserver-redhat.sh \ findif.sh \ ocf.py # Legacy locations hbdir = $(sysconfdir)/ha.d hb_DATA = shellfuncs check: $(ocf_SCRIPTS:=.check) %.check: % OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) ./$< meta-data | xmllint --path $(abs_srcdir) --noout --relaxng $(abs_srcdir)/metadata.rng - do_spellcheck = printf '[%s]\n' "$(agent)"; \ OCF_ROOT=$(abs_srcdir) OCF_FUNCTIONS_DIR=$(abs_srcdir) \ ./$(agent) meta-data 2>/dev/null \ | 
xsltproc $(top_srcdir)/make/extract_text.xsl - \ | aspell pipe list -d en_US --ignore-case \ --home-dir=$(top_srcdir)/make -p spellcheck-ignore \ | sed -n 's|^&\([^:]*\):.*|\1|p'; spellcheck: @$(foreach agent,$(ocf_SCRIPTS), $(do_spellcheck)) clean-local: rm -rf __pycache__ *.pyc diff --git a/heartbeat/powervs-move-ip.in b/heartbeat/powervs-move-ip.in new file mode 100755 index 000000000..d55979e52 --- /dev/null +++ b/heartbeat/powervs-move-ip.in @@ -0,0 +1,1035 @@ +#!@PYTHON@ -tt +# ------------------------------------------------------------------------ +# Description: Resource agent for moving an overlay IP address between +# virtual server instances in different PowerVS workspaces. +# +# Authors: Edmund Haefele +# Walter Orb +# +# Copyright (c) 2025 International Business Machines, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ------------------------------------------------------------------------ + +import fcntl +import ipaddress +import json +import os +import socket +import subprocess +import sys +import textwrap +import time +from pathlib import Path +from urllib.parse import urlparse + +import requests +import requests.adapters +import urllib3.util + +# Constants +OCF_FUNCTIONS_DIR = os.environ.get( + "OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" % os.environ.get("OCF_ROOT") +) +RESOURCE_OPTIONS = ( + "ip", + "api_key", + "api_type", + "region", + "route_host_map", + "use_token_cache", + "monitor_api", + "device", + "proxy", +) +IP_CMD = "/usr/sbin/ip" +REQUESTS_TIMEOUT = 5 # Timeout for requests calls +HTTP_MAX_RETRIES = 3 # Maximum number of retries for HTTP requests +HTTP_BACKOFF_FACTOR = 0.3 # Sleep (factor * (2^number of previous retries)) secs +HTTP_STATUS_FORCE_RETRIES = (500, 502, 503, 504) # HTTP status codes to retry on +HTTP_RETRY_ALLOWED_METHODS = frozenset({"GET", "POST", "PUT", "DELETE"}) +CIDR_NETMASK = "32" + +sys.path.append(OCF_FUNCTIONS_DIR) +try: + import ocf +except ImportError: + sys.stderr.write("ImportError: ocf module import failed.") + sys.exit(5) + + +class OCFExitError(Exception): + """Exception class for OCF (Open Cluster Framework) exit errors.""" + + def __init__(self, message, exit_code): + ocf.ocf_exit_reason(message) + sys.exit(exit_code) + + +class CmdError(OCFExitError): + """Exception class for errors when running system commands.""" + + def __init__(self, message, exit_code): + super().__init__(f"[CmdError] {message}", exit_code) + + +def os_cmd(cmd_args, is_json=False, timeout=10): + """Run a system command and optionally parse JSON output.""" + ocf.logger.debug(f"[os_cmd]: args: {cmd_args}") + try: + result = subprocess.run( + cmd_args, + capture_output=True, + text=True, + check=True, + timeout=timeout, + env={"LANG": "C"}, + ) + if is_json: + try: + return json.loads(result.stdout) + except json.JSONDecodeError as e: + raise CmdError(f"os_cmd: JSON parsing failed: {e}", ocf.OCF_ERR_GENERIC) + + return result.returncode + + except subprocess.CalledProcessError as e: + raise CmdError( + f"os_cmd: command failed: {e.stderr}", + 
ocf.OCF_ERR_GENERIC, + ) + except subprocess.TimeoutExpired: + raise CmdError("os_cmd: command timed out", ocf.OCF_ERR_GENERIC) + + +def ip_cmd(*args, is_json=False): + """Generic wrapper for the ip command.""" + return os_cmd([IP_CMD] + list(args), is_json=is_json) + + +def ip_address_show(): + """Show IP addresses in JSON format.""" + return ip_cmd("-json", "address", "show", is_json=True) + + +def ip_address_add(cidr, device, label=None): + """Add an IP address to a device.""" + cmd = ["address", "add", cidr, "dev", device] + if label: + cmd += ["label", label] + return ip_cmd(*cmd) + + +def ip_address_delete(cidr, device): + """Delete an IP address from a device.""" + return ip_cmd("address", "delete", cidr, "dev", device) + + +def ip_find_device(ip): + """Find the device associated with a given IP address.""" + for iface in ip_address_show(): + addresses = [a["local"] for a in iface["addr_info"]] + if ip in addresses and "UP" in iface["flags"]: + return iface["ifname"] + + return None + + +def ip_check_device(device): + """Verify that a device with the specified interface name (device) exists.""" + for iface in ip_address_show(): + if iface["ifname"] == device and "UP" in iface["flags"]: + return True + + return False + + +def ip_alias_add(ip, device): + """Add an IP alias to the given device.""" + ip_cidr = f"{ip}/{CIDR_NETMASK}" + ocf.logger.debug( + f"[ip_alias_add]: adding IP alias '{ip_cidr}' to interface '{device}'" + ) + _ = ip_address_add(ip_cidr, device) + + +def ip_alias_remove(ip): + """Find the device with the given IP alias and remove the alias.""" + device = ip_find_device(ip) + if device: + ip_cidr = f"{ip}/{CIDR_NETMASK}" + ocf.logger.debug( + f"[ip_alias_remove]: removing IP alias '{ip_cidr}' from interface '{device}'" + ) + _ = ip_address_delete(ip_cidr, device) + + +def create_session_with_retries(): + """Create a request session with a retry strategy.""" + retry_strategy = urllib3.util.Retry( + total=HTTP_MAX_RETRIES, + status_forcelist=HTTP_STATUS_FORCE_RETRIES, + allowed_methods=HTTP_RETRY_ALLOWED_METHODS, + backoff_factor=HTTP_BACKOFF_FACTOR, + raise_on_status=False, + ) + adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy) + session = requests.Session() + session.mount("https://", adapter) + return session + + +class PowerCloudTokenManagerError(OCFExitError): + """Exception class for errors in the PowerCloudTokenManager.""" + + def __init__(self, message, exit_code): + super().__init__(f"[PowerCloudTokenManagerError] {message}", exit_code) + + +class PowerCloudTokenManager: + """Request and cache IBM Cloud tokens.""" + + _DEFAULT_RESOURCE_INSTANCE = "powervs-move-ip" + _TOKEN_REFRESH_BUFFER = 900 # 15 minutes + + def __init__( + self, + api_type="", + api_key="", + proxy="", + use_cache=False, + ): + self._auth_url = ( + "https://private.iam.cloud.ibm.com/identity/token" + if api_type == "private" + else "https://iam.cloud.ibm.com/identity/token" + ) + self._api_key = self._load_api_key(api_key) + self._proxy = proxy + self._session = create_session_with_retries() + self._cache_file = None + + if use_cache: + resource_instance = os.environ.get( + "OCF_RESOURCE_INSTANCE", self._DEFAULT_RESOURCE_INSTANCE + ) + self._cache_file = Path( + f"/var/run/resource-agents/{resource_instance}-token.json" + ) + self._cache_file.parent.mkdir(parents=True, exist_ok=True) + if not self._cache_file.exists(): + self._cache_file.touch() + os.chmod(self._cache_file, 0o600) + + def _load_api_key(self, api_key): + """Load API key from string or file.""" + if not 
api_key:
+            raise PowerCloudTokenManagerError(
+                "_load_api_key: API key is missing",
+                ocf.OCF_ERR_CONFIGURED,
+            )
+
+        # API key in string
+        if not api_key.startswith("@"):
+            return api_key
+
+        # API key in file
+        api_key_path = Path(api_key[1:])
+        if not api_key_path.is_file():
+            raise PowerCloudTokenManagerError(
+                f"_load_api_key: API key file not found: '{api_key_path}'",
+                ocf.OCF_ERR_ARGS,
+            )
+
+        try:
+            content = api_key_path.read_text().strip()
+            api_key_field = json.loads(content).get("apikey", "")
+        except json.JSONDecodeError:
+            # data is text, return as is
+            api_key_field = content
+
+        if not api_key_field:
+            raise PowerCloudTokenManagerError(
+                f"_load_api_key: invalid API key in file '{api_key_path}'",
+                ocf.OCF_ERR_ARGS,
+            )
+
+        return api_key_field
+
+    def _request_new_token(self):
+        """Request a new access token."""
+        headers = {
+            "content-type": "application/x-www-form-urlencoded",
+            "accept": "application/json",
+        }
+        data = {
+            "grant_type": "urn:ibm:params:oauth:grant-type:apikey",
+            "apikey": f"{self._api_key}",
+        }
+
+        current_time = time.time()
+        try:
+            response = self._session.post(
+                self._auth_url,
+                headers=headers,
+                data=data,
+                proxies=self._proxy,
+                timeout=REQUESTS_TIMEOUT,
+            )
+            response.raise_for_status()
+            token_data = response.json()
+            return (
+                token_data["access_token"],
+                current_time + token_data["expires_in"],
+                current_time,
+            )
+        except requests.RequestException as e:
+            ocf.logger.warning(
+                f"[PowerCloudTokenManager] _request_new_token: failed to request token: '{e}'"
+            )
+            return None
+
+    def _read_cache(self):
+        """Read token cache."""
+        try:
+            with self._cache_file.open("r") as f:
+                fcntl.flock(f, fcntl.LOCK_EX)
+                try:
+                    return json.load(f)
+                finally:
+                    fcntl.flock(f, fcntl.LOCK_UN)
+        except (json.JSONDecodeError, FileNotFoundError, PermissionError):
+            ocf.logger.warning(
+                "[PowerCloudTokenManager] _read_cache: failed to read token cache due to missing file or malformed JSON."
+ ) + return {} + + def _write_cache(self, token, expiration, refreshed_at): + """Write token cache.""" + try: + with self._cache_file.open("w") as f: + fcntl.flock(f, fcntl.LOCK_EX) + try: + json.dump( + { + "token": token, + "expiration": expiration, + "refreshed_at": refreshed_at, + }, + f, + ) + finally: + fcntl.flock(f, fcntl.LOCK_UN) + except Exception as e: + raise PowerCloudTokenManagerError( + f"_write_cache: failed to write token cache file: '{e}'", + ocf.OCF_ERR_GENERIC, + ) + + def _is_token_expired(self, expiration): + """Check if token is expired or near expiry.""" + return time.time() + self._TOKEN_REFRESH_BUFFER >= expiration + + def get_token(self): + """Get a valid access token, using cache if enabled.""" + if not self._cache_file: + result = self._request_new_token() + if result: + token, _, _ = result + return token + raise PowerCloudTokenManagerError( + "get_token: token request failed and no cache available", + ocf.OCF_ERR_GENERIC, + ) + + cache = self._read_cache() + token = cache.get("token") + expiration = cache.get("expiration", 0) + + if not token or self._is_token_expired(expiration): + result = self._request_new_token() + if result: + token, expiration, refreshed_at = result + refresh_time = time.ctime(refreshed_at) + ocf.logger.debug( + f"[PowerCloudTokenManager] get_token: refreshed token at '{refresh_time}'" + ) + self._write_cache(token, expiration, refreshed_at) + else: + ocf.logger.error( + "[PowerCloudTokenManager] get_token: failed to refresh token" + ) + if token and time.time() < expiration: + ocf.logger.warning( + "[PowerCloudTokenManager] get_token: using cached token as fallback" + ) + else: + raise PowerCloudTokenManagerError( + "get_token: no valid token available", + ocf.OCF_ERR_GENERIC, + ) + + return token + + +class PowerCloudAPIError(OCFExitError): + """Exception class for errors in PowerCloudAPI.""" + + def __init__(self, message, exit_code): + super().__init__(f"[PowerCloudAPIError] {message}", exit_code) + + +class PowerCloudAPI: + """Offers a convenient method for sending requests to the IBM Power Cloud API.""" + + _ALLOWED_API_TYPES = {"public", "private"} + + def __init__( + self, + api_key="", + api_type="", + region="", + crn="", + proxy="", + use_cache=False, + ): + """Initialize class variables, including the IBM Power Cloud API endpoint URL and HTTP header, and get an API token.""" + + self._crn = crn + self._proxy = self._get_proxy(proxy) + self._api_url = self._get_api_url(region, api_type) + token_manager = PowerCloudTokenManager( + api_type=api_type, api_key=api_key, proxy=self._proxy, use_cache=use_cache + ) + self._token = token_manager.get_token() + self._header = self._get_header() + self._session = create_session_with_retries() + + def _get_proxy(self, proxy): + """Validate a proxy URL and test TCP connectivity. 
Returns a proxy dict if reachable."""
+        if not proxy:
+            return None
+
+        parsed_url = urlparse(proxy)
+        is_valid_url = (
+            parsed_url.hostname
+            and parsed_url.port
+            and parsed_url.scheme in ("http", "https")
+        )
+
+        if not is_valid_url:
+            raise PowerCloudAPIError(
+                f"_get_proxy: invalid proxy URL '{proxy}'",
+                ocf.OCF_ERR_CONFIGURED,
+            )
+
+        try:
+            with socket.create_connection(
+                (parsed_url.hostname, parsed_url.port), timeout=REQUESTS_TIMEOUT
+            ):
+                return {"https": proxy}
+        except OSError as e:
+            raise PowerCloudAPIError(
+                f"_get_proxy: cannot connect to proxy '{proxy}': {e}",
+                ocf.OCF_ERR_ARGS,
+            )
+
+    def _get_api_url(self, region, api_type):
+        """Generate and return the API URL for a given region and API type."""
+        if not region:
+            raise PowerCloudAPIError(
+                "_get_api_url: missing region parameter",
+                ocf.OCF_ERR_CONFIGURED,
+            )
+
+        api_type = str(api_type).lower()
+        if api_type not in self._ALLOWED_API_TYPES:
+            raise PowerCloudAPIError(
+                f"_get_api_url: invalid api_type: '{api_type}', must be one of {self._ALLOWED_API_TYPES} ",
+                ocf.OCF_ERR_CONFIGURED,
+            )
+        if api_type == "public" and not self._proxy:
+            raise PowerCloudAPIError(
+                "_get_api_url: api_type 'public' requires a proxy",
+                ocf.OCF_ERR_CONFIGURED,
+            )
+
+        subdomain = "private." if api_type == "private" else ""
+        return f"https://{subdomain}{region}.power-iaas.cloud.ibm.com"
+
+    def _get_header(self):
+        """Construct request header."""
+        return {
+            "Authorization": f"Bearer {self._token}",
+            "CRN": self._crn,
+            "Content-Type": "application/json",
+        }
+
+    def send_api_request(self, method, resource, **kwargs):
+        """Perform an HTTP API call to the specified resource using the given method"""
+        url = f"{self._api_url}{resource}"
+        method = method.upper()
+        ocf.logger.debug(f"[PowerCloudAPI] send_api_request: '{method}' '{resource}'")
+
+        try:
+            response = self._session.request(
+                method,
+                url,
+                headers=self._header,
+                proxies=self._proxy,
+                timeout=REQUESTS_TIMEOUT,
+                **kwargs,
+            )
+            response.raise_for_status()
+            return response.json()
+        except requests.RequestException as e:
+            raise PowerCloudAPIError(
+                f"send_api_request: request error occurred: '{method}' - '{url}' - '{e}'",
+                ocf.OCF_ERR_GENERIC,
+            )
+
+
+class PowerCloudRouteError(OCFExitError):
+    """Exception class for errors encountered while managing PowerVS network routes."""
+
+    def __init__(self, message, exit_code):
+        super().__init__(f"[PowerCloudRouteError] {message}", exit_code)
+
+
+class PowerCloudRoute(PowerCloudAPI):
+    """Provides methods for managing network routes in Power Virtual Server."""
+
+    _CRN_PREFIX_INDEX = 0
+    _CRN_TYPE_INDEX = 8
+    _CRN_ROUTE_ID_INDEX = 9
+    _CRN_EXPECTED_LENGTH = 10
+
+    def __init__(
+        self,
+        ip="",
+        api_key="",
+        api_type="",
+        region="",
+        route_host_map="",
+        device="",
+        proxy="",
+        monitor_api="",
+        use_token_cache="",
+        is_remote_route=False,
+    ):
+        """Initialize PowerCloudRoute instance."""
+        self._is_remote_route = is_remote_route
+        self.ip = self._get_ip_info(ip)
+        self.crn, self.route_id = self._parse_route_map(route_host_map)
+        use_cache = str(use_token_cache).lower() == "true"
+        super().__init__(
+            api_key=api_key,
+            api_type=api_type,
+            region=region,
+            crn=self.crn,
+            proxy=proxy,
+            use_cache=use_cache,
+        )
+        self.route_info = self._get_route_info()
+        self.route_name = self.route_info["name"]
+        self.device = self._get_device_name(device)
+
+    def _get_ip_info(self, ip):
+        """Validate the given IP address and return its standard form."""
+        try:
+            return str(ipaddress.ip_address(ip))
+        except ValueError:
+            
raise PowerCloudRouteError( + f"_get_ip_info: invalid IP address '{ip}'", + ocf.OCF_ERR_CONFIGURED, + ) + + def _parse_route_crn(self, route_crn): + """Parses a PowerVS route CRN and extract its base CRN and route ID.""" + crn_parts = route_crn.split(":") + + if ( + len(crn_parts) != self._CRN_EXPECTED_LENGTH + or crn_parts[self._CRN_PREFIX_INDEX] != "crn" + or crn_parts[self._CRN_TYPE_INDEX] != "route" + ): + raise PowerCloudAPIError( + f"_parse_route_crn: invalid CRN format for network-route: '{route_crn}'", + ocf.OCF_ERR_CONFIGURED, + ) + + workspace_crn = ":".join(crn_parts[: self._CRN_TYPE_INDEX]) + "::" + route_id = crn_parts[self._CRN_ROUTE_ID_INDEX] + + return workspace_crn, route_id + + def _parse_route_map(self, route_host_map): + """Validate the route host map and extract the associated CRN and route ID.""" + try: + route_map = dict(item.split(":", 1) for item in route_host_map.split(";")) + except ValueError: + raise PowerCloudRouteError( + f"_parse_route_map: invalid route_host_map format: '{route_host_map}'", + ocf.OCF_ERR_CONFIGURED, + ) + + hostname = os.uname().nodename + # set nodename to local hostname or get hostname of remote host from route_map + nodename = ( + hostname + if not self._is_remote_route + else next((h for h in route_map if h != hostname), None) + ) + + if not nodename or nodename not in route_map: + raise PowerCloudRouteError( + f"_parse_route_map: hostname '{nodename}' not found in route_host_map '{route_host_map}'", + ocf.OCF_ERR_CONFIGURED, + ) + + return self._parse_route_crn(route_map[nodename]) + + def _get_route_info(self): + """Retrieve and validate attributes of a PowerVS network route.""" + resource = f"/v1/routes/{self.route_id}" + route_info = self.send_api_request("GET", resource) + + zone = "remote" if self._is_remote_route else "local" + ocf.logger.debug( + f"[PowerCloudRoute] _get_route_info: {zone} route info: '{route_info}'" + ) + + if self.ip != route_info["destination"]: + raise PowerCloudRouteError( + f"_get_route_info: IP '{self.ip}' does not match the route destination address '{route_info['destination']}'", + ocf.OCF_ERR_CONFIGURED, + ) + + if route_info["advertise"] != "enable": + raise PowerCloudRouteError( + f"_get_route_info: route '{route_info['name']}' advertise flag must be set to enable", + ocf.OCF_ERR_CONFIGURED, + ) + + return route_info + + def _get_device_name(self, name): + """Verify the existence of a network interface with the specified name.""" + if self._is_remote_route: + return "" + + if name: + if ip_check_device(name): + return name + raise PowerCloudRouteError( + f"_get_device_name: network interface '{name}' does not exist or is down", + ocf.OCF_ERR_CONFIGURED, + ) + + next_hop = self.route_info["nextHop"] + interface_name = ip_find_device(next_hop) + if interface_name: + return interface_name + + raise PowerCloudRouteError( + f"_get_device_name: network interface with next hop '{next_hop}' does not exist or is down", + ocf.OCF_ERR_CONFIGURED, + ) + + def _set_route_enabled(self, enabled: bool): + """Enable or disable the PowerVS network route.""" + resource = f"/v1/routes/{self.route_id}" + data = json.dumps({"enabled": enabled}) + + state = "enabled" if enabled else "disabled" + response = self.send_api_request("PUT", resource, data=data) + ocf.logger.debug( + f"[PowerCloudRoute] _set_route_enabled: successfully {state} route '{self.route_name}', response: '{response}'" + ) + + def is_enabled(self): + """Check whether the PowerVS network route is currently enabled.""" + return self.route_info["state"] == 
"deployed" + + def enable(self): + """Enable the PowerVS network route.""" + if not self.is_enabled(): + self._set_route_enabled(True) + + def disable(self): + """Disable the PowerVS network route.""" + if self.is_enabled(): + self._set_route_enabled(False) + + +def create_route_instance(options, is_remote_route=False, catch_exception=False): + """Instantiate a PowerCloudRoute object and handle errors. + + Returns: + - PowerCloudRoute: The initialized route object if successful. + - None: If an error occurs and catch_exception is True. + + Raises: + - PowerCloudRouteError: If instantiation fails and catch_exception is False. + """ + # Filter only the valid resource agent options from options dictionary. + resource_options = {k: options.get(k, "") for k in RESOURCE_OPTIONS} + + try: + return PowerCloudRoute(**resource_options, is_remote_route=is_remote_route) + except Exception as e: + zone = "remote" if is_remote_route else "local" + ocf.logger.error( + f"[create_route_instance]: failed to instantiate {zone} route: '{e}'" + ) + if catch_exception: + return None + raise + + +def start_action( + ip="", + api_key="", + api_type="", + region="", + route_host_map="", + use_token_cache="", + monitor_api="", + device="", + proxy="", +): + """Assign the service IP. + + This function performs the following actions: + - Adds the specified IP address as an alias to the given network interface or the interface matching the route's next hop. + - Disables the remote network route. + - Enables the network route associated with the provided route host map. + """ + resource_options = locals() + + ocf.logger.info("[start_action]: enabling overlay IP") + ocf.logger.debug(f"[start_action]: options: '{resource_options}'") + + remote_route = create_route_instance(resource_options, is_remote_route=True) + # Disable remote route + ocf.logger.debug( + f"[start_action]: disabling remote route '{remote_route.route_name}'" + ) + remote_route.disable() + + local_route = create_route_instance(resource_options) + + # Add IP alias + ip_alias_add(ip, local_route.device) + + # Enable local route + ocf.logger.debug(f"[start_action]: enabling local route '{local_route.route_name}'") + local_route.enable() + + monitor_result = monitor_action(**resource_options) + if monitor_result != ocf.OCF_SUCCESS: + raise PowerCloudRouteError( + f"start_action: failed to enable local route '{local_route.route_name}'", + monitor_result, + ) + + ocf.logger.info( + f"[start_action]: successfully added IP alias '{ip}' and enabled local route '{local_route.route_name}'" + ) + return ocf.OCF_SUCCESS + + +def stop_action( + ip="", + api_key="", + api_type="", + region="", + route_host_map="", + use_token_cache="", + monitor_api="", + device="", + proxy="", +): + """Remove the service IP. + + This function performs the following actions: + - Disables the network route associated with the provided route host map. + - Removes the IP alias from the network interface. 
+ """ + + resource_options = locals() + + ocf.logger.info("[stop_action]: disabling overlay IP") + ocf.logger.debug(f"[stop_action]: options: '{resource_options}'") + + try: + remote_route = create_route_instance(resource_options, is_remote_route=True) + ocf.logger.debug( + f"[stop_action]: disabling remote route '{remote_route.route_name}'" + ) + remote_route.disable() + + local_route = create_route_instance(resource_options) + ocf.logger.debug( + f"[stop_action]: disabling local route '{local_route.route_name}'" + ) + local_route.disable() + finally: + # Remove IP alias + ip_alias_remove(ip) + + monitor_result = monitor_action(**resource_options) + if monitor_result != ocf.OCF_NOT_RUNNING: + raise PowerCloudRouteError( + f"stop_action: failed to disable local route '{local_route.route_name}'", + monitor_result, + ) + + ocf.logger.info( + f"[stop_action]: successfully removed IP alias '{ip}' and disabled local route '{local_route.route_name}'" + ) + return ocf.OCF_SUCCESS + + +def monitor_action( + ip="", + api_key="", + api_type="", + region="", + route_host_map="", + use_token_cache="", + monitor_api="", + device="", + proxy="", +): + """Monitor the service IP. + + Checks the status of the assigned service IP address. + """ + resource_options = locals() + is_probe = ocf.is_probe() + use_extended_monitor = ocf.OCF_ACTION == "start" or ( + str(monitor_api).lower() == "true" and not is_probe + ) + + ocf.logger.debug( + f"[monitor_action]: options: '{resource_options}', is_probe: '{is_probe}'" + ) + + interface_name = ip_find_device(ip) + + if not use_extended_monitor: + if interface_name: + ocf.logger.debug( + f"[monitor_action]: IP alias '{ip}' is active'" + ) + return ocf.OCF_SUCCESS + else: + ocf.logger.debug( + f"[monitor_action]: IP alias '{ip}' is not active" + ) + return ocf.OCF_NOT_RUNNING + + remote_route = create_route_instance( + resource_options, is_remote_route=True, catch_exception=True + ) + if remote_route is None: + ocf.logger.error("[monitor_action]: failed to instantiate remote route") + return ocf.OCF_ERR_GENERIC + elif remote_route.is_enabled(): + ocf.logger.error( + f"[monitor_action]: remote route '{remote_route.route_name}' is enabled" + ) + return ocf.OCF_ERR_GENERIC + + local_route = create_route_instance( + resource_options, is_remote_route=False, catch_exception=True + ) + + if local_route is None: + ocf.logger.error("[monitor_action]: failed to instantiate local route") + return ocf.OCF_ERR_GENERIC + + if interface_name: + if local_route.is_enabled(): + ocf.logger.debug( + f"[monitor_action]: IP alias '{ip}' is active, local route '{local_route.route_name}' is enabled" + ) + return ocf.OCF_SUCCESS + else: + ocf.logger.error( + f"[monitor_action]: local route '{local_route.route_name}' is not enabled" + ) + return ocf.OCF_ERR_GENERIC + else: + if local_route.is_enabled(): + ocf.logger.error( + f"[monitor_action]: local route '{local_route.route_name}' is enabled, but IP alias is not configured" + ) + return ocf.OCF_ERR_GENERIC + else: + ocf.logger.debug( + f"[monitor_action]: IP alias '{ip}' is not active and local route '{local_route.route_name}' is disabled" + ) + return ocf.OCF_NOT_RUNNING + + +def validate_all_action( + ip="", + api_key="", + api_type="", + region="", + route_host_map="", + use_token_cache="", + monitor_api="", + device="", + proxy="", +): + """Validate resource agent parameters. + + Verifies the provided resource agent options by attempting to instantiate route objects for both local and remote routes. 
+ """ + resource_options = locals() + + ocf.logger.info("[validate_all_action]: validate local and remote routes") + _ = create_route_instance(resource_options) + _ = create_route_instance(resource_options, is_remote_route=True) + + return ocf.OCF_SUCCESS + + +def main(): + """Instantiate the resource agent.""" + agent_description = textwrap.dedent("""\ + Resource Agent to move an IP address from one Power Virtual Server instance to another. + + Prerequisites: + 1. Red Hat Enterprise Linux 9.4 or higher + + 2. Two-node cluster + - Distributed across two PowerVS workspaces in separate data centers within the same region. + + 3. IBM Cloud API Key: + - Create a service API key with privileges for both workspaces. + - Save the key in a file and copy it to both cluster nodes using the same path and filename. + - Reference the key file path in the resource definition. + + For detailed guidance on high availability for SAP applications on PowerVS, visit: + https://cloud.ibm.com/docs/sap?topic=sap-ha-overview. + """) + + agent = ocf.Agent( + "powervs-move-ip", + shortdesc="Manages Power Virtual Server overlay IP routes.", + longdesc=agent_description, + version=1.00, + ) + + agent.add_parameter( + "ip", + shortdesc="IP address", + longdesc=( + "The virtual IP address is the destination address of a network route." + ), + content_type="string", + required=True, + ) + agent.add_parameter( + "api_key", + shortdesc="API Key or @API_KEY_FILE_PATH", + longdesc=( + "API Key or @API_KEY_FILE_PATH for IBM Cloud access. " + "The API key content or the path of an API key file that is indicated by the @ symbol." + ), + content_type="string", + required=True, + ) + agent.add_parameter( + "api_type", + shortdesc="API type", + longdesc="Connect to Power Virtual Server regional endpoints over a public or private network (public|private).", + content_type="string", + default="private", + required=True, + ) + agent.add_parameter( + "region", + shortdesc="Power Virtual Server region", + longdesc=( + "Region that represents the geographic area where the instance is located. " + "The region is used to identify the Cloud API endpoint." + ), + content_type="string", + required=True, + ) + agent.add_parameter( + "route_host_map", + shortdesc="Mapping of hostnames to IBM Cloud route CRNs", + longdesc=( + "Map the hostname of the Power Virtual Server instance to the route CRN of the overlay IP route. " + "Separate hostname and route CRN with a colon ':', separate different hostname and route CRN pairs with a semicolon ';'. " + "Example: hostname1:route-crn-of-instance1;hostname2:route-crn-of-instance2" + ), + content_type="string", + required=True, + ) + agent.add_parameter( + "use_token_cache", + shortdesc="Enable API token cache", + longdesc="Enable caching of the API access token in a local file to reduce authentication overhead. ", + content_type="string", + default="True", + required=False, + ) + agent.add_parameter( + "monitor_api", + shortdesc="Enhanced API Monitoring", + longdesc="Enable enhanced monitoring by using Power Cloud API calls to verify route configuration correctness. ", + content_type="string", + default="False", + required=False, + ) + agent.add_parameter( + "device", + shortdesc="Network adapter for the overlay IP address", + longdesc=( + "Network adapter for the overlay IP address. " + "The adapter must have the same name on all Power Virtual Server instances. 
" + "If the `device` parameter is not specified, the IP alias is assigned to the interface whose configured IP address matches the route's next hop address. " + ), + content_type="string", + default="", + required=False, + ) + agent.add_parameter( + "proxy", + shortdesc="Proxy", + longdesc=( + "Proxy server used to access IBM Cloud API endpoints. " + "The value must be a valid URL in the format 'http[s]://hostname:port'. " + ), + content_type="string", + default="", + required=False, + ) + agent.add_action("start", timeout=60, handler=start_action) + agent.add_action("stop", timeout=60, handler=stop_action) + agent.add_action( + "monitor", depth=0, timeout=60, interval=60, handler=monitor_action + ) + agent.add_action("validate-all", timeout=60, handler=validate_all_action) + agent.run() + + +if __name__ == "__main__": + main()