diff --git a/cts/benchmark/clubench.in b/cts/benchmark/clubench.in
index 1ed27942b8..ff141c8618 100644
--- a/cts/benchmark/clubench.in
+++ b/cts/benchmark/clubench.in
@@ -1,202 +1,196 @@
#!/bin/sh
#
-# Copyright 2010-2021 the Pacemaker project contributors
+# Copyright 2010-2025 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
SSHOPTS="-l root -o PasswordAuthentication=no -o ConnectTimeout=5"
msg() {
echo "$@" >&2
}
usage() {
echo "usage: $0
"
echo " dir: working directory (with the control file)"
exit 0
}
[ $# -eq 0 ] && usage
WORKDIR=$1
test -d "$WORKDIR" || usage
CTSCTRL=~/.cts
CTRL=$WORKDIR/control
CSV=$WORKDIR/bench.csv
STATS=$WORKDIR/bench.stats
test -f $CTRL && . $CTRL
@datadir@/@PACKAGE@/tests/cts/cluster_test 500 || {
msg "cluster_test failed"
exit 1
}
test -f $CTSCTRL || {
msg no CTS control file $CTSCTRL
exit 1
}
. $CTSCTRL
: ${CTS_logfacility:=local7}
-: ${CTS_stack:=corosync}
: ${CTS_logfile:="@CRM_LOG_DIR@/ha-log-bench"}
: ${CTS_adv:="--schema pacemaker-1.2 --clobber-cib -r"}
: ${RUNS:=3}
: ${CTSTESTS:="--benchmark"}
: ${CTSDIR:="@datadir@/@PACKAGE@/tests/cts"}
: ${CTS_node_list:=""}
: ${CTS_boot:=""}
: ${CTS_stonith:=""}
: ${CTS_stonith_args:=""}
[ -n "$CTS_node_list" ] || {
msg no node list specified
exit 1
}
-case "$CTS_stack" in
-corosync) CRM_REPORT_OPTS="--corosync";;
-*) msg "$CTS_stack: cluster stack not recognized"; exit 1;;
-esac
-
-CTSOPTS="--stack $CTS_stack --at-boot $CTS_boot $CTS_adv"
+CTSOPTS="--at-boot $CTS_boot $CTS_adv"
CTSOPTS="$CTSOPTS --facility $CTS_logfacility --logfile $CTS_logfile"
if [ "x$CTS_stonith" != "x" ]; then
CTSOPTS="$CTSOPTS --stonith-type $CTS_stonith"
[ "x$CTS_stonith_args" != "x" ] &&
CTSOPTS="$CTSOPTS --stonith-params \"$CTS_stonith_args\""
else
CTSOPTS="$CTSOPTS --stonith 0"
fi
CTSOPTS="$CTSOPTS $CTSTESTS"
fibonacci() {
F_LIMIT=$1
F_N=2
F_N_PREV=1
while [ $F_N -le $F_LIMIT ]; do
echo $F_N
F_N_TMP=$F_N
F_N=$((F_N+F_N_PREV))
F_N_PREV=$F_N_TMP
done
[ $F_N_PREV -ne $F_LIMIT ] && echo $F_LIMIT
}
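# The benchmark sweeps cluster sizes along the Fibonacci sequence, appending
# the full node count when it is not itself a Fibonacci number; e.g. with
# 9 nodes, "fibonacci 9" prints "2 3 5 8 9".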
[ "$SERIES" ] ||
SERIES=$(fibonacci "$(echo $CTS_node_list | wc -w)")
get_nodes() {
GN_C_NODES=$(echo $CTS_node_list | awk -v n="$1" '
{ for( i=1; i<=NF; i++ ) node[cnt++]=$i }
END{for( i=0; i<n; i++ ) print node[i] }
')
if [ "$(echo $GN_C_NODES | wc -w)" != "$1" ]; then
msg "not enough nodes in $CTSCTRL"
exit 1
fi
echo $GN_C_NODES
}
node_cleanup() {
msg "CIB cleanup ($nodes)"
for n in $nodes; do
ssh $SSHOPTS $n 'rm @CRM_CONFIG_DIR@/*'
done
}
runcts() {
RC_ODIR="$1"
msg "Running CTS"
"$CTSDIR/cts-lab" $CTSOPTS --nodes "$nodes" > "$RC_ODIR/ctsrun.out" 2>&1 &
ctspid=$!
tail -f "$RC_ODIR/ctsrun.out" &
tailpid=$!
wait $ctspid
kill $tailpid >/dev/null 2>&1
}
bench_re='CTS:.*runtime:'
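# diginfo unpacks each crm_report tarball in the given CTS output directory,
# greps the benchmark runtime lines out of every ha-log.txt, and appends the
# filtered field of each line (comma-separated) to the seed string in $2.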
diginfo() {
DI_CTS_DIR="$1"
DI_S="$2"
filter="$3"
(
cd "$DI_CTS_DIR" || return
for r in [0-9]*.tar.bz2; do
tar xjf $r
DI_D=$(basename "$r" .tar.bz2)
for DI_V in $(grep "$bench_re" "$DI_D/ha-log.txt" | eval "$filter"); do
DI_S="$DI_S,$DI_V"
done
rm -r "$DI_D"
done
echo $DI_S
)
}
printheader() {
diginfo $1 "" "awk '{print \$(NF-2)}'"
}
printstats() {
diginfo $1 "$clusize" "awk '{print \$(NF)}'"
}
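# For each comma-separated column of the per-run stats file, printmedians
# sorts the values and keeps row RUNS/2+1, i.e. the median when RUNS is odd
# (with the default RUNS=3, the 2nd smallest value in each column).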
printmedians() {
PM_F="$1"
PM_S="$clusize"
PM_MIDDLE=$((RUNS/2 + 1))
set $(head -1 "$PM_F" | sed 's/,/ /g')
PM_COLS=$#
for PM_I in $(seq 2 $PM_COLS); do
PM_V=$(awk -v i=$PM_I -F, '{print $i}' < $PM_F | sort -n | head -$PM_MIDDLE | tail -1)
PM_S="$PM_S,$PM_V"
done
echo $PM_S
}
rm -f $CSV
tmpf=`mktemp`
test -f "$tmpf" || {
msg "can't create temporary file"
exit 1
}
trap "rm -f $tmpf" 0
for clusize in $SERIES; do
nodes=`get_nodes $clusize`
outdir=$WORKDIR/$clusize
rm -rf $outdir
mkdir -p $outdir
rm -f $tmpf
node_cleanup
for i in `seq $RUNS`; do
true > $CTS_logfile
mkdir -p $outdir/$i
runcts $outdir/$i
mkreports $outdir/$i
printstats $outdir/$i >> $tmpf
done
[ -f "$CSV" ] || printheader $outdir/1 > $CSV
printmedians $tmpf >> $CSV
cat $tmpf >> $STATS
msg "Statistics for $clusize-node cluster saved"
done
msg "Tests done for series $SERIES, output in $CSV and $STATS"
# vim: set filetype=sh:
diff --git a/cts/cluster_test.in b/cts/cluster_test.in
index 9dcc64612a..281241d040 100755
--- a/cts/cluster_test.in
+++ b/cts/cluster_test.in
@@ -1,177 +1,168 @@
#!@BASH_PATH@
#
-# Copyright 2008-2020 the Pacemaker project contributors
+# Copyright 2008-2025 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
if [ -e ~/.cts ]; then
. ~/.cts
fi
anyAsked=0
[ $# -lt 1 ] || CTS_numtests=$1
die() { echo "$@"; exit 1; }
if [ -z "$CTS_asked_once" ]; then
anyAsked=1
echo "This script should only be executed on the test exerciser."
echo "The test exerciser will remotely execute the actions required by the"
echo "tests and should not be part of the cluster itself."
read -p "Is this host intended to be the test exerciser? (yN) " doUnderstand
[ "$doUnderstand" = "y" ] \
|| die "This script must be executed on the test exerciser"
fi
if [ -z "$CTS_node_list" ]; then
anyAsked=1
read -p "Please list your cluster nodes (eg. node1 node2 node3): " CTS_node_list
else
echo "Beginning test of cluster: $CTS_node_list"
fi
-if [ -z "$CTS_stack" ]; then
- anyAsked=1
- read -p "Which cluster stack are you using? ([corosync]): " CTS_stack
- [ -n "$CTS_stack" ] || CTS_stack=corosync
-else
- echo "Using the $CTS_stack cluster stack"
-fi
-
[ "${CTS_node_list}" = "${CTS_node_list/$HOSTNAME/}" ] \
|| die "This script must be executed on the test exerciser, and the test exerciser cannot be part of the cluster"
printf "+ Bootstrapping ssh... "
if [ -z "$SSH_AUTH_SOCK" ]; then
printf "\n + Initializing SSH "
eval "$(ssh-agent)"
echo " + Adding identities..."
ssh-add
rc=$?
if [ $rc -ne 0 ]; then
echo " -- No identities added"
printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n"
read -p " - Do you want this program to help you create one? (yN) " auto_fix
if [ "$auto_fix" = "y" ]; then
ssh-keygen -t dsa
ssh-add
else
die "Please run 'ssh-keygen -t dsa' to create a new key"
fi
fi
else
echo "OK"
fi
test_ok=1
printf "+ Testing ssh configuration... "
for n in $CTS_node_list; do
ssh -l root -o PasswordAuthentication=no -o ConnectTimeout=5 "$n" /bin/true
rc=$?
if [ $rc -ne 0 ]; then
echo " - connection to $n failed"
test_ok=0
fi
done
if [ $test_ok -eq 0 ]; then
printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n"
read -p " - Do you want this program to help you with such a setup? (yN) " auto_fix
if [ "$auto_fix" = "y" ]; then
# XXX are we picking the most suitable identity?
privKey=$(ssh-add -L | head -n1 | cut -d" " -f3)
sshCopyIdOpts="-o User=root"
[ -z "$privKey" ] || sshCopyIdOpts+=" -i \"${privKey}.pub\""
for n in $CTS_node_list; do
eval "ssh-copy-id $sshCopyIdOpts \"${n}\"" \
|| die "Attempt to 'ssh-copy-id $sshCopyIdOpts \"$n\"' failed"
done
else
die "Please install one of your SSH public keys to root's account on all cluster nodes"
fi
fi
echo "OK"
if [ -z "$CTS_logfile" ]; then
anyAsked=1
read -p " + Where does/should syslog store logs from remote hosts? (/var/log/messages) " CTS_logfile
[ -n "$CTS_logfile" ] || CTS_logfile=/var/log/messages
fi
[ -e "$CTS_logfile" ] || die "$CTS_logfile doesn't exist"
if [ -z "$CTS_logfacility" ]; then
anyAsked=1
read -p " + Which log facility does the cluster use? (daemon) " CTS_logfacility
[ -n "$CTS_logfacility" ] || CTS_logfacility=daemon
fi
if [ -z "$CTS_boot" ]; then
read -p "+ Is the cluster software started automatically when a node boots? [yN] " CTS_boot
if [ -z "$CTS_boot" ]; then
CTS_boot=0
else
case $CTS_boot in
1|y|Y) CTS_boot=1;;
*) CTS_boot=0;;
esac
fi
fi
if [ -z "$CTS_numtests" ]; then
read -p "+ How many test iterations should be performed? (500) " CTS_numtests
[ -n "$CTS_numtests" ] || CTS_numtests=500
fi
if [ -z "$CTS_asked_once" ]; then
anyAsked=1
read -p "+ What type of STONITH agent do you use? (none) " CTS_stonith
[ -z "$CTS_stonith" ] \
|| read -p "+ List any STONITH agent parameters (eq. device_host=switch.power.com): " CTS_stonith_args
[ -n "$CTS_adv" ] \
|| read -p "+ (Advanced) Any extra CTS parameters? (none) " CTS_adv
fi
[ $anyAsked -eq 0 ] \
|| read -p "+ Save values to ~/.cts for next time? (yN) " doSave
if [ "$doSave" = "y" ]; then
cat > ~/.cts <<-EOF
# CTS Test data
- CTS_stack="$CTS_stack"
CTS_node_list="$CTS_node_list"
CTS_logfile="$CTS_logfile"
CTS_logport="$CTS_logport"
CTS_logfacility="$CTS_logfacility"
CTS_asked_once=1
CTS_adv="$CTS_adv"
CTS_stonith="$CTS_stonith"
CTS_stonith_args="$CTS_stonith_args"
CTS_boot="$CTS_boot"
EOF
fi
cts_extra=""
if [ -n "$CTS_stonith" ]; then
cts_extra="$cts_extra --stonith-type $CTS_stonith"
[ -z "$CTS_stonith_args" ] \
|| cts_extra="$cts_extra --stonith-params \"$CTS_stonith_args\""
else
cts_extra="$cts_extra --stonith 0"
echo " - Testing a cluster without STONITH is like a blunt pencil... pointless"
fi
printf "\nAll set to go for %d iterations!\n" "$CTS_numtests"
[ $anyAsked -ne 0 ] \
|| echo "+ To use a different configuration, remove ~/.cts and re-run cts (or edit it manually)."
echo Now paste the following command into this shell:
-echo "@PYTHON@ `dirname "$0"`/cts-lab -L \"$CTS_logfile\" --syslog-facility \"$CTS_logfacility\" --no-unsafe-tests --stack \"$CTS_stack\" $CTS_adv --at-boot \"$CTS_boot\" $cts_extra \"$CTS_numtests\" --nodes \"$CTS_node_list\""
+echo "@PYTHON@ `dirname "$0"`/cts-lab -L \"$CTS_logfile\" --syslog-facility \"$CTS_logfacility\" --no-unsafe-tests $CTS_adv --at-boot \"$CTS_boot\" $cts_extra \"$CTS_numtests\" --nodes \"$CTS_node_list\""
# vim: set filetype=sh:
diff --git a/cts/cts-lab.in b/cts/cts-lab.in
index a909ebad01..023207d631 100644
--- a/cts/cts-lab.in
+++ b/cts/cts-lab.in
@@ -1,136 +1,130 @@
#!@PYTHON@
"""Command-line interface to Pacemaker's Cluster Test Suite (CTS)."""
# pylint doesn't like the module name "cts-lab" which is an invalid complaint for this file
# This also disables various other invalid names - it thinks scenario and match are constants
# that should have all caps names, and that cm and n are too short.
# pylint: disable=invalid-name
__copyright__ = "Copyright 2001-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import signal
import sys
from pacemaker._cts.CTS import CtsLab
from pacemaker._cts.cmcorosync import Corosync2
from pacemaker._cts.audits import audit_list
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.scenarios import AllOnce, Boot, BootCluster, LeaveBooted, RandomTests, Sequence
from pacemaker._cts.tests import test_list
# These are globals so they can be used by the signal handler.
scenario = None
LogFactory().add_stderr()
def sig_handler(signum, _frame):
"""Handle the given signal number."""
LogFactory().log(f"Interrupted by signal {signum}")
if scenario:
scenario.summarize()
if signum == 15:
if scenario:
scenario.teardown()
sys.exit(1)
def plural_s(n):
"""Return a string suffix depending on whether or not n is > 1."""
if n == 1:
return ""
return "S"
if __name__ == '__main__':
environment = CtsLab(sys.argv[1:])
iters = environment["iterations"]
tests = []
# Set the signal handler
signal.signal(15, sig_handler)
signal.signal(10, sig_handler)
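# Note: 15 is SIGTERM and 10 is SIGUSR1 on common Linux platforms;
# signal.SIGTERM and signal.SIGUSR1 would be the portable spellings.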
- # Create the Cluster Manager object
- cm = None
-
- if environment["Stack"] == "corosync 2+":
- cm = Corosync2()
- else:
- LogFactory().log(f"Unknown stack: {environment['stack']}")
- sys.exit(1)
+ # Create the Cluster Manager object.
+ # Currently Corosync2 is the only available cluster manager.
+ cm = Corosync2()
if environment["TruncateLog"]:
if environment["OutputFile"] is None:
LogFactory().log("Ignoring truncate request because no output file specified")
else:
LogFactory().log(f"Truncating {environment['OutputFile']}")
with open(environment["OutputFile"], "w", encoding="utf-8") as outputfile:
outputfile.truncate(0)
audits = audit_list(cm)
if environment["ListTests"]:
tests = test_list(cm, audits)
LogFactory().log(f"Total {len(tests)} tests")
for test in tests:
LogFactory().log(test.name)
sys.exit(0)
elif len(environment["tests"]) == 0:
tests = test_list(cm, audits)
else:
chosen = environment["tests"]
for test_case in chosen:
match = None
for test in test_list(cm, audits):
if test.name == test_case:
match = test
if not match:
LogFactory().log("--choose: No applicable/valid tests chosen")
sys.exit(1)
else:
tests.append(match)
# Scenario selection
if environment["scenario"] == "all-once":
iters = len(tests)
scenario = AllOnce(cm, [BootCluster(cm, environment)], audits, tests)
elif environment["scenario"] == "sequence":
scenario = Sequence(cm, [BootCluster(cm, environment)], audits, tests)
elif environment["scenario"] == "boot":
scenario = Boot(cm, [LeaveBooted(cm, environment)], audits, [])
else:
scenario = RandomTests(cm, [BootCluster(cm, environment)], audits, tests)
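# (--once maps to "all-once", --choose to "sequence", --boot to "boot";
# everything else falls through to the random-order scenario)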
LogFactory().log(f">>>>>>>>>>>>>>>> BEGINNING {iters!r} TEST{plural_s(iters)}")
- LogFactory().log(f"Stack: {environment['Stack']} ({environment['Name']})")
LogFactory().log(f"Schema: {environment['Schema']}")
LogFactory().log(f"Scenario: {scenario.__doc__}")
LogFactory().log(f"CTS Exerciser: {environment['cts-exerciser']}")
LogFactory().log(f"CTS Logfile: {environment['OutputFile']}")
LogFactory().log(f"Random Seed: {environment['RandSeed']}")
if "syslogd" in environment:
LogFactory().log(f"Syslog variant: {environment['syslogd'].strip()}")
LogFactory().log(f"System log files: {environment['LogFileName']}")
if "IPBase" in environment:
LogFactory().log(f"Base IP for resources: {environment['IPBase']}")
LogFactory().log(f"Cluster starts at boot: {environment['at-boot']}")
environment.dump()
rc = environment.run(scenario, iters)
sys.exit(rc)
# vim: set filetype=python:
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index bff7a6ab65..53986282d7 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,595 +1,569 @@
"""Test environment classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["EnvFactory", "set_cts_path"]
__copyright__ = "Copyright 2014-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import argparse
from contextlib import suppress
from glob import glob
import os
import random
import shlex
import socket
import sys
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.watcher import LogKind
class Environment:
"""
A class for managing the CTS environment.
This consists largely of processing and storing command line parameters.
"""
# pylint doesn't understand that self._rsh is callable (it stores the
# singleton instance of RemoteExec, as returned by the getInstance method
# of RemoteFactory).
# @TODO See if type annotations fix this.
# I think we could also fix this by getting rid of the getInstance methods,
# but that's a project for another day. For now, just disable the warning.
# pylint: disable=not-callable
def __init__(self, args):
"""
Create a new Environment instance.
This class can be treated kind of like a dictionary due to the presence
of typical dict functions like __contains__, __getitem__, and __setitem__.
However, it is not a dictionary so do not rely on standard dictionary
behavior.
Arguments:
args -- A list of command line parameters, minus the program name.
If None, sys.argv will be used.
"""
self.data = {}
self._nodes = []
# Set some defaults before processing command line arguments. These are
# either not set by any command line parameter, or they need a default
# that can't be set in add_argument.
self["DeadTime"] = 300
self["StartTime"] = 300
self["StableTime"] = 30
self["tests"] = []
self["IPagent"] = "IPaddr2"
self["DoFencing"] = True
self["CIBResource"] = False
self["log_kind"] = None
self["scenario"] = "random"
+ # Hard-coded since there is only one supported cluster manager/stack
+ self["Name"] = "crm-corosync"
+ self["Stack"] = "corosync 2+"
+
self.random_gen = random.Random()
self._logger = LogFactory()
self._rsh = RemoteFactory().getInstance()
self._parse_args(args)
if not self["ListTests"]:
self._validate()
self._discover()
def dump(self):
"""Print the current environment."""
for key in sorted(self.data.keys()):
self._logger.debug(f"{f'Environment[{key}]':35}: {str(self[key])}")
def __contains__(self, key):
"""Return True if the given key exists in the environment."""
if key == "nodes":
return True
return key in self.data
def __getitem__(self, key):
"""Return the given environment key, or None if it does not exist."""
if key == "nodes":
return self._nodes
- if key == "Name":
- return self._get_stack_short()
-
return self.data.get(key)
def __setitem__(self, key, value):
"""Set the given environment key to the given value, overriding any previous value."""
- if key == "Stack":
- self._set_stack(value)
-
- elif key == "nodes":
+ if key == "nodes":
self._nodes = []
for node in value:
# I don't think I need the IP address, etc. but this validates
# the node name against /etc/hosts and/or DNS, so it's a
# GoodThing(tm).
try:
n = node.strip()
# @TODO This only handles IPv4, use getaddrinfo() instead
# (here and in _discover())
socket.gethostbyname_ex(n)
self._nodes.append(n)
except socket.herror:
self._logger.log(f"{node} not found in DNS... aborting")
raise
else:
self.data[key] = value
def random_node(self):
"""Choose a random node from the cluster."""
return self.random_gen.choice(self["nodes"])
- def _set_stack(self, name):
- """Normalize the given cluster stack name."""
- if name in ["corosync", "cs", "mcp"]:
- self.data["Stack"] = "corosync 2+"
-
- else:
- raise ValueError(f"Unknown stack: {name}")
-
- def _get_stack_short(self):
- """Return the short name for the currently set cluster stack."""
- if "Stack" not in self.data:
- return "unknown"
-
- if self.data["Stack"] == "corosync 2+":
- return "crm-corosync"
-
- LogFactory().log(f"Unknown stack: {self['stack']}")
- raise ValueError(f"Unknown stack: {self['stack']}")
-
def _detect_systemd(self, node):
"""Detect whether systemd is in use on the target node."""
if "have_systemd" not in self.data:
(rc, _) = self._rsh(node, "systemctl list-units", verbose=0)
self["have_systemd"] = rc == 0
def _detect_syslog(self, node):
"""Detect the syslog variant in use on the target node (if any)."""
if "syslogd" in self.data:
return
if self["have_systemd"]:
# Systemd
(_, lines) = self._rsh(node, r"systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
else:
# SYS-V
(_, lines) = self._rsh(node, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
with suppress(IndexError):
self["syslogd"] = lines[0].strip()
def disable_service(self, node, service):
"""Disable the given service on the given node."""
if self["have_systemd"]:
# Systemd
(rc, _) = self._rsh(node, f"systemctl disable {service}")
return rc
# SYS-V
(rc, _) = self._rsh(node, f"chkconfig {service} off")
return rc
def enable_service(self, node, service):
"""Enable the given service on the given node."""
if self["have_systemd"]:
# Systemd
(rc, _) = self._rsh(node, f"systemctl enable {service}")
return rc
# SYS-V
(rc, _) = self._rsh(node, f"chkconfig {service} on")
return rc
def service_is_enabled(self, node, service):
"""Return True if the given service is enabled on the given node."""
if self["have_systemd"]:
# Systemd
# With "systemctl is-enabled", we should check if the service is
# explicitly "enabled" instead of the return code. For example it returns
# 0 if the service is "static" or "indirect", but they don't really count
# as "enabled".
(rc, _) = self._rsh(node, f"systemctl is-enabled {service} | grep enabled")
return rc == 0
# SYS-V
(rc, _) = self._rsh(node, f"chkconfig --list | grep -e {service}.*on")
return rc == 0
def _detect_at_boot(self, node):
"""Detect if the cluster starts at boot."""
if "at-boot" not in self.data:
self["at-boot"] = self.service_is_enabled(node, "corosync") \
or self.service_is_enabled(node, "pacemaker")
def _detect_ip_offset(self, node):
"""Detect the offset for IPaddr resources."""
if self["CIBResource"] and "IPBase" not in self.data:
(_, lines) = self._rsh(node, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
network = lines[0].strip()
(_, lines) = self._rsh(node, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
try:
self["IPBase"] = lines[0].strip()
except (IndexError, TypeError):
self["IPBase"] = None
if not self["IPBase"]:
self["IPBase"] = " fe80::1234:56:7890:1000"
self._logger.log("Could not determine an offset for IPaddr resources. Perhaps nmap is not installed on the nodes.")
self._logger.log(f"""Defaulting to '{self["IPBase"]}', use --test-ip-base to override""")
return
# pylint thinks self["IPBase"] is a list, not a string, which causes it
# to error out because a list doesn't have split().
# pylint: disable=no-member
last_part = self["IPBase"].split('.')[3]
if int(last_part) >= 240:
self._logger.log(f"Could not determine an offset for IPaddr resources. Upper bound is too high: {self['IPBase']} {last_part}")
self["IPBase"] = " fe80::1234:56:7890:1000"
self._logger.log(f"""Defaulting to '{self["IPBase"]}', use --test-ip-base to override""")
def _validate(self):
"""Check that we were given all required command line parameters."""
if not self["nodes"]:
raise ValueError("No nodes specified!")
def _discover(self):
"""Probe cluster nodes to figure out how to log and manage services."""
exerciser = socket.gethostname()
# Use the IP where possible to avoid name lookup failures
for ip in socket.gethostbyname_ex(exerciser)[2]:
if ip != "127.0.0.1":
exerciser = ip
break
self["cts-exerciser"] = exerciser
node = self["nodes"][0]
self._detect_systemd(node)
self._detect_syslog(node)
self._detect_at_boot(node)
self._detect_ip_offset(node)
def _parse_args(self, argv):
"""
Parse and validate command line parameters.
Set the appropriate values in the environment dictionary. If argv is
None, use sys.argv instead.
"""
if not argv:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(epilog=f"{sys.argv[0]} -g virt1 -r --stonith ssh --schema pacemaker-2.0 500")
grp1 = parser.add_argument_group("Common options")
grp1.add_argument("-g", "--dsh-group", "--group",
metavar="GROUP", dest="group",
help="Use the nodes listed in the named DSH group (~/.dsh/groups/$name)")
grp1.add_argument("--benchmark",
action="store_true",
help="Add timing information")
grp1.add_argument("--list", "--list-tests",
action="store_true", dest="list_tests",
help="List the valid tests")
grp1.add_argument("--nodes",
default="",
metavar="NODES",
help="List of cluster nodes separated by whitespace")
- grp1.add_argument("--stack",
- default="corosync",
- metavar="STACK",
- help="Which cluster stack is installed")
grp2 = parser.add_argument_group("Options that CTS will usually auto-detect correctly")
grp2.add_argument("-L", "--logfile",
metavar="PATH",
help="Where to look for logs from cluster nodes (or 'journal' for systemd journal)")
grp2.add_argument("--at-boot", "--cluster-starts-at-boot",
choices=["1", "0", "yes", "no"],
help="Does the cluster software start at boot time?")
grp2.add_argument("--facility", "--syslog-facility",
default="daemon",
metavar="NAME",
help="Which syslog facility to log to")
grp2.add_argument("--ip", "--test-ip-base",
metavar="IP",
help="Offset for generated IP address resources")
grp3 = parser.add_argument_group("Options for release testing")
grp3.add_argument("-r", "--populate-resources",
action="store_true",
help="Generate a sample configuration")
grp3.add_argument("--choose",
metavar="NAME",
help="Run only the named tests, separated by whitespace")
grp3.add_argument("--fencing", "--stonith",
choices=["1", "0", "yes", "no", "lha", "openstack", "rhcs", "rhevm", "scsi", "ssh", "virt", "xvm"],
default="1",
help="What fencing agent to use")
grp3.add_argument("--once",
action="store_true",
help="Run all valid tests once")
grp4 = parser.add_argument_group("Additional (less common) options")
grp4.add_argument("-c", "--clobber-cib",
action="store_true",
help="Erase any existing configuration")
grp4.add_argument("-y", "--yes",
action="store_true", dest="always_continue",
help="Continue to run whenever prompted")
grp4.add_argument("--boot",
action="store_true",
help="")
grp4.add_argument("--cib-filename",
metavar="PATH",
help="Install the given CIB file to the cluster")
grp4.add_argument("--experimental-tests",
action="store_true",
help="Include experimental tests")
grp4.add_argument("--loop-minutes",
type=int, default=60,
help="")
grp4.add_argument("--no-loop-tests",
action="store_true",
help="Don't run looping/time-based tests")
grp4.add_argument("--no-unsafe-tests",
action="store_true",
help="Don't run tests that are unsafe for use with ocfs2/drbd")
grp4.add_argument("--notification-agent",
metavar="PATH",
default="/var/lib/pacemaker/notify.sh",
help="Script to configure for Pacemaker alerts")
grp4.add_argument("--notification-recipient",
metavar="R",
default="/var/lib/pacemaker/notify.log",
help="Recipient to pass to alert script")
grp4.add_argument("--oprofile",
default="",
metavar="NODES",
help="List of cluster nodes to run oprofile on")
grp4.add_argument("--outputfile",
metavar="PATH",
help="Location to write logs to")
grp4.add_argument("--qarsh",
action="store_true",
help="Use QARSH to access nodes instead of SSH")
grp4.add_argument("--schema",
metavar="SCHEMA",
default=f"pacemaker-{BuildOptions.CIB_SCHEMA_VERSION}",
help="Create a CIB conforming to the given schema")
grp4.add_argument("--seed",
metavar="SEED",
help="Use the given string as the random number seed")
grp4.add_argument("--set",
action="append",
metavar="ARG",
default=[],
help="Set key=value pairs (can be specified multiple times)")
grp4.add_argument("--stonith-args",
metavar="ARGS",
default="hostlist=all,livedangerously=yes",
help="")
grp4.add_argument("--stonith-type",
metavar="TYPE",
default="external/ssh",
help="")
grp4.add_argument("--trunc",
action="store_true", dest="truncate",
help="Truncate log file before starting")
grp4.add_argument("--valgrind-procs",
metavar="PROCS",
default="pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd",
help="Run valgrind against the given space-separated list of processes")
grp4.add_argument("--warn-inactive",
action="store_true",
help="Warn if a resource is assigned to an inactive node")
parser.add_argument("iterations",
nargs='?',
type=int, default=1,
help="Number of tests to run")
args = parser.parse_args(args=argv)
# Set values on this object based on what happened with command line
# processing. This has to be done in several blocks.
# These values can always be set. Most get a default from the add_argument
# calls, they only do one thing, and they do not have any side effects.
self["CIBfilename"] = args.cib_filename if args.cib_filename else None
self["ClobberCIB"] = args.clobber_cib
self["ListTests"] = args.list_tests
self["Schema"] = args.schema
- self["Stack"] = args.stack
self["SyslogFacility"] = args.facility
self["TruncateLog"] = args.truncate
self["at-boot"] = args.at_boot in ["1", "yes"]
self["benchmark"] = args.benchmark
self["continue"] = args.always_continue
self["experimental-tests"] = args.experimental_tests
self["iterations"] = args.iterations
self["loop-minutes"] = args.loop_minutes
self["loop-tests"] = not args.no_loop_tests
self["nodes"] = shlex.split(args.nodes)
self["notification-agent"] = args.notification_agent
self["notification-recipient"] = args.notification_recipient
self["oprofile"] = shlex.split(args.oprofile)
self["stonith-params"] = args.stonith_args
self["stonith-type"] = args.stonith_type
self["unsafe-tests"] = not args.no_unsafe_tests
self["valgrind-procs"] = args.valgrind_procs
self["warn-inactive"] = args.warn_inactive
# Nodes and groups are mutually exclusive. Additionally, --group does
# more than just set a value. Here, set nodes first and then if a group
# is specified, override the previous nodes value.
if args.group:
self["OutputFile"] = f"{os.environ['HOME']}/cluster-{args.dsh_group}.log"
LogFactory().add_file(self["OutputFile"], "CTS")
dsh_file = f"{os.environ['HOME']}/.dsh/group/{args.dsh_group}"
if os.path.isfile(dsh_file):
self["nodes"] = []
with open(dsh_file, "r", encoding="utf-8") as f:
for line in f:
stripped = line.strip()
if not stripped.startswith('#'):
self["nodes"].append(stripped)
else:
print(f"Unknown DSH group: {args.dsh_group}")
# Everything else either can't have a default set in an add_argument
# call (likely because we don't want to always have a value set for it)
# or it does something fancier than just set a single value. However,
# order does not matter for these as long as the user doesn't provide
# conflicting arguments on the command line. So just do Everything
# alphabetically.
if args.boot:
self["scenario"] = "boot"
if args.choose:
self["scenario"] = "sequence"
self["tests"].extend(shlex.split(args.choose))
self["iterations"] = len(self["tests"])
if args.fencing in ["0", "no"]:
self["DoFencing"] = False
elif args.fencing in ["rhcs", "virt", "xvm"]:
self["stonith-type"] = "fence_xvm"
elif args.fencing == "scsi":
self["stonith-type"] = "fence_scsi"
elif args.fencing in ["lha", "ssh"]:
self["stonith-params"] = "hostlist=all,livedangerously=yes"
self["stonith-type"] = "external/ssh"
elif args.fencing == "openstack":
self["stonith-type"] = "fence_openstack"
print("Obtaining OpenStack credentials from the current environment")
region = os.environ['OS_REGION_NAME']
tenant = os.environ['OS_TENANT_NAME']
auth = os.environ['OS_AUTH_URL']
user = os.environ['OS_USERNAME']
password = os.environ['OS_PASSWORD']
self["stonith-params"] = f"region={region},tenant={tenant},auth={auth},user={user},password={password}"
elif args.fencing == "rhevm":
self["stonith-type"] = "fence_rhevm"
print("Obtaining RHEV-M credentials from the current environment")
user = os.environ['RHEVM_USERNAME']
password = os.environ['RHEVM_PASSWORD']
server = os.environ['RHEVM_SERVER']
port = os.environ['RHEVM_PORT']
self["stonith-params"] = f"login={user},passwd={password},ipaddr={server},ipport={port},ssl=1,shell_timeout=10"
if args.ip:
self["CIBResource"] = True
self["ClobberCIB"] = True
self["IPBase"] = args.ip
if args.logfile == "journal":
self["LogAuditDisabled"] = True
self["log_kind"] = LogKind.JOURNAL
elif args.logfile:
self["LogAuditDisabled"] = True
self["LogFileName"] = args.logfile
self["log_kind"] = LogKind.REMOTE_FILE
else:
# We can't set this as the default on the parser.add_argument call
# for this option because then args.logfile will be set, which means
# the above branch will be taken and those other values will also be
# set.
self["LogFileName"] = "/var/log/messages"
if args.once:
self["scenario"] = "all-once"
if args.outputfile:
self["OutputFile"] = args.outputfile
LogFactory().add_file(self["OutputFile"])
if args.populate_resources:
self["CIBResource"] = True
self["ClobberCIB"] = True
if args.qarsh:
self._rsh.enable_qarsh()
self.random_gen.seed(args.seed)
for kv in args.set:
(name, value) = kv.split("=")
self[name] = value
print(f"Setting {name} = {value}")
class EnvFactory:
"""A class for constructing a singleton instance of an Environment object."""
instance = None
# pylint: disable=invalid-name
def getInstance(self, args=None):
"""
Return the previously created instance of Environment.
If no instance exists, create a new instance and return that.
"""
if not EnvFactory.instance:
EnvFactory.instance = Environment(args)
return EnvFactory.instance
def set_cts_path(extra=None):
"""Set the PATH environment variable appropriately for the tests."""
new_path = os.environ['PATH']
# Add any search paths given on the command line
if extra is not None:
for p in extra:
new_path = f"{p}:{new_path}"
cwd = os.getcwd()
if os.path.exists(f"{cwd}/cts/cts-attrd.in"):
# pylint: disable=protected-access
print(f"Running tests from the source tree: {BuildOptions._BUILD_DIR}")
for d in glob(f"{BuildOptions._BUILD_DIR}/daemons/*/"):
new_path = f"{d}:{new_path}"
new_path = f"{BuildOptions._BUILD_DIR}/tools:{new_path}"
new_path = f"{BuildOptions._BUILD_DIR}/cts/support:{new_path}"
print(f"Using local schemas from: {cwd}/xml")
os.environ["PCMK_schema_directory"] = f"{cwd}/xml"
else:
print(f"Running tests from the install tree: {BuildOptions.DAEMON_DIR} (not {cwd})")
new_path = f"{BuildOptions.DAEMON_DIR}:{new_path}"
os.environ["PCMK_schema_directory"] = BuildOptions.SCHEMA_DIR
print(f'Using PATH="{new_path}"')
os.environ['PATH'] = new_path