diff --git a/cts/benchmark/clubench.in b/cts/benchmark/clubench.in
index 1ed27942b8..ff141c8618 100644
--- a/cts/benchmark/clubench.in
+++ b/cts/benchmark/clubench.in
@@ -1,202 +1,196 @@
#!/bin/sh
#
-# Copyright 2010-2021 the Pacemaker project contributors
+# Copyright 2010-2025 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
SSHOPTS="-l root -o PasswordAuthentication=no -o ConnectTimeout=5"
msg() {
echo "$@" >&2
}
usage() {
echo "usage: $0
"
echo " dir: working directory (with the control file)"
exit 0
}
[ $# -eq 0 ] && usage
WORKDIR=$1
test -d "$WORKDIR" || usage
CTSCTRL=~/.cts
CTRL=$WORKDIR/control
CSV=$WORKDIR/bench.csv
STATS=$WORKDIR/bench.stats
test -f $CTRL && . $CTRL
@datadir@/@PACKAGE@/tests/cts/cluster_test 500 || {
msg "cluster_test failed"
exit 1
}
test -f $CTSCTRL || {
msg no CTS control file $CTSCTRL
exit 1
}
. $CTSCTRL
: ${CTS_logfacility:=local7}
-: ${CTS_stack:=corosync}
: ${CTS_logfile:="@CRM_LOG_DIR@/ha-log-bench"}
: ${CTS_adv:="--schema pacemaker-1.2 --clobber-cib -r"}
: ${RUNS:=3}
: ${CTSTESTS:="--benchmark"}
: ${CTSDIR:="@datadir@/@PACKAGE@/tests/cts"}
: ${CTS_node_list:=""}
: ${CTS_boot:=""}
: ${CTS_stonith:=""}
: ${CTS_stonith_args:=""}
[ -n "$CTS_node_list" ] || {
msg no node list specified
exit 1
}
-case "$CTS_stack" in
-corosync) CRM_REPORT_OPTS="--corosync";;
-*) msg "$CTS_stack: cluster stack not recognized"; exit 1;;
-esac
-
-CTSOPTS="--stack $CTS_stack --at-boot $CTS_boot $CTS_adv"
+CTSOPTS="--at-boot $CTS_boot $CTS_adv"
CTSOPTS="$CTSOPTS --facility $CTS_logfacility --logfile $CTS_logfile"
if [ "x$CTS_stonith" != "x" ]; then
CTSOPTS="$CTSOPTS --stonith-type $CTS_stonith"
[ "x$CTS_stonith_args" != "x" ] &&
CTSOPTS="$CTSOPTS --stonith-params \"$CTS_stonith_args\""
else
CTSOPTS="$CTSOPTS --stonith 0"
fi
CTSOPTS="$CTSOPTS $CTSTESTS"
fibonacci() {
F_LIMIT=$1
F_N=2
F_N_PREV=1
while [ $F_N -le $F_LIMIT ]; do
echo $F_N
F_N_TMP=$F_N
F_N=$((F_N+F_N_PREV))
F_N_PREV=$F_N_TMP
done
[ $F_N_PREV -ne $F_LIMIT ] && echo $F_LIMIT
}
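# For example, a 5-node cluster yields the series "2 3 5", and a 6-node
# cluster yields "2 3 5 6" (the node count is appended whenever it is not
# itself a Fibonacci number).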
[ "$SERIES" ] ||
SERIES=$(fibonacci "$(echo $CTS_node_list | wc -w)")
get_nodes() {
GN_C_NODES=$(echo $CTS_node_list | awk -v n="$1" '
{ for( i=1; i<=NF; i++ ) node[cnt++]=$i }
END{for( i=0; i<n; i++ ) print node[i] }
')
if [ $(echo $GN_C_NODES | wc -w) != $1 ]; then
msg "not enough nodes in $CTSCTRL"
exit 1
fi
echo $GN_C_NODES
}
runcts() {
RC_ODIR="$1"
msg "Running CTS"
"$CTSDIR/cts-lab" $CTSOPTS --nodes "$nodes" > "$RC_ODIR/ctsrun.out" 2>&1 &
ctspid=$!
tail -f "$RC_ODIR/ctsrun.out" &
tailpid=$!
wait $ctspid
kill $tailpid >/dev/null 2>&1
}
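# CTS benchmark output contains log lines matching "CTS:.*runtime:".
# diginfo below unpacks each numbered report tarball and applies the
# caller's awk filter to pull one field (a test name or a runtime) out of
# every such line.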
bench_re='CTS:.*runtime:'
diginfo() {
DI_CTS_DIR="$1"
DI_S="$2"
filter="$3"
(
cd "$DI_CTS_DIR" || return
for r in [0-9]*.tar.bz2; do
tar xjf $r
DI_D=$(basename "$r" .tar.bz2)
for DI_V in $(grep "$bench_re" "$DI_D/ha-log.txt" | eval "$filter"); do
DI_S="$DI_S,$DI_V"
done
rm -r "$DI_D"
done
echo $DI_S
)
}
printheader() {
diginfo $1 "" "awk '{print \$(NF-2)}'"
}
printstats() {
diginfo $1 "$clusize" "awk '{print \$(NF)}'"
}
printmedians() {
PM_F="$1"
PM_S="$clusize"
PM_MIDDLE=$((RUNS/2 + 1))
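# e.g. with RUNS=3, PM_MIDDLE=2: sorting each column and taking the
# 2nd-smallest value yields the median of the three runs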
set $(head -1 "$PM_F" | sed 's/,/ /g')
PM_COLS=$#
for PM_I in $(seq 2 $PM_COLS); do
PM_V=$(awk -v i=$PM_I -F, '{print $i}' < $PM_F | sort -n | head -$PM_MIDDLE | tail -1)
PM_S="$PM_S,$PM_V"
done
echo $PM_S
}
rm -f $CSV
tmpf=`mktemp`
test -f "$tmpf" || {
msg "can't create temporary file"
exit 1
}
trap "rm -f $tmpf" 0
for clusize in $SERIES; do
nodes=`get_nodes $clusize`
outdir=$WORKDIR/$clusize
rm -rf $outdir
mkdir -p $outdir
rm -f $tmpf
node_cleanup
for i in `seq $RUNS`; do
true > $CTS_logfile
mkdir -p $outdir/$i
runcts $outdir/$i
mkreports $outdir/$i
printstats $outdir/$i >> $tmpf
done
[ -f "$CSV" ] || printheader $outdir/1 > $CSV
printmedians $tmpf >> $CSV
cat $tmpf >> $STATS
msg "Statistics for $clusize-node cluster saved"
done
msg "Tests done for series $SERIES, output in $CSV and $STATS"
# vim: set filetype=sh:
diff --git a/cts/cluster_test.in b/cts/cluster_test.in
index 9dcc64612a..281241d040 100755
--- a/cts/cluster_test.in
+++ b/cts/cluster_test.in
@@ -1,177 +1,168 @@
#!@BASH_PATH@
#
-# Copyright 2008-2020 the Pacemaker project contributors
+# Copyright 2008-2025 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
if [ -e ~/.cts ]; then
. ~/.cts
fi
anyAsked=0
[ $# -lt 1 ] || CTS_numtests=$1
die() { echo "$@"; exit 1; }
if [ -z "$CTS_asked_once" ]; then
anyAsked=1
echo "This script should only be executed on the test exerciser."
echo "The test exerciser will remotely execute the actions required by the"
echo "tests and should not be part of the cluster itself."
read -p "Is this host intended to be the test exerciser? (yN) " doUnderstand
[ "$doUnderstand" = "y" ] \
|| die "This script must be executed on the test exerciser"
fi
if [ -z "$CTS_node_list" ]; then
anyAsked=1
read -p "Please list your cluster nodes (eg. node1 node2 node3): " CTS_node_list
else
echo "Beginning test of cluster: $CTS_node_list"
fi
-if [ -z "$CTS_stack" ]; then
- anyAsked=1
- read -p "Which cluster stack are you using? ([corosync]): " CTS_stack
- [ -n "$CTS_stack" ] || CTS_stack=corosync
-else
- echo "Using the $CTS_stack cluster stack"
-fi
-
[ "${CTS_node_list}" = "${CTS_node_list/$HOSTNAME/}" ] \
|| die "This script must be executed on the test exerciser, and the test exerciser cannot be part of the cluster"
printf "+ Bootstrapping ssh... "
if [ -z "$SSH_AUTH_SOCK" ]; then
printf "\n + Initializing SSH "
eval "$(ssh-agent)"
echo " + Adding identities..."
ssh-add
rc=$?
if [ $rc -ne 0 ]; then
echo " -- No identities added"
printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n"
read -p " - Do you want this program to help you create one? (yN) " auto_fix
if [ "$auto_fix" = "y" ]; then
ssh-keygen -t dsa
ssh-add
else
die "Please run 'ssh-keygen -t dsa' to create a new key"
fi
fi
else
echo "OK"
fi
test_ok=1
printf "+ Testing ssh configuration... "
for n in $CTS_node_list; do
ssh -l root -o PasswordAuthentication=no -o ConnectTimeout=5 "$n" /bin/true
rc=$?
if [ $rc -ne 0 ]; then
echo " - connection to $n failed"
test_ok=0
fi
done
if [ $test_ok -eq 0 ]; then
printf "\nThe ability to open key-based 'ssh' connections (as the user 'root') is required to use CTS.\n"
read -p " - Do you want this program to help you with such a setup? (yN) " auto_fix
if [ "$auto_fix" = "y" ]; then
# XXX are we picking the most suitable identity?
privKey=$(ssh-add -L | head -n1 | cut -d" " -f3)
sshCopyIdOpts="-o User=root"
[ -z "$privKey" ] || sshCopyIdOpts+=" -i \"${privKey}.pub\""
for n in $CTS_node_list; do
eval "ssh-copy-id $sshCopyIdOpts \"${n}\"" \
|| die "Attempt to 'ssh-copy-id $sshCopyIdOpts \"$n\"' failed"
done
else
die "Please install one of your SSH public keys to root's account on all cluster nodes"
fi
fi
echo "OK"
if [ -z "$CTS_logfile" ]; then
anyAsked=1
read -p " + Where does/should syslog store logs from remote hosts? (/var/log/messages) " CTS_logfile
[ -n "$CTS_logfile" ] || CTS_logfile=/var/log/messages
fi
[ -e "$CTS_logfile" ] || die "$CTS_logfile doesn't exist"
if [ -z "$CTS_logfacility" ]; then
anyAsked=1
read -p " + Which log facility does the cluster use? (daemon) " CTS_logfacility
[ -n "$CTS_logfacility" ] || CTS_logfacility=daemon
fi
if [ -z "$CTS_boot" ]; then
read -p "+ Is the cluster software started automatically when a node boots? [yN] " CTS_boot
if [ -z "$CTS_boot" ]; then
CTS_boot=0
else
case $CTS_boot in
1|y|Y) CTS_boot=1;;
*) CTS_boot=0;;
esac
fi
fi
if [ -z "$CTS_numtests" ]; then
read -p "+ How many test iterations should be performed? (500) " CTS_numtests
[ -n "$CTS_numtests" ] || CTS_numtests=500
fi
if [ -z "$CTS_asked_once" ]; then
anyAsked=1
read -p "+ What type of STONITH agent do you use? (none) " CTS_stonith
[ -z "$CTS_stonith" ] \
|| read -p "+ List any STONITH agent parameters (eq. device_host=switch.power.com): " CTS_stonith_args
[ -n "$CTS_adv" ] \
|| read -p "+ (Advanced) Any extra CTS parameters? (none) " CTS_adv
fi
[ $anyAsked -eq 0 ] \
|| read -p "+ Save values to ~/.cts for next time? (yN) " doSave
if [ "$doSave" = "y" ]; then
cat > ~/.cts <<-EOF
# CTS Test data
- CTS_stack="$CTS_stack"
CTS_node_list="$CTS_node_list"
CTS_logfile="$CTS_logfile"
CTS_logport="$CTS_logport"
CTS_logfacility="$CTS_logfacility"
CTS_asked_once=1
CTS_adv="$CTS_adv"
CTS_stonith="$CTS_stonith"
CTS_stonith_args="$CTS_stonith_args"
CTS_boot="$CTS_boot"
EOF
fi
cts_extra=""
if [ -n "$CTS_stonith" ]; then
cts_extra="$cts_extra --stonith-type $CTS_stonith"
[ -z "$CTS_stonith_args" ] \
|| cts_extra="$cts_extra --stonith-params \"$CTS_stonith_args\""
else
cts_extra="$cts_extra --stonith 0"
echo " - Testing a cluster without STONITH is like a blunt pencil... pointless"
fi
printf "\nAll set to go for %d iterations!\n" "$CTS_numtests"
[ $anyAsked -ne 0 ] \
|| echo "+ To use a different configuration, remove ~/.cts and re-run cts (or edit it manually)."
echo Now paste the following command into this shell:
-echo "@PYTHON@ `dirname "$0"`/cts-lab -L \"$CTS_logfile\" --syslog-facility \"$CTS_logfacility\" --no-unsafe-tests --stack \"$CTS_stack\" $CTS_adv --at-boot \"$CTS_boot\" $cts_extra \"$CTS_numtests\" --nodes \"$CTS_node_list\""
+echo "@PYTHON@ `dirname "$0"`/cts-lab -L \"$CTS_logfile\" --syslog-facility \"$CTS_logfacility\" --no-unsafe-tests $CTS_adv --at-boot \"$CTS_boot\" $cts_extra \"$CTS_numtests\" --nodes \"$CTS_node_list\""
# vim: set filetype=sh:
diff --git a/cts/cts-lab.in b/cts/cts-lab.in
index a909ebad01..023207d631 100644
--- a/cts/cts-lab.in
+++ b/cts/cts-lab.in
@@ -1,136 +1,130 @@
#!@PYTHON@
"""Command-line interface to Pacemaker's Cluster Test Suite (CTS)."""
# pylint doesn't like the module name "cts-lab" which is an invalid complaint for this file
# This also disables various other invalid names - it thinks scenario and match are constants
# that should have all caps names, and that cm and n are too short.
# pylint: disable=invalid-name
__copyright__ = "Copyright 2001-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import signal
import sys
from pacemaker._cts.CTS import CtsLab
from pacemaker._cts.cmcorosync import Corosync2
from pacemaker._cts.audits import audit_list
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.scenarios import AllOnce, Boot, BootCluster, LeaveBooted, RandomTests, Sequence
from pacemaker._cts.tests import test_list
# These are globals so they can be used by the signal handler.
scenario = None
LogFactory().add_stderr()
def sig_handler(signum, _frame):
"""Handle the given signal number."""
LogFactory().log(f"Interrupted by signal {signum}")
if scenario:
scenario.summarize()
if signum == 15:
if scenario:
scenario.teardown()
sys.exit(1)
def plural_s(n):
"""Return a string suffix depending on whether or not n is > 1."""
if n == 1:
return ""
return "S"
if __name__ == '__main__':
environment = CtsLab(sys.argv[1:])
iters = environment["iterations"]
tests = []
# Set the signal handler
signal.signal(15, sig_handler)
signal.signal(10, sig_handler)
- # Create the Cluster Manager object
- cm = None
-
- if environment["Stack"] == "corosync 2+":
- cm = Corosync2()
- else:
- LogFactory().log(f"Unknown stack: {environment['stack']}")
- sys.exit(1)
+ # Create the Cluster Manager object.
+ # Currently Corosync2 is the only available cluster manager.
+ cm = Corosync2()
if environment["TruncateLog"]:
if environment["OutputFile"] is None:
LogFactory().log("Ignoring truncate request because no output file specified")
else:
LogFactory().log(f"Truncating {environment['OutputFile']}")
with open(environment["OutputFile"], "w", encoding="utf-8") as outputfile:
outputfile.truncate(0)
audits = audit_list(cm)
if environment["ListTests"]:
tests = test_list(cm, audits)
LogFactory().log(f"Total {len(tests)} tests")
for test in tests:
LogFactory().log(test.name)
sys.exit(0)
elif len(environment["tests"]) == 0:
tests = test_list(cm, audits)
else:
chosen = environment["tests"]
for test_case in chosen:
match = None
for test in test_list(cm, audits):
if test.name == test_case:
match = test
if not match:
LogFactory().log("--choose: No applicable/valid tests chosen")
sys.exit(1)
else:
tests.append(match)
# Scenario selection
if environment["scenario"] == "all-once":
iters = len(tests)
scenario = AllOnce(cm, [BootCluster(cm, environment)], audits, tests)
elif environment["scenario"] == "sequence":
scenario = Sequence(cm, [BootCluster(cm, environment)], audits, tests)
elif environment["scenario"] == "boot":
scenario = Boot(cm, [LeaveBooted(cm, environment)], audits, [])
else:
scenario = RandomTests(cm, [BootCluster(cm, environment)], audits, tests)
LogFactory().log(f">>>>>>>>>>>>>>>> BEGINNING {iters!r} TEST{plural_s(iters)}")
- LogFactory().log(f"Stack: {environment['Stack']} ({environment['Name']})")
LogFactory().log(f"Schema: {environment['Schema']}")
LogFactory().log(f"Scenario: {scenario.__doc__}")
LogFactory().log(f"CTS Exerciser: {environment['cts-exerciser']}")
LogFactory().log(f"CTS Logfile: {environment['OutputFile']}")
LogFactory().log(f"Random Seed: {environment['RandSeed']}")
if "syslogd" in environment:
LogFactory().log(f"Syslog variant: {environment['syslogd'].strip()}")
LogFactory().log(f"System log files: {environment['LogFileName']}")
if "IPBase" in environment:
LogFactory().log(f"Base IP for resources: {environment['IPBase']}")
LogFactory().log(f"Cluster starts at boot: {environment['at-boot']}")
environment.dump()
rc = environment.run(scenario, iters)
sys.exit(rc)
# vim: set filetype=python:
diff --git a/cts/cts.in b/cts/cts.in
index 20dcb1554f..e9f73dd6a3 100755
--- a/cts/cts.in
+++ b/cts/cts.in
@@ -1,406 +1,270 @@
#!@BASH_PATH@
#
-# Copyright 2012-2023 the Pacemaker project contributors
+# Copyright 2012-2025 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
# e.g. /etc/sysconfig or /etc/default
CONFIG_DIR=@CONFIGDIR@
cts_root=`dirname $0`
logfile=0
summary=0
verbose=0
watch=0
saved=0
tests=""
install=0
clean=0
-kill=0
run=0
boot=0
-target=rhel-7
-cmd=""
-trace=""
custom_log=""
patterns="-e CTS:"
-function sed_in_place_remotely() {
- cluster-helper -g $cluster_name -- cp -p "\"$1\"" "\"$1.sed\"" \&\& sed -e "\"$2\"" "\"$1\"" \> "\"$1.sed\"" \&\& mv "\"$1.sed\"" "\"$1\""
-}
-
-
which cluster-helper &>/dev/null
if [ $? != 0 ]; then
echo $0 needs the cluster-helper script to be in your path
exit 1
fi
which cluster-clean &>/dev/null
if [ $? != 0 ]; then
echo $0 needs the cluster-clean script to be in your path
exit 1
fi
-if [ "x$cluster_name" = x ] || [ "x$cluster_name" = xpick ]; then
+if [ "x$cluster_name" = x ] ; then
clusters=`ls -1 ~/.dsh/group/[a-z]+[0-9] | sed s/.*group.// | tr '\n' ' ' `
echo "custom) interactively define a cluster"
for i in $clusters; do
echo "$i) `cluster-helper --list short -g $i`"
done
read -p "Choose a cluster [custom]: " cluster_name
echo
fi
if [ -z $cluster_name ]; then
cluster_name=custom
fi
case $cluster_name in
custom)
read -p "Cluster name: " cluster_name
read -p "Cluster hosts: " cluster_hosts
read -p "Cluster log file: " cluster_log
cluster-helper add -g "$cluster_name" -w "$cluster_hosts"
;;
*)
cluster_hosts=`cluster-helper --list short -g $cluster_name`
cluster_log=~/cluster-$cluster_name.log
;;
esac
-# NOTES ABOUT THESE AWESOME REGULAR EXPRESSIONS:
-#
-# * We can't assume GNU sed. Unfortunately, + and * are GNU extensions. Thus,
-# we have to use {1,} for + and {0,} for *.
-# * You don't need to add an extra set of escaped quotes around the sed expression
-# arguments here - sed_in_place_remotely will do that for you.
-# * Only literal quotes need the triple backslashes. All other special characters
-# are fine with just a single one.
-# * sed needs a LOT of characters escaped - \, {, }, (, ), and | at least.
-
-if [ x$cmd != x ]; then
- config="${CONFIG_DIR}/pacemaker"
- case $cmd in
- trace-ls|tls)
- cluster-helper -g $cluster_name -- grep "^[[:space:]]*PCMK_trace_functions" $config
- ;;
- trace-add|tadd)
- echo "Adding $trace to PCMK_trace_functions"
- # Note that this only works if there's already a PCMK_trace_functions line.
- # If there isn't one, create it with trace-set first.
- #
- # Match optional whitespace; then PCMK_trace_functions; then an equals
- # surrounded by optional whitespace; then an optional quote; then whatever
- # else (presumably, this is the list of previously traced functions with
- # an optional trailing quote). Replace the entire line with
- # PCMK_trace_functions=,
- sed_in_place_remotely "$config" "s/^[ \t]\{0,\}PCMK_trace_functions[ \t]\{0,\}=[ \t]\{0,\}\(\\\"\{0,1\}\)\(.\{1,\}\)/PCMK_trace_functions=\1$trace,\2/"
- ;;
- trace-rm|trm)
- echo "Removing $trace from PCMK_trace_functions"
- # A bunch of simple regexes are easier to follow than one giant one.
- # Look for $trace in the following places on any line containing
- # PCMK_trace_functions near the beginning:
- #
- # (1) At the start of a list -
- # Match one of a leading quote, or an equals followed by optional
- # whitespace; then $trace; then a comma. Replace $trace with whatever
- # came before it.
- # (2) In the middle of a list -
- # Match a comma; then $trace; then a comma. Replace $trace with a
- # single comma.
- # (3) At the end of a list -
- # Match a comma; then $trace; then one of a quote, whitespace, or
- # the EOL. Replace $trace with whatever came after it.
- # (4) All by itself -
- # Match one of a leading quote, whitespace, or equals followed by
- # optional whitespace; then $trace; then one of a trailing quote,
- # whitespace, or the EOL. Replace $trace with whatever came before
- # and after it.
- sed_in_place_remotely "$config" "/^[ \t]\{0,\}PCMK_trace_functions/ { \
- s/\(\\\"\|=\|[ \t]\{1,\}\)$trace,/\1/ ; \
- s/,$trace,/,/ ; \
- s/,$trace\(\\\"\|[ \t]\{1,\}\|$\)/\1/ ; \
- s/\(\\\"\|[ \t]\{1,\}\|=[ \t]\{0,\}\)$trace\(\\\"\|[ \t]\{1,\}\|$\)/\1\2/ }"
- ;;
- trace-set|tset)
- echo "Setting PCMK_trace_functions to '$trace'"
- # Do this in two separate sed commands:
- #
- # (1) Unconditionally remove any existing PCMK_trace_functions= lines.
- # (2) Add a new line with $trace after the example line, which therefore
- # must exist. Note that GNU sed would support "a PCMK_trace_functions=$trace",
- # but that's an extension. For all other seds, we have to put the
- # command and the text on separate lines.
- sed_in_place_remotely "$config" "/^[ \t]*PCMK_trace_functions/ d ; /^# Example: PCMK_trace_functions/ a\\\
-PCMK_trace_functions=\\\"$trace\\\""
- ;;
- esac
- exit 0
-fi
-
if [ $run = 1 ]; then
install=1
clean=1
fi
if [ $clean = 1 ]; then
rm -f $cluster_log
cluster-clean -g $cluster_name --kill
-elif [ $kill = 1 ]; then
- cluster-clean -g $cluster_name --kill-only
- exit 0
fi
if [ $install = 1 ]; then
cluster-helper -g $cluster_name -- yum install -y pacemaker pacemaker-debuginfo pacemaker-cts libqb libqb-debuginfo
fi
if [ $boot = 1 ]; then
$cts_root/cts-lab -r -c -g $cluster_name --boot
rc=$?
if [ $rc = 0 ]; then
echo "The cluster is ready..."
fi
exit $rc
elif [ $run = 1 ]; then
$cts_root/cts-lab -r -c -g $cluster_name 500 "$@"
exit $?
elif [ $clean = 1 ]; then
exit 0
fi
-screen -ls | grep cts-$cluster_name &>/dev/null
-active=$?
-
if [ ! -z $custom_log ]; then
cluster_log=$custom_log
fi
if [ "x$tests" != x ] && [ "x$tests" != "x " ]; then
for t in $tests; do
echo "crm_report --cts-log $cluster_log -d -T $t"
crm_report --cts-log $cluster_log -d -T $t
done
elif [ $logfile = 1 ]; then
echo $cluster_log
elif [ $summary = 1 ]; then
files=$cluster_log
if [ $saved = 1 ]; then
files=`ls -1tr ~/CTS-*/cluster-log.txt`
fi
for f in $files; do
echo $f
case $verbose in
0)
cat -n $f | grep $patterns | grep -v "CTS: debug:"
;;
1)
cat -n $f | grep $patterns | grep -v "CTS:.* cmd:"
;;
*)
cat -n $f | grep $patterns
;;
esac
echo ""
done
elif [ $watch = 1 ]; then
case $verbose in
0)
tail -F $cluster_log | grep $patterns | grep -v "CTS: debug:"
;;
1)
tail -F $cluster_log | grep $patterns | grep -v "CTS:.* cmd:"
;;
*)
tail -F $cluster_log | grep $patterns
;;
esac
-
-elif [ $active = 0 ]; then
- screen -x cts-$cluster_name
-
-else
- touch $cluster_log
- export cluster_name cluster_hosts cluster_log
- screen -S cts-$cluster_name bash
fi
# vim: set filetype=sh:
diff --git a/python/pacemaker/_cts/cib.py b/python/pacemaker/_cts/cib.py
index 3f0ed8327b..0a8da551c0 100644
--- a/python/pacemaker/_cts/cib.py
+++ b/python/pacemaker/_cts/cib.py
@@ -1,408 +1,404 @@
"""CIB generator for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["ConfigFactory"]
__copyright__ = "Copyright 2008-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import warnings
import tempfile
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.cibxml import Alerts, Clone, Expression, FencingTopology, Group, Nodes, OpDefaults, Option, Resource, Rule
from pacemaker._cts.network import next_ip
class CIB:
"""A class for generating, representing, and installing a CIB file onto cluster nodes."""
def __init__(self, cm, version, factory, tmpfile=None):
"""
Create a new CIB instance.
Arguments:
cm -- A ClusterManager instance
version -- The schema syntax version
factory -- A ConfigFactory instance
tmpfile -- Where to store the CIB, or None to use a new tempfile
"""
# pylint: disable=invalid-name
self._cib = None
self._cm = cm
self._counter = 1
self._factory = factory
self._num_nodes = 0
self.version = version
if not tmpfile:
warnings.filterwarnings("ignore")
# pylint: disable=consider-using-with
f = tempfile.NamedTemporaryFile(delete=True)
f.close()
tmpfile = f.name
warnings.resetwarnings()
self._factory.tmpfile = tmpfile
def _show(self):
"""Query a cluster node for its generated CIB; log and return the result."""
output = ""
(_, result) = self._factory.rsh(self._factory.target, f"HOME=/root CIB_file={self._factory.tmpfile} cibadmin -Q", verbose=1)
for line in result:
output += line
self._factory.debug(f"Generated Config: {line}")
return output
def new_ip(self, name=None):
"""Generate an IP resource for the next available IP address, optionally specifying the resource's name."""
- if self._cm.env["IPagent"] == "IPaddr2":
- ip = next_ip(self._cm.env["IPBase"])
- if not name:
- if ":" in ip:
- (_, _, suffix) = ip.rpartition(":")
- name = f"r{suffix}"
- else:
- name = f"r{ip}"
-
- r = Resource(self._factory, name, self._cm.env["IPagent"], "ocf")
- r["ip"] = ip
-
+ ip = next_ip(self._cm.env["IPBase"])
+ if not name:
if ":" in ip:
- r["cidr_netmask"] = "64"
- r["nic"] = "eth0"
+ (_, _, suffix) = ip.rpartition(":")
+ name = f"r{suffix}"
else:
- r["cidr_netmask"] = "32"
+ name = f"r{ip}"
- else:
- if not name:
- name = f"r{self._cm.env['IPagent']}{self._counter}"
- self._counter += 1
+ r = Resource(self._factory, name, "IPaddr2", "ocf")
+ r["ip"] = ip
- r = Resource(self._factory, name, self._cm.env["IPagent"], "ocf")
+ if ":" in ip:
+ r["cidr_netmask"] = "64"
+ r["nic"] = "eth0"
+ else:
+ r["cidr_netmask"] = "32"
r.add_op("monitor", "5s")
return r
def get_node_id(self, node_name):
"""Check the cluster configuration for the node ID for the given node_name."""
# We can't account for every possible configuration,
# so we only return a node ID if:
# * The node is specified in /etc/corosync/corosync.conf
# with "ring0_addr:" equal to node_name and "nodeid:"
# explicitly specified.
# In all other cases, we return 0.
node_id = 0
# awkward command: use } as record separator
# so each corosync.conf "object" is one record;
# match the "node {" record that has "ring0_addr: node_name";
# then print the substring of that record after "nodeid:"
awk = r"""awk -v RS="}" """ \
r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/""" \
r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}' %s""" \
% (node_name, BuildOptions.COROSYNC_CONFIG_FILE)
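# For example, with a corosync.conf nodelist such as
#   nodelist {
#       node {
#           ring0_addr: node1
#           nodeid: 1
#       }
#   }
# this command prints "1" when node_name is "node1".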
(rc, output) = self._factory.rsh(self._factory.target, awk, verbose=1)
if rc == 0 and len(output) == 1:
try:
node_id = int(output[0])
except ValueError:
node_id = 0
return node_id
def install(self, target):
"""Generate a CIB file and install it to the given cluster node."""
old = self._factory.tmpfile
# Force a rebuild
self._cib = None
self._factory.tmpfile = f"{BuildOptions.CIB_DIR}/cib.xml"
self.contents(target)
self._factory.rsh(self._factory.target, f"chown {BuildOptions.DAEMON_USER} {self._factory.tmpfile}")
self._factory.tmpfile = old
def contents(self, target):
"""Generate a complete CIB file."""
+ # pylint: disable=too-many-locals
# fencing resource
if self._cib:
return self._cib
if target:
self._factory.target = target
self._factory.rsh(self._factory.target, f"HOME=/root cibadmin --empty {self.version} > {self._factory.tmpfile}")
self._num_nodes = len(self._cm.env["nodes"])
no_quorum = "stop"
if self._num_nodes < 3:
no_quorum = "ignore"
self._factory.log(f"Cluster only has {self._num_nodes} nodes, configuring: no-quorum-policy=ignore")
# We don't need a nodes section unless we add attributes
stn = None
# Fencing resource
# Define first so that the shell doesn't reject every update
if self._cm.env["DoFencing"]:
# Define the "real" fencing device
st = Resource(self._factory, "Fencing", self._cm.env["stonith-type"], "stonith")
# Set a threshold for unreliable stonith devices such as the vmware one
st.add_meta("migration-threshold", "5")
st.add_op("monitor", "120s", timeout="120s")
st.add_op("stop", "0", timeout="60s")
st.add_op("start", "0", timeout="60s")
# For remote node tests, a cluster node is stopped and brought back up
# as a remote node with the name "remote-OLDNAME". To allow fencing
# devices to fence these nodes, create a list of all possible node names.
all_node_names = [prefix + n for n in self._cm.env["nodes"] for prefix in ('', 'remote-')]
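# For example, nodes ["node1", "node2"] yield
# ["node1", "remote-node1", "node2", "remote-node2"].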
# Add all parameters specified by user
entries = self._cm.env["stonith-params"].split(',')
for entry in entries:
try:
(name, value) = entry.split('=', 1)
except ValueError:
print(f"Warning: skipping invalid fencing parameter: {entry}")
continue
# Allow user to specify "all" as the node list, and expand it here
if name in ["hostlist", "pcmk_host_list"] and value == "all":
value = ' '.join(all_node_names)
st[name] = value
st.commit()
# Test advanced fencing logic
stf_nodes = []
stt_nodes = []
attr_nodes = {}
# Create the levels
stl = FencingTopology(self._factory)
for node in self._cm.env["nodes"]:
# Remote node tests will rename the node
remote_node = f"remote-{node}"
# Randomly assign node to a fencing method
- ftype = self._cm.env.random_gen.choice(["levels-and", "levels-or ", "broadcast "])
+ # @TODO What does "broadcast" do, if anything?
+ types = ["levels-and", "levels-or", "broadcast"]
+ width = max(len(t) for t in types)
+ ftype = self._cm.env.random_gen.choice(types)
# For levels-and, randomly choose targeting by node name or attribute
by = ""
if ftype == "levels-and":
node_id = self.get_node_id(node)
if node_id == 0 or self._cm.env.random_gen.choice([True, False]):
by = " (by name)"
else:
attr_nodes[node] = node_id
by = " (by attribute)"
- self._cm.log(f" - Using {ftype} fencing for node: {node}{by}")
+ self._cm.log(f" - Using {ftype:{width}} fencing for node: {node}{by}")
if ftype == "levels-and":
# If targeting by name, add a topology level for this node
if node not in attr_nodes:
stl.level(1, node, "FencingPass,Fencing")
# Always target remote nodes by name, otherwise we would need to add
# an attribute to the remote node only during remote tests (we don't
# want nonexistent remote nodes showing up in the non-remote tests).
# That complexity is not worth the effort.
stl.level(1, remote_node, "FencingPass,Fencing")
# Add the node (and its remote equivalent) to the list of levels-and nodes.
stt_nodes.extend([node, remote_node])
- elif ftype == "levels-or ":
+ elif ftype == "levels-or":
for n in [node, remote_node]:
stl.level(1, n, "FencingFail")
stl.level(2, n, "Fencing")
stf_nodes.extend([node, remote_node])
# If any levels-and nodes were targeted by attribute,
# create the attributes and a level for the attribute.
if attr_nodes:
stn = Nodes(self._factory)
for (node_name, node_id) in attr_nodes.items():
stn.add_node(node_name, node_id, {"cts-fencing": "levels-and"})
stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and")
# Create a Dummy agent that always passes for levels-and
if stt_nodes:
stt = Resource(self._factory, "FencingPass", "fence_dummy", "stonith")
stt["pcmk_host_list"] = " ".join(stt_nodes)
# Wait this many seconds before doing anything, handy for letting disks get flushed too
stt["random_sleep_range"] = "30"
stt["mode"] = "pass"
stt.commit()
# Create a Dummy agent that always fails for levels-or
if stf_nodes:
stf = Resource(self._factory, "FencingFail", "fence_dummy", "stonith")
stf["pcmk_host_list"] = " ".join(stf_nodes)
# Wait this many seconds before doing anything, handy for letting disks get flushed too
stf["random_sleep_range"] = "30"
stf["mode"] = "fail"
stf.commit()
# Now commit the levels themselves
stl.commit()
o = Option(self._factory)
o["stonith-enabled"] = self._cm.env["DoFencing"]
o["start-failure-is-fatal"] = "false"
o["pe-input-series-max"] = "5000"
o["shutdown-escalation"] = "5min"
o["batch-limit"] = "10"
o["dc-deadtime"] = "5s"
o["no-quorum-policy"] = no_quorum
o.commit()
o = OpDefaults(self._factory)
o["timeout"] = "90s"
o.commit()
# Commit the nodes section if we defined one
if stn is not None:
stn.commit()
# Add an alerts section if possible
if self._factory.rsh.exists_on_all(self._cm.env["notification-agent"], self._cm.env["nodes"]):
alerts = Alerts(self._factory)
alerts.add_alert(self._cm.env["notification-agent"],
self._cm.env["notification-recipient"])
alerts.commit()
# Add resources?
if self._cm.env["CIBResource"]:
self.add_resources()
# generate cib
self._cib = self._show()
if self._factory.tmpfile != f"{BuildOptions.CIB_DIR}/cib.xml":
self._factory.rsh(self._factory.target, f"rm -f {self._factory.tmpfile}")
return self._cib
def add_resources(self):
"""Add various resources and their constraints to the CIB."""
# Per-node resources
for node in self._cm.env["nodes"]:
name = f"rsc_{node}"
r = self.new_ip(name)
r.prefer(node, "100")
r.commit()
# Migrator
# Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
m = Resource(self._factory, "migrator", "Dummy", "ocf", "pacemaker")
m["passwd"] = "whatever"
m.add_meta("resource-stickiness", "1")
m.add_meta("allow-migrate", "1")
m.add_op("monitor", "P10S")
m.commit()
# Ping the test exerciser
p = Resource(self._factory, "ping-1", "ping", "ocf", "pacemaker")
p.add_op("monitor", "60s")
p["host_list"] = self._cm.env["cts-exerciser"]
p["name"] = "connected"
p["debug"] = "true"
c = Clone(self._factory, "Connectivity", p)
c["globally-unique"] = "false"
c.commit()
# promotable clone resource
s = Resource(self._factory, "stateful-1", "Stateful", "ocf", "pacemaker")
s.add_op("monitor", "15s", timeout="60s")
s.add_op("monitor", "16s", timeout="60s", role="Promoted")
ms = Clone(self._factory, "promotable-1", s)
ms["promotable"] = "true"
ms["clone-max"] = self._num_nodes
ms["clone-node-max"] = 1
ms["promoted-max"] = 1
ms["promoted-node-max"] = 1
# Require connectivity to run the promotable clone
r = Rule(self._factory, "connected", "-INFINITY", op="or")
r.add_child(Expression(self._factory, "m1-connected-1", "connected", "lt", "1"))
r.add_child(Expression(self._factory, "m1-connected-2", "connected", "not_defined", None))
ms.prefer("connected", rule=r)
ms.commit()
# Group Resource
g = Group(self._factory, "group-1")
g.add_child(self.new_ip())
if self._cm.env["have_systemd"]:
sysd = Resource(self._factory, "petulant", "pacemaker-cts-dummyd@10", "service")
sysd.add_op("monitor", "P10S")
g.add_child(sysd)
else:
g.add_child(self.new_ip())
g.add_child(self.new_ip())
# Make group depend on the promotable clone
g.after("promotable-1", first="promote", then="start")
g.colocate("promotable-1", "INFINITY", withrole="Promoted")
g.commit()
# LSB resource dependent on group-1
if BuildOptions.INIT_DIR is not None:
lsb = Resource(self._factory, "lsb-dummy", "LSBDummy", "lsb")
lsb.add_op("monitor", "5s")
lsb.after("group-1")
lsb.colocate("group-1")
lsb.commit()
class ConfigFactory:
"""Singleton to generate a CIB file for the environment's schema version."""
def __init__(self, cm):
"""
Create a new ConfigFactory instance.
Arguments:
cm -- A ClusterManager instance
"""
# pylint: disable=invalid-name
self._cm = cm
self.rsh = self._cm.rsh
if not self._cm.env["ListTests"]:
self.target = self._cm.env["nodes"][0]
self.tmpfile = None
def log(self, args):
"""Log a message."""
self._cm.log(f"cib: {args}")
def debug(self, args):
"""Log a debug message."""
self._cm.debug(f"cib: {args}")
def create_config(self, name=f"pacemaker-{BuildOptions.CIB_SCHEMA_VERSION}"):
"""Return a CIB object for the given schema version."""
return CIB(self._cm, name, self)
diff --git a/python/pacemaker/_cts/corosync.py b/python/pacemaker/_cts/corosync.py
index beb574d2b8..ace74ec2f1 100644
--- a/python/pacemaker/_cts/corosync.py
+++ b/python/pacemaker/_cts/corosync.py
@@ -1,186 +1,188 @@
"""A module providing functions for manipulating corosync."""
__all__ = ["Corosync", "localname"]
-__copyright__ = "Copyright 2009-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2009-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
import os
import shutil
import subprocess
import tempfile
import time
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.environment import EnvFactory
from pacemaker._cts.process import killall, stdout_from_command
AUTOGEN_COROSYNC_TEMPLATE = """
totem {
version: 2
cluster_name: %s
crypto_cipher: none
crypto_hash: none
transport: udp
}
nodelist {
node {
nodeid: 1
name: %s
ring0_addr: 127.0.0.1
}
}
logging {
debug: off
to_syslog: no
to_stderr: no
to_logfile: yes
logfile: %s
}
"""
def corosync_cfg_exists():
"""Return whether the corosync config file exists."""
return os.path.exists(BuildOptions.COROSYNC_CONFIG_FILE)
def corosync_log_file(cfgfile):
"""Return the path to the corosync log file, or None."""
with open(cfgfile, "r", encoding="utf-8") as f:
for line in f.readlines():
# "to_logfile:" could also be in the config file, so check for a
# slash to make sure it's a path we're looking at.
if "logfile: /" in line:
return line.split()[-1]
return None
def generate_corosync_cfg(logdir, cluster_name, node_name):
"""
Generate a corosync config file.
If there's a corosync config file already installed on the system, move
it to a temporary location and return that temporary name. Otherwise,
return None.
"""
retval = None
if corosync_cfg_exists():
# pylint: disable=consider-using-with
config_dir = os.path.dirname(BuildOptions.COROSYNC_CONFIG_FILE)
f = tempfile.NamedTemporaryFile(dir=config_dir, prefix="corosync.conf-")
f.close()
shutil.move(BuildOptions.COROSYNC_CONFIG_FILE, f.name)
retval = f.name
logfile = os.path.join(logdir, "corosync.log")
with open(BuildOptions.COROSYNC_CONFIG_FILE, "w", encoding="utf-8") as corosync_cfg:
corosync_cfg.write(AUTOGEN_COROSYNC_TEMPLATE % (cluster_name, node_name, logfile))
return retval
def localname():
"""Return the uname of the local host."""
our_uname = stdout_from_command(["uname", "-n"])
if our_uname:
our_uname = our_uname[0]
else:
our_uname = "localhost"
return our_uname
class Corosync:
"""A class for managing corosync processes and config files."""
def __init__(self, verbose, logdir, cluster_name):
"""
Create a new Corosync instance.
Arguments:
verbose -- Whether to print the corosync log file
logdir -- The base directory under which to store log files
cluster_name -- The name of the cluster
"""
self.verbose = verbose
self.logdir = logdir
self.cluster_name = cluster_name
# The Corosync class doesn't use self._env._nodes, but the
# "--nodes" argument is required to be present and nonempty
self._env = EnvFactory().getInstance(args=["--nodes", "localhost"])
self._existing_cfg_file = None
def _ready(self, logfile, timeout=10):
"""Return whether corosync is ready."""
i = 0
while i < timeout:
with open(logfile, "r", encoding="utf-8") as corosync_log:
for line in corosync_log.readlines():
if line.endswith("ready to provide service.\n"):
# Even once the line is in the log file, we may still need to wait just
# a little bit longer before corosync is really ready to go.
time.sleep(1)
return
time.sleep(1)
i += 1
raise TimeoutError
def start(self, kill_first=False, timeout=10):
"""
Start the corosync process.
Arguments:
kill_first -- Whether to kill any pre-existing corosync processes before
starting a new one
timeout -- If corosync does not start within this many seconds, raise
TimeoutError
"""
if kill_first:
killall(["corosync"])
self._existing_cfg_file = generate_corosync_cfg(self.logdir,
self.cluster_name, localname())
logfile = corosync_log_file(BuildOptions.COROSYNC_CONFIG_FILE)
+ # pylint doesn't understand that self._env is subscriptable.
+ # pylint: disable=unsubscriptable-object
if self._env["have_systemd"]:
cmd = ["systemctl", "start", "corosync.service"]
else:
cmd = ["corosync"]
if self.verbose:
print("Starting corosync")
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as test:
test.wait()
# Wait for corosync to be ready before returning
self._ready(logfile, timeout=timeout)
def stop(self):
"""Stop the corosync process."""
killall(["corosync"])
if self.verbose:
print("Corosync output")
logfile = corosync_log_file(BuildOptions.COROSYNC_CONFIG_FILE)
with open(logfile, "r", encoding="utf-8") as corosync_log:
for line in corosync_log.readlines():
print(line.strip())
os.remove(BuildOptions.COROSYNC_CONFIG_FILE)
# If there was a previous corosync config file, move it back into place
if self._existing_cfg_file:
shutil.move(self._existing_cfg_file, BuildOptions.COROSYNC_CONFIG_FILE)
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index d87fe93ecb..5b63326442 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,611 +1,568 @@
"""Test environment classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["EnvFactory", "set_cts_path"]
__copyright__ = "Copyright 2014-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import argparse
from contextlib import suppress
from glob import glob
import os
import random
import shlex
import socket
import sys
-import time
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.watcher import LogKind
class Environment:
"""
A class for managing the CTS environment.
This consists largely of processing and storing command line parameters.
"""
# pylint doesn't understand that self._rsh is callable (it stores the
# singleton instance of RemoteExec, as returned by the getInstance method
# of RemoteFactory).
# @TODO See if type annotations fix this.
# I think we could also fix this by getting rid of the getInstance methods,
# but that's a project for another day. For now, just disable the warning.
# pylint: disable=not-callable
def __init__(self, args):
"""
Create a new Environment instance.
This class can be treated kind of like a dictionary due to the presence
of typical dict functions like __contains__, __getitem__, and __setitem__.
However, it is not a dictionary so do not rely on standard dictionary
behavior.
Arguments:
args -- A list of command line parameters, minus the program name.
If None, sys.argv will be used.
"""
self.data = {}
self._nodes = []
# Set some defaults before processing command line arguments. These are
# either not set by any command line parameter, or they need a default
# that can't be set in add_argument.
self["DeadTime"] = 300
self["StartTime"] = 300
self["StableTime"] = 30
self["tests"] = []
- self["IPagent"] = "IPaddr2"
self["DoFencing"] = True
self["CIBResource"] = False
self["log_kind"] = None
self["scenario"] = "random"
+ # Hard-coded since there is only one supported cluster manager/stack
+ self["Name"] = "crm-corosync"
+ self["Stack"] = "corosync 2+"
+
self.random_gen = random.Random()
self._logger = LogFactory()
self._rsh = RemoteFactory().getInstance()
- self._target = "localhost"
- self._seed_random()
self._parse_args(args)
if not self["ListTests"]:
self._validate()
self._discover()
- def _seed_random(self, seed=None):
- """
- Initialize the random number generator.
-
- Arguments:
- seed -- Use this to see the random number generator, or use the
- current time if None.
- """
- if not seed:
- seed = int(time.time())
-
- self["RandSeed"] = seed
- self.random_gen.seed(str(seed))
-
def dump(self):
"""Print the current environment."""
for key in sorted(self.data.keys()):
self._logger.debug(f"{f'Environment[{key}]':35}: {str(self[key])}")
def __contains__(self, key):
"""Return True if the given key exists in the environment."""
if key == "nodes":
return True
return key in self.data
def __getitem__(self, key):
"""Return the given environment key, or None if it does not exist."""
if key == "nodes":
return self._nodes
- if key == "Name":
- return self._get_stack_short()
-
return self.data.get(key)
def __setitem__(self, key, value):
"""Set the given environment key to the given value, overriding any previous value."""
- if key == "Stack":
- self._set_stack(value)
-
- elif key == "nodes":
+ if key == "nodes":
self._nodes = []
for node in value:
# I don't think I need the IP address, etc. but this validates
# the node name against /etc/hosts and/or DNS, so it's a
# GoodThing(tm).
try:
n = node.strip()
# @TODO This only handles IPv4, use getaddrinfo() instead
# (here and in _discover())
socket.gethostbyname_ex(n)
self._nodes.append(n)
except socket.herror:
self._logger.log(f"{node} not found in DNS... aborting")
raise
else:
self.data[key] = value
def random_node(self):
"""Choose a random node from the cluster."""
return self.random_gen.choice(self["nodes"])
- def _set_stack(self, name):
- """Normalize the given cluster stack name."""
- if name in ["corosync", "cs", "mcp"]:
- self.data["Stack"] = "corosync 2+"
-
- else:
- raise ValueError(f"Unknown stack: {name}")
-
- def _get_stack_short(self):
- """Return the short name for the currently set cluster stack."""
- if "Stack" not in self.data:
- return "unknown"
-
- if self.data["Stack"] == "corosync 2+":
- return "crm-corosync"
-
- LogFactory().log(f"Unknown stack: {self['stack']}")
- raise ValueError(f"Unknown stack: {self['stack']}")
-
- def _detect_systemd(self):
+ def _detect_systemd(self, node):
"""Detect whether systemd is in use on the target node."""
if "have_systemd" not in self.data:
- (rc, _) = self._rsh(self._target, "systemctl list-units", verbose=0)
+ (rc, _) = self._rsh(node, "systemctl list-units", verbose=0)
self["have_systemd"] = rc == 0
- def _detect_syslog(self):
+ def _detect_syslog(self, node):
"""Detect the syslog variant in use on the target node (if any)."""
if "syslogd" in self.data:
return
if self["have_systemd"]:
# Systemd
- (_, lines) = self._rsh(self._target, r"systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
+ (_, lines) = self._rsh(node, r"systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
else:
# SYS-V
- (_, lines) = self._rsh(self._target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
+ (_, lines) = self._rsh(node, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
with suppress(IndexError):
self["syslogd"] = lines[0].strip()
def disable_service(self, node, service):
"""Disable the given service on the given node."""
if self["have_systemd"]:
# Systemd
(rc, _) = self._rsh(node, f"systemctl disable {service}")
return rc
# SYS-V
(rc, _) = self._rsh(node, f"chkconfig {service} off")
return rc
def enable_service(self, node, service):
"""Enable the given service on the given node."""
if self["have_systemd"]:
# Systemd
(rc, _) = self._rsh(node, f"systemctl enable {service}")
return rc
# SYS-V
(rc, _) = self._rsh(node, f"chkconfig {service} on")
return rc
def service_is_enabled(self, node, service):
"""Return True if the given service is enabled on the given node."""
if self["have_systemd"]:
# Systemd
# With "systemctl is-enabled", we should check if the service is
# explicitly "enabled" instead of the return code. For example it returns
# 0 if the service is "static" or "indirect", but they don't really count
# as "enabled".
(rc, _) = self._rsh(node, f"systemctl is-enabled {service} | grep enabled")
return rc == 0
# SYS-V
(rc, _) = self._rsh(node, f"chkconfig --list | grep -e {service}.*on")
return rc == 0
- def _detect_at_boot(self):
+ def _detect_at_boot(self, node):
"""Detect if the cluster starts at boot."""
if "at-boot" not in self.data:
- self["at-boot"] = self.service_is_enabled(self._target, "corosync") \
- or self.service_is_enabled(self._target, "pacemaker")
+ self["at-boot"] = self.service_is_enabled(node, "corosync") \
+ or self.service_is_enabled(node, "pacemaker")
- def _detect_ip_offset(self):
+ def _detect_ip_offset(self, node):
"""Detect the offset for IPaddr resources."""
if self["CIBResource"] and "IPBase" not in self.data:
- (_, lines) = self._rsh(self._target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
+ (_, lines) = self._rsh(node, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
network = lines[0].strip()
- (_, lines) = self._rsh(self._target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
+ (_, lines) = self._rsh(node, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
try:
self["IPBase"] = lines[0].strip()
except (IndexError, TypeError):
self["IPBase"] = None
if not self["IPBase"]:
self["IPBase"] = " fe80::1234:56:7890:1000"
self._logger.log("Could not determine an offset for IPaddr resources. Perhaps nmap is not installed on the nodes.")
self._logger.log(f"""Defaulting to '{self["IPBase"]}', use --test-ip-base to override""")
return
# pylint thinks self["IPBase"] is a list, not a string, which causes it
# to error out because a list doesn't have split().
# pylint: disable=no-member
last_part = self["IPBase"].split('.')[3]
if int(last_part) >= 240:
self._logger.log(f"Could not determine an offset for IPaddr resources. Upper bound is too high: {self['IPBase']} {last_part}")
self["IPBase"] = " fe80::1234:56:7890:1000"
self._logger.log(f"""Defaulting to '{self["IPBase"]}', use --test-ip-base to override""")
def _validate(self):
"""Check that we were given all required command line parameters."""
if not self["nodes"]:
raise ValueError("No nodes specified!")
def _discover(self):
"""Probe cluster nodes to figure out how to log and manage services."""
- self._target = random.Random().choice(self["nodes"])
-
exerciser = socket.gethostname()
# Use the IP where possible to avoid name lookup failures
for ip in socket.gethostbyname_ex(exerciser)[2]:
if ip != "127.0.0.1":
exerciser = ip
break
self["cts-exerciser"] = exerciser
- self._detect_systemd()
- self._detect_syslog()
- self._detect_at_boot()
- self._detect_ip_offset()
+ node = self["nodes"][0]
+ self._detect_systemd(node)
+ self._detect_syslog(node)
+ self._detect_at_boot(node)
+ self._detect_ip_offset(node)
def _parse_args(self, argv):
"""
Parse and validate command line parameters.
Set the appropriate values in the environment dictionary. If argv is
None, use sys.argv instead.
"""
if not argv:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(epilog=f"{sys.argv[0]} -g virt1 -r --stonith ssh --schema pacemaker-2.0 500")
grp1 = parser.add_argument_group("Common options")
grp1.add_argument("-g", "--dsh-group", "--group",
metavar="GROUP", dest="group",
help="Use the nodes listed in the named DSH group (~/.dsh/groups/$name)")
grp1.add_argument("--benchmark",
action="store_true",
help="Add timing information")
grp1.add_argument("--list", "--list-tests",
action="store_true", dest="list_tests",
help="List the valid tests")
grp1.add_argument("--nodes",
default="",
metavar="NODES",
help="List of cluster nodes separated by whitespace")
- grp1.add_argument("--stack",
- default="corosync",
- metavar="STACK",
- help="Which cluster stack is installed")
grp2 = parser.add_argument_group("Options that CTS will usually auto-detect correctly")
grp2.add_argument("-L", "--logfile",
metavar="PATH",
help="Where to look for logs from cluster nodes (or 'journal' for systemd journal)")
grp2.add_argument("--at-boot", "--cluster-starts-at-boot",
choices=["1", "0", "yes", "no"],
help="Does the cluster software start at boot time?")
grp2.add_argument("--facility", "--syslog-facility",
default="daemon",
metavar="NAME",
help="Which syslog facility to log to")
grp2.add_argument("--ip", "--test-ip-base",
metavar="IP",
help="Offset for generated IP address resources")
grp3 = parser.add_argument_group("Options for release testing")
grp3.add_argument("-r", "--populate-resources",
action="store_true",
help="Generate a sample configuration")
grp3.add_argument("--choose",
metavar="NAME",
help="Run only the named tests, separated by whitespace")
grp3.add_argument("--fencing", "--stonith",
choices=["1", "0", "yes", "no", "lha", "openstack", "rhcs", "rhevm", "scsi", "ssh", "virt", "xvm"],
default="1",
help="What fencing agent to use")
grp3.add_argument("--once",
action="store_true",
help="Run all valid tests once")
grp4 = parser.add_argument_group("Additional (less common) options")
grp4.add_argument("-c", "--clobber-cib",
action="store_true",
help="Erase any existing configuration")
grp4.add_argument("-y", "--yes",
action="store_true", dest="always_continue",
help="Continue to run whenever prompted")
grp4.add_argument("--boot",
action="store_true",
help="")
grp4.add_argument("--cib-filename",
metavar="PATH",
help="Install the given CIB file to the cluster")
grp4.add_argument("--experimental-tests",
action="store_true",
help="Include experimental tests")
grp4.add_argument("--loop-minutes",
type=int, default=60,
help="")
grp4.add_argument("--no-loop-tests",
action="store_true",
help="Don't run looping/time-based tests")
grp4.add_argument("--no-unsafe-tests",
action="store_true",
help="Don't run tests that are unsafe for use with ocfs2/drbd")
grp4.add_argument("--notification-agent",
metavar="PATH",
default="/var/lib/pacemaker/notify.sh",
help="Script to configure for Pacemaker alerts")
grp4.add_argument("--notification-recipient",
metavar="R",
default="/var/lib/pacemaker/notify.log",
help="Recipient to pass to alert script")
grp4.add_argument("--oprofile",
default="",
metavar="NODES",
help="List of cluster nodes to run oprofile on")
grp4.add_argument("--outputfile",
metavar="PATH",
help="Location to write logs to")
grp4.add_argument("--qarsh",
action="store_true",
help="Use QARSH to access nodes instead of SSH")
grp4.add_argument("--schema",
metavar="SCHEMA",
default=f"pacemaker-{BuildOptions.CIB_SCHEMA_VERSION}",
help="Create a CIB conforming to the given schema")
grp4.add_argument("--seed",
metavar="SEED",
help="Use the given string as the random number seed")
grp4.add_argument("--set",
action="append",
metavar="ARG",
default=[],
help="Set key=value pairs (can be specified multiple times)")
grp4.add_argument("--stonith-args",
metavar="ARGS",
default="hostlist=all,livedangerously=yes",
help="")
grp4.add_argument("--stonith-type",
metavar="TYPE",
default="external/ssh",
help="")
grp4.add_argument("--trunc",
action="store_true", dest="truncate",
help="Truncate log file before starting")
grp4.add_argument("--valgrind-procs",
metavar="PROCS",
default="pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd",
help="Run valgrind against the given space-separated list of processes")
grp4.add_argument("--warn-inactive",
action="store_true",
help="Warn if a resource is assigned to an inactive node")
parser.add_argument("iterations",
nargs='?',
type=int, default=1,
help="Number of tests to run")
args = parser.parse_args(args=argv)
# Set values on this object based on what happened with command line
# processing. This has to be done in several blocks.
# These values can always be set. Most get a default from the add_argument
# calls, they only do one thing, and they do not have any side effects.
self["CIBfilename"] = args.cib_filename if args.cib_filename else None
self["ClobberCIB"] = args.clobber_cib
self["ListTests"] = args.list_tests
self["Schema"] = args.schema
- self["Stack"] = args.stack
self["SyslogFacility"] = args.facility
self["TruncateLog"] = args.truncate
self["at-boot"] = args.at_boot in ["1", "yes"]
self["benchmark"] = args.benchmark
self["continue"] = args.always_continue
self["experimental-tests"] = args.experimental_tests
self["iterations"] = args.iterations
self["loop-minutes"] = args.loop_minutes
self["loop-tests"] = not args.no_loop_tests
self["nodes"] = shlex.split(args.nodes)
self["notification-agent"] = args.notification_agent
self["notification-recipient"] = args.notification_recipient
self["oprofile"] = shlex.split(args.oprofile)
self["stonith-params"] = args.stonith_args
self["stonith-type"] = args.stonith_type
self["unsafe-tests"] = not args.no_unsafe_tests
self["valgrind-procs"] = args.valgrind_procs
self["warn-inactive"] = args.warn_inactive
# Nodes and groups are mutually exclusive. Additionally, --group does
# more than just set a value. Here, set nodes first and then if a group
# is specified, override the previous nodes value.
if args.group:
self["OutputFile"] = f"{os.environ['HOME']}/cluster-{args.dsh_group}.log"
LogFactory().add_file(self["OutputFile"], "CTS")
dsh_file = f"{os.environ['HOME']}/.dsh/group/{args.dsh_group}"
if os.path.isfile(dsh_file):
self["nodes"] = []
with open(dsh_file, "r", encoding="utf-8") as f:
for line in f:
stripped = line.strip()
if not stripped.startswith('#'):
self["nodes"].append(stripped)
else:
print(f"Unknown DSH group: {args.dsh_group}")
# Everything else either can't have a default set in an add_argument
# call (likely because we don't want to always have a value set for it)
# or it does something fancier than just set a single value. However,
# order does not matter for these as long as the user doesn't provide
# conflicting arguments on the command line. So just do everything
# alphabetically.
if args.boot:
self["scenario"] = "boot"
if args.choose:
self["scenario"] = "sequence"
self["tests"].extend(shlex.split(args.choose))
self["iterations"] = len(self["tests"])
if args.fencing in ["0", "no"]:
self["DoFencing"] = False
elif args.fencing in ["rhcs", "virt", "xvm"]:
self["stonith-type"] = "fence_xvm"
elif args.fencing == "scsi":
self["stonith-type"] = "fence_scsi"
elif args.fencing in ["lha", "ssh"]:
self["stonith-params"] = "hostlist=all,livedangerously=yes"
self["stonith-type"] = "external/ssh"
elif args.fencing == "openstack":
self["stonith-type"] = "fence_openstack"
print("Obtaining OpenStack credentials from the current environment")
region = os.environ['OS_REGION_NAME']
tenant = os.environ['OS_TENANT_NAME']
auth = os.environ['OS_AUTH_URL']
user = os.environ['OS_USERNAME']
password = os.environ['OS_PASSWORD']
self["stonith-params"] = f"region={region},tenant={tenant},auth={auth},user={user},password={password}"
elif args.fencing == "rhevm":
self["stonith-type"] = "fence_rhevm"
print("Obtaining RHEV-M credentials from the current environment")
user = os.environ['RHEVM_USERNAME']
password = os.environ['RHEVM_PASSWORD']
server = os.environ['RHEVM_SERVER']
port = os.environ['RHEVM_PORT']
self["stonith-params"] = f"login={user},passwd={password},ipaddr={server},ipport={port},ssl=1,shell_timeout=10"
if args.ip:
self["CIBResource"] = True
self["ClobberCIB"] = True
self["IPBase"] = args.ip
if args.logfile == "journal":
self["LogAuditDisabled"] = True
self["log_kind"] = LogKind.JOURNAL
elif args.logfile:
self["LogAuditDisabled"] = True
self["LogFileName"] = args.logfile
self["log_kind"] = LogKind.REMOTE_FILE
else:
# We can't set this as the default on the parser.add_argument call
# for this option because then args.logfile will be set, which means
# the above branch will be taken and those other values will also be
# set.
self["LogFileName"] = "/var/log/messages"
if args.once:
self["scenario"] = "all-once"
if args.outputfile:
self["OutputFile"] = args.outputfile
LogFactory().add_file(self["OutputFile"])
if args.populate_resources:
self["CIBResource"] = True
self["ClobberCIB"] = True
if args.qarsh:
self._rsh.enable_qarsh()
+ self.random_gen.seed(args.seed)
+
for kv in args.set:
(name, value) = kv.split("=")
self[name] = value
print(f"Setting {name} = {value}")
class EnvFactory:
"""A class for constructing a singleton instance of an Environment object."""
instance = None
# pylint: disable=invalid-name
def getInstance(self, args=None):
"""
Return the previously created instance of Environment.
If no instance exists, create a new instance and return that.
"""
if not EnvFactory.instance:
EnvFactory.instance = Environment(args)
return EnvFactory.instance
def set_cts_path(extra=None):
"""Set the PATH environment variable appropriately for the tests."""
new_path = os.environ['PATH']
# Add any search paths given on the command line
if extra is not None:
for p in extra:
new_path = f"{p}:{new_path}"
cwd = os.getcwd()
if os.path.exists(f"{cwd}/cts/cts-attrd.in"):
# pylint: disable=protected-access
print(f"Running tests from the source tree: {BuildOptions._BUILD_DIR}")
for d in glob(f"{BuildOptions._BUILD_DIR}/daemons/*/"):
new_path = f"{d}:{new_path}"
new_path = f"{BuildOptions._BUILD_DIR}/tools:{new_path}"
new_path = f"{BuildOptions._BUILD_DIR}/cts/support:{new_path}"
print(f"Using local schemas from: {cwd}/xml")
os.environ["PCMK_schema_directory"] = f"{cwd}/xml"
else:
print(f"Running tests from the install tree: {BuildOptions.DAEMON_DIR} (not {cwd})")
new_path = f"{BuildOptions.DAEMON_DIR}:{new_path}"
os.environ["PCMK_schema_directory"] = BuildOptions.SCHEMA_DIR
print(f'Using PATH="{new_path}"')
os.environ['PATH'] = new_path
diff --git a/python/pacemaker/_cts/tests/remotestonithd.py b/python/pacemaker/_cts/tests/remotestonithd.py
index 624b802f46..53b0a850d2 100644
--- a/python/pacemaker/_cts/tests/remotestonithd.py
+++ b/python/pacemaker/_cts/tests/remotestonithd.py
@@ -1,53 +1,55 @@
"""Fail the connection resource and fence the remote node."""
__all__ = ["RemoteStonithd"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.remotedriver import RemoteDriver
class RemoteStonithd(RemoteDriver):
"""Fail the connection resource and fence the remote node."""
def __init__(self, cm):
"""
Create a new RemoteStonithd instance.
Arguments:
cm -- A ClusterManager instance
"""
RemoteDriver.__init__(self, cm)
self.name = "RemoteStonithd"
def __call__(self, node):
"""Perform this test."""
if not self.start_new_test(node):
return self.failure(self.fail_string)
self.fail_connection(node)
self.cleanup_metal(node)
self.debug("Waiting for the cluster to recover")
self._cm.cluster_stable()
if self.failed:
return self.failure(self.fail_string)
return self.success()
def is_applicable(self):
"""Return True if this test is applicable in the current test configuration."""
+ # pylint doesn't understand that self._env is subscriptable.
+ # pylint: disable=unsubscriptable-object
return self._env["DoFencing"] and RemoteDriver.is_applicable(self)
@property
def errors_to_ignore(self):
"""Return list of errors which should be ignored."""
return [
r"Lost connection to Pacemaker Remote node",
r"Software caused connection abort",
r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)",
r"error: Result of monitor operation for .* on remote-.*: Internal communication failure"
] + super().errors_to_ignore
diff --git a/tools/cluster-clean.in b/tools/cluster-clean.in
index 91a629479a..879385daef 100755
--- a/tools/cluster-clean.in
+++ b/tools/cluster-clean.in
@@ -1,101 +1,96 @@
#!@BASH_PATH@
#
-# Copyright 2011-2023 the Pacemaker project contributors
+# Copyright 2011-2025 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
# This source code is licensed under the GNU General Public License version 2
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
hosts=
group=
kill=0
while true; do
case "$1" in
-x) set -x; shift;;
-w) for h in $2; do
hosts="$hosts -w $h";
done
shift; shift;;
-g) group=$2; shift; shift;;
--kill) kill=1; shift;;
- --kill-only) kill=2; shift;;
"") break;;
*) echo "unknown option: $1"; exit 1;;
esac
done
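# For example, "cluster-clean -g virt1 --kill" stops cluster services and
# kills leftover daemons on every node in DSH group virt1 before removing
# logs and state files.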
if [ x"$group" = x -a x"$hosts" = x ]; then
group=$CTS_GROUP
fi
if [ x"$hosts" != x ]; then
echo `date` ": Cleaning up hosts:"
target=$hosts
elif [ x"$group" != x ]; then
echo `date` ": Cleaning up group: $group"
target="-g $group"
else
echo "You didn't specify any nodes to clean up"
exit 1
fi
cluster-helper --list bullet $target
if [ $kill != 0 ]; then
echo "Cleaning processes"
# Bah. Force systemd to actually look at the process and realize it's dead
cluster-helper $target -- "service corosync stop" &> /dev/null &
cluster-helper $target -- "service pacemaker stop" &> /dev/null &
cluster-helper $target -- "killall -q -9 corosync pacemakerd pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-remoted pacemaker-schedulerd dlm_controld gfs_controld" &> /dev/null
cluster-helper $target -- 'kill -9 `pidof valgrind`' &> /dev/null
-
- if [ $kill == 2 ]; then
- exit 0
- fi
fi
#logrotate -f $cluster_rotate
echo "Cleaning files"
log_files=""
log_files="$log_files 'messages*'"
log_files="$log_files 'localmessages*'"
log_files="$log_files 'cluster*.log'"
log_files="$log_files 'corosync.log*'"
log_files="$log_files 'pacemaker.log*'"
log_files="$log_files '*.journal'"
log_files="$log_files '*.journal~'"
log_files="$log_files 'secure-*'"
state_files=""
state_files="$state_files 'cib.xml*'"
state_files="$state_files 'valgrind-*'"
state_files="$state_files 'cib-*'"
state_files="$state_files 'core.*'"
state_files="$state_files 'cts.*'"
state_files="$state_files 'pe*.bz2'"
state_files="$state_files 'fdata-*'"
for f in $log_files; do
cluster-helper $target -- "find /var/log -name '$f' -exec rm -f \{\} \;"
done
for f in $state_files; do
cluster-helper $target -- "find /var/lib -name '$f' -exec rm -f \{\} \;"
done
cluster-helper $target -- "find /dev/shm -name 'qb-*' -exec rm -f \{\} \;"
cluster-helper $target -- "find @CRM_BLACKBOX_DIR@ -name '*-*' -exec rm -f \{\} \;"
cluster-helper $target -- "find /tmp -name '*.valgrind' -exec rm -f \{\} \;"
cluster-helper $target -- 'service rsyslog restart' > /dev/null 2>&1
cluster-helper $target -- 'systemctl restart systemd-journald.socket' > /dev/null 2>&1
cluster-helper $target -- logger -i -p daemon.info __clean_logs__
#touch $cluster_log
echo `date` ": Clean complete"
# vim: set filetype=sh: