diff --git a/python/pacemaker/_cts/CTS.py b/python/pacemaker/_cts/CTS.py
index bc46525b5f..d09314a3e1 100644
--- a/python/pacemaker/_cts/CTS.py
+++ b/python/pacemaker/_cts/CTS.py
@@ -1,230 +1,230 @@
"""Main classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["CtsLab", "NodeStatus", "Process"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys
import time
import traceback
from pacemaker.exitstatus import ExitStatus
from pacemaker._cts.environment import EnvFactory
from pacemaker._cts.input import should_continue
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
class CtsLab:
"""
A class that defines the Lab Environment for the Cluster Test System.
It defines those things which are expected to change from test
environment to test environment for the same cluster manager.
This is where you define the set of nodes that are in your test lab,
what kind of reset mechanism you use, etc. All this data is stored
as key/value pairs in an Environment instance constructed from arguments
passed to this class.
The CTS code ignores names it doesn't know about or need. Individual
tests have access to this information, and it is perfectly acceptable
to provide hints, tweaks, fine-tuning directions, or other information
to the tests through this mechanism.
"""
def __init__(self, args=None):
"""
Create a new CtsLab instance.
This class can be treated kind of like a dictionary due to the presence
of typical dict functions like __contains__, __getitem__, and __setitem__.
However, it is not a dictionary so do not rely on standard dictionary
behavior.
Arguments:
args -- A list of command line parameters, minus the program name.
"""
self._env = EnvFactory().getInstance(args)
self._logger = LogFactory()
def dump(self):
"""Print the current environment."""
self._env.dump()
def __contains__(self, key):
"""Return True if the given environment key exists."""
# pylint gets confused because of EnvFactory here.
# pylint: disable=unsupported-membership-test
return key in self._env
def __getitem__(self, key):
"""Return the given environment key, or raise KeyError if it does not exist."""
# Throughout this file, pylint has trouble understanding that EnvFactory
# and RemoteFactory are singleton instances that can be treated as callable
# and subscriptable objects. Various warnings are disabled because of this.
# See also a comment about self._rsh in environment.py.
# pylint: disable=unsubscriptable-object
return self._env[key]
def __setitem__(self, key, value):
"""Set the given environment key to the given value, overriding any previous value."""
# pylint: disable=unsupported-assignment-operation
self._env[key] = value
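# A minimal usage sketch (hypothetical caller): the dict-style methods
# above let tests treat a CtsLab like a mapping:
#   lab = CtsLab(sys.argv[1:])
#   if "nodes" in lab:
#       for node in lab["nodes"]:
#           ...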
def run(self, scenario, iterations):
"""
Run the given scenario the given number of times.
Returns ExitStatus.OK on success, or ExitStatus.ERROR on error.
"""
if not scenario:
self._logger.log("No scenario was defined")
return ExitStatus.ERROR
self._logger.log("Cluster nodes: ")
# pylint: disable=unsubscriptable-object
for node in self._env["nodes"]:
- self._logger.log(" * %s" % (node))
+ self._logger.log(f" * {node}")
if not scenario.setup():
return ExitStatus.ERROR
# We want to alert on any exceptions caused by running a scenario, so
# here it's okay to disable the pylint warning.
# pylint: disable=bare-except
try:
scenario.run(iterations)
except: # noqa: E722
- self._logger.log("Exception by %s" % sys.exc_info()[0])
+ self._logger.log(f"Exception by {sys.exc_info()[0]}")
self._logger.traceback(traceback)
scenario.summarize()
scenario.teardown()
return ExitStatus.ERROR
scenario.teardown()
scenario.summarize()
if scenario.stats["failure"] > 0:
return ExitStatus.ERROR
if scenario.stats["success"] != iterations:
self._logger.log("No failure count but success != requested iterations")
return ExitStatus.ERROR
return ExitStatus.OK
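# Hypothetical caller sketch: because run() returns an ExitStatus, an
# exerciser can exit with the result directly (the "iterations" key is
# assumed here for illustration):
#   lab = CtsLab(sys.argv[1:])
#   sys.exit(lab.run(scenario, lab["iterations"]))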
class NodeStatus:
"""
A class for querying the status of cluster nodes.
Are nodes up? Do they respond to SSH connections?
"""
def __init__(self, env):
"""
Create a new NodeStatus instance.
Arguments:
env -- An Environment instance
"""
self._env = env
def _node_booted(self, node):
"""Return True if the given node is booted (responds to pings)."""
# pylint: disable=not-callable
- (rc, _) = RemoteFactory().getInstance()("localhost", "ping -nq -c1 -w1 %s" % node, verbose=0)
+ (rc, _) = RemoteFactory().getInstance()("localhost", f"ping -nq -c1 -w1 {node}", verbose=0)
return rc == 0
def _sshd_up(self, node):
"""Return true if sshd responds on the given node."""
# pylint: disable=not-callable
(rc, _) = RemoteFactory().getInstance()(node, "true", verbose=0)
return rc == 0
def wait_for_node(self, node, timeout=300):
"""
Wait for a node to become available.
Should the timeout be reached, the user will be given a choice whether
to continue or not. If not, ValueError will be raised.
Returns True when the node is available, or False if the timeout is
reached.
"""
initial_timeout = timeout
anytimeouts = False
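# Note that each failed probe in the loop below sleeps 30 seconds but
# consumes only one unit of "timeout", so the timeout argument counts
# probe attempts ("tries"), not seconds; the default of 300 tries
# allows roughly 2.5 hours.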
while timeout > 0:
if self._node_booted(node) and self._sshd_up(node):
if anytimeouts:
# Fudge to wait for the system to finish coming up
time.sleep(30)
- LogFactory().debug("Node %s now up" % node)
+ LogFactory().debug(f"Node {node} now up")
return True
time.sleep(30)
if not anytimeouts:
- LogFactory().debug("Waiting for node %s to come up" % node)
+ LogFactory().debug(f"Waiting for node {node} to come up")
anytimeouts = True
timeout -= 1
- LogFactory().log("%s did not come up within %d tries" % (node, initial_timeout))
+ LogFactory().log(f"{node} did not come up within {initial_timeout} tries")
if not should_continue(self._env["continue"]):
- raise ValueError("%s did not come up within %d tries" % (node, initial_timeout))
+ raise ValueError(f"{node} did not come up within {initial_timeout} tries")
return False
def wait_for_all_nodes(self, nodes, timeout=300):
"""Return True when all nodes come up, or False if the timeout is reached."""
for node in nodes:
if not self.wait_for_node(node, timeout):
return False
return True
class Process:
"""A class for managing a Pacemaker daemon."""
# pylint: disable=invalid-name
def __init__(self, cm, name, dc_only=False, pats=None, dc_pats=None,
badnews_ignore=None):
"""
Create a new Process instance.
Arguments:
cm -- A ClusterManager instance
name -- The command being run
dc_only -- Should this daemon be killed only on the DC?
pats -- Regexes we expect to find in log files
dc_pats -- Additional DC-specific regexes we expect to find
in log files
badnews_ignore -- Regexes for lines in the log that can be ignored
"""
self._cm = cm
self.badnews_ignore = badnews_ignore
self.dc_only = dc_only
self.dc_pats = dc_pats
self.name = name
self.pats = pats
if self.badnews_ignore is None:
self.badnews_ignore = []
if self.dc_pats is None:
self.dc_pats = []
if self.pats is None:
self.pats = []
def kill(self, node):
"""Kill the instance of this process running on the given node."""
- (rc, _) = self._cm.rsh(node, "killall -9 %s" % self.name)
+ (rc, _) = self._cm.rsh(node, f"killall -9 {self.name}")
if rc != 0:
- self._cm.log("ERROR: Kill %s failed on node %s" % (self.name, node))
+ self._cm.log(f"ERROR: Kill {self.name} failed on node {node}")
diff --git a/python/pacemaker/_cts/audits.py b/python/pacemaker/_cts/audits.py
index 5c97425479..24a391930f 100644
--- a/python/pacemaker/_cts/audits.py
+++ b/python/pacemaker/_cts/audits.py
@@ -1,1027 +1,1022 @@
"""Auditing classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["AuditConstraint", "AuditResource", "ClusterAudit", "audit_list"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
import time
import uuid
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.input import should_continue
from pacemaker._cts.watcher import LogKind, LogWatcher
class ClusterAudit:
"""
The base class for various kinds of auditors.
Specific audit implementations should be built on top of this one. Audits
can do all kinds of checks on the system. The basic interface for callers
is the `__call__` method, which returns True if the audit passes and False
if it fails.
"""
def __init__(self, cm):
"""
Create a new ClusterAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
# pylint: disable=invalid-name
self._cm = cm
self.name = None
def __call__(self):
"""Perform the audit action."""
raise NotImplementedError
def is_applicable(self):
"""
Return True if this audit is applicable in the current test configuration.
This method must be implemented by all subclasses.
"""
raise NotImplementedError
def log(self, args):
"""Log a message."""
- self._cm.log("audit: %s" % args)
+ self._cm.log(f"audit: {args}")
def debug(self, args):
"""Log a debug message."""
- self._cm.debug("audit: %s" % args)
+ self._cm.debug(f"audit: {args}")
class LogAudit(ClusterAudit):
"""
Audit each cluster node to verify that some logging system is usable.
This is done by logging a unique test message and then verifying that we
can read back that test message using logging tools.
"""
def __init__(self, cm):
"""
Create a new LogAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
ClusterAudit.__init__(self, cm)
self.name = "LogAudit"
def _restart_cluster_logging(self, nodes=None):
"""Restart logging on the given nodes, or all if none are given."""
if not nodes:
nodes = self._cm.env["nodes"]
- self._cm.debug("Restarting logging on: %r" % nodes)
+ self._cm.debug(f"Restarting logging on: {nodes!r}")
for node in nodes:
if self._cm.env["have_systemd"]:
(rc, _) = self._cm.rsh(node, "systemctl stop systemd-journald.socket")
if rc != 0:
- self._cm.log("ERROR: Cannot stop 'systemd-journald' on %s" % node)
+ self._cm.log(f"ERROR: Cannot stop 'systemd-journald' on {node}")
(rc, _) = self._cm.rsh(node, "systemctl start systemd-journald.service")
if rc != 0:
- self._cm.log("ERROR: Cannot start 'systemd-journald' on %s" % node)
+ self._cm.log(f"ERROR: Cannot start 'systemd-journald' on {node}")
if "syslogd" in self._cm.env:
- (rc, _) = self._cm.rsh(node, "service %s restart" % self._cm.env["syslogd"])
+ (rc, _) = self._cm.rsh(node, f"service {self._cm.env['syslogd']} restart")
if rc != 0:
- self._cm.log("ERROR: Cannot restart '%s' on %s" % (self._cm.env["syslogd"], node))
+ self._cm.log(f"""ERROR: Cannot restart '{self._cm.env["syslogd"]}' on {node}""")
def _create_watcher(self, patterns, kind):
"""Create a new LogWatcher instance for the given patterns."""
watch = LogWatcher(self._cm.env["LogFileName"], patterns,
self._cm.env["nodes"], kind, "LogAudit", 5,
silent=True)
watch.set_watch()
return watch
def _test_logging(self):
"""Perform the log audit."""
patterns = []
prefix = "Test message from"
suffix = str(uuid.uuid4())
watch = {}
for node in self._cm.env["nodes"]:
# Look for the node name in two places to make sure
# that syslog is logging with the correct hostname
m = re.search("^([^.]+).*", node)
if m:
simple = m.group(1)
else:
simple = node
- patterns.append("%s.*%s %s %s" % (simple, prefix, node, suffix))
+ patterns.append(f"{simple}.*{prefix} {node} {suffix}")
watch_pref = self._cm.env["log_kind"]
if watch_pref is None:
kinds = [LogKind.LOCAL_FILE]
if self._cm.env["have_systemd"]:
kinds.append(LogKind.JOURNAL)
kinds.append(LogKind.REMOTE_FILE)
for k in kinds:
watch[k] = self._create_watcher(patterns, k)
- self._cm.log("Logging test message with identifier %s" % suffix)
+ self._cm.log(f"Logging test message with identifier {suffix}")
else:
watch[watch_pref] = self._create_watcher(patterns, watch_pref)
for node in self._cm.env["nodes"]:
- cmd = "logger -p %s.info %s %s %s" % (self._cm.env["SyslogFacility"], prefix, node, suffix)
+ cmd = f"logger -p {self._cm.env['SyslogFacility']}.info {prefix} {node} {suffix}"
(rc, _) = self._cm.rsh(node, cmd, synchronous=False, verbose=0)
if rc != 0:
- self._cm.log("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node))
+ self._cm.log(f"ERROR: Cannot execute remote command [{cmd}] on {node}")
for k in list(watch.keys()):
w = watch[k]
if watch_pref is None:
- self._cm.log("Checking for test message in %s logs" % k)
+ self._cm.log(f"Checking for test message in {k} logs")
w.look_for_all(silent=True)
if w.unmatched:
for regex in w.unmatched:
- self._cm.log("Test message [%s] not found in %s logs" % (regex, w.kind))
+ self._cm.log(f"Test message [{regex}] not found in {w.kind} logs")
else:
if watch_pref is None:
- self._cm.log("Found test message in %s logs" % k)
+ self._cm.log(f"Found test message in {k} logs")
self._cm.env["log_kind"] = k
return 1
return False
def __call__(self):
"""Perform the audit action."""
max_attempts = 3
attempt = 0
self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
while attempt <= max_attempts and not self._test_logging():
attempt += 1
self._restart_cluster_logging()
time.sleep(60 * attempt)
if attempt > max_attempts:
self._cm.log("ERROR: Cluster logging unrecoverable.")
return False
return True
def is_applicable(self):
"""Return True if this audit is applicable in the current test configuration."""
if self._cm.env["LogAuditDisabled"]:
return False
return True
class DiskAudit(ClusterAudit):
"""
Audit disk usage on cluster nodes.
Verify that there is enough free space left on whichever mounted file
system holds the logs.
Warn on: less than 100 MB or 10% of free space
Error on: less than 10 MB or 5% of free space
"""
def __init__(self, cm):
"""
Create a new DiskAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
ClusterAudit.__init__(self, cm)
self.name = "DiskspaceAudit"
def __call__(self):
"""Perform the audit action."""
result = True
# @TODO Use directory of PCMK_logfile if set on host
dfcmd = "df -BM %s | tail -1 | awk '{print $(NF-1)\" \"$(NF-2)}' | tr -d 'M%%'" % BuildOptions.LOG_DIR
self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
for node in self._cm.env["nodes"]:
(_, dfout) = self._cm.rsh(node, dfcmd, verbose=1)
if not dfout:
- self._cm.log("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node))
+ self._cm.log(f"ERROR: Cannot execute remote df command [{dfcmd}] on {node}")
continue
dfout = dfout[0].strip()
try:
(used, remain) = dfout.split()
used_percent = int(used)
remaining_mb = int(remain)
except (ValueError, TypeError):
- self._cm.log("Warning: df output '%s' from %s was invalid [%s, %s]"
- % (dfout, node, used, remain))
+ self._cm.log(f"Warning: df output '{dfout}' from {node} was invalid [{used}, {remain}]")
else:
if remaining_mb < 10 or used_percent > 95:
- self._cm.log("CRIT: Out of log disk space on %s (%d%% / %dMB)"
- % (node, used_percent, remaining_mb))
+ self._cm.log(f"CRIT: Out of log disk space on {node} ({used_percent}% / {remaining_mb}MB)")
result = False
if not should_continue(self._cm.env):
- raise ValueError("Disk full on %s" % node)
+ raise ValueError(f"Disk full on {node}")
elif remaining_mb < 100 or used_percent > 90:
- self._cm.log("WARN: Low on log disk space (%dMB) on %s" % (remaining_mb, node))
+ self._cm.log(f"WARN: Low on log disk space ({remaining_mb}MB) on {node}")
return result
def is_applicable(self):
"""Return True if this audit is applicable in the current test configuration."""
return True
class FileAudit(ClusterAudit):
"""
Audit the filesystem looking for various failure conditions.
Check for:
* The presence of core dumps from corosync or Pacemaker daemons
* Stale IPC files
"""
def __init__(self, cm):
"""
Create a new FileAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
ClusterAudit.__init__(self, cm)
self.known = []
self.name = "FileAudit"
def __call__(self):
"""Perform the audit action."""
result = True
self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
for node in self._cm.env["nodes"]:
(_, lsout) = self._cm.rsh(node, "ls -al /var/lib/pacemaker/cores/* | grep core.[0-9]", verbose=1)
for line in lsout:
line = line.strip()
if line not in self.known:
result = False
self.known.append(line)
- self._cm.log("Warning: Pacemaker core file on %s: %s" % (node, line))
+ self._cm.log(f"Warning: Pacemaker core file on {node}: {line}")
(_, lsout) = self._cm.rsh(node, "ls -al /var/lib/corosync | grep core.[0-9]", verbose=1)
for line in lsout:
line = line.strip()
if line not in self.known:
result = False
self.known.append(line)
- self._cm.log("Warning: Corosync core file on %s: %s" % (node, line))
+ self._cm.log(f"Warning: Corosync core file on {node}: {line}")
if self._cm.expected_status.get(node) == "down":
clean = False
(_, lsout) = self._cm.rsh(node, "ls -al /dev/shm | grep qb-", verbose=1)
for line in lsout:
result = False
clean = True
- self._cm.log("Warning: Stale IPC file on %s: %s" % (node, line))
+ self._cm.log(f"Warning: Stale IPC file on {node}: {line}")
if clean:
(_, lsout) = self._cm.rsh(node, "ps axf | grep -e pacemaker -e corosync", verbose=1)
for line in lsout:
- self._cm.debug("ps[%s]: %s" % (node, line))
+ self._cm.debug(f"ps[{node}]: {line}")
self._cm.rsh(node, "rm -rf /dev/shm/qb-*")
else:
- self._cm.debug("Skipping %s" % node)
+ self._cm.debug(f"Skipping {node}")
return result
def is_applicable(self):
"""Return True if this audit is applicable in the current test configuration."""
return True
class AuditResource:
"""A base class for storing information about a cluster resource."""
def __init__(self, cm, line):
"""
Create a new AuditResource instance.
Arguments:
cm -- A ClusterManager instance
line -- One line of output from `crm_resource` describing a single
resource
"""
# pylint: disable=invalid-name
fields = line.split()
self._cm = cm
self.line = line
self.type = fields[1]
self.id = fields[2]
self.clone_id = fields[3]
self.parent = fields[4]
self.rprovider = fields[5]
self.rclass = fields[6]
self.rtype = fields[7]
self.host = fields[8]
self.needs_quorum = fields[9]
self.flags = int(fields[10])
self.flags_s = fields[11]
if self.parent == "NA":
self.parent = None
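# "flags" is a numeric bitmask taken from the crm_resource --list-cts
# output; the properties below test the individual bits this audit
# cares about (0x01 orphaned, 0x02 managed, 0x20 globally unique).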
@property
def unique(self):
"""Return True if this resource is unique."""
return self.flags & 0x20
@property
def orphan(self):
"""Return True if this resource is an orphan."""
return self.flags & 0x01
@property
def managed(self):
"""Return True if this resource is managed by the cluster."""
return self.flags & 0x02
class AuditConstraint:
"""A base class for storing information about a cluster constraint."""
def __init__(self, cm, line):
"""
Create a new AuditConstraint instance.
Arguments:
cm -- A ClusterManager instance
line -- One line of output from `crm_resource` describing a single
constraint
"""
# pylint: disable=invalid-name
fields = line.split()
self._cm = cm
self.line = line
self.type = fields[1]
self.id = fields[2]
self.rsc = fields[3]
self.target = fields[4]
self.score = fields[5]
self.rsc_role = fields[6]
self.target_role = fields[7]
if self.rsc_role == "NA":
self.rsc_role = None
if self.target_role == "NA":
self.target_role = None
class PrimitiveAudit(ClusterAudit):
"""
Audit primitive resources to verify a variety of conditions.
Check that:
* Resources are active and managed only when expected
* Resources are active on the expected cluster node
* Resources are not orphaned
"""
def __init__(self, cm):
"""
Create a new PrimitiveAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
ClusterAudit.__init__(self, cm)
self.name = "PrimitiveAudit"
self._active_nodes = []
self._constraints = []
self._inactive_nodes = []
self._resources = []
self._target = None
def _audit_resource(self, resource, quorum):
"""Perform the audit of a single resource."""
rc = True
active = self._cm.resource_location(resource.id)
if len(active) == 1:
if quorum:
- self.debug("Resource %s active on %r" % (resource.id, active))
+ self.debug(f"Resource {resource.id} active on {active!r}")
elif resource.needs_quorum == 1:
- self._cm.log("Resource %s active without quorum: %r" % (resource.id, active))
+ self._cm.log(f"Resource {resource.id} active without quorum: {active!r}")
rc = False
elif not resource.managed:
- self._cm.log("Resource %s not managed. Active on %r" % (resource.id, active))
+ self._cm.log(f"Resource {resource.id} not managed. Active on {active!r}")
elif not resource.unique:
# TODO: Figure out a clever way to actually audit these resource types
if len(active) > 1:
- self.debug("Non-unique resource %s is active on: %r" % (resource.id, active))
+ self.debug(f"Non-unique resource {resource.id} is active on: {active!r}")
else:
- self.debug("Non-unique resource %s is not active" % resource.id)
+ self.debug(f"Non-unique resource {resource.id} is not active")
elif len(active) > 1:
- self._cm.log("Resource %s is active multiple times: %r" % (resource.id, active))
+ self._cm.log(f"Resource {resource.id} is active multiple times: {active!r}")
rc = False
elif resource.orphan:
- self.debug("Resource %s is an inactive orphan" % resource.id)
+ self.debug(f"Resource {resource.id} is an inactive orphan")
elif not self._inactive_nodes:
- self._cm.log("WARN: Resource %s not served anywhere" % resource.id)
+ self._cm.log(f"WARN: Resource {resource.id} not served anywhere")
rc = False
elif self._cm.env["warn-inactive"]:
if quorum or not resource.needs_quorum:
- self._cm.log("WARN: Resource %s not served anywhere (Inactive nodes: %r)"
- % (resource.id, self._inactive_nodes))
+ self._cm.log(f"WARN: Resource {resource.id} not served anywhere "
+ f"(Inactive nodes: {self._inactive_nodes!r})")
else:
- self.debug("Resource %s not served anywhere (Inactive nodes: %r)"
- % (resource.id, self._inactive_nodes))
+ self.debug(f"Resource {resource.id} not served anywhere "
+ f"(Inactive nodes: {self._inactive_nodes!r})")
elif quorum or not resource.needs_quorum:
- self.debug("Resource %s not served anywhere (Inactive nodes: %r)"
- % (resource.id, self._inactive_nodes))
+ self.debug(f"Resource {resource.id} not served anywhere "
+ f"(Inactive nodes: {self._inactive_nodes!r})")
return rc
def _setup(self):
"""
Verify cluster nodes are active.
Collect resource and colocation information used for performing the audit.
"""
for node in self._cm.env["nodes"]:
if self._cm.expected_status[node] == "up":
self._active_nodes.append(node)
else:
self._inactive_nodes.append(node)
for node in self._cm.env["nodes"]:
if self._target is None and self._cm.expected_status[node] == "up":
self._target = node
if not self._target:
# TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource
# with CIB_file=/path/to/cib.xml even when the cluster isn't running
- self.debug("No nodes active - skipping %s" % self.name)
+ self.debug(f"No nodes active - skipping {self.name}")
return False
(_, lines) = self._cm.rsh(self._target, "crm_resource --list-cts",
verbose=1)
for line in lines:
if re.search("^Resource", line):
self._resources.append(AuditResource(self._cm, line))
elif re.search("^Constraint", line):
self._constraints.append(AuditConstraint(self._cm, line))
else:
- self._cm.log("Unknown entry: %s" % line)
+ self._cm.log(f"Unknown entry: {line}")
return True
def __call__(self):
"""Perform the audit action."""
result = True
if not self._setup():
return result
quorum = self._cm.has_quorum(None)
for resource in self._resources:
if resource.type == "primitive" and not self._audit_resource(resource, quorum):
result = False
return result
def is_applicable(self):
"""Return True if this audit is applicable in the current test configuration."""
# @TODO Due to long-ago refactoring, this name test would never match,
# so this audit (and those derived from it) would never run.
# Uncommenting the next lines fixes the name test, but that then
# exposes pre-existing bugs that need to be fixed.
# if self._cm["Name"] == "crm-corosync":
# return True
return False
class GroupAudit(PrimitiveAudit):
"""
Audit group resources.
Check that:
* Each of its child primitive resources is active on the expected cluster node
"""
def __init__(self, cm):
"""
Create a new GroupAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
PrimitiveAudit.__init__(self, cm)
self.name = "GroupAudit"
def __call__(self):
result = True
if not self._setup():
return result
for group in self._resources:
if group.type != "group":
continue
first_match = True
group_location = None
for child in self._resources:
if child.parent != group.id:
continue
nodes = self._cm.resource_location(child.id)
if first_match and len(nodes) > 0:
group_location = nodes[0]
first_match = False
if len(nodes) > 1:
result = False
- self._cm.log("Child %s of %s is active more than once: %r"
- % (child.id, group.id, nodes))
+ self._cm.log(f"Child {child.id} of {group.id} is active more than once: {nodes!r}")
elif not nodes:
# Groups are allowed to be partially active
# However we do need to make sure later children aren't running
group_location = None
- self.debug("Child %s of %s is stopped" % (child.id, group.id))
+ self.debug(f"Child {child.id} of {group.id} is stopped")
elif nodes[0] != group_location:
result = False
- self._cm.log("Child %s of %s is active on the wrong node (%s) expected %s"
- % (child.id, group.id, nodes[0], group_location))
+ self._cm.log(f"Child {child.id} of {group.id} is active on the wrong "
+ f"node ({nodes[0]}) expected {group_location}")
else:
- self.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0]))
+ self.debug(f"Child {child.id} of {group.id} is active on {nodes[0]}")
return result
class CloneAudit(PrimitiveAudit):
"""
Audit clone resources.
NOTE: Currently, this class does not perform any actual audit functions.
"""
def __init__(self, cm):
"""
Create a new CloneAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
PrimitiveAudit.__init__(self, cm)
self.name = "CloneAudit"
def __call__(self):
result = True
if not self._setup():
return result
for clone in self._resources:
if clone.type != "clone":
continue
for child in self._resources:
if child.parent == clone.id and child.type == "primitive":
- self.debug("Checking child %s of %s..." % (child.id, clone.id))
+ self.debug(f"Checking child {child.id} of {clone.id}...")
# Check max and node_max
# Obtain with:
# crm_resource -g clone_max --meta -r child.id
# crm_resource -g clone_node_max --meta -r child.id
return result
class ColocationAudit(PrimitiveAudit):
"""
Audit cluster resources.
Check that:
* Resources are colocated with the expected resource
"""
def __init__(self, cm):
"""
Create a new ColocationAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
PrimitiveAudit.__init__(self, cm)
self.name = "ColocationAudit"
def _crm_location(self, resource):
"""Return a list of cluster nodes where a given resource is running."""
(rc, lines) = self._cm.rsh(self._target,
- "crm_resource --locate -r %s -Q" % resource,
+ f"crm_resource --locate -r {resource} -Q",
verbose=1)
hosts = []
if rc == 0:
for line in lines:
fields = line.split()
hosts.append(fields[0])
return hosts
def __call__(self):
result = True
if not self._setup():
return result
for coloc in self._constraints:
if coloc.type != "rsc_colocation":
continue
source = self._crm_location(coloc.rsc)
target = self._crm_location(coloc.target)
if not source:
- self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc))
+ self.debug(f"Colocation audit ({coloc.id}): {coloc.rsc} not running")
else:
for node in source:
if node not in target:
result = False
- self._cm.log("Colocation audit (%s): %s running on %s (not in %r)"
- % (coloc.id, coloc.rsc, node, target))
+ self._cm.log(f"Colocation audit ({coloc.id}): {coloc.rsc} running "
+ f"on {node} (not in {target!r})")
else:
- self.debug("Colocation audit (%s): %s running on %s (in %r)"
- % (coloc.id, coloc.rsc, node, target))
+ self.debug(f"Colocation audit ({coloc.id}): {coloc.rsc} running "
+ f"on {node} (in {target!r})")
return result
class ControllerStateAudit(ClusterAudit):
"""Verify active and inactive resources."""
def __init__(self, cm):
"""
Create a new ControllerStateAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
ClusterAudit.__init__(self, cm)
self.name = "ControllerStateAudit"
def __call__(self):
result = True
up_are_down = 0
down_are_up = 0
unstable_list = []
for node in self._cm.env["nodes"]:
should_be = self._cm.expected_status[node]
rc = self._cm.test_node_cm(node)
if rc > 0:
if should_be == "down":
down_are_up += 1
if rc == 1:
unstable_list.append(node)
elif should_be == "up":
up_are_down += 1
if len(unstable_list) > 0:
result = False
- self._cm.log("Cluster is not stable: %d (of %d): %r"
- % (len(unstable_list), self._cm.upcount(), unstable_list))
+ self._cm.log(f"Cluster is not stable: {len(unstable_list)} (of "
+ f"{self._cm.upcount()}): {unstable_list!r}")
if up_are_down > 0:
result = False
- self._cm.log("%d (of %d) nodes expected to be up were down."
- % (up_are_down, len(self._cm.env["nodes"])))
+ self._cm.log(f"{up_are_down} (of {len(self._cm.env['nodes'])}) nodes "
+ "expected to be up were down.")
if down_are_up > 0:
result = False
- self._cm.log("%d (of %d) nodes expected to be down were up."
- % (down_are_up, len(self._cm.env["nodes"])))
+ self._cm.log(f"{down_are_up} (of {len(self._cm.env['nodes'])}) nodes "
+ "expected to be down were up.")
return result
def is_applicable(self):
"""Return True if this audit is applicable in the current test configuration."""
# @TODO Due to long-ago refactoring, this name test would never match,
# so this audit (and those derived from it) would never run.
# Uncommenting the next lines fixes the name test, but that then
# exposes pre-existing bugs that need to be fixed.
# if self._cm["Name"] == "crm-corosync":
# return True
return False
class CIBAudit(ClusterAudit):
"""Audit the CIB by verifying that it is identical across cluster nodes."""
def __init__(self, cm):
"""
Create a new CIBAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
ClusterAudit.__init__(self, cm)
self.name = "CibAudit"
def __call__(self):
result = True
ccm_partitions = self._cm.find_partitions()
if not ccm_partitions:
self.debug("\tNo partitions to audit")
return result
for partition in ccm_partitions:
- self.debug("\tAuditing CIB consistency for: %s" % partition)
+ self.debug(f"\tAuditing CIB consistency for: {partition}")
if self._audit_cib_contents(partition) == 0:
result = False
return result
def _audit_cib_contents(self, hostlist):
"""Perform the CIB audit on the given hosts."""
passed = True
node0 = None
node0_xml = None
partition_hosts = hostlist.split()
for node in partition_hosts:
node_xml = self._store_remote_cib(node, node0)
if node_xml is None:
- self._cm.log("Could not perform audit: No configuration from %s" % node)
+ self._cm.log(f"Could not perform audit: No configuration from {node}")
passed = False
elif node0 is None:
node0 = node
node0_xml = node_xml
elif node0_xml is None:
- self._cm.log("Could not perform audit: No configuration from %s" % node0)
+ self._cm.log(f"Could not perform audit: No configuration from {node0}")
passed = False
else:
(rc, result) = self._cm.rsh(
- node0, "crm_diff -VV -cf --new %s --original %s" % (node_xml, node0_xml), verbose=1)
+ node0, f"crm_diff -VV -cf --new {node_xml} --original {node0_xml}", verbose=1)
if rc != 0:
- self._cm.log("Diff between %s and %s failed: %d" % (node0_xml, node_xml, rc))
+ self._cm.log(f"Diff between {node0_xml} and {node_xml} failed: {rc}")
passed = False
for line in result:
if not re.search("", line):
passed = False
- self.debug("CibDiff[%s-%s]: %s" % (node0, node, line))
+ self.debug(f"CibDiff[{node0}-{node}]: {line}")
else:
- self.debug("CibDiff[%s-%s] Ignoring: %s" % (node0, node, line))
+ self.debug(f"CibDiff[{node0}-{node}] Ignoring: {line}")
return passed
def _store_remote_cib(self, node, target):
"""
Store a copy of the given node's CIB on the given target node.
If no target is given, store the CIB on the given node.
"""
- filename = "/tmp/ctsaudit.%s.xml" % node
+ filename = f"/tmp/ctsaudit.{node}.xml"
if not target:
target = node
(rc, lines) = self._cm.rsh(node, self._cm["CibQuery"], verbose=1)
if rc != 0:
self._cm.log("Could not retrieve configuration")
return None
- self._cm.rsh("localhost", "rm -f %s" % filename)
+ self._cm.rsh("localhost", f"rm -f {filename}")
for line in lines:
- self._cm.rsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename), verbose=0)
+ self._cm.rsh("localhost", f"echo \'{line[:-1]}\' >> {filename}", verbose=0)
- if self._cm.rsh.copy(filename, "root@%s:%s" % (target, filename), silent=True) != 0:
+ if self._cm.rsh.copy(filename, f"root@{target}:{filename}", silent=True) != 0:
self._cm.log("Could not store configuration")
return None
return filename
def is_applicable(self):
"""Return True if this audit is applicable in the current test configuration."""
# @TODO Due to long-ago refactoring, this name test would never match,
# so this audit (and those derived from it) would never run.
# Uncommenting the next lines fixes the name test, but that then
# exposes pre-existing bugs that need to be fixed.
# if self._cm["Name"] == "crm-corosync":
# return True
return False
class PartitionAudit(ClusterAudit):
"""
Audit each partition in a cluster to verify a variety of conditions.
Check that:
* The number of partitions and the nodes in each is as expected
* Each node is active when it should be active and inactive when it
should be inactive
* The status and epoch of each node is as expected
* A partition has quorum
* A partition has a DC when expected
"""
def __init__(self, cm):
"""
Create a new PartitionAudit instance.
Arguments:
cm -- A ClusterManager instance
"""
ClusterAudit.__init__(self, cm)
self.name = "PartitionAudit"
self._node_epoch = {}
self._node_state = {}
self._node_quorum = {}
def __call__(self):
result = True
ccm_partitions = self._cm.find_partitions()
if not ccm_partitions:
return result
self._cm.cluster_stable(double_check=True)
if len(ccm_partitions) != self._cm.partitions_expected:
- self._cm.log("ERROR: %d cluster partitions detected:" % len(ccm_partitions))
+ self._cm.log(f"ERROR: {len(ccm_partitions)} cluster partitions detected:")
result = False
for partition in ccm_partitions:
- self._cm.log("\t %s" % partition)
+ self._cm.log(f"\t {partition}")
for partition in ccm_partitions:
if self._audit_partition(partition) == 0:
result = False
return result
def _trim_string(self, avalue):
"""Remove the last character from a multi-character string."""
if not avalue:
return None
if len(avalue) > 1:
return avalue[:-1]
return avalue
def _trim2int(self, avalue):
"""Remove the last character from a multi-character string and convert the result to an int."""
trimmed = self._trim_string(avalue)
if trimmed:
return int(trimmed)
return None
def _audit_partition(self, partition):
"""Perform the audit of a single partition."""
passed = True
dc_found = []
dc_allowed_list = []
lowest_epoch = None
node_list = partition.split()
- self.debug("Auditing partition: %s" % partition)
+ self.debug(f"Auditing partition: {partition}")
for node in node_list:
if self._cm.expected_status[node] != "up":
- self._cm.log("Warn: Node %s appeared out of nowhere" % node)
+ self._cm.log(f"Warn: Node {node} appeared out of nowhere")
self._cm.expected_status[node] = "up"
# not in itself a reason to fail the audit (not what we're
# checking for in this audit)
(_, out) = self._cm.rsh(node, self._cm["StatusCmd"] % node, verbose=1)
self._node_state[node] = out[0].strip()
(_, out) = self._cm.rsh(node, self._cm["EpochCmd"], verbose=1)
self._node_epoch[node] = out[0].strip()
(_, out) = self._cm.rsh(node, self._cm["QuorumCmd"], verbose=1)
self._node_quorum[node] = out[0].strip()
- self.debug("Node %s: %s - %s - %s." % (node, self._node_state[node], self._node_epoch[node], self._node_quorum[node]))
+ self.debug(f"Node {node}: {self._node_state[node]} - {self._node_epoch[node]} - {self._node_quorum[node]}.")
self._node_state[node] = self._trim_string(self._node_state[node])
self._node_epoch[node] = self._trim2int(self._node_epoch[node])
self._node_quorum[node] = self._trim_string(self._node_quorum[node])
if not self._node_epoch[node]:
- self._cm.log("Warn: Node %s dissappeared: cant determin epoch" % node)
+ self._cm.log(f"Warn: Node {node} disappeared: can't determine epoch")
self._cm.expected_status[node] = "down"
# not in itself a reason to fail the audit (not what we're
# checking for in this audit)
elif lowest_epoch is None or self._node_epoch[node] < lowest_epoch:
lowest_epoch = self._node_epoch[node]
if not lowest_epoch:
- self._cm.log("Lowest epoch not determined in %s" % partition)
+ self._cm.log(f"Lowest epoch not determined in {partition}")
passed = False
for node in node_list:
if self._cm.expected_status[node] != "up":
continue
if self._cm.is_node_dc(node, self._node_state[node]):
dc_found.append(node)
if self._node_epoch[node] == lowest_epoch:
- self.debug("%s: OK" % node)
+ self.debug(f"{node}: OK")
elif not self._node_epoch[node]:
- self.debug("Check on %s ignored: no node epoch" % node)
+ self.debug(f"Check on {node} ignored: no node epoch")
elif not lowest_epoch:
- self.debug("Check on %s ignored: no lowest epoch" % node)
+ self.debug(f"Check on {node} ignored: no lowest epoch")
else:
- self._cm.log("DC %s is not the oldest node (%d vs. %d)"
- % (node, self._node_epoch[node], lowest_epoch))
+ self._cm.log(f"DC {node} is not the oldest node "
+ f"({self._node_epoch[node]} vs. {lowest_epoch})")
passed = False
if not dc_found:
- self._cm.log("DC not found on any of the %d allowed nodes: %s (of %s)"
- % (len(dc_allowed_list), str(dc_allowed_list), str(node_list)))
+ self._cm.log(f"DC not found on any of the {len(dc_allowed_list)} allowed "
+ f"nodes: {dc_allowed_list} (of {node_list})")
elif len(dc_found) > 1:
- self._cm.log("%d DCs (%s) found in cluster partition: %s"
- % (len(dc_found), str(dc_found), str(node_list)))
+ self._cm.log(f"{len(dc_found)} DCs ({dc_found}) found in cluster partition: {node_list}")
passed = False
if not passed:
for node in node_list:
if self._cm.expected_status[node] == "up":
- self._cm.log("epoch %s : %s"
- % (self._node_epoch[node], self._node_state[node]))
+ self._cm.log(f"epoch {self._node_epoch[node]} : {self._node_state[node]}")
return passed
def is_applicable(self):
"""Return True if this audit is applicable in the current test configuration."""
# @TODO Due to long-ago refactoring, this name test would never match,
# so this audit (and those derived from it) would never run.
# Uncommenting the next lines fixes the name test, but that then
# exposes pre-existing bugs that need to be fixed.
# if self._cm["Name"] == "crm-corosync":
# return True
return False
# pylint: disable=invalid-name
def audit_list(cm):
"""Return a list of instances of applicable audits that can be performed."""
result = []
for auditclass in [DiskAudit, FileAudit, LogAudit, ControllerStateAudit,
PartitionAudit, PrimitiveAudit, GroupAudit, CloneAudit,
ColocationAudit, CIBAudit]:
a = auditclass(cm)
if a.is_applicable():
result.append(a)
return result
diff --git a/python/pacemaker/_cts/cib.py b/python/pacemaker/_cts/cib.py
index ada231899f..3f0ed8327b 100644
--- a/python/pacemaker/_cts/cib.py
+++ b/python/pacemaker/_cts/cib.py
@@ -1,408 +1,408 @@
"""CIB generator for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["ConfigFactory"]
-__copyright__ = "Copyright 2008-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2008-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import warnings
import tempfile
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.cibxml import Alerts, Clone, Expression, FencingTopology, Group, Nodes, OpDefaults, Option, Resource, Rule
from pacemaker._cts.network import next_ip
class CIB:
"""A class for generating, representing, and installing a CIB file onto cluster nodes."""
def __init__(self, cm, version, factory, tmpfile=None):
"""
Create a new CIB instance.
Arguments:
cm -- A ClusterManager instance
version -- The schema syntax version
factory -- A ConfigFactory instance
tmpfile -- Where to store the CIB, or None to use a new tempfile
"""
# pylint: disable=invalid-name
self._cib = None
self._cm = cm
self._counter = 1
self._factory = factory
self._num_nodes = 0
self.version = version
if not tmpfile:
warnings.filterwarnings("ignore")
# pylint: disable=consider-using-with
f = tempfile.NamedTemporaryFile(delete=True)
f.close()
tmpfile = f.name
warnings.resetwarnings()
self._factory.tmpfile = tmpfile
def _show(self):
"""Query a cluster node for its generated CIB; log and return the result."""
output = ""
- (_, result) = self._factory.rsh(self._factory.target, "HOME=/root CIB_file=%s cibadmin -Q" % self._factory.tmpfile, verbose=1)
+ (_, result) = self._factory.rsh(self._factory.target, f"HOME=/root CIB_file={self._factory.tmpfile} cibadmin -Q", verbose=1)
for line in result:
output += line
- self._factory.debug("Generated Config: %s" % line)
+ self._factory.debug(f"Generated Config: {line}")
return output
def new_ip(self, name=None):
"""Generate an IP resource for the next available IP address, optionally specifying the resource's name."""
if self._cm.env["IPagent"] == "IPaddr2":
ip = next_ip(self._cm.env["IPBase"])
if not name:
if ":" in ip:
(_, _, suffix) = ip.rpartition(":")
- name = "r%s" % suffix
+ name = f"r{suffix}"
else:
- name = "r%s" % ip
+ name = f"r{ip}"
r = Resource(self._factory, name, self._cm.env["IPagent"], "ocf")
r["ip"] = ip
if ":" in ip:
r["cidr_netmask"] = "64"
r["nic"] = "eth0"
else:
r["cidr_netmask"] = "32"
else:
if not name:
- name = "r%s%d" % (self._cm.env["IPagent"], self._counter)
+ name = f"r{self._cm.env['IPagent']}{self._counter}"
self._counter += 1
r = Resource(self._factory, name, self._cm.env["IPagent"], "ocf")
r.add_op("monitor", "5s")
return r
def get_node_id(self, node_name):
"""Check the cluster configuration for the node ID for the given node_name."""
# We can't account for every possible configuration,
# so we only return a node ID if:
# * The node is specified in /etc/corosync/corosync.conf
# with "ring0_addr:" equal to node_name and "nodeid:"
# explicitly specified.
# In all other cases, we return 0.
node_id = 0
# awkward command: use } as record separator
# so each corosync.conf "object" is one record;
# match the "node {" record that has "ring0_addr: node_name";
# then print the substring of that record after "nodeid:"
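# For example (hypothetical corosync.conf excerpt), given:
#   nodelist {
#       node {
#           ring0_addr: node1
#           nodeid: 1
#       }
#   }
# the command prints "1" when node_name is "node1".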
awk = r"""awk -v RS="}" """ \
r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/""" \
r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}' %s""" \
% (node_name, BuildOptions.COROSYNC_CONFIG_FILE)
(rc, output) = self._factory.rsh(self._factory.target, awk, verbose=1)
if rc == 0 and len(output) == 1:
try:
node_id = int(output[0])
except ValueError:
node_id = 0
return node_id
def install(self, target):
"""Generate a CIB file and install it to the given cluster node."""
old = self._factory.tmpfile
# Force a rebuild
self._cib = None
- self._factory.tmpfile = "%s/cib.xml" % BuildOptions.CIB_DIR
+ self._factory.tmpfile = f"{BuildOptions.CIB_DIR}/cib.xml"
self.contents(target)
- self._factory.rsh(self._factory.target, "chown %s %s" % (BuildOptions.DAEMON_USER, self._factory.tmpfile))
+ self._factory.rsh(self._factory.target, f"chown {BuildOptions.DAEMON_USER} {self._factory.tmpfile}")
self._factory.tmpfile = old
def contents(self, target):
"""Generate a complete CIB file."""
# fencing resource
if self._cib:
return self._cib
if target:
self._factory.target = target
- self._factory.rsh(self._factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self._factory.tmpfile))
+ self._factory.rsh(self._factory.target, f"HOME=/root cibadmin --empty {self.version} > {self._factory.tmpfile}")
self._num_nodes = len(self._cm.env["nodes"])
no_quorum = "stop"
if self._num_nodes < 3:
no_quorum = "ignore"
- self._factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self._num_nodes)
+ self._factory.log(f"Cluster only has {self._num_nodes} nodes, configuring: no-quorum-policy=ignore")
# We don't need a nodes section unless we add attributes
stn = None
# Fencing resource
# Define first so that the shell doesn't reject every update
if self._cm.env["DoFencing"]:
# Define the "real" fencing device
st = Resource(self._factory, "Fencing", self._cm.env["stonith-type"], "stonith")
# Set a threshold for unreliable stonith devices such as the vmware one
st.add_meta("migration-threshold", "5")
st.add_op("monitor", "120s", timeout="120s")
st.add_op("stop", "0", timeout="60s")
st.add_op("start", "0", timeout="60s")
# For remote node tests, a cluster node is stopped and brought back up
# as a remote node with the name "remote-OLDNAME". To allow fencing
# devices to fence these nodes, create a list of all possible node names.
all_node_names = [prefix + n for n in self._cm.env["nodes"] for prefix in ('', 'remote-')]
# Add all parameters specified by user
entries = self._cm.env["stonith-params"].split(',')
for entry in entries:
try:
(name, value) = entry.split('=', 1)
except ValueError:
- print("Warning: skipping invalid fencing parameter: %s" % entry)
+ print(f"Warning: skipping invalid fencing parameter: {entry}")
continue
# Allow user to specify "all" as the node list, and expand it here
if name in ["hostlist", "pcmk_host_list"] and value == "all":
value = ' '.join(all_node_names)
st[name] = value
st.commit()
# Test advanced fencing logic
stf_nodes = []
stt_nodes = []
attr_nodes = {}
# Create the levels
stl = FencingTopology(self._factory)
for node in self._cm.env["nodes"]:
# Remote node tests will rename the node
- remote_node = "remote-%s" % node
+ remote_node = f"remote-{node}"
# Randomly assign node to a fencing method
ftype = self._cm.env.random_gen.choice(["levels-and", "levels-or ", "broadcast "])
# For levels-and, randomly choose targeting by node name or attribute
by = ""
if ftype == "levels-and":
node_id = self.get_node_id(node)
if node_id == 0 or self._cm.env.random_gen.choice([True, False]):
by = " (by name)"
else:
attr_nodes[node] = node_id
by = " (by attribute)"
- self._cm.log(" - Using %s fencing for node: %s%s" % (ftype, node, by))
+ self._cm.log(f" - Using {ftype} fencing for node: {node}{by}")
if ftype == "levels-and":
# If targeting by name, add a topology level for this node
if node not in attr_nodes:
stl.level(1, node, "FencingPass,Fencing")
# Always target remote nodes by name, otherwise we would need to add
# an attribute to the remote node only during remote tests (we don't
# want nonexistent remote nodes showing up in the non-remote tests).
# That complexity is not worth the effort.
stl.level(1, remote_node, "FencingPass,Fencing")
# Add the node (and its remote equivalent) to the list of levels-and nodes.
stt_nodes.extend([node, remote_node])
elif ftype == "levels-or ":
for n in [node, remote_node]:
stl.level(1, n, "FencingFail")
stl.level(2, n, "Fencing")
stf_nodes.extend([node, remote_node])
# If any levels-and nodes were targeted by attribute,
# create the attributes and a level for the attribute.
if attr_nodes:
stn = Nodes(self._factory)
for (node_name, node_id) in attr_nodes.items():
stn.add_node(node_name, node_id, {"cts-fencing": "levels-and"})
stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and")
# Create a Dummy agent that always passes for levels-and
if stt_nodes:
stt = Resource(self._factory, "FencingPass", "fence_dummy", "stonith")
stt["pcmk_host_list"] = " ".join(stt_nodes)
# Wait this many seconds before doing anything, handy for letting disks get flushed too
stt["random_sleep_range"] = "30"
stt["mode"] = "pass"
stt.commit()
# Create a Dummy agent that always fails for levels-or
if stf_nodes:
stf = Resource(self._factory, "FencingFail", "fence_dummy", "stonith")
stf["pcmk_host_list"] = " ".join(stf_nodes)
# Wait this many seconds before doing anything, handy for letting disks get flushed too
stf["random_sleep_range"] = "30"
stf["mode"] = "fail"
stf.commit()
# Now commit the levels themselves
stl.commit()
o = Option(self._factory)
o["stonith-enabled"] = self._cm.env["DoFencing"]
o["start-failure-is-fatal"] = "false"
o["pe-input-series-max"] = "5000"
o["shutdown-escalation"] = "5min"
o["batch-limit"] = "10"
o["dc-deadtime"] = "5s"
o["no-quorum-policy"] = no_quorum
o.commit()
o = OpDefaults(self._factory)
o["timeout"] = "90s"
o.commit()
# Commit the nodes section if we defined one
if stn is not None:
stn.commit()
# Add an alerts section if possible
if self._factory.rsh.exists_on_all(self._cm.env["notification-agent"], self._cm.env["nodes"]):
alerts = Alerts(self._factory)
alerts.add_alert(self._cm.env["notification-agent"],
self._cm.env["notification-recipient"])
alerts.commit()
# Add resources?
if self._cm.env["CIBResource"]:
self.add_resources()
# generate cib
self._cib = self._show()
- if self._factory.tmpfile != "%s/cib.xml" % BuildOptions.CIB_DIR:
- self._factory.rsh(self._factory.target, "rm -f %s" % self._factory.tmpfile)
+ if self._factory.tmpfile != f"{BuildOptions.CIB_DIR}/cib.xml":
+ self._factory.rsh(self._factory.target, f"rm -f {self._factory.tmpfile}")
return self._cib
def add_resources(self):
"""Add various resources and their constraints to the CIB."""
# Per-node resources
for node in self._cm.env["nodes"]:
- name = "rsc_%s" % node
+ name = f"rsc_{node}"
r = self.new_ip(name)
r.prefer(node, "100")
r.commit()
# Migrator
# Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
m = Resource(self._factory, "migrator", "Dummy", "ocf", "pacemaker")
m["passwd"] = "whatever"
m.add_meta("resource-stickiness", "1")
m.add_meta("allow-migrate", "1")
m.add_op("monitor", "P10S")
m.commit()
# Ping the test exerciser
p = Resource(self._factory, "ping-1", "ping", "ocf", "pacemaker")
p.add_op("monitor", "60s")
p["host_list"] = self._cm.env["cts-exerciser"]
p["name"] = "connected"
p["debug"] = "true"
c = Clone(self._factory, "Connectivity", p)
c["globally-unique"] = "false"
c.commit()
# promotable clone resource
s = Resource(self._factory, "stateful-1", "Stateful", "ocf", "pacemaker")
s.add_op("monitor", "15s", timeout="60s")
s.add_op("monitor", "16s", timeout="60s", role="Promoted")
ms = Clone(self._factory, "promotable-1", s)
ms["promotable"] = "true"
ms["clone-max"] = self._num_nodes
ms["clone-node-max"] = 1
ms["promoted-max"] = 1
ms["promoted-node-max"] = 1
# Require connectivity to run the promotable clone
r = Rule(self._factory, "connected", "-INFINITY", op="or")
r.add_child(Expression(self._factory, "m1-connected-1", "connected", "lt", "1"))
r.add_child(Expression(self._factory, "m1-connected-2", "connected", "not_defined", None))
ms.prefer("connected", rule=r)
ms.commit()
# Group Resource
g = Group(self._factory, "group-1")
g.add_child(self.new_ip())
if self._cm.env["have_systemd"]:
sysd = Resource(self._factory, "petulant", "pacemaker-cts-dummyd@10", "service")
sysd.add_op("monitor", "P10S")
g.add_child(sysd)
else:
g.add_child(self.new_ip())
g.add_child(self.new_ip())
# Make group depend on the promotable clone
g.after("promotable-1", first="promote", then="start")
g.colocate("promotable-1", "INFINITY", withrole="Promoted")
g.commit()
# LSB resource dependent on group-1
if BuildOptions.INIT_DIR is not None:
lsb = Resource(self._factory, "lsb-dummy", "LSBDummy", "lsb")
lsb.add_op("monitor", "5s")
lsb.after("group-1")
lsb.colocate("group-1")
lsb.commit()
class ConfigFactory:
"""Singleton to generate a CIB file for the environment's schema version."""
def __init__(self, cm):
"""
Create a new ConfigFactory instance.
Arguments:
cm -- A ClusterManager instance
"""
# pylint: disable=invalid-name
self._cm = cm
self.rsh = self._cm.rsh
if not self._cm.env["ListTests"]:
self.target = self._cm.env["nodes"][0]
self.tmpfile = None
def log(self, args):
"""Log a message."""
- self._cm.log("cib: %s" % args)
+ self._cm.log(f"cib: {args}")
def debug(self, args):
"""Log a debug message."""
- self._cm.debug("cib: %s" % args)
+ self._cm.debug(f"cib: {args}")
- def create_config(self, name="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION):
+ def create_config(self, name=f"pacemaker-{BuildOptions.CIB_SCHEMA_VERSION}"):
"""Return a CIB object for the given schema version."""
return CIB(self._cm, name, self)
diff --git a/python/pacemaker/_cts/cibxml.py b/python/pacemaker/_cts/cibxml.py
index a5bc315481..8c9179deb6 100644
--- a/python/pacemaker/_cts/cibxml.py
+++ b/python/pacemaker/_cts/cibxml.py
@@ -1,673 +1,673 @@
"""CIB XML generator for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = [
"Alerts",
"Clone",
"Expression",
"FencingTopology",
"Group",
"Nodes",
"OpDefaults",
"Option",
"Resource",
"Rule",
]
-__copyright__ = "Copyright 2008-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2008-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
def key_val_string(**kwargs):
"""
Construct a string from kwargs containing key=value pairs separated by spaces.
This is suitable for using in an XML element as a list of its attributes.
Any pairs that have value=None will be skipped.
Note that a dictionary can be passed to this function instead of kwargs
by using a construction like:
key_val_string(**{"a": 1, "b": 2})
"""
retval = ""
for (k, v) in kwargs.items():
if v is None:
continue
- retval += ' %s="%s"' % (k, v)
+ retval += f' {k}="{v}"'
return retval
def element(element_name, **kwargs):
"""
Create an XML element string with the given element_name and attributes.
This element does not support having any children, so it will be closed
on the same line. The attributes are processed by key_val_string.
"""
- return "<%s %s/>" % (element_name, key_val_string(**kwargs))
+ return f"<{element_name} {key_val_string(**kwargs)}/>"
def containing_element(element_name, inner, **kwargs):
"""Like element, but surrounds some child text passed by the inner parameter."""
attrs = key_val_string(**kwargs)
- return "<%s %s>%s%s>" % (element_name, attrs, inner, element_name)
+ return f"<{element_name} {attrs}>{inner}{element_name}>"
class XmlBase:
"""
A base class for deriving all kinds of XML sections in the CIB.
This class contains only the most basic operations common to all sections.
It is up to subclasses to provide most behavior.
Note that subclasses of this base class often have different sets of
arguments to their __init__ methods. In general this is not a great
practice, however it is so thoroughly used in these classes that trying
to straighten it out is likely to cause more bugs than just leaving it
alone for now.
"""
def __init__(self, factory, tag, _id, **kwargs):
"""
Create a new XmlBase instance.
Arguments:
factory -- A ConfigFactory instance
tag -- The XML element's start and end tag
_id -- A unique name for the element
kwargs -- Any additional key/value pairs that should be added to
this element as attributes
"""
self._children = []
self._factory = factory
self._kwargs = kwargs
self._tag = tag
self.name = _id
def __repr__(self):
"""Return a short string description of this XML section."""
- return "%s-%s" % (self._tag, self.name)
+ return f"{self._tag}-{self.name}"
def add_child(self, child):
"""Add an XML section as a child of this one."""
self._children.append(child)
def __setitem__(self, key, value):
"""
Add a key/value pair to this element.
The resulting pair becomes an XML attribute. If value is None, remove
the key.
"""
if value:
self._kwargs[key] = value
else:
self._kwargs.pop(key, None)
def show(self):
"""Recursively return a string representation of this XML section."""
- text = '''<%s''' % self._tag
+ text = f"<{self._tag}"
if self.name:
- text += ''' id="%s"''' % self.name
+ text += f' id="{self.name}"'
text += key_val_string(**self._kwargs)
if not self._children:
text += '''/>'''
return text
text += '''>'''
for c in self._children:
text += c.show()
- text += '''</%s>''' % self._tag
+ text += f"</{self._tag}>"
return text
def _run(self, operation, xml, section, options=""):
"""
Update the CIB on the cluster to include this XML section.
Arguments:
operation -- Whether this update is a "create" or "modify" operation
xml -- The XML to update the CIB with, typically the result
of calling show
section -- Which section of the CIB this update applies to (see
the --scope argument to cibadmin for allowed values)
options -- Extra options to pass to cibadmin
"""
if self.name:
label = self.name
else:
- label = "<%s>" % self._tag
+ label = f"<{self._tag}>"
- self._factory.debug("Writing out %s" % label)
+ self._factory.debug(f"Writing out {label}")
- fixed = "HOME=/root CIB_file=%s" % self._factory.tmpfile
- fixed += " cibadmin --%s --scope %s %s --xml-text '%s'" % (operation, section, options, xml)
+ fixed = f"HOME=/root CIB_file={self._factory.tmpfile}"
+ fixed += f" cibadmin --{operation} --scope {section} {options} --xml-text '{xml}'"
(rc, _) = self._factory.rsh(self._factory.target, fixed)
if rc != 0:
- raise RuntimeError("Configure call failed: %s" % fixed)
+ raise RuntimeError(f"Configure call failed: {fixed}")
class InstanceAttributes(XmlBase):
"""Create an XML section with key/value pairs."""
def __init__(self, factory, _id, attrs):
"""
Create a new InstanceAttributes instance.
Arguments:
factory -- A ConfigFactory instance
_id -- A unique name for the element
attrs -- Key/value pairs to add as nvpair child elements
"""
XmlBase.__init__(self, factory, "instance_attributes", _id)
# Create an <nvpair> for each attribute
for (attr, value) in attrs.items():
- self.add_child(XmlBase(factory, "nvpair", "%s-%s" % (_id, attr),
+ self.add_child(XmlBase(factory, "nvpair", f"{_id}-{attr}",
name=attr, value=value))
class Node(XmlBase):
"""Create a XML section for a single node, complete with node attributes."""
def __init__(self, factory, node_name, node_id, node_attrs):
"""
Create a new Node instance.
Arguments:
factory -- A ConfigFactory instance
node_name -- The value of the uname attribute for this node
node_id -- A unique name for the element
node_attrs -- Additional key/value pairs to set as instance
attributes for this node
"""
XmlBase.__init__(self, factory, "node", node_id, uname=node_name)
- self.add_child(InstanceAttributes(factory, "%s-1" % node_name, node_attrs))
+ self.add_child(InstanceAttributes(factory, f"{node_name}-1", node_attrs))
class Nodes(XmlBase):
"""Create a XML section containing multiple Node instances as children."""
def __init__(self, factory):
"""
Create a new Nodes instance.
Arguments:
factory -- A ConfigFactory instance
"""
XmlBase.__init__(self, factory, "nodes", None)
def add_node(self, node_name, node_id, node_attrs):
"""
Add a child node element.
Arguments:
node_name -- The value of the uname attribute for this node
node_id -- A unique name for the element
node_attrs -- Additional key/value pairs to set as instance
attributes for this node
"""
self.add_child(Node(self._factory, node_name, node_id, node_attrs))
def commit(self):
"""Modify the CIB on the cluster to include this XML section."""
self._run("modify", self.show(), "configuration", "--allow-create")
class FencingTopology(XmlBase):
"""Create a XML section describing how fencing is configured in the cluster."""
def __init__(self, factory):
"""
Create a new FencingTopology instance.
Arguments:
factory -- A ConfigFactory instance
"""
XmlBase.__init__(self, factory, "fencing-topology", None)
def level(self, index, target, devices, target_attr=None, target_value=None):
"""
Generate a <fencing-level> XML element.
index -- The order in which to attempt fencing-levels
(1 through 9). Levels are attempted in ascending
order until one succeeds.
target -- The name of a single node to which this level applies
devices -- A list of devices that must all be tried for this
level
target_attr -- The name of a node attribute that is set for nodes
to which this level applies
target_value -- The value of a node attribute that is set for nodes
to which this level applies
"""
if target:
- xml_id = "cts-%s.%d" % (target, index)
+ xml_id = f"cts-{target}.{index}"
self.add_child(XmlBase(self._factory, "fencing-level", xml_id, target=target, index=index, devices=devices))
else:
- xml_id = "%s-%s.%d" % (target_attr, target_value, index)
+ xml_id = f"{target_attr}-{target_value}.{index}"
child = XmlBase(self._factory, "fencing-level", xml_id, index=index, devices=devices)
child["target-attribute"] = target_attr
child["target-value"] = target_value
self.add_child(child)
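# For example (an illustrative sketch; attribute order depends on
# key_val_string): level(1, "node1", "fence1") adds
# <fencing-level id="cts-node1.1" target="node1" index="1" devices="fence1"/>,
# whereas level(2, None, "fence1", target_attr="rack", target_value="1")
# generates the id "rack-1.2" and sets target-attribute/target-value instead.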
def commit(self):
"""Create this XML section in the CIB."""
self._run("create", self.show(), "configuration", "--allow-create")
class Option(XmlBase):
"""Create a XML section of key/value pairs for cluster-wide configuration settings."""
def __init__(self, factory, _id="cib-bootstrap-options"):
"""
Create a new Option instance.
Arguments:
factory -- A ConfigFactory instance
_id -- A unique name for the element
"""
XmlBase.__init__(self, factory, "cluster_property_set", _id)
def __setitem__(self, key, value):
"""Add a child nvpair element containing the given key/value pair."""
- self.add_child(XmlBase(self._factory, "nvpair", "cts-%s" % key, name=key, value=value))
+ self.add_child(XmlBase(self._factory, "nvpair", f"cts-{key}", name=key, value=value))
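# Illustrative usage sketch (hypothetical setting): with opts = Option(factory),
# opts["no-quorum-policy"] = "ignore" appends
# <nvpair id="cts-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
# to the cluster_property_set, and opts.commit() pushes it to crm_config.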
def commit(self):
"""Modify the CIB on the cluster to include this XML section."""
self._run("modify", self.show(), "crm_config", "--allow-create")
class OpDefaults(XmlBase):
"""Create a XML section of key/value pairs for operation default settings."""
def __init__(self, factory):
"""
Create a new OpDefaults instance.
Arguments:
factory -- A ConfigFactory instance
"""
XmlBase.__init__(self, factory, "op_defaults", None)
self.meta = XmlBase(self._factory, "meta_attributes", "cts-op_defaults-meta")
self.add_child(self.meta)
def __setitem__(self, key, value):
"""Add a child nvpair meta_attribute element containing the given key/value pair."""
- self.meta.add_child(XmlBase(self._factory, "nvpair", "cts-op_defaults-%s" % key, name=key, value=value))
+ self.meta.add_child(XmlBase(self._factory, "nvpair", f"cts-op_defaults-{key}", name=key, value=value))
def commit(self):
"""Modify the CIB on the cluster to include this XML section."""
self._run("modify", self.show(), "configuration", "--allow-create")
class Alerts(XmlBase):
"""Create an XML section."""
def __init__(self, factory):
"""
Create a new Alerts instance.
Arguments:
factory -- A ConfigFactory instance
"""
XmlBase.__init__(self, factory, "alerts", None)
self._alert_count = 0
def add_alert(self, path, recipient):
"""
Create a new alert as a child of this XML section.
Arguments:
path -- The path to a script to be called when a cluster
event occurs
recipient -- An environment variable to be passed to the script
"""
self._alert_count += 1
- alert = XmlBase(self._factory, "alert", "alert-%d" % self._alert_count,
+ alert = XmlBase(self._factory, "alert", f"alert-{self._alert_count}",
path=path)
recipient1 = XmlBase(self._factory, "recipient",
- "alert-%d-recipient-1" % self._alert_count,
+ f"alert-{self._alert_count}-recipient-1",
value=recipient)
alert.add_child(recipient1)
self.add_child(alert)
def commit(self):
"""Modify the CIB on the cluster to include this XML section."""
self._run("modify", self.show(), "configuration", "--allow-create")
class Expression(XmlBase):
"""Create an XML element as part of some constraint rule."""
def __init__(self, factory, _id, attr, op, value=None):
"""
Create a new Expression instance.
Arguments:
factory -- A ConfigFactory instance
_id -- A unique name for the element
attr -- The attribute to be tested
op -- The comparison to perform ("lt", "eq", "defined", etc.)
value -- Value for comparison (can be None for "defined" and
"not_defined" operations)
"""
XmlBase.__init__(self, factory, "expression", _id, attribute=attr, operation=op)
if value:
self["value"] = value
class Rule(XmlBase):
"""Create a XML section consisting of one or more expressions, as part of some constraint."""
def __init__(self, factory, _id, score, op="and", expr=None):
"""
Create a new Rule instance.
Arguments:
factory -- A ConfigFactory instance
_id -- A unique name for the element
score -- If this rule is used in a location constraint and
evaluates to true, apply this score to the constraint
op -- If this rule contains more than one expression, use this
boolean op when evaluating
expr -- An Expression instance that can be added to this Rule
when it is created
"""
XmlBase.__init__(self, factory, "rule", _id)
self["boolean-op"] = op
self["score"] = score
if expr:
self.add_child(expr)
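# A minimal composition sketch (hypothetical ids, not from the test suite):
#   expr = Expression(factory, "probe-e", "#uname", "eq", "node1")
#   rule = Rule(factory, "probe-r", "INFINITY", expr=expr)
# rule.show() would then yield
# '<rule id="probe-r" boolean-op="and" score="INFINITY"><expression .../></rule>'.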
class Resource(XmlBase):
"""
A base class that creates all kinds of <primitive> XML sections.
These sections fully describe a single cluster resource. This defaults to
primitive resources, but subclasses can create other types.
"""
def __init__(self, factory, _id, rtype, standard, provider=None):
"""
Create a new Resource instance.
Arguments:
factory -- A ConfigFactory instance
_id -- A unique name for the element
rtype -- The name of the resource agent
standard -- The standard the resource agent follows ("ocf",
"systemd", etc.)
provider -- The vendor providing the resource agent
"""
XmlBase.__init__(self, factory, "native", _id)
self._provider = provider
self._rtype = rtype
self._standard = standard
self._meta = {}
self._op = []
self._param = {}
self._coloc = {}
self._needs = {}
self._scores = {}
if self._standard == "ocf" and not provider:
self._provider = "heartbeat"
elif self._standard == "lsb":
self._provider = None
def __setitem__(self, key, value):
"""Add a child nvpair element containing the given key/value pair as an instance attribute."""
self._add_param(key, value)
def add_op(self, _id, interval, **kwargs):
"""
Add an operation child XML element to this resource.
Arguments:
_id -- A unique name for the element. Also, the action to
perform ("monitor", "start", "stop", etc.)
interval -- How frequently (in seconds) to perform the operation
kwargs -- Any additional key/value pairs that should be added to
this element as attributes
"""
- self._op.append(XmlBase(self._factory, "op", "%s-%s" % (_id, interval),
+ self._op.append(XmlBase(self._factory, "op", f"{_id}-{interval}",
name=_id, interval=interval, **kwargs))
def _add_param(self, name, value):
"""Add a child nvpair element containing the given key/value pair as an instance attribute."""
self._param[name] = value
def add_meta(self, name, value):
"""Add a child nvpair element containing the given key/value pair as a meta attribute."""
self._meta[name] = value
def prefer(self, node, score="INFINITY", rule=None):
"""
Add a location constraint where this resource prefers some node.
Arguments:
node -- The name of the node to prefer
score -- Apply this score to the location constraint
rule -- A Rule instance to use in creating this constraint, instead
of creating a new rule
"""
if not rule:
- rule = Rule(self._factory, "prefer-%s-r" % node, score,
- expr=Expression(self._factory, "prefer-%s-e" % node, "#uname", "eq", node))
+ rule = Rule(self._factory, f"prefer-{node}-r", score,
+ expr=Expression(self._factory, f"prefer-{node}-e", "#uname", "eq", node))
self._scores[node] = rule
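# Illustrative: rsc.prefer("node1") stores a rule "prefer-node1-r" whose
# expression "prefer-node1-e" tests '#uname eq node1'; _constraints() below
# later wraps it in a <rsc_location id="prefer-node1" rsc=...> element.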
def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs):
"""
Create an ordering constraint between this resource and some other.
Arguments:
resource -- The name of the dependent resource
kind -- How to enforce the constraint ("mandatory", "optional",
"serialize")
first -- The action that this resource must complete before the
then-action can be initiated for the dependent resource
("start", "stop", "promote", "demote")
then -- The action that the dependent resource can execute only
after the first-action has completed (same values as
first)
kwargs -- Any additional key/value pairs that should be added to
this element as attributes
"""
kargs = kwargs.copy()
kargs["kind"] = kind
if then:
kargs["first-action"] = "start"
kargs["then-action"] = then
if first:
kargs["first-action"] = first
self._needs[resource] = kargs
def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs):
"""
Create a colocation constraint between this resource and some other.
Arguments:
resource -- The name of the resource that should be located relative
to this one
score -- Apply this score to the colocation constraint
role -- Apply this colocation constraint only to promotable clones
in this role ("started", "promoted", "unpromoted")
withrole -- Apply this colocation constraint only to with-rsc promotable
clones in this role
kwargs -- Any additional key/value pairs that should be added to
this element as attributes
"""
kargs = kwargs.copy()
kargs["score"] = score
if role:
kargs["rsc-role"] = role
if withrole:
kargs["with-rsc-role"] = withrole
self._coloc[resource] = kargs
def _constraints(self):
"""Generate a XML section containing all previously added ordering and colocation constraints."""
text = ""
for (k, v) in self._scores.items():
- attrs = {"id": "prefer-%s" % k, "rsc": self.name}
+ attrs = {"id": f"prefer-{k}", "rsc": self.name}
text += containing_element("rsc_location", v.show(), **attrs)
for (k, kargs) in self._needs.items():
- attrs = {"id": "%s-after-%s" % (self.name, k), "first": k, "then": self.name}
+ attrs = {"id": f"{self.name}-after-{k}", "first": k, "then": self.name}
text += element("rsc_order", **attrs, **kargs)
for (k, kargs) in self._coloc.items():
- attrs = {"id": "%s-with-%s" % (self.name, k), "rsc": self.name, "with-rsc": k}
+ attrs = {"id": f"{self.name}-with-{k}", "rsc": self.name, "with-rsc": k}
text += element("rsc_colocation", **attrs)
text += ""
return text
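# Illustrative: for a resource named "r1", r1.after("base") and
# r1.colocate("base") would surface here as
# <rsc_order id="r1-after-base" first="base" then="r1" .../> (plus the kind
# and action attributes collected by after()) and
# <rsc_colocation id="r1-with-base" rsc="r1" with-rsc="base" score="INFINITY"/>.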
def show(self):
"""Recursively return a string representation of this XML section."""
- text = '''<primitive id="%s" class="%s" type="%s"''' % (self.name, self._standard, self._rtype)
+ text = f'''<primitive id="{self.name}" class="{self._standard}" type="{self._rtype}"'''
if self._provider:
- text += ''' provider="%s"''' % self._provider
+ text += f''' provider="{self._provider}"'''
text += '''>'''
if self._meta:
nvpairs = ""
for (p, v) in self._meta.items():
- attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
+ attrs = {"id": f"{self.name}-{p}", "name": p, "value": v}
nvpairs += element("nvpair", **attrs)
text += containing_element("meta_attributes", nvpairs,
- id="%s-meta" % self.name)
+ id=f"{self.name}-meta")
if self._param:
nvpairs = ""
for (p, v) in self._param.items():
- attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
+ attrs = {"id": f"{self.name}-{p}", "name": p, "value": v}
nvpairs += element("nvpair", **attrs)
text += containing_element("instance_attributes", nvpairs,
- id="%s-params" % self.name)
+ id=f"{self.name}-params")
if self._op:
text += '''<operations>'''
for o in self._op:
key = o.name
- o.name = "%s-%s" % (self.name, key)
+ o.name = f"{self.name}-{key}"
text += o.show()
o.name = key
text += '''</operations>'''
text += '''</primitive>'''
return text
def commit(self):
"""Modify the CIB on the cluster to include this XML section."""
self._run("create", self.show(), "resources")
self._run("modify", self._constraints(), "constraints")
class Group(Resource):
"""
A specialized Resource subclass that creates a <group> XML section.
This section describes a single group resource consisting of multiple child
primitive resources.
"""
def __init__(self, factory, _id):
"""
Create a new Group instance.
Arguments:
factory -- A ConfigFactory instance
_id -- A unique name for the element
"""
Resource.__init__(self, factory, _id, None, None)
self.tag = "group"
def __setitem__(self, key, value):
"""Add a child nvpair element containing the given key/value pair as an instance attribute."""
self.add_meta(key, value)
def show(self):
"""Recursively return a string representation of this XML section."""
- text = '''<%s id="%s">''' % (self.tag, self.name)
+ text = f'<{self.tag} id="{self.name}">'
if len(self._meta) > 0:
nvpairs = ""
for (p, v) in self._meta.items():
- attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
+ attrs = {"id": f"{self.name}-{p}", "name": p, "value": v}
nvpairs += element("nvpair", **attrs)
text += containing_element("meta_attributes", nvpairs,
- id="%s-meta" % self.name)
+ id=f"{self.name}-meta")
for c in self._children:
text += c.show()
- text += '''</%s>''' % self.tag
+ text += f"</{self.tag}>"
return text
class Clone(Group):
"""
A specialized Group subclass that creates a <clone> XML section.
This section describes a clone resource containing multiple instances of a
single primitive resource.
"""
def __init__(self, factory, _id, child=None):
"""
Create a new Clone instance.
Arguments:
factory -- A ConfigFactory instance
_id -- A unique name for the element
child -- A Resource instance that can be added to this Clone
when it is created. Alternately, use add_child later.
Note that a Clone may only have one child.
"""
Group.__init__(self, factory, _id)
self.tag = "clone"
if child:
self.add_child(child)
def add_child(self, child):
"""
Add the given resource as a child of this Clone.
Note that a Clone resource only supports one child at a time.
"""
if not self._children:
self._children.append(child)
else:
- self._factory.log("Clones can only have a single child. Ignoring %s" % child.name)
+ self._factory.log(f"Clones can only have a single child. Ignoring {child.name}")
diff --git a/python/pacemaker/_cts/clustermanager.py b/python/pacemaker/_cts/clustermanager.py
index b9211eba9a..0f72150281 100644
--- a/python/pacemaker/_cts/clustermanager.py
+++ b/python/pacemaker/_cts/clustermanager.py
@@ -1,901 +1,899 @@
"""ClusterManager class for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["ClusterManager"]
__copyright__ = """Copyright 2000-2025 the Pacemaker project contributors.
Certain portions by Huang Zhen are copyright 2004
International Business Machines. The version control history for this file
may have further details."""
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
import re
import time
from collections import UserDict
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus
from pacemaker._cts.CTS import NodeStatus
from pacemaker._cts.audits import AuditResource
from pacemaker._cts.cib import ConfigFactory
from pacemaker._cts.environment import EnvFactory
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.patterns import PatternSelector
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.watcher import LogWatcher
# pylint doesn't understand that self._rsh is callable (it stores the
# singleton instance of RemoteExec, as returned by the getInstance method
# of RemoteFactory).
# @TODO See if type annotations fix this.
# I think we could also fix this by getting rid of the getInstance methods,
# but that's a project for another day. For now, just disable the warning.
# pylint: disable=not-callable
# ClusterManager has a lot of methods.
# pylint: disable=too-many-public-methods
class ClusterManager(UserDict):
"""
An abstract base class for managing the cluster.
This class implements high-level operations on the cluster and/or its cluster
managers. Actual cluster-specific management classes should be subclassed
from this one.
Among other things, this class tracks the state every node is expected to be in.
"""
def _final_conditions(self):
"""Check all keys to make sure they have a non-None value."""
for (key, val) in self._data.items():
if val is None:
- raise ValueError("Improper derivation: self[%s] must be overridden by subclass." % key)
+ raise ValueError(f"Improper derivation: self[{key}] must be overridden by subclass.")
def __init__(self):
"""
Create a new ClusterManager instance.
This class can be treated kind of like a dictionary due to the presence
of certain dict functions like __getitem__ and __setitem__. This is
because it contains a lot of name/value pairs. However, it is not
actually a dictionary so do not rely on standard dictionary behavior.
"""
# Eventually, ClusterManager should not be a UserDict subclass. Until
# that point...
# pylint: disable=super-init-not-called
self.__instance_errors_to_ignore = []
self._cib_installed = False
self._data = {}
self._logger = LogFactory()
self.env = EnvFactory().getInstance()
self.expected_status = {}
self.name = self.env["Name"]
# pylint: disable=invalid-name
self.ns = NodeStatus(self.env)
self.our_node = os.uname()[1].lower()
self.partitions_expected = 1
self.rsh = RemoteFactory().getInstance()
self.templates = PatternSelector(self.env["Name"])
self._final_conditions()
self._cib_factory = ConfigFactory(self)
self._cib = self._cib_factory.create_config(self.env["Schema"])
self._cib_sync = {}
def __getitem__(self, key):
"""
Return the given key, checking for it in several places.
If key is "Name", return the name of the cluster manager. If the key
was previously added to the dictionary via __setitem__, return that.
Otherwise, return the template pattern for the key.
This method should not be used and may be removed in the future.
"""
if key == "Name":
return self.name
- print("FIXME: Getting %s from %r" % (key, self))
+ print(f"FIXME: Getting {key} from {self!r}")
if key in self._data:
return self._data[key]
return self.templates.get_patterns(key)
def __setitem__(self, key, value):
"""
Set the given key to the given value, overriding any previous value.
This method should not be used and may be removed in the future.
"""
- print("FIXME: Setting %s=%s on %r" % (key, value, self))
+ print(f"FIXME: Setting {key}={value} on {self!r}")
self._data[key] = value
def clear_instance_errors_to_ignore(self):
"""Reset instance-specific errors to ignore on each iteration."""
self.__instance_errors_to_ignore = []
@property
def instance_errors_to_ignore(self):
"""Return a list of known errors that should be ignored for a specific test instance."""
return self.__instance_errors_to_ignore
@property
def errors_to_ignore(self):
"""Return a list of known error messages that should be ignored."""
return self.templates.get_patterns("BadNewsIgnore")
def log(self, args):
"""Log a message."""
self._logger.log(args)
def debug(self, args):
"""Log a debug message."""
self._logger.debug(args)
def upcount(self):
"""Return how many nodes are up."""
count = 0
for node in self.env["nodes"]:
if self.expected_status[node] == "up":
count += 1
return count
def install_support(self, command="install"):
"""
Install or uninstall the CTS support files.
This includes various init scripts and data, daemons, fencing agents, etc.
"""
for node in self.env["nodes"]:
- self.rsh(node, "%s/cts-support %s" % (BuildOptions.DAEMON_DIR, command))
+ self.rsh(node, f"{BuildOptions.DAEMON_DIR}/cts-support {command}")
def prepare_fencing_watcher(self):
"""Return a LogWatcher object that watches for fencing log messages."""
# If we don't have quorum now but get it as a result of starting this node,
# then a bunch of nodes might get fenced
if self.has_quorum(None):
self.debug("Have quorum")
return None
if not self.templates["Pat:Fencing_start"]:
print("No start pattern")
return None
if not self.templates["Pat:Fencing_ok"]:
print("No ok pattern")
return None
stonith = None
stonith_pats = []
for peer in self.env["nodes"]:
if self.expected_status[peer] == "up":
continue
stonith_pats.extend([
self.templates["Pat:Fencing_ok"] % peer,
self.templates["Pat:Fencing_start"] % peer,
])
stonith = LogWatcher(self.env["LogFileName"], stonith_pats, self.env["nodes"],
self.env["log_kind"], "StartupFencing", 0)
stonith.set_watch()
return stonith
def fencing_cleanup(self, node, stonith):
"""Wait for a previously fenced node to return to the cluster."""
peer_list = []
peer_state = {}
- self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
+ self.debug(f"Looking for nodes that were fenced as a result of {node} starting")
# If we just started a node, we may now have quorum (and permission to fence)
if not stonith:
self.debug("Nothing to do")
return peer_list
q = self.has_quorum(None)
if not q and len(self.env["nodes"]) > 2:
# We didn't gain quorum - we shouldn't have shot anyone
- self.debug("Quorum: %s Len: %d" % (q, len(self.env["nodes"])))
+ self.debug(f"Quorum: {q} Len: {len(self.env['nodes'])}")
return peer_list
for n in self.env["nodes"]:
peer_state[n] = "unknown"
# Now see if any states need to be updated
- self.debug("looking for: %r" % stonith.regexes)
+ self.debug(f"looking for: {stonith.regexes!r}")
shot = stonith.look(0)
while shot:
- self.debug("Found: %r" % shot)
+ self.debug(f"Found: {shot!r}")
del stonith.regexes[stonith.whichmatch]
# Extract node name
for n in self.env["nodes"]:
if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
peer = n
peer_state[peer] = "complete"
self.__instance_errors_to_ignore.append(self.templates["Pat:Fencing_ok"] % peer)
elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
# TODO: Correctly detect multiple fencing operations for the same host
peer = n
peer_state[peer] = "in-progress"
self.__instance_errors_to_ignore.append(self.templates["Pat:Fencing_start"] % peer)
if not peer:
- self._logger.log("ERROR: Unknown stonith match: %r" % shot)
+ self._logger.log(f"ERROR: Unknown stonith match: {shot!r}")
elif peer not in peer_list:
- self.debug("Found peer: %s" % peer)
+ self.debug(f"Found peer: {peer}")
peer_list.append(peer)
# Get the next one
shot = stonith.look(60)
for peer in peer_list:
- self.debug(" Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
+ self.debug(f" Peer {peer} was fenced as a result of {node} starting: {peer_state[peer]}")
if self.env["at-boot"]:
self.expected_status[peer] = "up"
else:
self.expected_status[peer] = "down"
if peer_state[peer] == "in-progress":
# Wait for any in-progress operations to complete
shot = stonith.look(60)
while stonith.regexes and shot:
- self.debug("Found: %r" % shot)
+ self.debug(f"Found: {shot!r}")
del stonith.regexes[stonith.whichmatch]
shot = stonith.look(60)
# Now make sure the node is alive too
self.ns.wait_for_node(peer, self.env["DeadTime"])
# Poll until it comes up
if self.env["at-boot"]:
if not self.stat_cm(peer):
time.sleep(self.env["StartTime"])
if not self.stat_cm(peer):
- self._logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
+ self._logger.log(f"ERROR: Peer {peer} failed to restart after being fenced")
return None
return peer_list
def start_cm(self, node, verbose=False):
"""Start up the cluster manager on a given node."""
if verbose:
- self._logger.log("Starting %s on node %s" % (self.templates["Name"], node))
+ self._logger.log(f"Starting {self.templates['Name']} on node {node}")
else:
- self.debug("Starting %s on node %s" % (self.templates["Name"], node))
+ self.debug(f"Starting {self.templates['Name']} on node {node}")
if node not in self.expected_status:
self.expected_status[node] = "down"
if self.expected_status[node] != "down":
return True
# Technically we should always be able to notice ourselves starting
patterns = [
self.templates["Pat:Local_started"] % node,
]
if self.upcount() == 0:
patterns.append(self.templates["Pat:DC_started"] % node)
else:
patterns.append(self.templates["Pat:NonDC_started"] % node)
watch = LogWatcher(self.env["LogFileName"], patterns,
self.env["nodes"], self.env["log_kind"],
"StartaCM", self.env["StartTime"] + 10)
self.install_config(node)
self.expected_status[node] = "any"
if self.stat_cm(node) and self.cluster_stable(self.env["DeadTime"]):
- self._logger.log("%s was already started" % node)
+ self._logger.log(f"{node} was already started")
return True
stonith = self.prepare_fencing_watcher()
watch.set_watch()
(rc, _) = self.rsh(node, self.templates["StartCmd"])
if rc != 0:
- self._logger.log("Warn: Start command failed on node %s" % node)
+ self._logger.log(f"Warn: Start command failed on node {node}")
self.fencing_cleanup(node, stonith)
return False
self.expected_status[node] = "up"
watch_result = watch.look_for_all()
if watch.unmatched:
for regex in watch.unmatched:
- self._logger.log("Warn: Startup pattern not found: %s" % regex)
+ self._logger.log(f"Warn: Startup pattern not found: {regex}")
if watch_result and self.cluster_stable(self.env["DeadTime"]):
self.fencing_cleanup(node, stonith)
return True
if self.stat_cm(node) and self.cluster_stable(self.env["DeadTime"]):
self.fencing_cleanup(node, stonith)
return True
- self._logger.log("Warn: Start failed for node %s" % node)
+ self._logger.log(f"Warn: Start failed for node {node}")
return False
def start_cm_async(self, node, verbose=False):
"""Start up the cluster manager on a given node without blocking."""
if verbose:
- self._logger.log("Starting %s on node %s" % (self["Name"], node))
+ self._logger.log(f"Starting {self['Name']} on node {node}")
else:
- self.debug("Starting %s on node %s" % (self["Name"], node))
+ self.debug(f"Starting {self['Name']} on node {node}")
self.install_config(node)
self.rsh(node, self.templates["StartCmd"], synchronous=False)
self.expected_status[node] = "up"
def stop_cm(self, node, verbose=False, force=False):
"""Stop the cluster manager on a given node."""
if verbose:
- self._logger.log("Stopping %s on node %s" % (self["Name"], node))
+ self._logger.log(f"Stopping {self['Name']} on node {node}")
else:
- self.debug("Stopping %s on node %s" % (self["Name"], node))
+ self.debug(f"Stopping {self['Name']} on node {node}")
if self.expected_status[node] != "up" and not force:
return True
(rc, _) = self.rsh(node, self.templates["StopCmd"])
if rc == 0:
# Make sure we can continue even if corosync leaks
self.expected_status[node] = "down"
self.cluster_stable(self.env["DeadTime"])
return True
- self._logger.log("ERROR: Could not stop %s on node %s" % (self["Name"], node))
+ self._logger.log(f"ERROR: Could not stop {self['Name']} on node {node}")
return False
def stop_cm_async(self, node):
"""Stop the cluster manager on a given node without blocking."""
- self.debug("Stopping %s on node %s" % (self["Name"], node))
+ self.debug(f"Stopping {self['Name']} on node {node}")
self.rsh(node, self.templates["StopCmd"], synchronous=False)
self.expected_status[node] = "down"
def startall(self, nodelist=None, verbose=False, quick=False):
"""Start the cluster manager on every node in the cluster, or on every node in nodelist."""
if not nodelist:
nodelist = self.env["nodes"]
for node in nodelist:
if self.expected_status[node] == "down":
self.ns.wait_for_all_nodes(nodelist, 300)
if not quick:
# This is used for "basic sanity checks", so only start one node ...
return self.start_cm(nodelist[0], verbose=verbose)
# Approximation of SimulStartList for --boot
watchpats = [
self.templates["Pat:DC_IDLE"],
]
for node in nodelist:
watchpats.extend([
self.templates["Pat:InfraUp"] % node,
self.templates["Pat:PacemakerUp"] % node,
self.templates["Pat:Local_started"] % node,
self.templates["Pat:They_up"] % (nodelist[0], node),
])
# Start all the nodes - at about the same time...
watch = LogWatcher(self.env["LogFileName"], watchpats, self.env["nodes"],
self.env["log_kind"], "fast-start",
self.env["DeadTime"] + 10)
watch.set_watch()
if not self.start_cm(nodelist[0], verbose=verbose):
return False
for node in nodelist:
self.start_cm_async(node, verbose=verbose)
watch.look_for_all()
if watch.unmatched:
for regex in watch.unmatched:
- self._logger.log("Warn: Startup pattern not found: %s" % regex)
+ self._logger.log(f"Warn: Startup pattern not found: {regex}")
if not self.cluster_stable():
self._logger.log("Cluster did not stabilize")
return False
return True
def stopall(self, nodelist=None, verbose=False, force=False):
"""Stop the cluster manager on every node in the cluster, or on every node in nodelist."""
ret = True
if not nodelist:
nodelist = self.env["nodes"]
for node in self.env["nodes"]:
if self.expected_status[node] == "up" or force:
if not self.stop_cm(node, verbose=verbose, force=force):
ret = False
return ret
def statall(self, nodelist=None):
"""Return the status of the cluster manager on every node in the cluster, or on every node in nodelist."""
result = {}
if not nodelist:
nodelist = self.env["nodes"]
for node in nodelist:
if self.stat_cm(node):
result[node] = "up"
else:
result[node] = "down"
return result
def isolate_node(self, target, nodes=None):
"""Break communication between the target node and all other nodes in the cluster, or nodes."""
if not nodes:
nodes = self.env["nodes"]
for node in nodes:
if node == target:
continue
(rc, _) = self.rsh(target, self.templates["BreakCommCmd"] % node)
if rc != 0:
- self._logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
+ self._logger.log(f"Could not break the communication between {target} and {node}: {rc}")
return False
- self.debug("Communication cut between %s and %s" % (target, node))
+ self.debug(f"Communication cut between {target} and {node}")
return True
def unisolate_node(self, target, nodes=None):
"""Re-establish communication between the target node and all other nodes in the cluster, or nodes."""
if not nodes:
nodes = self.env["nodes"]
for node in nodes:
if node == target:
continue
# Limit the amount of time we have asynchronous connectivity for
# Restore both sides as simultaneously as possible
self.rsh(target, self.templates["FixCommCmd"] % node, synchronous=False)
self.rsh(node, self.templates["FixCommCmd"] % target, synchronous=False)
- self.debug("Communication restored between %s and %s" % (target, node))
+ self.debug(f"Communication restored between {target} and {node}")
def oprofile_start(self, node=None):
"""Start profiling on the given node, or all nodes in the cluster."""
if not node:
for n in self.env["oprofile"]:
self.oprofile_start(n)
elif node in self.env["oprofile"]:
- self.debug("Enabling oprofile on %s" % node)
+ self.debug(f"Enabling oprofile on {node}")
self.rsh(node, "opcontrol --init")
self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
self.rsh(node, "opcontrol --start")
self.rsh(node, "opcontrol --reset")
def oprofile_save(self, test, node=None):
"""Save profiling data and restart profiling on the given node, or all nodes in the cluster."""
if not node:
for n in self.env["oprofile"]:
self.oprofile_save(test, n)
elif node in self.env["oprofile"]:
self.rsh(node, "opcontrol --dump")
- self.rsh(node, "opcontrol --save=cts.%d" % test)
+ self.rsh(node, f"opcontrol --save=cts.{test}")
# Read back with: opreport -l session:cts.0 image:/c*
self.oprofile_stop(node)
self.oprofile_start(node)
def oprofile_stop(self, node=None):
"""
Stop profiling on the given node, or all nodes in the cluster.
This does not save profiling data, so call oprofile_save first if needed.
"""
if not node:
for n in self.env["oprofile"]:
self.oprofile_stop(n)
elif node in self.env["oprofile"]:
- self.debug("Stopping oprofile on %s" % node)
+ self.debug(f"Stopping oprofile on {node}")
self.rsh(node, "opcontrol --reset")
self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
def install_config(self, node):
"""Remove and re-install the CIB on the first node in the cluster."""
if not self.ns.wait_for_node(node):
- self.log("Node %s is not up." % node)
+ self.log(f"Node {node} is not up.")
return
if node in self._cib_sync or not self.env["ClobberCIB"]:
return
self._cib_sync[node] = True
- self.rsh(node, "rm -f %s/cib*" % BuildOptions.CIB_DIR)
+ self.rsh(node, f"rm -f {BuildOptions.CIB_DIR}/cib*")
# Only install the CIB on the first node, all the other ones will pick it up from there
if self._cib_installed:
return
self._cib_installed = True
if self.env["CIBfilename"] is None:
- self.log("Installing Generated CIB on node %s" % node)
+ self.log(f"Installing Generated CIB on node {node}")
self._cib.install(node)
else:
- self.log("Installing CIB (%s) on node %s" % (self.env["CIBfilename"], node))
+ self.log(f"Installing CIB ({self.env['CIBfilename']}) on node {node}")
rc = self.rsh.copy(self.env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node))
if rc != 0:
- raise ValueError("Can not scp file to %s %d" % (node, rc))
+ raise ValueError(f"Can not scp file to {node} {rc}")
- self.rsh(node, "chown %s %s/cib.xml" % (BuildOptions.DAEMON_USER, BuildOptions.CIB_DIR))
+ self.rsh(node, f"chown {BuildOptions.DAEMON_USER} {BuildOptions.CIB_DIR}/cib.xml")
def prepare(self):
"""
Finish initialization.
Clear out the expected status and record the current status of every
node in the cluster.
"""
self.partitions_expected = 1
for node in self.env["nodes"]:
self.expected_status[node] = ""
if self.env["experimental-tests"]:
self.unisolate_node(node)
self.stat_cm(node)
def test_node_cm(self, node):
"""
Check the status of a given node.
Returns 0 if the node is down, 1 if the node is up but unstable, and 2
if the node is up and stable.
"""
watchpats = [
"Current ping state: (S_IDLE|S_NOT_DC)",
self.templates["Pat:NonDC_started"] % node,
self.templates["Pat:DC_started"] % node,
]
idle_watch = LogWatcher(self.env["LogFileName"], watchpats, [node],
self.env["log_kind"], "ClusterIdle")
idle_watch.set_watch()
(_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
if not out:
out = ""
else:
out = out[0].strip()
- self.debug("Node %s status: '%s'" % (node, out))
+ self.debug(f"Node {node} status: '{out}'")
if out.find('ok') < 0:
if self.expected_status[node] == "up":
- self.log("Node status for %s is %s but we think it should be %s"
- % (node, "down", self.expected_status[node]))
+ self.log(f"Node status for {node} is down but we think it should be {self.expected_status[node]}")
self.expected_status[node] = "down"
return 0
if self.expected_status[node] == "down":
- self.log("Node status for %s is %s but we think it should be %s: %s"
- % (node, "up", self.expected_status[node], out))
+ self.log(f"Node status for {node} is up but we think it should be {self.expected_status[node]}: {out}")
self.expected_status[node] = "up"
# check the output first - because syslog-ng loses messages
if out.find('S_NOT_DC') != -1:
# Up and stable
return 2
if out.find('S_IDLE') != -1:
# Up and stable
return 2
# fall back to syslog-ng and wait
if not idle_watch.look():
# just up
- self.debug("Warn: Node %s is unstable: %s" % (node, out))
+ self.debug(f"Warn: Node {node} is unstable: {out}")
return 1
# Up and stable
return 2
def stat_cm(self, node):
"""Report the status of the cluster manager on a given node."""
return self.test_node_cm(node) > 0
# Being up and being stable is not the same question...
def node_stable(self, node):
"""Return whether or not the given node is stable."""
if self.test_node_cm(node) == 2:
return True
- self.log("Warn: Node %s not stable" % node)
+ self.log(f"Warn: Node {node} not stable")
return False
def partition_stable(self, nodes, timeout=None):
"""Return whether or not all nodes in the given partition are stable."""
watchpats = [
"Current ping state: S_IDLE",
self.templates["Pat:DC_IDLE"],
]
self.debug("Waiting for cluster stability...")
if timeout is None:
timeout = self.env["DeadTime"]
if len(nodes) < 3:
self.debug("Cluster is inactive")
return True
idle_watch = LogWatcher(self.env["LogFileName"], watchpats, nodes.split(),
self.env["log_kind"], "ClusterStable", timeout)
idle_watch.set_watch()
for node in nodes.split():
# have each node dump its current state
self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
ret = idle_watch.look()
while ret:
self.debug(ret)
for node in nodes.split():
if re.search(node, ret):
return True
ret = idle_watch.look()
- self.debug("Warn: Partition %r not IDLE after %ds" % (nodes, timeout))
+ self.debug(f"Warn: Partition {nodes!r} not IDLE after {timeout}s")
return False
def cluster_stable(self, timeout=None, double_check=False):
"""Return whether or not all nodes in the cluster are stable."""
partitions = self.find_partitions()
for partition in partitions:
if not self.partition_stable(partition, timeout):
return False
if not double_check:
return True
# Make sure we are really stable and that all resources,
# including those that depend on transient node attributes,
# are started if they were going to be
time.sleep(5)
for partition in partitions:
if not self.partition_stable(partition, timeout):
return False
return True
def is_node_dc(self, node, status_line=None):
"""
Return whether or not the given node is the cluster DC.
Check the given status_line, or query the cluster if None.
"""
if not status_line:
(_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
if out:
status_line = out[0].strip()
if not status_line:
return False
if status_line.find('S_IDLE') != -1:
return True
if status_line.find('S_INTEGRATION') != -1:
return True
if status_line.find('S_FINALIZE_JOIN') != -1:
return True
if status_line.find('S_POLICY_ENGINE') != -1:
return True
if status_line.find('S_TRANSITION_ENGINE') != -1:
return True
return False
def active_resources(self, node):
"""Return a list of primitive resources active on the given node."""
(_, output) = self.rsh(node, "crm_resource -c", verbose=1)
resources = []
for line in output:
if not re.search("^Resource", line):
continue
tmp = AuditResource(self, line)
if tmp.type == "primitive" and tmp.host == node:
resources.append(tmp.id)
return resources
def resource_location(self, rid):
"""Return a list of nodes on which the given resource is running."""
resource_nodes = []
for node in self.env["nodes"]:
if self.expected_status[node] != "up":
continue
cmd = self.templates["RscRunning"] % rid
(rc, lines) = self.rsh(node, cmd)
if rc == 127:
- self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
+ self.log(f"Command '{cmd}' failed. Binary or pacemaker-cts package not installed?")
for line in lines:
- self.log("Output: %s " % line)
+ self.log(f"Output: {line} ")
elif rc == 0:
resource_nodes.append(node)
return resource_nodes
def find_partitions(self):
"""
Return a list of all partitions in the cluster.
Each element of the list is itself a list of all active nodes in that
partition.
"""
ccm_partitions = []
for node in self.env["nodes"]:
if self.expected_status[node] != "up":
- self.debug("Node %s is down... skipping" % node)
+ self.debug(f"Node {node} is down... skipping")
continue
(_, out) = self.rsh(node, self.templates["PartitionCmd"], verbose=1)
if not out:
- self.log("no partition details for %s" % node)
+ self.log(f"no partition details for {node}")
continue
partition = out[0].strip()
if len(partition) <= 2:
- self.log("bad partition details for %s" % node)
+ self.log(f"bad partition details for {node}")
continue
nodes = partition.split()
nodes.sort()
partition = ' '.join(nodes)
found = 0
for a_partition in ccm_partitions:
if partition == a_partition:
found = 1
if found == 0:
- self.debug("Adding partition from %s: %s" % (node, partition))
+ self.debug(f"Adding partition from {node}: {partition}")
ccm_partitions.append(partition)
else:
- self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
+ self.debug(f"Partition '{partition}' from {node} is consistent with existing entries")
- self.debug("Found partitions: %r" % ccm_partitions)
+ self.debug(f"Found partitions: {ccm_partitions!r}")
return ccm_partitions
def has_quorum(self, node_list):
"""Return whether or not the cluster has quorum."""
# If we are auditing a partition, then one side will
# have quorum and the other not.
# So the caller needs to tell us which we are checking
# If no value for node_list is specified... assume all nodes
if not node_list:
node_list = self.env["nodes"]
for node in node_list:
if self.expected_status[node] != "up":
continue
(rc, quorum) = self.rsh(node, self.templates["QuorumCmd"], verbose=1)
if rc != ExitStatus.OK:
- self.debug("WARN: Quorum check on %s returned error (%d)" % (node, rc))
+ self.debug(f"WARN: Quorum check on {node} returned error ({rc})")
continue
quorum = quorum[0].strip()
if quorum.find("1") != -1:
return True
if quorum.find("0") != -1:
return False
- self.debug("WARN: Unexpected quorum test result from %s:%s" % (node, quorum))
+ self.debug(f"WARN: Unexpected quorum test result from {node}:{quorum}")
return False
@property
def components(self):
"""
Return a list of all patterns that should be ignored for the cluster's components.
This must be provided by all subclasses.
"""
raise NotImplementedError
def in_standby_mode(self, node):
"""Return whether or not the node is in Standby."""
(_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1)
if not out:
return False
out = out[0].strip()
- self.debug("Standby result: %s" % out)
+ self.debug(f"Standby result: {out}")
return out == "on"
def set_standby_mode(self, node, status):
"""
Set node to Standby if status is True, or Active if status is False.
Return whether the node is now in the requested status.
"""
current_status = self.in_standby_mode(node)
if current_status == status:
return True
if status:
cmd = self.templates["StandbyCmd"] % (node, "on")
else:
cmd = self.templates["StandbyCmd"] % (node, "off")
(rc, _) = self.rsh(node, cmd)
return rc == 0
def add_dummy_rsc(self, node, rid):
"""Add a dummy resource with the given ID to the given node."""
- rsc_xml = """ '
-
+ rsc_xml = f""" '
+
-
+
- '""" % (rid, rid)
- constraint_xml = """ '
-
- '
- """ % (rid, node, node, rid)
+ '"""
+
+ constraint_xml = f""" '
+
+ '"""
self.rsh(node, self.templates['CibAddXml'] % rsc_xml)
self.rsh(node, self.templates['CibAddXml'] % constraint_xml)
def remove_dummy_rsc(self, node, rid):
"""Remove the previously added dummy resource given by rid on the given node."""
- constraint = "\"//rsc_location[@rsc='%s']\"" % rid
- rsc = "\"//primitive[@id='%s']\"" % rid
+ constraint = f"\"//rsc_location[@rsc='{rid}']\""
+ rsc = f"\"//primitive[@id='{rid}']\""
self.rsh(node, self.templates['CibDelXpath'] % constraint)
self.rsh(node, self.templates['CibDelXpath'] % rsc)
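# Illustrative expansion: for rid "dummy-1" the two xpath arguments above
# become "//rsc_location[@rsc='dummy-1']" and "//primitive[@id='dummy-1']",
# which the CibDelXpath template presumably feeds to cibadmin to delete the
# constraint and then the resource.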
diff --git a/python/pacemaker/_cts/cmcorosync.py b/python/pacemaker/_cts/cmcorosync.py
index f64b811b94..d0ab08d70c 100644
--- a/python/pacemaker/_cts/cmcorosync.py
+++ b/python/pacemaker/_cts/cmcorosync.py
@@ -1,75 +1,75 @@
"""Corosync-specific class for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["Corosync2"]
-__copyright__ = "Copyright 2007-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2007-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.CTS import Process
from pacemaker._cts.clustermanager import ClusterManager
from pacemaker._cts.patterns import PatternSelector
# Throughout this file, pylint has trouble understanding that EnvFactory
# is a singleton instance that can be treated as a subscriptable object.
# Various warnings are disabled because of this. See also a comment about
# self._rsh in environment.py.
# pylint: disable=unsubscriptable-object
class Corosync2(ClusterManager):
"""A subclass of ClusterManager specialized to handle corosync2 and later based clusters."""
def __init__(self):
"""Create a new Corosync2 instance."""
ClusterManager.__init__(self)
self._fullcomplist = {}
self.templates = PatternSelector(self.name)
@property
def components(self):
"""Return a list of patterns that should be ignored for the cluster's components."""
complist = []
if not self._fullcomplist:
common_ignore = self.templates.get_component("common-ignore")
daemons = [
"pacemaker-based",
"pacemaker-controld",
"pacemaker-attrd",
"pacemaker-execd",
"pacemaker-fenced"
]
for c in daemons:
- badnews = self.templates.get_component("%s-ignore" % c) + common_ignore
+ badnews = self.templates.get_component(f"{c}-ignore") + common_ignore
proc = Process(self, c, pats=self.templates.get_component(c),
badnews_ignore=badnews)
self._fullcomplist[c] = proc
# the scheduler uses dc_pats instead of pats
badnews = self.templates.get_component("pacemaker-schedulerd-ignore") + common_ignore
proc = Process(self, "pacemaker-schedulerd",
dc_pats=self.templates.get_component("pacemaker-schedulerd"),
badnews_ignore=badnews)
self._fullcomplist["pacemaker-schedulerd"] = proc
# add (or replace) extra components
badnews = self.templates.get_component("corosync-ignore") + common_ignore
proc = Process(self, "corosync", pats=self.templates.get_component("corosync"),
badnews_ignore=badnews)
self._fullcomplist["corosync"] = proc
# Processes running under valgrind can't be shot with "killall -9 processname",
# so don't include them in the returned list
vgrind = self.env["valgrind-procs"].split()
for (key, val) in self._fullcomplist.items():
if self.env["valgrind-tests"] and key in vgrind:
- self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
+ self.log(f"Filtering {key} from the component list as it is being profiled by valgrind")
continue
if key == "pacemaker-fenced" and not self.env["DoFencing"]:
continue
complist.append(val)
return complist
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index 1315e45265..b7987e45e5 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,645 +1,642 @@
"""Test environment classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["EnvFactory"]
__copyright__ = "Copyright 2014-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import argparse
from contextlib import suppress
import os
import random
import socket
import sys
import time
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.watcher import LogKind
class Environment:
"""
A class for managing the CTS environment.
This consists largely of processing and storing command line parameters.
"""
# pylint doesn't understand that self._rsh is callable (it stores the
# singleton instance of RemoteExec, as returned by the getInstance method
# of RemoteFactory).
# @TODO See if type annotations fix this.
# I think we could also fix this by getting rid of the getInstance methods,
# but that's a project for another day. For now, just disable the warning.
# pylint: disable=not-callable
def __init__(self, args):
"""
Create a new Environment instance.
This class can be treated kind of like a dictionary due to the presence
of typical dict functions like __contains__, __getitem__, and __setitem__.
However, it is not a dictionary so do not rely on standard dictionary
behavior.
Arguments:
args -- A list of command line parameters, minus the program name.
If None, sys.argv will be used.
"""
self.data = {}
self._nodes = []
# Set some defaults before processing command line arguments. These are
# either not set by any command line parameter, or they need a default
# that can't be set in add_argument.
self["DeadTime"] = 300
self["StartTime"] = 300
self["StableTime"] = 30
self["tests"] = []
self["IPagent"] = "IPaddr2"
self["DoFencing"] = True
self["ClobberCIB"] = False
self["CIBfilename"] = None
self["CIBResource"] = False
self["log_kind"] = None
self["node-limit"] = 0
self["scenario"] = "random"
self.random_gen = random.Random()
self._logger = LogFactory()
self._rsh = RemoteFactory().getInstance()
self._target = "localhost"
self._seed_random()
self._parse_args(args)
if not self["ListTests"]:
self._validate()
self._discover()
def _seed_random(self, seed=None):
"""
Initialize the random number generator.
Arguments:
seed -- Use this to seed the random number generator, or use the
current time if None.
"""
if not seed:
seed = int(time.time())
self["RandSeed"] = seed
self.random_gen.seed(str(seed))
def dump(self):
"""Print the current environment."""
keys = []
for key in list(self.data.keys()):
keys.append(key)
keys.sort()
for key in keys:
- s = "Environment[%s]" % key
- self._logger.debug("{key:35}: {val}".format(key=s, val=str(self[key])))
+ self._logger.debug(f"{f'Environment[{key}]':35}: {str(self[key])}")
def keys(self):
"""Return a list of all environment keys stored in this instance."""
return list(self.data.keys())
def __contains__(self, key):
"""Return True if the given key exists in the environment."""
if key == "nodes":
return True
return key in self.data
def __getitem__(self, key):
"""Return the given environment key, or None if it does not exist."""
if str(key) == "0":
raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
if key == "nodes":
return self._nodes
if key == "Name":
return self._get_stack_short()
return self.data.get(key)
def __setitem__(self, key, value):
"""Set the given environment key to the given value, overriding any previous value."""
if key == "Stack":
self._set_stack(value)
elif key == "node-limit":
self.data[key] = value
self._filter_nodes()
elif key == "nodes":
self._nodes = []
for node in value:
# I don't think I need the IP address, etc. but this validates
# the node name against /etc/hosts and/or DNS, so it's a
# GoodThing(tm).
try:
n = node.strip()
# @TODO This only handles IPv4, use getaddrinfo() instead
# (here and in _discover())
socket.gethostbyname_ex(n)
self._nodes.append(n)
except socket.herror:
- self._logger.log("%s not found in DNS... aborting" % node)
+ self._logger.log(f"{node} not found in DNS... aborting")
raise
self._filter_nodes()
else:
self.data[key] = value
def random_node(self):
"""Choose a random node from the cluster."""
return self.random_gen.choice(self["nodes"])
def get(self, key, default=None):
"""Return the value for key if key is in the environment, else default."""
if key == "nodes":
return self._nodes
return self.data.get(key, default)
def _set_stack(self, name):
"""Normalize the given cluster stack name."""
if name in ["corosync", "cs", "mcp"]:
self.data["Stack"] = "corosync 2+"
else:
- raise ValueError("Unknown stack: %s" % name)
+ raise ValueError(f"Unknown stack: {name}")
def _get_stack_short(self):
"""Return the short name for the currently set cluster stack."""
if "Stack" not in self.data:
return "unknown"
if self.data["Stack"] == "corosync 2+":
return "crm-corosync"
- LogFactory().log("Unknown stack: %s" % self["stack"])
- raise ValueError("Unknown stack: %s" % self["stack"])
+ LogFactory().log(f"Unknown stack: {self['stack']}")
+ raise ValueError(f"Unknown stack: {self['stack']}")
def _detect_systemd(self):
"""Detect whether systemd is in use on the target node."""
if "have_systemd" not in self.data:
(rc, _) = self._rsh(self._target, "systemctl list-units", verbose=0)
self["have_systemd"] = rc == 0
def _detect_syslog(self):
"""Detect the syslog variant in use on the target node (if any)."""
if "syslogd" in self.data:
return
if self["have_systemd"]:
# Systemd
(_, lines) = self._rsh(self._target, r"systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
else:
# SYS-V
(_, lines) = self._rsh(self._target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
with suppress(IndexError):
self["syslogd"] = lines[0].strip()
def disable_service(self, node, service):
"""Disable the given service on the given node."""
if self["have_systemd"]:
# Systemd
- (rc, _) = self._rsh(node, "systemctl disable %s" % service)
+ (rc, _) = self._rsh(node, f"systemctl disable {service}")
return rc
# SYS-V
- (rc, _) = self._rsh(node, "chkconfig %s off" % service)
+ (rc, _) = self._rsh(node, f"chkconfig {service} off")
return rc
def enable_service(self, node, service):
"""Enable the given service on the given node."""
if self["have_systemd"]:
# Systemd
- (rc, _) = self._rsh(node, "systemctl enable %s" % service)
+ (rc, _) = self._rsh(node, f"systemctl enable {service}")
return rc
# SYS-V
- (rc, _) = self._rsh(node, "chkconfig %s on" % service)
+ (rc, _) = self._rsh(node, f"chkconfig {service} on")
return rc
def service_is_enabled(self, node, service):
"""Return True if the given service is enabled on the given node."""
if self["have_systemd"]:
# Systemd
# With "systemctl is-enabled", we should check if the service is
# explicitly "enabled" instead of the return code. For example it returns
# 0 if the service is "static" or "indirect", but they don't really count
# as "enabled".
- (rc, _) = self._rsh(node, "systemctl is-enabled %s | grep enabled" % service)
+ (rc, _) = self._rsh(node, f"systemctl is-enabled {service} | grep enabled")
return rc == 0
# SYS-V
- (rc, _) = self._rsh(node, "chkconfig --list | grep -e %s.*on" % service)
+ (rc, _) = self._rsh(node, f"chkconfig --list | grep -e {service}.*on")
return rc == 0
def _detect_at_boot(self):
"""Detect if the cluster starts at boot."""
if "at-boot" not in self.data:
self["at-boot"] = self.service_is_enabled(self._target, "corosync") \
or self.service_is_enabled(self._target, "pacemaker")
def _detect_ip_offset(self):
"""Detect the offset for IPaddr resources."""
if self["CIBResource"] and "IPBase" not in self.data:
(_, lines) = self._rsh(self._target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
network = lines[0].strip()
(_, lines) = self._rsh(self._target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
try:
self["IPBase"] = lines[0].strip()
except (IndexError, TypeError):
self["IPBase"] = None
if not self["IPBase"]:
self["IPBase"] = " fe80::1234:56:7890:1000"
self._logger.log("Could not determine an offset for IPaddr resources. Perhaps nmap is not installed on the nodes.")
- self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
+ self._logger.log(f"""Defaulting to '{self["IPBase"]}', use --test-ip-base to override""")
return
# pylint thinks self["IPBase"] is a list, not a string, which causes it
# to error out because a list doesn't have split().
# pylint: disable=no-member
- if int(self["IPBase"].split('.')[3]) >= 240:
- self._logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
- % (self["IPBase"], self["IPBase"].split('.')[3]))
+ last_part = self["IPBase"].split('.')[3]
+
+ if int(last_part) >= 240:
+ self._logger.log(f"Could not determine an offset for IPaddr resources. Upper bound is too high: {self['IPBase']} {last_part}")
self["IPBase"] = " fe80::1234:56:7890:1000"
- self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
+ self._logger.log(f"""Defaulting to '{self["IPBase"]}', use --test-ip-base to override""")
def _filter_nodes(self):
"""
Filter the list of cluster nodes.
If --limit-nodes is given, keep that many nodes from the front of the
list of cluster nodes and drop the rest.
"""
if self["node-limit"] > 0:
if len(self["nodes"]) > self["node-limit"]:
- # pylint thinks self["node-limit"] is a list even though we initialize
- # it as an int in __init__ and treat it as an int everywhere.
- # pylint: disable=bad-string-format-type
- self._logger.log("Limiting the number of nodes configured=%d (max=%d)"
- % (len(self["nodes"]), self["node-limit"]))
+ self._logger.log(f"Limiting the number of nodes configured={len(self['nodes'])} "
+ f"(max={self['node-limit']})")
while len(self["nodes"]) > self["node-limit"]:
self["nodes"].pop(len(self["nodes"]) - 1)
def _validate(self):
"""Check that we were given all required command line parameters."""
if not self["nodes"]:
raise ValueError("No nodes specified!")
def _discover(self):
"""Probe cluster nodes to figure out how to log and manage services."""
self._target = random.Random().choice(self["nodes"])
exerciser = socket.gethostname()
# Use the IP where possible to avoid name lookup failures
for ip in socket.gethostbyname_ex(exerciser)[2]:
if ip != "127.0.0.1":
exerciser = ip
break
self["cts-exerciser"] = exerciser
self._detect_systemd()
self._detect_syslog()
self._detect_at_boot()
self._detect_ip_offset()
def _parse_args(self, argv):
"""
Parse and validate command line parameters.
Set the appropriate values in the environment dictionary. If argv is
None, use sys.argv instead.
"""
if not argv:
argv = sys.argv[1:]
- parser = argparse.ArgumentParser(epilog="%s -g virt1 -r --stonith ssh --schema pacemaker-2.0 500" % sys.argv[0])
+ parser = argparse.ArgumentParser(epilog=f"{sys.argv[0]} -g virt1 -r --stonith ssh --schema pacemaker-2.0 500")
grp1 = parser.add_argument_group("Common options")
grp1.add_argument("-g", "--dsh-group", "--group",
metavar="GROUP", dest="group",
help="Use the nodes listed in the named DSH group (~/.dsh/groups/$name)")
grp1.add_argument("-l", "--limit-nodes",
type=int, default=0,
metavar="MAX",
help="Only use the first MAX cluster nodes supplied with --nodes")
grp1.add_argument("--benchmark",
action="store_true",
help="Add timing information")
grp1.add_argument("--list", "--list-tests",
action="store_true", dest="list_tests",
help="List the valid tests")
grp1.add_argument("--nodes",
metavar="NODES",
help="List of cluster nodes separated by whitespace")
grp1.add_argument("--stack",
default="corosync",
metavar="STACK",
help="Which cluster stack is installed")
grp2 = parser.add_argument_group("Options that CTS will usually auto-detect correctly")
grp2.add_argument("-L", "--logfile",
metavar="PATH",
help="Where to look for logs from cluster nodes (or 'journal' for systemd journal)")
grp2.add_argument("--at-boot", "--cluster-starts-at-boot",
choices=["1", "0", "yes", "no"],
help="Does the cluster software start at boot time?")
grp2.add_argument("--facility", "--syslog-facility",
default="daemon",
metavar="NAME",
help="Which syslog facility to log to")
grp2.add_argument("--ip", "--test-ip-base",
metavar="IP",
help="Offset for generated IP address resources")
grp3 = parser.add_argument_group("Options for release testing")
grp3.add_argument("-r", "--populate-resources",
action="store_true",
help="Generate a sample configuration")
grp3.add_argument("--choose",
metavar="NAME",
help="Run only the named tests, separated by whitespace")
grp3.add_argument("--fencing", "--stonith",
choices=["1", "0", "yes", "no", "lha", "openstack", "rhcs", "rhevm", "scsi", "ssh", "virt", "xvm"],
default="1",
help="What fencing agent to use")
grp3.add_argument("--once",
action="store_true",
help="Run all valid tests once")
grp4 = parser.add_argument_group("Additional (less common) options")
grp4.add_argument("-c", "--clobber-cib",
action="store_true",
help="Erase any existing configuration")
grp4.add_argument("-y", "--yes",
action="store_true", dest="always_continue",
help="Continue to run whenever prompted")
grp4.add_argument("--boot",
action="store_true",
help="")
grp4.add_argument("--cib-filename",
metavar="PATH",
help="Install the given CIB file to the cluster")
grp4.add_argument("--experimental-tests",
action="store_true",
help="Include experimental tests")
grp4.add_argument("--loop-minutes",
type=int, default=60,
help="")
grp4.add_argument("--no-loop-tests",
action="store_true",
help="Don't run looping/time-based tests")
grp4.add_argument("--no-unsafe-tests",
action="store_true",
help="Don't run tests that are unsafe for use with ocfs2/drbd")
grp4.add_argument("--notification-agent",
metavar="PATH",
default="/var/lib/pacemaker/notify.sh",
help="Script to configure for Pacemaker alerts")
grp4.add_argument("--notification-recipient",
metavar="R",
default="/var/lib/pacemaker/notify.log",
help="Recipient to pass to alert script")
grp4.add_argument("--oprofile",
metavar="NODES",
help="List of cluster nodes to run oprofile on")
grp4.add_argument("--outputfile",
metavar="PATH",
help="Location to write logs to")
grp4.add_argument("--qarsh",
action="store_true",
help="Use QARSH to access nodes instead of SSH")
grp4.add_argument("--schema",
metavar="SCHEMA",
- default="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION,
+ default=f"pacemaker-{BuildOptions.CIB_SCHEMA_VERSION}",
help="Create a CIB conforming to the given schema")
grp4.add_argument("--seed",
metavar="SEED",
help="Use the given string as the random number seed")
grp4.add_argument("--set",
action="append",
metavar="ARG",
default=[],
help="Set key=value pairs (can be specified multiple times)")
grp4.add_argument("--stonith-args",
metavar="ARGS",
default="hostlist=all,livedangerously=yes",
help="")
grp4.add_argument("--stonith-type",
metavar="TYPE",
default="external/ssh",
help="")
grp4.add_argument("--trunc",
action="store_true", dest="truncate",
help="Truncate log file before starting")
grp4.add_argument("--valgrind-procs",
metavar="PROCS",
default="pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd",
help="Run valgrind against the given space-separated list of processes")
grp4.add_argument("--valgrind-tests",
action="store_true",
help="Include tests using valgrind")
grp4.add_argument("--warn-inactive",
action="store_true",
help="Warn if a resource is assigned to an inactive node")
parser.add_argument("iterations",
nargs='?',
type=int, default=1,
help="Number of tests to run")
args = parser.parse_args(args=argv)
# Set values on this object based on what happened with command line
# processing. This has to be done in several blocks.
# These values can always be set. They get a default from the add_argument
# calls, only do one thing, and they do not have any side effects.
self["ClobberCIB"] = args.clobber_cib
self["ListTests"] = args.list_tests
self["Schema"] = args.schema
self["Stack"] = args.stack
self["SyslogFacility"] = args.facility
self["TruncateLog"] = args.truncate
self["at-boot"] = args.at_boot in ["1", "yes"]
self["benchmark"] = args.benchmark
self["continue"] = args.always_continue
self["experimental-tests"] = args.experimental_tests
self["iterations"] = args.iterations
self["loop-minutes"] = args.loop_minutes
self["loop-tests"] = not args.no_loop_tests
self["notification-agent"] = args.notification_agent
self["notification-recipient"] = args.notification_recipient
self["node-limit"] = args.limit_nodes
self["stonith-params"] = args.stonith_args
self["stonith-type"] = args.stonith_type
self["unsafe-tests"] = not args.no_unsafe_tests
self["valgrind-procs"] = args.valgrind_procs
self["valgrind-tests"] = args.valgrind_tests
self["warn-inactive"] = args.warn_inactive
# Nodes and groups are mutually exclusive, so their defaults cannot be
# set in their add_argument calls. Additionally, groups does more than
# just set a value. Here, set nodes first and then if a group is
# specified, override the previous nodes value.
if args.nodes:
self["nodes"] = args.nodes.split(" ")
else:
self["nodes"] = []
if args.group:
- self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args.dsh_group)
+ self["OutputFile"] = f"{os.environ['HOME']}/cluster-{args.dsh_group}.log"
LogFactory().add_file(self["OutputFile"], "CTS")
- dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args.dsh_group)
+ dsh_file = f"{os.environ['HOME']}/.dsh/group/{args.dsh_group}"
if os.path.isfile(dsh_file):
self["nodes"] = []
with open(dsh_file, "r", encoding="utf-8") as f:
for line in f:
stripped = line.strip()
if not stripped.startswith('#'):
self["nodes"].append(stripped)
else:
- print("Unknown DSH group: %s" % args.dsh_group)
+ print(f"Unknown DSH group: {args.dsh_group}")
# Everything else either can't have a default set in an add_argument
# call (likely because we don't want to always have a value set for it)
# or it does something fancier than just set a single value. However,
# order does not matter for these as long as the user doesn't provide
# conflicting arguments on the command line. So just do everything
# alphabetically.
if args.boot:
self["scenario"] = "boot"
if args.cib_filename:
self["CIBfilename"] = args.cib_filename
else:
self["CIBfilename"] = None
if args.choose:
self["scenario"] = "sequence"
self["tests"].extend(args.choose.split())
self["iterations"] = len(self["tests"])
if args.fencing:
if args.fencing in ["0", "no"]:
self["DoFencing"] = False
else:
self["DoFencing"] = True
if args.fencing in ["rhcs", "virt", "xvm"]:
self["stonith-type"] = "fence_xvm"
elif args.fencing == "scsi":
self["stonith-type"] = "fence_scsi"
elif args.fencing in ["lha", "ssh"]:
self["stonith-params"] = "hostlist=all,livedangerously=yes"
self["stonith-type"] = "external/ssh"
elif args.fencing == "openstack":
self["stonith-type"] = "fence_openstack"
print("Obtaining OpenStack credentials from the current environment")
- self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
- os.environ['OS_REGION_NAME'],
- os.environ['OS_TENANT_NAME'],
- os.environ['OS_AUTH_URL'],
- os.environ['OS_USERNAME'],
- os.environ['OS_PASSWORD']
- )
+ region = os.environ['OS_REGION_NAME']
+ tenant = os.environ['OS_TENANT_NAME']
+ auth = os.environ['OS_AUTH_URL']
+ user = os.environ['OS_USERNAME']
+ password = os.environ['OS_PASSWORD']
+
+ self["stonith-params"] = f"region={region},tenant={tenant},auth={auth},user={user},password={password}"
elif args.fencing == "rhevm":
self["stonith-type"] = "fence_rhevm"
print("Obtaining RHEV-M credentials from the current environment")
- self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
- os.environ['RHEVM_USERNAME'],
- os.environ['RHEVM_PASSWORD'],
- os.environ['RHEVM_SERVER'],
- os.environ['RHEVM_PORT'],
- )
+ user = os.environ['RHEVM_USERNAME']
+ password = os.environ['RHEVM_PASSWORD']
+ server = os.environ['RHEVM_SERVER']
+ port = os.environ['RHEVM_PORT']
+
+ self["stonith-params"] = f"login={user},passwd={password},ipaddr={server},ipport={port},ssl=1,shell_timeout=10"
if args.ip:
self["CIBResource"] = True
self["ClobberCIB"] = True
self["IPBase"] = args.ip
if args.logfile == "journal":
self["LogAuditDisabled"] = True
self["log_kind"] = LogKind.JOURNAL
elif args.logfile:
self["LogAuditDisabled"] = True
self["LogFileName"] = args.logfile
self["log_kind"] = LogKind.REMOTE_FILE
else:
# We can't set this as the default on the parser.add_argument call
# for this option because then args.logfile will be set, which means
# the above branch will be taken and those other values will also be
# set.
self["LogFileName"] = "/var/log/messages"
if args.once:
self["scenario"] = "all-once"
if args.oprofile:
self["oprofile"] = args.oprofile.split(" ")
else:
self["oprofile"] = []
if args.outputfile:
self["OutputFile"] = args.outputfile
LogFactory().add_file(self["OutputFile"])
if args.populate_resources:
self["CIBResource"] = True
self["ClobberCIB"] = True
if args.qarsh:
self._rsh.enable_qarsh()
for kv in args.set:
(name, value) = kv.split("=")
self[name] = value
- print("Setting %s = %s" % (name, value))
+ print(f"Setting {name} = {value}")
class EnvFactory:
"""A class for constructing a singleton instance of an Environment object."""
instance = None
# pylint: disable=invalid-name
def getInstance(self, args=None):
"""
Return the previously created instance of Environment.
If no instance exists, create a new instance and return that.
"""
if not EnvFactory.instance:
EnvFactory.instance = Environment(args)
return EnvFactory.instance
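# Typical use (editor's sketch): EnvFactory().getInstance(sys.argv[1:]) parses
# the arguments once; every later getInstance() call, with or without args,
# returns that same Environment object.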
diff --git a/python/pacemaker/_cts/logging.py b/python/pacemaker/_cts/logging.py
index de1ed054f9..4548ce9a0b 100644
--- a/python/pacemaker/_cts/logging.py
+++ b/python/pacemaker/_cts/logging.py
@@ -1,119 +1,118 @@
"""Logging classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["LogFactory"]
-__copyright__ = "Copyright 2014-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2014-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
import sys
import time
class Logger:
"""Abstract class to use as parent for CTS logging classes."""
TimeFormat = "%b %d %H:%M:%S\t"
def __init__(self, filename=None, tag=None):
# Whether this logger should print debug messages
self._debug_target = True
self._logfile = filename
if tag:
- self._source = "%s: " % tag
+ self._source = f"{tag}: "
else:
self._source = ""
def __call__(self, lines):
"""Log specified messages."""
raise ValueError("Abstract class member (__call__)")
def write(self, line):
"""Log a single line excluding trailing whitespace."""
return self(line.rstrip())
def writelines(self, lines):
"""Log a series of lines excluding trailing whitespace."""
for line in lines:
self.write(line)
@property
def is_debug_target(self):
"""Return True if this logger should receive debug messages."""
return self._debug_target
class StdErrLog(Logger):
"""Class to log to standard error."""
def __init__(self, filename, tag):
Logger.__init__(self, filename, tag)
self._debug_target = False
def __call__(self, lines):
"""Log specified lines to stderr."""
timestamp = time.strftime(Logger.TimeFormat,
time.localtime(time.time()))
if isinstance(lines, str):
lines = [lines]
for line in lines:
- print("%s%s" % (timestamp, line), file=sys.__stderr__)
+ print(f"{timestamp}{line}", file=sys.__stderr__)
sys.__stderr__.flush()
class FileLog(Logger):
"""Class to log to a file."""
def __init__(self, filename, tag):
Logger.__init__(self, filename, tag)
self._hostname = os.uname()[1]
def __call__(self, lines):
"""Log specified lines to the file."""
with open(self._logfile, "at", encoding="utf-8") as logf:
timestamp = time.strftime(Logger.TimeFormat,
time.localtime(time.time()))
if isinstance(lines, str):
lines = [lines]
for line in lines:
- print("%s%s %s%s" % (timestamp, self._hostname, self._source, line),
- file=logf)
+ print(f"{timestamp}{self._hostname} {self._source}{line}", file=logf)
class LogFactory:
"""Singleton to log messages to various destinations."""
log_methods = []
have_stderr = False
def add_file(self, filename, tag=None):
"""When logging messages, log them to specified file."""
if filename:
LogFactory.log_methods.append(FileLog(filename, tag))
def add_stderr(self):
"""When logging messages, log them to standard error."""
if not LogFactory.have_stderr:
LogFactory.have_stderr = True
LogFactory.log_methods.append(StdErrLog(None, None))
def log(self, args):
"""Log a message (to all configured log destinations)."""
for logfn in LogFactory.log_methods:
logfn(args.strip())
def debug(self, args):
"""Log a debug message (to all configured log destinations)."""
for logfn in LogFactory.log_methods:
if logfn.is_debug_target:
- logfn("debug: %s" % args.strip())
+ logfn(f"debug: {args.strip()}")
def traceback(self, traceback):
"""Log a stack trace (to all configured log destinations)."""
for logfn in LogFactory.log_methods:
traceback.print_exc(50, logfn)
diff --git a/python/pacemaker/_cts/network.py b/python/pacemaker/_cts/network.py
index 6ba776c97d..3a77cff960 100644
--- a/python/pacemaker/_cts/network.py
+++ b/python/pacemaker/_cts/network.py
@@ -1,59 +1,59 @@
"""Network related utilities for CTS."""
__all__ = ["next_ip"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
# pylint: disable=global-statement
CURRENT_IP = None
def next_ip(ip_base=None, reset=False):
"""
Return the next available IP address.
This function only increments the last portion of the IP address. Once it
has hit the upper limit, ValueError will be raised.
Arguments:
ip_base -- The initial IP address to start from. The first call to next_ip
will return the next IP address from this base. Each subsequent
call will return the next address from the previous call, so you
can just omit this argument for subsequent calls.
reset -- Force next_ip to start from ip_base again. This requires also
passing the ip_base argument. (Mostly useful for unit testing,
but may be useful elsewhere).
"""
global CURRENT_IP
if CURRENT_IP is None or reset:
CURRENT_IP = ip_base
new_ip = None
# Split the existing IP address up into a tuple of:
# (everything except the last part of the addr, the separator, the last part of the addr).
# For instance, "192.168.1.2" becomes ("192.168.1", ".", "2"). Then,
# increment the last part of the address and paste everything back
# together.
if ":" in CURRENT_IP:
# This is an IPv6 address
fields = CURRENT_IP.rpartition(":")
new_ip = int(fields[2], 16) + 1
if new_ip > 0xffff:
raise ValueError("No more available IP addresses")
# hex() puts "0x" at the front of the string, so strip it off.
new_ip = hex(new_ip)[2:]
else:
# This is an IPv4 address
fields = CURRENT_IP.rpartition(".")
new_ip = int(fields[2]) + 1
if new_ip > 255:
raise ValueError("No more available IP addresses")
- CURRENT_IP = "%s%s%s" % (fields[0], fields[1], new_ip)
+ CURRENT_IP = f"{fields[0]}{fields[1]}{new_ip}"
return CURRENT_IP
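# Example of the increment behavior (editor's sketch):
#
#     >>> next_ip("192.168.1.1", reset=True)
#     '192.168.1.2'
#     >>> next_ip()
#     '192.168.1.3'
#     >>> next_ip("fe80::1234", reset=True)
#     'fe80::1235'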
diff --git a/python/pacemaker/_cts/patterns.py b/python/pacemaker/_cts/patterns.py
index 083a2af6ac..39b53b4394 100644
--- a/python/pacemaker/_cts/patterns.py
+++ b/python/pacemaker/_cts/patterns.py
@@ -1,399 +1,399 @@
"""Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["PatternSelector"]
-__copyright__ = "Copyright 2008-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2008-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
import argparse
from pacemaker.buildoptions import BuildOptions
class BasePatterns:
"""
The base class for holding a stack-specific set of command and log file/stdout patterns.
Stack-specific classes need to be built on top of this one.
"""
def __init__(self):
"""Create a new BasePatterns instance which holds a very minimal set of basic patterns."""
self._bad_news = []
self._components = {}
self._name = "crm-base"
self._ignore = [
"avoid confusing Valgrind",
# Logging bug in some versions of libvirtd
r"libvirtd.*: internal error: Failed to parse PCI config address",
# pcs can log this when node is fenced, but fencing is OK in some
# tests (and we will catch it in pacemaker logs when not OK)
r"pcs.daemon:No response from: .* request: get_configs, error:",
# This is overbroad, but there's no way to say that only certain
# transition errors are acceptable. We have to rely on causes of a
# transition error logging their own error message, which should
# always be the case.
r"pacemaker-schedulerd.* Calculated transition .*/pe-error",
]
self._commands = {
"StatusCmd": "crmadmin -t 60 -S %s 2>/dev/null",
"CibQuery": "cibadmin -Q",
"CibAddXml": "cibadmin --modify -c --xml-text %s",
"CibDelXpath": "cibadmin --delete --xpath %s",
"RscRunning": BuildOptions.DAEMON_DIR + "/cts-exec-helper -R -r %s",
"CIBfile": "%s:" + BuildOptions.CIB_DIR + "/cib.xml",
"TmpDir": "/tmp",
"BreakCommCmd": "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1",
"FixCommCmd": "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1",
"MaintenanceModeOn": "cibadmin --modify -c --xml-text ''",
"MaintenanceModeOff": "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"",
"StandbyCmd": "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null",
"StandbyQueryCmd": "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null",
}
self._search = {
"Pat:DC_IDLE": r"pacemaker-controld.*State transition.*-> S_IDLE",
# This won't work if we have multiple partitions
"Pat:Local_started": r"%s\W.*controller successfully started",
"Pat:NonDC_started": r"%s\W.*State transition.*-> S_NOT_DC",
"Pat:DC_started": r"%s\W.*State transition.*-> S_IDLE",
"Pat:We_stopped": r"%s\W.*OVERRIDE THIS PATTERN",
"Pat:They_stopped": r"%s\W.*LOST:.* %s ",
"Pat:They_dead": r"node %s.*: is dead",
"Pat:They_up": r"%s %s\W.*OVERRIDE THIS PATTERN",
"Pat:TransitionComplete": "Transition status: Complete: complete",
"Pat:Fencing_start": r"Requesting peer fencing .* targeting %s",
"Pat:Fencing_ok": r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* for .*@.*: OK",
"Pat:Fencing_recover": r"pacemaker-schedulerd.*: Recover\s+%s",
"Pat:Fencing_active": r"stonith resource .* is active on 2 nodes (attempting recovery)",
"Pat:Fencing_probe": r"pacemaker-controld.* Result of probe operation for %s on .*: Error",
"Pat:RscOpOK": r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?OK",
"Pat:RscOpFail": r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ",
"Pat:CloneOpFail": r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ",
"Pat:RscRemoteOpOK": r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?OK",
"Pat:NodeFenced": r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK",
}
def get_component(self, key):
"""
Return the patterns for a single component as a list, given by key.
This is typically the name of some subprogram (pacemaker-based,
pacemaker-fenced, etc.) or various special purpose keys. If key is
unknown, return an empty list.
"""
if key in self._components:
return self._components[key]
- print("Unknown component '%s' for %s" % (key, self._name))
+ print(f"Unknown component '{key}' for {self._name}")
return []
def get_patterns(self, key):
"""
Return various patterns supported by this object, given by key.
Depending on the key, this could either be a list or a hash. If key is
unknown, return None.
"""
if key == "BadNews":
return self._bad_news
if key == "BadNewsIgnore":
return self._ignore
if key == "Commands":
return self._commands
if key == "Search":
return self._search
if key == "Components":
return self._components
- print("Unknown pattern '%s' for %s" % (key, self._name))
+ print(f"Unknown pattern '{key}' for {self._name}")
return None
def __getitem__(self, key):
if key == "Name":
return self._name
if key in self._commands:
return self._commands[key]
if key in self._search:
return self._search[key]
- print("Unknown template '%s' for %s" % (key, self._name))
+ print(f"Unknown template '{key}' for {self._name}")
return None
class Corosync2Patterns(BasePatterns):
"""Patterns for Corosync version 2 cluster manager class."""
def __init__(self):
BasePatterns.__init__(self)
self._name = "crm-corosync"
self._commands.update({
"StartCmd": "service corosync start && service pacemaker start",
"StopCmd": "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop",
"EpochCmd": "crm_node -e",
"QuorumCmd": "crm_node -q",
"PartitionCmd": "crm_node -p",
})
self._search.update({
# Close enough ... "Corosync Cluster Engine exiting normally" isn't
# printed reliably.
"Pat:We_stopped": r"%s\W.*Unloading all Corosync service engines",
"Pat:They_stopped": r"%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_dead": r"pacemaker-controld.*Node %s(\[|\s).*state is now lost",
"Pat:They_up": r"\W%s\W.*pacemaker-controld.*Node %s state is now member",
"Pat:ChildExit": r"\[[0-9]+\] exited with status [0-9]+ \(",
# "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes()
"Pat:ChildKilled": r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)",
"Pat:ChildRespawn": r"%s\W.*pacemakerd.*Respawning subdaemon %s after unexpected exit",
"Pat:InfraUp": r"%s\W.*corosync.*Initializing transport",
"Pat:PacemakerUp": r"%s\W.*pacemakerd.*Starting Pacemaker",
})
self._ignore += [
r"crm_mon:",
r"crmadmin:",
r"update_trace_data",
r"async_notify:.*strange, client not found",
r"Parse error: Ignoring unknown option .*nodename",
r"error.*: Operation 'reboot' .* using FencingFail returned ",
r"getinfo response error: 1$",
r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE",
r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)",
]
self._bad_news = [
r"[^(]error:",
r"crit:",
r"ERROR:",
r"CRIT:",
r"Shutting down...NOW",
r"Timer I_TERMINATE just popped",
r"input=I_ERROR",
r"input=I_FAIL",
r"input=I_INTEGRATED cause=C_TIMER_POPPED",
r"input=I_FINALIZED cause=C_TIMER_POPPED",
r"input=I_ERROR",
r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting",
r"schedulerd.*Attempting recovery of resource",
r"is taking more than 2x its timeout",
r"Confirm not received from",
r"Welcome reply not received from",
r"Attempting to schedule .* after a stop",
r"Resource .* was active at shutdown",
r"duplicate entries for call_id",
r"Search terminated:",
r":global_timer_callback",
r"Faking parameter digest creation",
r"Parameters to .* action changed:",
r"Parameters to .* changed",
r"pacemakerd.*\[[0-9]+\] terminated( with signal|$)",
r"pacemakerd.*\[[0-9]+\] .* will now be killed",
r"pacemaker-schedulerd.*Recover\s+.*\(.* -\> .*\)",
r"rsyslogd.* lost .* due to rate-limiting",
r"Peer is not part of our cluster",
r"We appear to be in an election loop",
r"Unknown node -> we will not deliver message",
r"(Blackbox dump requested|Problem detected)",
r"pacemakerd.*Could not connect to Cluster Configuration Database API",
r"Receiving messages from a node we think is dead",
r"share the same cluster nodeid",
r"share the same name",
r"pacemaker-controld:.*Transition failed: terminated",
r"Local CIB .* differs from .*:",
r"warn.*:\s*Continuing but .* will NOT be used",
r"warn.*:\s*Cluster configuration file .* is corrupt",
r"Election storm",
r"stalled the FSA with pending inputs",
]
self._components["common-ignore"] = [
r"Pending action:",
r"resource( was|s were) active at shutdown",
r"pending LRM operations at shutdown",
r"Lost connection to the CIB manager",
r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
r"pacemaker-controld.*:\s*Exiting now due to errors",
r".*:\s*Requesting fencing \([^)]+\) targeting node ",
r"(Blackbox dump requested|Problem detected)",
]
self._components["corosync-ignore"] = [
r"Could not connect to Corosync CFG: CS_ERR_LIBRARY",
r"error:.*Connection to the CPG API failed: Library error",
r"\[[0-9]+\] exited with status [0-9]+ \(",
r"\[[0-9]+\] terminated with signal 15",
r"pacemaker-based.*error:.*Corosync connection lost",
r"pacemaker-fenced.*error:.*Corosync connection terminated",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state",
r"pacemaker-controld.*error:.*Could not recover from internal error",
r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
r"error:.*cib_(shm|rw) IPC provider disconnected while waiting",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"error: Lost fencer connection",
]
self._components["corosync"] = [
# We expect each daemon to lose its cluster connection.
# However, if the CIB manager loses its connection first,
# it's possible for another daemon to lose that connection and
# exit before losing the cluster connection.
r"pacemakerd.*:\s*warning:.*Lost connection to cluster layer",
r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (Corosync process group|the CIB manager)",
r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"schedulerd.*Scheduling node .* for fencing",
r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK",
]
self._components["pacemaker-based"] = [
r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102",
r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning subdaemon pacemaker-attrd after unexpected exit",
r"pacemakerd.* Respawning subdaemon pacemaker-based after unexpected exit",
r"pacemakerd.* Respawning subdaemon pacemaker-controld after unexpected exit",
r"pacemakerd.* Respawning subdaemon pacemaker-fenced after unexpected exit",
r"pacemaker-.* Connection to cib_.* (failed|closed)",
r"pacemaker-attrd.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*:.*Lost connection to the CIB manager",
r"pacemaker-controld.*I_ERROR.*handle_cib_disconnect",
r"pacemaker-controld.* State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
]
self._components["pacemaker-based-ignore"] = [
r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)",
r"pacemaker-controld.*:Could not connect to attrd: Connection refused",
]
self._components["pacemaker-execd"] = [
r"pacemaker-controld.*Lost connection to local executor",
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1",
r"pacemakerd.* Respawning subdaemon pacemaker-execd after unexpected exit",
r"pacemakerd.* Respawning subdaemon pacemaker-controld after unexpected exit",
]
self._components["pacemaker-execd-ignore"] = [
r"pacemaker-(attrd|controld).*Connection to lrmd.* (failed|closed)",
r"pacemaker-(attrd|controld).*Could not execute alert",
]
self._components["pacemaker-controld"] = [
r"State transition .* -> S_IDLE",
]
self._components["pacemaker-controld-ignore"] = []
self._components["pacemaker-attrd"] = []
self._components["pacemaker-attrd-ignore"] = [
r"pacemaker-controld.*Connection to attrd (IPC failed|closed)",
]
self._components["pacemaker-schedulerd"] = [
r"State transition .* S_RECOVERY",
r"pacemakerd.* Respawning subdaemon pacemaker-controld after unexpected exit",
r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
r"pacemaker-controld.*Lost connection to the scheduler",
r"pacemaker-controld.*I_ERROR.*save_cib_contents",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
]
self._components["pacemaker-schedulerd-ignore"] = [
r"Connection to pengine.* (failed|closed)",
]
self._components["pacemaker-fenced"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"Lost fencer connection",
r"pacemaker-controld.*Fencer successfully connected",
]
self._components["pacemaker-fenced-ignore"] = [
r"(error|warning):.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
r"error:.*Lost fencer connection",
r"error:.*Fencer connection failed \(will retry\)",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)",
]
self._components["pacemaker-fenced-ignore"].extend(self._components["common-ignore"])
patternVariants = {
"crm-base": BasePatterns,
"crm-corosync": Corosync2Patterns
}
class PatternSelector:
"""Choose from among several Pattern objects and return the information from that object."""
def __init__(self, name="crm-corosync"):
"""
Create a new PatternSelector object.
Instantiate whatever class is given by name, defaulting to Corosync2Patterns
when name is "crm-corosync" or not given. While other objects could be
supported in the future, only this and the base object are supported at this time.
"""
self._name = name
# If no name was given, use the default. Otherwise, look up the appropriate
# class in patternVariants, instantiate it, and use that.
if not name:
self._base = Corosync2Patterns()
else:
self._base = patternVariants[name]()
def get_patterns(self, kind):
"""Call get_patterns on the previously instantiated pattern object."""
return self._base.get_patterns(kind)
def get_template(self, key):
"""
Return a single pattern from the previously instantiated pattern object.
If no pattern exists for the given key, return None.
"""
return self._base[key]
def get_component(self, kind):
"""Call get_component on the previously instantiated pattern object."""
return self._base.get_component(kind)
def __getitem__(self, key):
"""Return the pattern for the given key, or None if it does not exist."""
return self.get_template(key)
# PYTHONPATH=python python python/pacemaker/_cts/patterns.py -k crm-corosync -t StartCmd
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-k", "--kind", metavar="KIND")
parser.add_argument("-t", "--template", metavar="TEMPLATE")
args = parser.parse_args()
print(PatternSelector(args.kind)[args.template])
diff --git a/python/pacemaker/_cts/process.py b/python/pacemaker/_cts/process.py
index c25ce33ea7..8d652037e0 100644
--- a/python/pacemaker/_cts/process.py
+++ b/python/pacemaker/_cts/process.py
@@ -1,72 +1,72 @@
"""A module for managing and communicating with external processes."""
__all__ = ["killall", "exit_if_proc_running", "pipe_communicate", "stdout_from_command"]
-__copyright__ = "Copyright 2009-2023 the Pacemaker project contributors"
+__copyright__ = "Copyright 2009-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
import subprocess
import sys
import psutil
from pacemaker.exitstatus import ExitStatus
def killall(process_names, terminate=False):
"""Kill all instances of every process in a list."""
if not process_names:
return
if not isinstance(process_names, list):
process_names = [process_names]
procs = []
for proc in psutil.process_iter(["name"]):
if proc.info["name"] in process_names:
procs.append(proc)
if terminate:
for proc in procs:
proc.terminate()
_, alive = psutil.wait_procs(procs, timeout=3)
procs = alive
for proc in procs:
proc.kill()
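# For example, killall(["pacemakerd"], terminate=True) first sends SIGTERM to
# every matching process, waits up to three seconds via psutil.wait_procs(),
# and then SIGKILLs whatever is still alive.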
def is_proc_running(process_name):
"""Check whether a process with a given name is running."""
for proc in psutil.process_iter(["name"]):
if proc.info["name"] == process_name:
return True
return False
def exit_if_proc_running(process_name):
"""Exit with error if a given process is running."""
if is_proc_running(process_name):
- print("Error: %s is already running!" % process_name)
- print("Run %s only when the cluster is stopped." % sys.argv[0])
+ print(f"Error: {process_name} is already running!")
+ print(f"Run {sys.argv[0]} only when the cluster is stopped.")
sys.exit(ExitStatus.ERROR)
def pipe_communicate(pipes, check_stderr=False, stdin=None):
"""Get text output from pipes."""
if stdin is not None:
pipe_outputs = pipes.communicate(input=stdin.encode())
else:
pipe_outputs = pipes.communicate()
output = pipe_outputs[0].decode(sys.stdout.encoding)
if check_stderr:
output += pipe_outputs[1].decode(sys.stderr.encoding)
return output
def stdout_from_command(args):
"""Execute command and return its standard output."""
with subprocess.Popen(args, stdout=subprocess.PIPE) as p:
p.wait()
return pipe_communicate(p).split("\n")
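# For example (editor's note), stdout_from_command(["uname", "-n"]) returns
# something like ["node1", ""]: the trailing newline in the command's output
# leaves an empty final element after split("\n").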
diff --git a/python/pacemaker/_cts/remote.py b/python/pacemaker/_cts/remote.py
index ac70f8aeb8..d62adc9109 100644
--- a/python/pacemaker/_cts/remote.py
+++ b/python/pacemaker/_cts/remote.py
@@ -1,281 +1,281 @@
"""Remote command runner for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["RemoteExec", "RemoteFactory"]
-__copyright__ = "Copyright 2014-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2014-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
import os
from subprocess import Popen, PIPE
from threading import Thread
from pacemaker._cts.logging import LogFactory
def convert2string(lines):
"""
Convert byte strings to UTF-8 strings.
Lists of byte strings are converted to a list of UTF-8 strings. All other
text formats are passed through.
"""
if isinstance(lines, bytes):
return lines.decode("utf-8")
if isinstance(lines, list):
lst = []
for line in lines:
if isinstance(line, bytes):
line = line.decode("utf-8")
lst.append(line)
return lst
return lines
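# For example, convert2string([b"node1\n", "node2\n"]) returns
# ["node1\n", "node2\n"], decoding only the entries that are byte strings.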
class AsyncCmd(Thread):
"""A class for doing the hard work of running a command on another machine."""
def __init__(self, node, command, proc=None, delegate=None):
"""
Create a new AsyncCmd instance.
Arguments:
node -- The remote machine to run on
command -- The ssh command string to use for remote execution
proc -- If not None, a process object previously created with Popen.
Instead of spawning a new process, we will then wait on
this process to finish and handle its output.
delegate -- When the command completes, call the async_complete method
on this object
"""
self._command = command
self._delegate = delegate
self._logger = LogFactory()
self._node = node
self._proc = proc
Thread.__init__(self)
def run(self):
"""Run the previously instantiated AsyncCmd object."""
out = None
err = None
if not self._proc:
# pylint: disable=consider-using-with
self._proc = Popen(self._command, stdout=PIPE, stderr=PIPE, close_fds=True, shell=True)
- self._logger.debug("cmd: async: target=%s, pid=%d: %s" % (self._node, self._proc.pid, self._command))
+ self._logger.debug(f"cmd: async: target={self._node}, pid={self._proc.pid}: {self._command}")
self._proc.wait()
if self._delegate:
- self._logger.debug("cmd: pid %d returned %d to %r" % (self._proc.pid, self._proc.returncode, self._delegate))
+ self._logger.debug(f"cmd: pid {self._proc.pid} returned {self._proc.returncode} to {self._delegate!r}")
else:
- self._logger.debug("cmd: pid %d returned %d" % (self._proc.pid, self._proc.returncode))
+ self._logger.debug(f"cmd: pid {self._proc.pid} returned {self._proc.returncode}")
if self._proc.stderr:
err = self._proc.stderr.readlines()
self._proc.stderr.close()
for line in err:
- self._logger.debug("cmd: stderr[%d]: %s" % (self._proc.pid, line))
+ self._logger.debug(f"cmd: stderr[{self._proc.pid}]: {line}")
err = convert2string(err)
if self._proc.stdout:
out = self._proc.stdout.readlines()
self._proc.stdout.close()
out = convert2string(out)
if self._delegate:
self._delegate.async_complete(self._proc.pid, self._proc.returncode, out, err)
class RemoteExec:
"""
An abstract class for remote execution.
It runs a command on another machine using ssh and scp.
"""
def __init__(self, command, cp_command, silent=False):
"""
Create a new RemoteExec instance.
Arguments:
command -- The ssh command string to use for remote execution
cp_command -- The scp command string to use for copying files
silent -- Should we log command status?
"""
self._command = command
self._cp_command = cp_command
self._logger = LogFactory()
self._silent = silent
self._our_node = os.uname()[1].lower()
def _fixcmd(self, cmd):
"""Perform shell escapes on certain characters in the input cmd string."""
return re.sub("\'", "'\\''", cmd)
def _cmd(self, args):
"""Given a list of arguments, return the string that will be run on the remote system."""
sysname = args[0]
command = args[1]
if sysname is None or sysname.lower() in [self._our_node, "localhost"]:
ret = command
else:
- ret = "%s %s '%s'" % (self._command, sysname, self._fixcmd(command))
+ ret = f"{self._command} {sysname} '{self._fixcmd(command)}'"
return ret
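# For example (editor's note), _fixcmd("echo 'hi'") returns "echo '\''hi'\''",
# so _cmd(["node1", "echo 'hi'"]) produces something like
# "ssh -l root ... node1 'echo '\''hi'\'''", where the embedded quotes survive
# the remote shell, while a None or local sysname returns the command unwrapped.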
def _log(self, args):
"""Log a message."""
if not self._silent:
self._logger.log(args)
def _debug(self, args):
"""Log a message at the debug level."""
if not self._silent:
self._logger.debug(args)
def call_async(self, node, command, delegate=None):
"""
Run the given command on the given remote system and do not wait for it to complete.
Arguments:
node -- The remote machine to run on
command -- The command to run, as a string
delegate -- When the command completes, call the async_complete method
on this object
Returns the running process object.
"""
aproc = AsyncCmd(node, self._cmd([node, command]), delegate=delegate)
aproc.start()
return aproc
def __call__(self, node, command, synchronous=True, verbose=2):
"""
Run the given command on the given remote system.
If you call this class like a function, this is what gets called. It's
approximately the same as a system() call on the remote machine.
Arguments:
node -- The remote machine to run on
command -- The command to run, as a string
synchronous -- Should we wait for the command to complete?
verbose -- If 0, do not log anything. If 1, log the command and its
return code but not its output. If 2, additionally log
command output.
Returns a tuple of (return code, command output).
"""
rc = 0
result = None
# pylint: disable=consider-using-with
proc = Popen(self._cmd([node, command]),
stdout=PIPE, stderr=PIPE, close_fds=True, shell=True)
if not synchronous and proc.pid > 0 and not self._silent:
aproc = AsyncCmd(node, command, proc=proc)
aproc.start()
return (rc, result)
if proc.stdout:
result = proc.stdout.readlines()
proc.stdout.close()
else:
self._log("No stdout stream")
rc = proc.wait()
if verbose > 0:
- self._debug("cmd: target=%s, rc=%d: %s" % (node, rc, command))
+ self._debug(f"cmd: target={node}, rc={rc}: {command}")
result = convert2string(result)
if proc.stderr:
errors = proc.stderr.readlines()
proc.stderr.close()
for err in errors:
- self._debug("cmd: stderr: %s" % err)
+ self._debug(f"cmd: stderr: {err}")
if verbose == 2:
for line in result:
- self._debug("cmd: stdout: %s" % line)
+ self._debug(f"cmd: stdout: {line}")
return (rc, result)
def copy(self, source, target, silent=False):
"""
Perform a copy of the source file to the remote target.
This function uses the cp_command provided when the RemoteExec object
was created.
Returns the return code of the cp_command.
"""
# @TODO Use subprocess module with argument array instead
# (self._cp_command should be an array too)
- cmd = "%s '%s' '%s'" % (self._cp_command, source, target)
+ cmd = f"{self._cp_command} '{source}' '{target}'"
rc = os.system(cmd)
if not silent:
- self._debug("cmd: rc=%d: %s" % (rc, cmd))
+ self._debug(f"cmd: rc={rc}: {cmd}")
return rc
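# One possible shape for the @TODO above (editor's sketch, assuming
# self._cp_command were changed to an argument list such as ["scp", "-B", "-q"]):
#
#     import subprocess
#     rc = subprocess.call(self._cp_command + [source, target])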
def exists_on_all(self, filename, hosts):
"""Return True if specified file exists on all specified hosts."""
for host in hosts:
- rc = self(host, "test -r %s" % filename)
+ rc = self(host, f"test -r {filename}")
if rc != 0:
return False
return True
class RemoteFactory:
"""A class for constructing a singleton instance of a RemoteExec object."""
# Class variables
# -n: no stdin, -x: no X11,
# -o ServerAliveInterval=5: disconnect after 3*5s if the server
# stops responding
command = ("ssh -l root -n -x -o ServerAliveInterval=5 "
"-o ConnectTimeout=10 -o TCPKeepAlive=yes "
"-o ServerAliveCountMax=3 ")
# -B: batch mode, -q: no stats (quiet)
cp_command = "scp -B -q"
instance = None
# pylint: disable=invalid-name
def getInstance(self):
"""
Return the previously created instance of RemoteExec.
If no instance exists, create one and then return that.
"""
if not RemoteFactory.instance:
RemoteFactory.instance = RemoteExec(RemoteFactory.command,
RemoteFactory.cp_command,
False)
return RemoteFactory.instance
def enable_qarsh(self):
"""Enable the QA remote shell."""
# http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/
print("Using QARSH for connections to cluster nodes")
RemoteFactory.command = "qarsh -t 300 -l root"
RemoteFactory.cp_command = "qacp -q"
diff --git a/python/pacemaker/_cts/scenarios.py b/python/pacemaker/_cts/scenarios.py
index 81e9e40d1a..07f2b38805 100644
--- a/python/pacemaker/_cts/scenarios.py
+++ b/python/pacemaker/_cts/scenarios.py
@@ -1,424 +1,421 @@
"""Test scenario classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = [
"AllOnce",
"Boot",
"BootCluster",
"LeaveBooted",
"RandomTests",
"Sequence",
]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
import time
from pacemaker._cts.audits import ClusterAudit
from pacemaker._cts.input import should_continue
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.watcher import LogWatcher
class ScenarioComponent:
"""
The base class for all scenario components.
A scenario component is one single step in a scenario. Each component is
basically just a setup and teardown method.
"""
def __init__(self, cm, env):
"""
Create a new ScenarioComponent instance.
Arguments:
cm -- A ClusterManager instance
env -- An Environment instance
"""
# pylint: disable=invalid-name
self._cm = cm
self._env = env
def is_applicable(self):
"""
Return True if this component is applicable in the given Environment.
This method must be provided by all subclasses.
"""
raise NotImplementedError
def setup(self):
"""
Set up the component, returning True on success.
This method must be provided by all subclasses.
"""
raise NotImplementedError
def teardown(self):
"""
Tear down the given component.
This method must be provided by all subclasses.
"""
raise NotImplementedError
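# A minimal concrete component might look like this (editor's sketch, not part
# of this patch):
#
#     class WaitComponent(ScenarioComponent):
#         def is_applicable(self):
#             return True
#         def setup(self):
#             time.sleep(1)
#             return True
#         def teardown(self):
#             pass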
class Scenario:
"""
The base class for scenarios.
A scenario is an ordered list of ScenarioComponent objects. A scenario
proceeds by setting up all its components in sequence, running a list of
tests and audits, and then tearing down its components in reverse.
"""
def __init__(self, cm, components, audits, tests):
"""
Create a new Scenario instance.
Arguments:
cm -- A ClusterManager instance
components -- A list of ScenarioComponents comprising this Scenario
audits -- A list of ClusterAudits that will be performed as
part of this Scenario
tests -- A list of CTSTests that will be run
"""
# pylint: disable=invalid-name
self.stats = {
"success": 0,
"failure": 0,
"BadNews": 0,
"skipped": 0
}
self.tests = tests
self._audits = audits
self._bad_news = None
self._cm = cm
self._components = components
for comp in components:
if not issubclass(comp.__class__, ScenarioComponent):
raise ValueError("Init value must be subclass of ScenarioComponent")
for audit in audits:
if not issubclass(audit.__class__, ClusterAudit):
raise ValueError("Init value must be subclass of ClusterAudit")
for test in tests:
if not issubclass(test.__class__, CTSTest):
raise ValueError("Init value must be a subclass of CTSTest")
def is_applicable(self):
"""Return True if all ScenarioComponents are applicable."""
for comp in self._components:
if not comp.is_applicable():
return False
return True
def setup(self):
"""
Set up the scenario, returning True on success.
If setup fails at some point, tear down those components that did
successfully set up.
"""
self._cm.prepare()
self.audit() # Also detects remote/local log config
self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
self.audit()
self._cm.install_support()
self._bad_news = LogWatcher(self._cm.env["LogFileName"],
self._cm.templates.get_patterns("BadNews"),
self._cm.env["nodes"],
self._cm.env["log_kind"],
"BadNews", 0)
self._bad_news.set_watch() # Call after we've figured out what type of log watching to do in LogAudit
j = 0
while j < len(self._components):
if not self._components[j].setup():
# OOPS! We failed. Tear partial setups down.
self.audit()
self._cm.log("Tearing down partial setup")
self.teardown(j)
return False
j += 1
self.audit()
return True
def teardown(self, n_components=None):
"""
Tear down the scenario in the reverse order it was set up.
If n_components is not None, only tear down that many components.
"""
if n_components is None:
n_components = len(self._components) - 1
j = n_components
while j >= 0:
self._components[j].teardown()
j -= 1
self.audit()
self._cm.install_support("uninstall")
def incr(self, name):
"""Increment the given stats key."""
if name not in self.stats:
self.stats[name] = 0
self.stats[name] += 1
def run(self, iterations):
"""Run all tests in the scenario the given number of times."""
self._cm.oprofile_start()
try:
self._run_loop(iterations)
self._cm.oprofile_stop()
except: # noqa: E722
self._cm.oprofile_stop()
raise
def _run_loop(self, iterations):
"""Run all the tests the given number of times."""
raise NotImplementedError
def run_test(self, test, testcount):
"""
Run the given test.
testcount is the number of tests (including this one) that have been
run across all iterations.
"""
nodechoice = self._cm.env.random_node()
ret = True
did_run = False
self._cm.clear_instance_errors_to_ignore()
- choice = "(%s)" % nodechoice
- self._cm.log("Running test {:<22} {:<15} [{:>3}]".format(test.name, choice, testcount))
+ self._cm.log(f"Running test {test.name:<22} {f'({nodechoice})':<15} [{testcount:>3}]")
starttime = test.set_timer()
if not test.setup(nodechoice):
self._cm.log("Setup failed")
ret = False
else:
did_run = True
ret = test(nodechoice)
if not test.teardown(nodechoice):
self._cm.log("Teardown failed")
if not should_continue(self._cm.env):
- raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))
+ raise ValueError(f"Teardown of {test.name} on {nodechoice} failed")
ret = False
stoptime = time.time()
self._cm.oprofile_save(testcount)
elapsed_time = stoptime - starttime
test_time = stoptime - test.get_timer()
if "min_time" not in test.stats:
test.stats["elapsed_time"] = elapsed_time
test.stats["min_time"] = test_time
test.stats["max_time"] = test_time
else:
test.stats["elapsed_time"] += elapsed_time
if test_time < test.stats["min_time"]:
test.stats["min_time"] = test_time
if test_time > test.stats["max_time"]:
test.stats["max_time"] = test_time
if ret:
self.incr("success")
test.log_timer()
else:
self.incr("failure")
self._cm.statall()
did_run = True # Force the test count to be incremented anyway so test extraction works
self.audit(test.errors_to_ignore)
return did_run
def summarize(self):
"""Output scenario results."""
self._cm.log("****************")
self._cm.log("Overall Results:%r" % self.stats)
self._cm.log("****************")
stat_filter = {
"calls": 0,
"failure": 0,
"skipped": 0,
"auditfail": 0,
}
self._cm.log("Test Summary")
for test in self.tests:
for key in stat_filter:
stat_filter[key] = test.stats[key]
- name = "Test %s:" % test.name
- self._cm.log("{:<25} {!r}".format(name, stat_filter))
+ self._cm.log(f"{f'Test {test.name}':<25} {stat_filter!r}")
self._cm.debug("Detailed Results")
for test in self.tests:
- name = "Test %s:" % test.name
- self._cm.debug("{:<25} {!r}".format(name, stat_filter))
+ self._cm.debug(f"{f'Test {test.name}: ':<25} {test.stats!r}")
self._cm.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
def audit(self, local_ignore=None):
"""
Perform all scenario audits and log results.
If there are too many failures, prompt the user to confirm that the
scenario should continue running.
"""
errcount = 0
ignorelist = ["CTS:"]
if local_ignore:
ignorelist.extend(local_ignore)
ignorelist.extend(self._cm.errors_to_ignore)
ignorelist.extend(self._cm.instance_errors_to_ignore)
# This makes sure everything is stabilized before starting...
failed = 0
for audit in self._audits:
if not audit():
- self._cm.log("Audit %s FAILED." % audit.name)
+ self._cm.log(f"Audit {audit.name} FAILED.")
failed += 1
else:
- self._cm.debug("Audit %s passed." % audit.name)
+ self._cm.debug(f"Audit {audit.name} passed.")
while errcount < 1000:
match = None
if self._bad_news:
match = self._bad_news.look(0)
if match:
add_err = True
for ignore in ignorelist:
if add_err and re.search(ignore, match):
add_err = False
if add_err:
- self._cm.log("BadNews: %s" % match)
+ self._cm.log(f"BadNews: {match}")
self.incr("BadNews")
errcount += 1
else:
break
else:
print("Big problems")
if not should_continue(self._cm.env):
self._cm.log("Shutting down.")
self.summarize()
self.teardown()
raise ValueError("Looks like we hit a BadNews jackpot!")
if self._bad_news:
self._bad_news.end()
return failed
class AllOnce(Scenario):
"""Every Test Once."""
def _run_loop(self, iterations):
testcount = 1
for test in self.tests:
self.run_test(test, testcount)
testcount += 1
class RandomTests(Scenario):
"""Random Test Execution."""
def _run_loop(self, iterations):
testcount = 1
while testcount <= iterations:
test = self._cm.env.random_gen.choice(self.tests)
self.run_test(test, testcount)
testcount += 1
class Sequence(Scenario):
"""Named Tests in Sequence."""
def _run_loop(self, iterations):
testcount = 1
while testcount <= iterations:
for test in self.tests:
self.run_test(test, testcount)
testcount += 1
class Boot(Scenario):
"""Start the Cluster."""
def _run_loop(self, iterations):
return
class BootCluster(ScenarioComponent):
"""
Start the cluster manager on all nodes.
Wait for each to come up before starting in order to account for the
possibility that a given node might have been rebooted or crashed
beforehand.
"""
def is_applicable(self):
"""Return whether this scenario is applicable."""
return True
def setup(self):
"""Set up the component, returning True on success."""
self._cm.prepare()
# Clear out the cobwebs ;-)
self._cm.stopall(verbose=True, force=True)
# Now start the Cluster Manager on all the nodes.
self._cm.log("Starting Cluster Manager on all nodes.")
return self._cm.startall(verbose=True, quick=True)
def teardown(self):
"""Tear down the component."""
self._cm.log("Stopping Cluster Manager on all nodes")
self._cm.stopall(verbose=True, force=False)
class LeaveBooted(BootCluster):
"""Leave all nodes up when the scenario is complete."""
def teardown(self):
"""Tear down the component."""
self._cm.log("Leaving Cluster running on all nodes")
diff --git a/python/pacemaker/_cts/test.py b/python/pacemaker/_cts/test.py
index 4342d72338..ecdb692e0c 100644
--- a/python/pacemaker/_cts/test.py
+++ b/python/pacemaker/_cts/test.py
@@ -1,529 +1,532 @@
"""
A module providing base classes.
These classes are used for defining regression tests and groups of regression
tests. Everything exported here should be considered an abstract class that
needs to be subclassed in order to do anything useful. Various functions
will raise NotImplementedError if not overridden by a subclass.
"""
-__copyright__ = "Copyright 2009-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2009-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
__all__ = ["Test", "Tests"]
import io
import os
import re
import shlex
import signal
import subprocess
import sys
import time
from pacemaker._cts.errors import ExitCodeError, OutputFoundError, OutputNotFoundError, XmlValidationError
from pacemaker._cts.process import pipe_communicate
from pacemaker._cts.validate import validate
from pacemaker.exitstatus import ExitStatus
class Pattern:
"""A class for checking log files for a given pattern."""
def __init__(self, pat, negative=False, regex=False):
"""
Create a new Pattern instance.
Arguments:
pat -- The string to search for
negative -- If True, pat must not be found in any input
regex -- If True, pat is a regex and not a substring
"""
self._pat = pat
self.negative = negative
self.regex = regex
def __str__(self):
return self._pat
def match(self, line):
"""Return True if this pattern is found in the given line."""
if self.regex:
return re.search(self._pat, line) is not None
return self._pat in line
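# For example, Pattern("error") matches any line containing that substring,
# while Pattern(r"rc=[1-9]", regex=True) matches via re.search(); a negative
# Pattern causes _match_log_patterns() below to count any hit as a failure.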
class Test:
"""
The base class for a single regression test.
A single regression test may still run multiple commands as part of its
execution.
"""
def __init__(self, name, description, **kwargs):
"""
Create a new Test instance.
This method must be provided by all subclasses, which must call
Test.__init__ first.
Arguments:
description -- A user-readable description of the test, helpful in
identifying what test is running or has failed.
name -- The name of the test. Command line tools use this
attribute to allow running only tests with the exact
name, or tests whose name matches a given pattern.
This should be unique among all tests.
Keyword arguments:
force_wait --
logdir -- The base directory under which to create a directory
to store output and temporary data.
timeout -- How long to wait for the test to complete.
verbose -- Whether to print additional information, including
verbose command output and daemon log files.
"""
self.description = description
self.executed = False
self.name = name
self.force_wait = kwargs.get("force_wait", False)
self.logdir = kwargs.get("logdir", "/tmp")
self.timeout = kwargs.get("timeout", 2)
self.verbose = kwargs.get("verbose", False)
self._cmds = []
self._patterns = []
self._daemon_location = None
self._daemon_output = ""
self._daemon_process = None
self._result_exitcode = ExitStatus.OK
self._result_txt = ""
#
# PROPERTIES
#
@property
def exitcode(self):
"""
Return the final exitcode of the Test.
If all commands pass, this property will be ExitStatus.OK. Otherwise,
this property will be the exitcode of the first command to fail.
"""
return self._result_exitcode
@exitcode.setter
def exitcode(self, value):
self._result_exitcode = value
@property
def logpath(self):
"""
Return the path to the log for whatever daemon is being tested.
Note that this requires all subclasses to set self._daemon_location
before accessing this property or an exception will be raised.
"""
- return os.path.join(self.logdir, "%s.log" % self._daemon_location)
+ return os.path.join(self.logdir, f"{self._daemon_location}.log")
#
# PRIVATE METHODS
#
def _kill_daemons(self):
"""Kill any running daemons in preparation for executing the test."""
raise NotImplementedError("_kill_daemons not provided by subclass")
def _match_log_patterns(self):
"""
Check test output for expected patterns.
Set self.exitcode and self._result_txt as appropriate. Not all subclasses
will need to do this.
"""
if len(self._patterns) == 0:
return
n_failed_matches = 0
n_negative_matches = 0
output = self._daemon_output.split("\n")
for pat in self._patterns:
positive_match = False
for line in output:
if pat.match(line):
if pat.negative:
n_negative_matches += 1
if self.verbose:
- print("This pattern should not have matched = '%s" % pat)
+ print(f"This pattern should not have matched = '{pat}'")
break
positive_match = True
break
if not pat.negative and not positive_match:
n_failed_matches += 1
- print("Pattern Not Matched = '%s'" % pat)
+ print(f"Pattern Not Matched = '{pat}'")
if n_failed_matches > 0 or n_negative_matches > 0:
msg = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches."
self._result_txt = msg % (self.name, n_failed_matches, len(self._patterns), n_negative_matches)
self.exitcode = ExitStatus.ERROR
def _start_daemons(self):
"""Start any necessary daemons in preparation for executing the test."""
raise NotImplementedError("_start_daemons not provided by subclass")
#
# PUBLIC METHODS
#
def add_cmd(self, cmd=None, **kwargs):
"""
Add a command to be executed as part of this test.
Arguments:
cmd -- The program to run.
Keyword arguments:
args -- Command line arguments to pass to cmd, as a string.
check_rng -- If True and validate is True, command output will
additionally be checked against the api-result.rng file.
check_stderr -- If True, the stderr of cmd will be included in output.
env -- If not None, variables to set in the environment
expected_exitcode -- The expected exit code of cmd. This can be used to run
a command that is expected to fail.
kill -- A command to be run after cmd, typically in order to
kill a failed process. This should be the entire
command line including arguments as a single string.
no_wait -- Do not wait for cmd to complete.
stdout_match -- If not None, a string that is expected to be present
in the stdout of cmd. This can be a regular
expression.
stdout_no_match -- If not None, a string that is expected to be missing
in the stdout of cmd. This can be a regular
expression.
validate -- If True, the output of cmd will be passed to xmllint
for validation. If validation fails,
XmlValidationError will be raised.
"""
if cmd is None:
raise ValueError("cmd cannot be None")
self._cmds.append(
{
"args": kwargs.get("args", ""),
"check_rng": kwargs.get("check_rng", True),
"check_stderr": kwargs.get("check_stderr", True),
"cmd": cmd,
"expected_exitcode": kwargs.get("expected_exitcode", ExitStatus.OK),
"kill": kwargs.get("kill"),
"no_wait": kwargs.get("no_wait", False),
"stdout_match": kwargs.get("stdout_match"),
"stdout_no_match": kwargs.get("stdout_no_match"),
"validate": kwargs.get("validate", True),
"env": kwargs.get("env"),
}
)
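For illustration (a hypothetical sketch from inside a Test subclass; the commands, options, and the ExitStatus.USAGE expectation are assumptions, not taken from this module):
self.add_cmd("crm_mon", args="--output-as=xml --one-shot")  # XML output, validated
self.add_cmd("crm_mon", args="--bad-option",                # expected to fail, so
             expected_exitcode=ExitStatus.USAGE,            # skip XML validation
             validate=False)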
def add_log_pattern(self, pattern, negative=False, regex=False):
"""Add a pattern that should appear in the test's logs."""
self._patterns.append(Pattern(pattern, negative=negative, regex=regex))
def _signal_dict(self):
"""Return a dictionary mapping signal numbers to their names."""
# FIXME: When we support python >= 3.5, this function can be replaced with:
# signal.Signals(self.daemon_process.returncode).name
return {
getattr(signal, _signame): _signame
for _signame in dir(signal)
if _signame.startswith("SIG") and not _signame.startswith("SIG_")
}
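On Python >= 3.5, the FIXME above can be resolved roughly as follows (a sketch; the ValueError guard is needed because a non-signal return code is not a valid Signals value):
try:
    signame = signal.Signals(-rc).name
except ValueError:
    signame = f"RET={rc}"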
def clean_environment(self):
"""Clean up the host after executing a test."""
if self._daemon_process:
if self._daemon_process.poll() is None:
self._daemon_process.terminate()
self._daemon_process.wait()
else:
rc = self._daemon_process.returncode
- signame = self._signal_dict().get(-rc, "RET=%s" % rc)
+ signame = self._signal_dict().get(-rc, f"RET={rc}")
msg = "FAILURE - '%s' failed. %s abnormally exited during test (%s)."
self._result_txt = msg % (self.name, self._daemon_location, signame)
self.exitcode = ExitStatus.ERROR
self._daemon_process = None
self._daemon_output = ""
# The default utf-8 encoding would error out if (e.g.) memory corruption
# makes fenced output some arbitrary 8-bit value. Such output is still
# interesting for debugging, and we'd still like the regression test to
# run the full set of test cases.
with open(self.logpath, 'rt', encoding="ISO-8859-1") as logfile:
for line in logfile.readlines():
self._daemon_output += line
if self.verbose:
print("Daemon Output Start")
print(self._daemon_output)
print("Daemon Output End")
def print_result(self, filler):
"""Print the result of the last test execution."""
- print("%s%s" % (filler, self._result_txt))
+ print(f"{filler}{self._result_txt}")
def run(self):
"""Execute this test."""
i = 1
self.start_environment()
if self.verbose:
- print("\n--- START TEST - %s" % self.name)
+ print(f"\n--- START TEST - {self.name}")
- self._result_txt = "SUCCESS - '%s'" % (self.name)
+ self._result_txt = f"SUCCESS - '{self.name}'"
self.exitcode = ExitStatus.OK
for cmd in self._cmds:
try:
self.run_cmd(cmd)
except ExitCodeError as e:
- print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode']))
+ print(f"Step {i} FAILED - command returned {e}, expected {cmd['expected_exitcode']}")
self.set_error(i, cmd)
break
except OutputNotFoundError as e:
- print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e))
+ print(f"""Step {i} FAILED - '{cmd["stdout_match"]}' was not found in command output: {e}""")
self.set_error(i, cmd)
break
except OutputFoundError as e:
- print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_no_match'], e))
+ print(f"""Step {i} FAILED - '{cmd["stdout_no_match"]}' was found in command output: {e}""")
self.set_error(i, cmd)
break
except XmlValidationError as e:
- print("Step %d FAILED - xmllint failed: %s" % (i, e))
+ print(f"Step {i} FAILED - xmllint failed: {e}")
self.set_error(i, cmd)
break
if self.verbose:
- print("Step %d SUCCESS" % (i))
+ print(f"Step {i} SUCCESS")
i += 1
self.clean_environment()
if self.exitcode == ExitStatus.OK:
self._match_log_patterns()
print(self._result_txt)
if self.verbose:
- print("--- END TEST - %s\n" % self.name)
+ print(f"--- END TEST - {self.name}\n")
self.executed = True
def run_cmd(self, args):
"""Execute a command as part of this test."""
cmd = shlex.split(args['args'])
cmd.insert(0, args['cmd'])
if self.verbose:
- print("\n\nRunning: %s" % " ".join(cmd))
+ s = " ".join(cmd)
+ print(f"\n\nRunning: {s}")
# FIXME: Using "with" here breaks fencing merge tests.
# pylint: disable=consider-using-with
if args['env']:
new_env = os.environ.copy()
new_env.update(args['env'])
test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=new_env)
else:
test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if args['kill']:
if self.verbose:
- print("Also running: %s" % args['kill'])
+ print(f"Also running: {args['kill']}")
# Typically, the kill argument is used to detect some sort of
# failure. Without yielding for a few seconds here, the process
# launched earlier that is listening for the failure may not have
# time to connect to pacemaker-execd.
time.sleep(2)
subprocess.Popen(shlex.split(args['kill']))
if not args['no_wait']:
test.wait()
else:
return ExitStatus.OK
output = pipe_communicate(test, check_stderr=args['check_stderr'])
if self.verbose:
print(output)
if test.returncode != args['expected_exitcode']:
raise ExitCodeError(test.returncode)
if args['stdout_match'] is not None and \
re.search(args['stdout_match'], output) is None:
raise OutputNotFoundError(output)
if args['stdout_no_match'] is not None and \
re.search(args['stdout_no_match'], output) is not None:
raise OutputFoundError(output)
if args['validate']:
output = validate(output, args['check_rng'], self.verbose)
if self.verbose:
print(output)
return ExitStatus.OK
def set_error(self, step, cmd):
"""Record failure of this test."""
msg = "FAILURE - '%s' failed at step %d. Command: %s %s"
self._result_txt = msg % (self.name, step, cmd['cmd'], cmd['args'])
self.exitcode = ExitStatus.ERROR
def start_environment(self):
"""Prepare the host for executing a test."""
if os.path.exists(self.logpath):
os.remove(self.logpath)
self._kill_daemons()
self._start_daemons()
logfile = None
init_time = time.time()
update_time = init_time
while True:
# FIXME: Eventually use 'with' here, which seems complicated given
# everything happens in a loop.
# pylint: disable=consider-using-with
time.sleep(0.1)
if not self.force_wait and logfile is None \
and os.path.exists(self.logpath):
logfile = io.open(self.logpath, 'rt', encoding="ISO-8859-1")
if not self.force_wait and logfile is not None:
for line in logfile.readlines():
if "successfully started" in line:
return
now = time.time()
if self.timeout > 0 and (now - init_time) >= self.timeout:
if not self.force_wait:
- print("\tDaemon %s doesn't seem to have been initialized within %fs."
- "\n\tConsider specifying a longer '--timeout' value."
- % (self._daemon_location, self.timeout))
+ print(f"\tDaemon {self._daemon_location} doesn't seem to have "
+ f"been initialized within {self.timeout}s.\n\tConsider "
+ "specifying a longer '--timeout' value.")
return
if self.verbose and (now - update_time) >= 5:
- print("Waiting for %s to be initialized: %fs ..."
- % (self._daemon_location, now - init_time))
+ print(f"Waiting for {self._daemon_location} to be initialized: "
+ f"{now - init_time}s ...")
update_time = now
class Tests:
"""The base class for a collection of regression tests."""
def __init__(self, **kwargs):
"""
Create a new Tests instance.
This method must be provided by all subclasses, which must call
Tests.__init__ first.
Keyword arguments:
force_wait -- If True, do not check daemon logs for a successful
start message; instead, wait out the entire timeout
period before continuing.
logdir -- The base directory under which to create a directory
to store output and temporary data.
timeout -- How long to wait for the test to complete.
verbose -- Whether to print additional information, including
verbose command output and daemon log files.
"""
self.force_wait = kwargs.get("force_wait", False)
self.logdir = kwargs.get("logdir", "/tmp")
self.timeout = kwargs.get("timeout", 2)
self.verbose = kwargs.get("verbose", False)
self._tests = []
def exit(self):
"""Exit (with error status code if any test failed)."""
for test in self._tests:
if not test.executed:
continue
if test.exitcode != ExitStatus.OK:
sys.exit(ExitStatus.ERROR)
sys.exit(ExitStatus.OK)
def print_list(self):
"""List all registered tests."""
- print("\n==== %d TESTS FOUND ====" % len(self._tests))
- print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION"))
- print("%35s - %s" % ("--------------------", "--------------------"))
+ print(f"\n==== {len(self._tests)} TESTS FOUND ====")
+ s = "TEST NAME"
+ print(f"{s:35} - TEST DESCRIPTION")
+ s = "--------------------"
+ print(f"{s:35} - {s}")
for test in self._tests:
- print("%35s - %s" % (test.name, test.description))
+ print(f"{test.name:35} - {test.description}")
print("==== END OF LIST ====\n")
def print_results(self):
"""Print summary of results of executed tests."""
failures = 0
success = 0
print("\n\n======= FINAL RESULTS ==========")
print("\n--- FAILURE RESULTS:")
for test in self._tests:
if not test.executed:
continue
if test.exitcode != ExitStatus.OK:
failures += 1
test.print_result(" ")
else:
success += 1
if failures == 0:
print(" None")
- print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures))
+ print(f"\n--- TOTALS\n Pass:{success}\n Fail:{failures}\n")
def run_single(self, name):
"""Run a single named test."""
for test in self._tests:
if test.name == name:
test.run()
break
def run_tests(self):
"""Run all tests."""
for test in self._tests:
test.run()
def run_tests_matching(self, pattern):
"""Run all tests whose name matches a pattern."""
for test in self._tests:
if test.name.count(pattern) != 0:
test.run()
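Note that run_tests_matching() uses plain substring matching (str.count), not regular expressions; for example (a sketch with hypothetical test names):
tests.run_tests_matching("crm_")   # runs e.g. "crm_mon" and "crm_attribute"
tests.run_tests_matching("^crm")   # runs nothing: "^" is matched literally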
diff --git a/python/pacemaker/_cts/tests/componentfail.py b/python/pacemaker/_cts/tests/componentfail.py
index 08fae3add8..e21d399feb 100644
--- a/python/pacemaker/_cts/tests/componentfail.py
+++ b/python/pacemaker/_cts/tests/componentfail.py
@@ -1,166 +1,166 @@
"""Kill a pacemaker daemon and test how the cluster recovers."""
__all__ = ["ComponentFail"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
from pacemaker._cts.audits import AuditResource
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
# @TODO Separate this into a separate test for each component, so the patterns
# can be made specific to each component, investigating failures is a little
# easier, and specific testing can be done for each component (for example,
# set attributes before and after killing pacemaker-attrd and check values).
class ComponentFail(CTSTest):
"""Kill a random pacemaker daemon and wait for the cluster to recover."""
def __init__(self, cm):
"""
Create a new ComponentFail instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.is_unsafe = True
self.name = "ComponentFail"
self._complist = cm.components
self._okerrpatterns = []
self._patterns = []
self._startall = SimulStartLite(cm)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
self._patterns = []
self._okerrpatterns = []
# start all nodes
ret = self._startall(None)
if not ret:
return self.failure("Setup failed")
if not self._cm.cluster_stable(self._env["StableTime"]):
return self.failure("Setup failed - unstable")
node_is_dc = self._cm.is_node_dc(node, None)
# select a component to kill
chosen = self._env.random_gen.choice(self._complist)
while chosen.dc_only and not node_is_dc:
chosen = self._env.random_gen.choice(self._complist)
- self.debug("...component %s (dc=%s)" % (chosen.name, node_is_dc))
+ self.debug(f"...component {chosen.name} (dc={node_is_dc})")
self.incr(chosen.name)
if chosen.name != "corosync":
self._patterns.extend([
self.templates["Pat:ChildKilled"] % (node, chosen.name),
self.templates["Pat:ChildRespawn"] % (node, chosen.name),
])
self._patterns.extend(chosen.pats)
if node_is_dc:
self._patterns.extend(chosen.dc_pats)
# @TODO this should be a flag in the Component
if chosen.name in ["corosync", "pacemaker-based", "pacemaker-fenced"]:
# Ignore actions for fence devices if fencer will respawn
# (their registration will be lost, and probes will fail)
self._okerrpatterns = [
self.templates["Pat:Fencing_active"],
]
(_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self._cm, line)
if r.rclass == "stonith":
self._okerrpatterns.extend([
self.templates["Pat:Fencing_recover"] % r.id,
self.templates["Pat:Fencing_probe"] % r.id,
])
# supply a copy so self.patterns doesn't end up empty
tmp_pats = self._patterns.copy()
self._patterns.extend(chosen.badnews_ignore)
# Look for STONITH ops; depending on Env["at-boot"], we might need to change the node's status
stonith_pats = [
self.templates["Pat:Fencing_ok"] % node
]
stonith = self.create_watch(stonith_pats, 0)
stonith.set_watch()
# set the watch for stable
watch = self.create_watch(
tmp_pats, self._env["DeadTime"] + self._env["StableTime"] + self._env["StartTime"])
watch.set_watch()
# kill the component
chosen.kill(node)
self.debug("Waiting for the cluster to recover")
self._cm.cluster_stable()
self.debug("Waiting for any fenced node to come back up")
self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600)
self.debug("Waiting for the cluster to re-stabilize with all nodes")
self._cm.cluster_stable(self._env["StartTime"])
- self.debug("Checking if %s was shot" % node)
+ self.debug(f"Checking if {node} was shot")
shot = stonith.look(60)
if shot:
- self.debug("Found: %r" % shot)
+ self.debug(f"Found: {shot!r}")
self._okerrpatterns.append(self.templates["Pat:Fencing_start"] % node)
if not self._env["at-boot"]:
self._cm.expected_status[node] = "down"
# If fencing occurred, chances are many (if not all) the expected logs
# will not be sent - or will be lost when the node reboots
return self.success()
# check for logs indicating a graceful recovery
matched = watch.look_for_all(allow_multiple_matches=True)
if watch.unmatched:
- self._logger.log("Patterns not found: %r" % watch.unmatched)
+ self._logger.log(f"Patterns not found: {watch.unmatched!r}")
self.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self._cm.cluster_stable(self._env["StartTime"])
if not matched:
- return self.failure("Didn't find all expected %s patterns" % chosen.name)
+ return self.failure(f"Didn't find all expected {chosen.name} patterns")
if not is_stable:
- return self.failure("Cluster did not become stable after killing %s" % chosen.name)
+ return self.failure(f"Cluster did not become stable after killing {chosen.name}")
return self.success()
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
# Note that okerrpatterns refers to the last time we ran this test.
# The good news is that this works fine for us...
self._okerrpatterns.extend(self._patterns)
return self._okerrpatterns
diff --git a/python/pacemaker/_cts/tests/ctstest.py b/python/pacemaker/_cts/tests/ctstest.py
index 7f876b1809..976b34a477 100644
--- a/python/pacemaker/_cts/tests/ctstest.py
+++ b/python/pacemaker/_cts/tests/ctstest.py
@@ -1,246 +1,246 @@
"""Base classes for CTS tests."""
__all__ = ["CTSTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
from pacemaker._cts.environment import EnvFactory
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.patterns import PatternSelector
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.timer import Timer
from pacemaker._cts.watcher import LogWatcher
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
class CTSTest:
"""
The base class for all cluster tests.
This implements a basic set of properties and behaviors like setup, tear
down, time keeping, and statistics tracking. It is up to specific tests
to implement their own specialized behavior on top of this class.
"""
def __init__(self, cm):
"""
Create a new CTSTest instance.
Arguments:
cm -- A ClusterManager instance
"""
# pylint: disable=invalid-name
self.audits = []
self.name = None
self.templates = PatternSelector(cm["Name"])
self.stats = {
"auditfail": 0,
"calls": 0,
"failure": 0,
"skipped": 0,
"success": 0
}
self._cm = cm
self._env = EnvFactory().getInstance()
self._rsh = RemoteFactory().getInstance()
self._logger = LogFactory()
self._timers = {}
self.benchmark = True # which tests to benchmark
self.failed = False
self.is_experimental = False
self.is_loop = False
self.is_unsafe = False
self.is_valgrind = False
self.passed = True
def log(self, args):
"""Log a message."""
self._logger.log(args)
def debug(self, args):
"""Log a debug message."""
self._logger.debug(args)
def get_timer(self, key="test"):
"""Get the start time of the given timer."""
try:
return self._timers[key].start_time
except KeyError:
return 0
def set_timer(self, key="test"):
"""Set the start time of the given timer to now, and return that time."""
if key not in self._timers:
self._timers[key] = Timer(self._logger, self.name, key)
self._timers[key].start()
return self._timers[key].start_time
def log_timer(self, key="test"):
"""Log the elapsed time of the given timer."""
if key not in self._timers:
return
elapsed = self._timers[key].elapsed
- self.debug("%s:%s runtime: %.2f" % (self.name, key, elapsed))
+ self.debug(f"{self.name}:{key} runtime: {elapsed:.2f}")
del self._timers[key]
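These helpers are intended to be used in set/log pairs around a timed action, e.g. (a sketch; the key name is arbitrary):
self.set_timer("recover")    # create and start the "recover" Timer
# ... perform the action being timed ...
self.log_timer("recover")    # logs '<name>:recover runtime: N.NN', then discards the timer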
def incr(self, name):
"""Increment the given stats key."""
if name not in self.stats:
self.stats[name] = 0
self.stats[name] += 1
# Reset the test passed boolean
if name == "calls":
self.passed = True
def failure(self, reason="none"):
"""Increment the failure count, with an optional failure reason."""
self.passed = False
self.incr("failure")
- self._logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
+ self._logger.log(f"{f'Test {self.name}':<35} FAILED: {reason}")
return False
def success(self):
"""Increment the success count."""
self.incr("success")
return True
def skipped(self):
"""Increment the skipped count."""
self.incr("skipped")
return True
def __call__(self, node):
"""Perform this test."""
raise NotImplementedError
def audit(self):
"""Perform all the relevant audits (see ClusterAudit), returning whether or not they all passed."""
passed = True
for audit in self.audits:
if not audit():
- self._logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name))
+ self._logger.log(f"Internal {self.name} Audit {audit.name} FAILED.")
self.incr("auditfail")
passed = False
return passed
def setup(self, node):
"""Set up this test."""
# node is used in subclasses
# pylint: disable=unused-argument
return self.success()
def teardown(self, node):
"""Tear down this test."""
# node is used in subclasses
# pylint: disable=unused-argument
return self.success()
def create_watch(self, patterns, timeout, name=None):
"""
Create a new LogWatcher object.
This object can be used to search log files for matching patterns
during this test's run.
Arguments:
patterns -- A list of regular expressions to match against the log
timeout -- Default number of seconds to watch a log file at a time;
this can be overridden by the timeout= parameter to
self.look on an as-needed basis
name -- A unique name to use when logging about this watch
"""
if not name:
name = self.name
return LogWatcher(self._env["LogFileName"], patterns,
self._env["nodes"], self._env["log_kind"], name,
timeout)
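Typical usage, mirroring the tests later in this series (a sketch):
watch = self.create_watch([self.templates["Pat:DC_IDLE"]], 60)
watch.set_watch()                      # start tailing the logs
# ... trigger the cluster action under test ...
if not watch.look_for_all():
    self._logger.log(f"Patterns not found: {watch.unmatched!r}")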
def local_badnews(self, prefix, watch, local_ignore=None):
"""
Search through log files for messages.
Arguments:
prefix -- The string to look for at the beginning of lines,
or "LocalBadNews:" if None.
watch -- The LogWatcher object to use for searching.
local_ignore -- A list of regexes that, if found in a line, will
cause that line to be ignored.
Return the number of matches found.
"""
errcount = 0
if not prefix:
prefix = "LocalBadNews:"
ignorelist = [" CTS: ", prefix]
if local_ignore:
ignorelist += local_ignore
while errcount < 100:
match = watch.look(0)
if match:
add_err = True
for ignore in ignorelist:
if add_err and re.search(ignore, match):
add_err = False
if add_err:
- self._logger.log("%s %s" % (prefix, match))
+ self._logger.log(f"{prefix} {match}")
errcount += 1
else:
break
else:
self._logger.log("Too many errors!")
watch.end()
return errcount
def is_applicable(self):
"""
Return True if this test is applicable in the current test configuration.
This method must be implemented by all subclasses.
"""
if self.is_loop and not self._env["loop-tests"]:
return False
if self.is_unsafe and not self._env["unsafe-tests"]:
return False
if self.is_valgrind and not self._env["valgrind-tests"]:
return False
if self.is_experimental and not self._env["experimental-tests"]:
return False
if self._env["benchmark"] and not self.benchmark:
return False
return True
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
return []
diff --git a/python/pacemaker/_cts/tests/fliptest.py b/python/pacemaker/_cts/tests/fliptest.py
index 52692f4100..2a87265d7a 100644
--- a/python/pacemaker/_cts/tests/fliptest.py
+++ b/python/pacemaker/_cts/tests/fliptest.py
@@ -1,59 +1,59 @@
"""Stop running nodes, and start stopped nodes."""
__all__ = ["FlipTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import time
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.starttest import StartTest
from pacemaker._cts.tests.stoptest import StopTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class FlipTest(CTSTest):
"""Stop running nodes and start stopped nodes."""
def __init__(self, cm):
"""
Create a new FlipTest instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "Flip"
self._start = StartTest(cm)
self._stop = StopTest(cm)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
if self._cm.expected_status[node] == "up":
self.incr("stopped")
ret = self._stop(node)
kind = "up->down"
# Give the cluster time to recognize it's gone...
time.sleep(self._env["StableTime"])
elif self._cm.expected_status[node] == "down":
self.incr("started")
ret = self._start(node)
kind = "down->up"
else:
return self.skipped()
self.incr(kind)
if ret:
return self.success()
- return self.failure("%s failure" % kind)
+ return self.failure(f"{kind} failure")
diff --git a/python/pacemaker/_cts/tests/maintenancemode.py b/python/pacemaker/_cts/tests/maintenancemode.py
index 7ec061a1aa..5026a6cf0e 100644
--- a/python/pacemaker/_cts/tests/maintenancemode.py
+++ b/python/pacemaker/_cts/tests/maintenancemode.py
@@ -1,228 +1,228 @@
"""Toggle nodes in and out of maintenance mode."""
__all__ = ["MaintenanceMode"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
from pacemaker._cts.audits import AuditResource
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.starttest import StartTest
from pacemaker._cts.timer import Timer
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
class MaintenanceMode(CTSTest):
"""Toggle nodes in and ount of maintenance mode."""
def __init__(self, cm):
"""
Create a new MaintenanceMode instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.benchmark = True
self.name = "MaintenanceMode"
self._action = "asyncmon"
self._rid = "maintenanceDummy"
self._start = StartTest(cm)
self._startall = SimulStartLite(cm)
def _toggle_maintenance_mode(self, node, enabled):
"""Toggle maintenance mode on the given node."""
pats = [
self.templates["Pat:DC_IDLE"]
]
if enabled:
action = "On"
else:
action = "Off"
# fail the resource right after turning Maintenance mode on
# verify it is not recovered until maintenance mode is turned off
if enabled:
pats.append(self.templates["Pat:RscOpFail"] % (self._action, self._rid))
else:
pats.extend([
self.templates["Pat:RscOpOK"] % ("stop", self._rid),
self.templates["Pat:RscOpOK"] % ("start", self._rid)
])
watch = self.create_watch(pats, 60)
watch.set_watch()
- self.debug("Turning maintenance mode %s" % action)
- self._rsh(node, self.templates["MaintenanceMode%s" % action])
+ self.debug(f"Turning maintenance mode {action}")
+ self._rsh(node, self.templates[f"MaintenanceMode{action}"])
if enabled:
- self._rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self._rid, node))
+ self._rsh(node, f"crm_resource -V -F -r {self._rid} -H {node} &>/dev/null")
- with Timer(self._logger, self.name, "recover%s" % action):
+ with Timer(self._logger, self.name, f"recover{action}"):
watch.look_for_all()
if watch.unmatched:
- self.debug("Failed to find patterns when turning maintenance mode %s" % action)
+ self.debug(f"Failed to find patterns when turning maintenance mode {action}")
return repr(watch.unmatched)
return ""
def _insert_maintenance_dummy(self, node):
"""Create a dummy resource on the given node."""
pats = [
- ("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self._rid))
+ f"{node}.*" + (self.templates["Pat:RscOpOK"] % ("start", self._rid))
]
watch = self.create_watch(pats, 60)
watch.set_watch()
self._cm.add_dummy_rsc(node, self._rid)
with Timer(self._logger, self.name, "addDummy"):
watch.look_for_all()
if watch.unmatched:
self.debug("Failed to find patterns when adding maintenance dummy resource")
return repr(watch.unmatched)
return ""
def _remove_maintenance_dummy(self, node):
"""Remove the previously created dummy resource on the given node."""
pats = [
self.templates["Pat:RscOpOK"] % ("stop", self._rid)
]
watch = self.create_watch(pats, 60)
watch.set_watch()
self._cm.remove_dummy_rsc(node, self._rid)
with Timer(self._logger, self.name, "removeDummy"):
watch.look_for_all()
if watch.unmatched:
self.debug("Failed to find patterns when removing maintenance dummy resource")
return repr(watch.unmatched)
return ""
def _managed_rscs(self, node):
"""Return a list of all resources managed by the cluster."""
rscs = []
(_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
for line in lines:
if re.search("^Resource", line):
tmp = AuditResource(self._cm, line)
if tmp.managed:
rscs.append(tmp.id)
return rscs
def _verify_resources(self, node, rscs, managed):
"""Verify that all resources are managed or unmanaged as expected."""
managed_rscs = rscs
managed_str = "managed"
if not managed:
managed_str = "unmanaged"
(_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
for line in lines:
if re.search("^Resource", line):
tmp = AuditResource(self._cm, line)
if managed and not tmp.managed:
continue
if not managed and tmp.managed:
continue
if managed_rscs.count(tmp.id):
managed_rscs.remove(tmp.id)
if not managed_rscs:
- self.debug("Found all %s resources on %s" % (managed_str, node))
+ self.debug(f"Found all {managed_str} resources on {node}")
return True
- self._logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managed_rscs))
+ self._logger.log(f"Could not find all {managed_str} resources on {node}. {managed_rscs}")
return False
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
verify_managed = False
verify_unmanaged = False
fail_pat = ""
if not self._startall(None):
return self.failure("Setup failed")
# get a list of all the managed resources. We use this list
# after enabling maintenance mode to verify all managed resources
# become un-managed. After maintenance mode is turned off, we use
# this list to verify all the resources become managed again.
managed_rscs = self._managed_rscs(node)
if not managed_rscs:
- self._logger.log("No managed resources on %s" % node)
+ self._logger.log(f"No managed resources on {node}")
return self.skipped()
# insert a fake resource we can fail during maintenance mode
# so we can verify recovery does not take place until after maintenance
# mode is disabled.
fail_pat += self._insert_maintenance_dummy(node)
# toggle maintenance mode ON, then fail dummy resource.
fail_pat += self._toggle_maintenance_mode(node, True)
# verify all the resources are now unmanaged
if self._verify_resources(node, managed_rscs, False):
verify_unmanaged = True
# Toggle maintenance mode OFF, verify dummy is recovered.
fail_pat += self._toggle_maintenance_mode(node, False)
# verify all the resources are now managed again
if self._verify_resources(node, managed_rscs, True):
verify_managed = True
# Remove our maintenance dummy resource.
fail_pat += self._remove_maintenance_dummy(node)
self._cm.cluster_stable()
if fail_pat != "":
- return self.failure("Unmatched patterns: %s" % fail_pat)
+ return self.failure(f"Unmatched patterns: {fail_pat}")
if not verify_unmanaged:
return self.failure("Failed to verify resources became unmanaged during maintenance mode")
if not verify_managed:
return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode")
return self.success()
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
return [
- r"Updating failcount for %s" % self._rid,
- r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self._rid,
+ f"Updating failcount for {self._rid}",
+ fr"schedulerd.*: Recover\s+{self._rid}\s+\(.*\)",
r"Unknown operation: fail",
self.templates["Pat:RscOpOK"] % (self._action, self._rid),
- r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self._rid, self._action, 0)
+ f"(ERROR|error).*: Action {self._rid}_{self._action}_0 .* initiated outside of a transition",
]
diff --git a/python/pacemaker/_cts/tests/nearquorumpointtest.py b/python/pacemaker/_cts/tests/nearquorumpointtest.py
index 343cc6e162..955926a028 100644
--- a/python/pacemaker/_cts/tests/nearquorumpointtest.py
+++ b/python/pacemaker/_cts/tests/nearquorumpointtest.py
@@ -1,121 +1,121 @@
"""Randomly start and stop nodes to bring the cluster close to the quorum point."""
__all__ = ["NearQuorumPointTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class NearQuorumPointTest(CTSTest):
"""Randomly start and stop nodes to bring the cluster close to the quorum point."""
def __init__(self, cm):
"""
Create a new NearQuorumPointTest instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "NearQuorumPoint"
def __call__(self, dummy):
"""Perform this test."""
self.incr("calls")
startset = []
stopset = []
stonith = self._cm.prepare_fencing_watcher()
# decide what to do with each node
for node in self._env["nodes"]:
action = self._env.random_gen.choice(["start", "stop"])
if action == "start":
startset.append(node)
elif action == "stop":
stopset.append(node)
- self.debug("start nodes:%r" % startset)
- self.debug("stop nodes:%r" % stopset)
+ self.debug(f"start nodes:{startset!r}")
+ self.debug(f"stop nodes:{stopset!r}")
# add search patterns
watchpats = []
for node in stopset:
if self._cm.expected_status[node] == "up":
watchpats.append(self.templates["Pat:We_stopped"] % node)
for node in startset:
if self._cm.expected_status[node] == "down":
watchpats.append(self.templates["Pat:Local_started"] % node)
else:
for stopping in stopset:
if self._cm.expected_status[stopping] == "up":
watchpats.append(self.templates["Pat:They_stopped"] % (node, stopping))
if not watchpats:
return self.skipped()
if startset:
watchpats.append(self.templates["Pat:DC_IDLE"])
watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
# begin actions
for node in stopset:
if self._cm.expected_status[node] == "up":
self._cm.stop_cm_async(node)
for node in startset:
if self._cm.expected_status[node] == "down":
self._cm.start_cm_async(node)
# get the result
if watch.look_for_all():
self._cm.cluster_stable()
self._cm.fencing_cleanup("NearQuorumPoint", stonith)
return self.success()
- self._logger.log("Warn: Patterns not found: %r" % watch.unmatched)
+ self._logger.log(f"Warn: Patterns not found: {watch.unmatched!r}")
# get the "bad" nodes
upnodes = []
for node in stopset:
if self._cm.stat_cm(node):
upnodes.append(node)
downnodes = []
for node in startset:
if not self._cm.stat_cm(node):
downnodes.append(node)
self._cm.fencing_cleanup("NearQuorumPoint", stonith)
if not upnodes and not downnodes:
self._cm.cluster_stable()
# Make sure they're completely down with no residue
for node in stopset:
self._rsh(node, self.templates["StopCmd"])
return self.success()
if upnodes:
- self._logger.log("Warn: Unstoppable nodes: %r" % upnodes)
+ self._logger.log(f"Warn: Unstoppable nodes: {upnodes!r}")
if downnodes:
- self._logger.log("Warn: Unstartable nodes: %r" % downnodes)
+ self._logger.log(f"Warn: Unstartable nodes: {downnodes!r}")
return self.failure()
diff --git a/python/pacemaker/_cts/tests/partialstart.py b/python/pacemaker/_cts/tests/partialstart.py
index 0cee4f311a..9455a7b486 100644
--- a/python/pacemaker/_cts/tests/partialstart.py
+++ b/python/pacemaker/_cts/tests/partialstart.py
@@ -1,72 +1,72 @@
"""Start a node and then tell it to stop before it is fully running."""
__all__ = ["PartialStart"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.simulstoplite import SimulStopLite
from pacemaker._cts.tests.stoptest import StopTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class PartialStart(CTSTest):
"""Interrupt a node before it's finished starting up."""
def __init__(self, cm):
"""
Create a new PartialStart instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "PartialStart"
self._startall = SimulStartLite(cm)
self._stop = StopTest(cm)
self._stopall = SimulStopLite(cm)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
ret = self._stopall(None)
if not ret:
return self.failure("Setup failed")
watchpats = [
"pacemaker-controld.*Connecting to .* cluster layer"
]
watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
self._cm.start_cm_async(node)
ret = watch.look_for_all()
if not ret:
- self._logger.log("Patterns not found: %r" % watch.unmatched)
- return self.failure("Setup of %s failed" % node)
+ self._logger.log(f"Patterns not found: {watch.unmatched!r}")
+ return self.failure(f"Setup of {node} failed")
ret = self._stop(node)
if not ret:
- return self.failure("%s did not stop in time" % node)
+ return self.failure(f"{node} did not stop in time")
return self.success()
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
# We might do some fencing in the 2-node case if we make it up far enough
return [
r"Executing reboot fencing operation",
r"Requesting fencing \([^)]+\) targeting node "
]
diff --git a/python/pacemaker/_cts/tests/reattach.py b/python/pacemaker/_cts/tests/reattach.py
index beb8f8bfad..6d445e9818 100644
--- a/python/pacemaker/_cts/tests/reattach.py
+++ b/python/pacemaker/_cts/tests/reattach.py
@@ -1,207 +1,207 @@
"""Restart the cluster and verify resources remain running."""
__all__ = ["Reattach"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
import time
from pacemaker.exitstatus import ExitStatus
from pacemaker._cts.audits import AuditResource
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.simulstoplite import SimulStopLite
from pacemaker._cts.tests.starttest import StartTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
class Reattach(CTSTest):
"""Restart the cluster and verify that resources remain running throughout."""
def __init__(self, cm):
"""
Create a new Reattach instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "Reattach"
self._startall = SimulStartLite(cm)
self._stopall = SimulStopLite(cm)
def _is_managed(self, node):
"""Return whether resources are managed by the cluster."""
(_, is_managed) = self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", verbose=1)
is_managed = is_managed[0].strip()
return is_managed == "true"
def _set_unmanaged(self, node):
"""Disable resource management."""
self.debug("Disable resource management")
self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false")
def _set_managed(self, node):
"""Enable resource management."""
self.debug("Re-enable resource management")
self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D")
def _disable_incompatible_rscs(self, node):
"""
Disable resources that are incompatible with this test.
Starts and stops of stonith-class resources are implemented internally
by Pacemaker, which means that they must stop when Pacemaker is
stopped, even if unmanaged. Disable them before running the Reattach
test so they don't affect resource placement.
Set target-role to "Stopped" for any of these resources in the CIB.
"""
self.debug("Disable incompatible resources")
xml = """'
' --scope rsc_defaults"""
return self._rsh(node, self._cm.templates['CibAddXml'] % xml)
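Purely as a hypothetical illustration (the ids and structure are assumptions, not this module's actual markup), a rsc_defaults block that stops stonith-class resources could look like:
# <meta_attributes id="example-reattach-meta">
#   <rule id="example-reattach-rule" score="INFINITY">
#     <rsc_expression id="example-reattach-stonith" class="stonith"/>
#   </rule>
#   <nvpair id="example-reattach-role" name="target-role" value="Stopped"/>
# </meta_attributes>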
def _enable_incompatible_rscs(self, node):
"""Re-enable resources that were incompatible with this test."""
self.debug("Re-enable incompatible resources")
xml = """"""
- return self._rsh(node, """cibadmin --delete --xml-text '%s'""" % xml)
+ return self._rsh(node, f"""cibadmin --delete --xml-text '{xml}'""")
def _reprobe(self, node):
"""
Reprobe all resources.
The placement of some resources (such as promotable-1 in the
lab-generated CIB) is affected by constraints using node-attribute-based
rules. An earlier test may have erased the relevant node attribute, so
do a reprobe, which should add the attribute back.
"""
return self._rsh(node, """crm_resource --refresh""")
def setup(self, node):
"""Set up this test."""
if not self._startall(None):
return self.failure("Startall failed")
(rc, _) = self._disable_incompatible_rscs(node)
if rc != ExitStatus.OK:
return self.failure("Couldn't modify CIB to stop incompatible resources")
(rc, _) = self._reprobe(node)
if rc != ExitStatus.OK:
return self.failure("Couldn't reprobe resources")
if not self._cm.cluster_stable(double_check=True):
return self.failure("Cluster did not stabilize after setup")
return self.success()
def teardown(self, node):
"""Tear down this test."""
# Make sure 'node' is up
start = StartTest(self._cm)
start(node)
if not self._is_managed(node):
self._set_managed(node)
(rc, _) = self._enable_incompatible_rscs(node)
if rc != ExitStatus.OK:
return self.failure("Couldn't modify CIB to re-enable incompatible resources")
if not self._cm.cluster_stable():
return self.failure("Cluster did not stabilize after teardown")
if not self._is_managed(node):
return self.failure("Could not re-enable resource management")
return self.success()
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
# Conveniently, the scheduler will display this message when disabling
# management, even if fencing is not enabled, so we can rely on it.
managed = self.create_watch(["No fencing will be done"], 60)
managed.set_watch()
self._set_unmanaged(node)
if not managed.look_for_all():
- self._logger.log("Patterns not found: %r" % managed.unmatched)
+ self._logger.log(f"Patterns not found: {managed.unmatched!r}")
return self.failure("Resource management not disabled")
pats = [
self.templates["Pat:RscOpOK"] % ("start", ".*"),
self.templates["Pat:RscOpOK"] % ("stop", ".*"),
self.templates["Pat:RscOpOK"] % ("promote", ".*"),
self.templates["Pat:RscOpOK"] % ("demote", ".*"),
self.templates["Pat:RscOpOK"] % ("migrate", ".*")
]
watch = self.create_watch(pats, 60, "ShutdownActivity")
watch.set_watch()
self.debug("Shutting down the cluster")
ret = self._stopall(None)
if not ret:
self._set_managed(node)
return self.failure("Couldn't shut down the cluster")
self.debug("Bringing the cluster back up")
ret = self._startall(None)
time.sleep(5) # allow ping to update the CIB
if not ret:
self._set_managed(node)
return self.failure("Couldn't restart the cluster")
if self.local_badnews("ResourceActivity:", watch):
self._set_managed(node)
return self.failure("Resources stopped or started during cluster restart")
watch = self.create_watch(pats, 60, "StartupActivity")
watch.set_watch()
# Re-enable resource management (and verify it happened).
self._set_managed(node)
self._cm.cluster_stable()
if not self._is_managed(node):
return self.failure("Could not re-enable resource management")
# Ignore actions for STONITH resources
ignore = []
(_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self._cm, line)
if r.rclass == "stonith":
- self.debug("Ignoring start actions for %s" % r.id)
+ self.debug(f"Ignoring start actions for {r.id}")
ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id))
if self.local_badnews("ResourceActivity:", watch, ignore):
return self.failure("Resources stopped or started after resource management was re-enabled")
return ret
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
return [
r"resource( was|s were) active at shutdown"
]
diff --git a/python/pacemaker/_cts/tests/remotedriver.py b/python/pacemaker/_cts/tests/remotedriver.py
index c24fe7f24d..a0d916d7b4 100644
--- a/python/pacemaker/_cts/tests/remotedriver.py
+++ b/python/pacemaker/_cts/tests/remotedriver.py
@@ -1,542 +1,542 @@
"""Base classes for CTS tests."""
__all__ = ["RemoteDriver"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
import time
import subprocess
import tempfile
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.starttest import StartTest
from pacemaker._cts.tests.stoptest import StopTest
from pacemaker._cts.timer import Timer
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
class RemoteDriver(CTSTest):
"""
A specialized base class for cluster tests that run on Pacemaker Remote nodes.
This builds on top of CTSTest to provide methods for starting and stopping
services and resources, and managing remote nodes. This is still just an
abstract class -- specific tests need to implement their own specialized
behavior.
"""
def __init__(self, cm):
"""
Create a new RemoteDriver instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "RemoteDriver"
self._corosync_enabled = False
self._pacemaker_enabled = False
self._remote_node = None
self._remote_rsc = "remote-rsc"
self._start = StartTest(cm)
self._startall = SimulStartLite(cm)
self._stop = StopTest(cm)
self.reset()
def reset(self):
"""Reset the state of this test back to what it was before the test was run."""
self.failed = False
self.fail_string = ""
self._pcmk_started = False
self._remote_node_added = False
self._remote_rsc_added = False
self._remote_use_reconnect_interval = self._env.random_gen.choice([True, False])
def fail(self, msg):
"""Mark test as failed."""
self.failed = True
# Always log the failure.
self._logger.log(msg)
# Use first failure as test status, as it's likely to be most useful.
if not self.fail_string:
self.fail_string = msg
def _get_other_node(self, node):
"""
Get the first cluster node out of the environment that is not the given node.
Typically, this is used to find some node that will still be active that
we can run cluster commands on.
"""
for othernode in self._env["nodes"]:
if othernode == node:
# We don't want to try to use the CIB on the node we just shut down;
# find a cluster node that is not our soon-to-be remote node.
continue
return othernode
def _del_rsc(self, node, rsc):
"""
Delete the given named resource from the cluster.
The given `node` is the cluster node on which we should *not* run the
delete command.
"""
othernode = self._get_other_node(node)
- (rc, _) = self._rsh(othernode, "crm_resource -D -r %s -t primitive" % rsc)
+ (rc, _) = self._rsh(othernode, f"crm_resource -D -r {rsc} -t primitive")
if rc != 0:
- self.fail("Removal of resource '%s' failed" % rsc)
+ self.fail(f"Removal of resource '{rsc}' failed")
def _add_rsc(self, node, rsc_xml):
"""
Add a resource given in XML format to the cluster.
The given `node` is the cluster node on which we should *not* run the
add command.
"""
othernode = self._get_other_node(node)
- (rc, _) = self._rsh(othernode, "cibadmin -C -o resources -X '%s'" % rsc_xml)
+ (rc, _) = self._rsh(othernode, f"cibadmin -C -o resources -X '{rsc_xml}'")
if rc != 0:
self.fail("resource creation failed")
def _add_primitive_rsc(self, node):
"""
Add a primitive heartbeat resource for the remote node to the cluster.
The given `node` is the cluster node on which we should *not* run the
add command.
"""
- rsc_xml = """
-
-
+ rsc_xml = f"""
+
+
-
+
-""" % {"node": self._remote_rsc}
+"""
self._add_rsc(node, rsc_xml)
if not self.failed:
self._remote_rsc_added = True
def _add_connection_rsc(self, node):
"""
Add a primitive connection resource for the remote node to the cluster.
The given `node` is the cluster node on which we should *not* run the
add command.
"""
- rsc_xml = """
-
-
-
-""" % {"node": self._remote_node, "server": node}
+ rsc_xml = f"""
+
+
+
+"""
if self._remote_use_reconnect_interval:
# Set reconnect interval on resource
- rsc_xml += """
-
-""" % self._remote_node
+ rsc_xml += f"""
+
+"""
- rsc_xml += """
+ rsc_xml += f"""
-
-
+
+
-""" % {"node": self._remote_node}
+"""
self._add_rsc(node, rsc_xml)
if not self.failed:
self._remote_node_added = True
def _disable_services(self, node):
"""Disable the corosync and pacemaker services on the given node."""
self._corosync_enabled = self._env.service_is_enabled(node, "corosync")
if self._corosync_enabled:
self._env.disable_service(node, "corosync")
self._pacemaker_enabled = self._env.service_is_enabled(node, "pacemaker")
if self._pacemaker_enabled:
self._env.disable_service(node, "pacemaker")
def _enable_services(self, node):
"""Enable the corosync and pacemaker services on the given node."""
if self._corosync_enabled:
self._env.enable_service(node, "corosync")
if self._pacemaker_enabled:
self._env.enable_service(node, "pacemaker")
def _stop_pcmk_remote(self, node):
"""Stop the Pacemaker Remote service on the given node."""
for _ in range(10):
(rc, _) = self._rsh(node, "service pacemaker_remote stop")
if rc != 0:
time.sleep(6)
else:
break
def _start_pcmk_remote(self, node):
"""Start the Pacemaker Remote service on the given node."""
for _ in range(10):
(rc, _) = self._rsh(node, "service pacemaker_remote start")
if rc != 0:
time.sleep(6)
else:
self._pcmk_started = True
break
def _freeze_pcmk_remote(self, node):
"""Simulate a Pacemaker Remote daemon failure."""
self._rsh(node, "killall -STOP pacemaker-remoted")
def _resume_pcmk_remote(self, node):
"""Simulate the Pacemaker Remote daemon recovering."""
self._rsh(node, "killall -CONT pacemaker-remoted")
def _start_metal(self, node):
"""
Set up a Pacemaker Remote configuration.
Remove any existing connection resources or nodes. Start the
pacemaker_remote service. Create a connection resource.
"""
# Cluster nodes are reused as remote nodes in remote tests. If cluster
# services were enabled at boot, then if the remote node got fenced, the
# cluster node would join instead of the expected remote one, and meanwhile
# pacemaker_remote would not be able to start. Depending on timing, the
# situation might no longer be recoverable gracefully.
#
# Temporarily disable any enabled cluster services.
self._disable_services(node)
# make sure the resource doesn't already exist for some reason
- self._rsh(node, "crm_resource -D -r %s -t primitive" % self._remote_rsc)
- self._rsh(node, "crm_resource -D -r %s -t primitive" % self._remote_node)
+ self._rsh(node, f"crm_resource -D -r {self._remote_rsc} -t primitive")
+ self._rsh(node, f"crm_resource -D -r {self._remote_node} -t primitive")
if not self._stop(node):
- self.fail("Failed to shutdown cluster node %s" % node)
+ self.fail(f"Failed to shutdown cluster node {node}")
return
self._start_pcmk_remote(node)
if not self._pcmk_started:
- self.fail("Failed to start pacemaker_remote on node %s" % node)
+ self.fail(f"Failed to start pacemaker_remote on node {node}")
return
# Convert node to baremetal now that it has shut down the cluster stack
pats = []
watch = self.create_watch(pats, 120)
watch.set_watch()
pats.extend([
self.templates["Pat:RscOpOK"] % ("start", self._remote_node),
self.templates["Pat:DC_IDLE"]
])
self._add_connection_rsc(node)
with Timer(self._logger, self.name, "remoteMetalInit"):
watch.look_for_all()
if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
+ self.fail(f"Unmatched patterns: {watch.unmatched}")
def migrate_connection(self, node):
"""Move the remote connection resource to any other available node."""
if self.failed:
return
pats = [
self.templates["Pat:RscOpOK"] % ("migrate_to", self._remote_node),
self.templates["Pat:RscOpOK"] % ("migrate_from", self._remote_node),
self.templates["Pat:DC_IDLE"]
]
watch = self.create_watch(pats, 120)
watch.set_watch()
- (rc, _) = self._rsh(node, "crm_resource -M -r %s" % self._remote_node, verbose=1)
+ (rc, _) = self._rsh(node, f"crm_resource -M -r {self._remote_node}", verbose=1)
if rc != 0:
self.fail("failed to move remote node connection resource")
return
with Timer(self._logger, self.name, "remoteMetalMigrate"):
watch.look_for_all()
if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
+ self.fail(f"Unmatched patterns: {watch.unmatched}")
def fail_rsc(self, node):
"""
Cause the dummy resource running on a Pacemaker Remote node to fail.
Verify that the failure is logged correctly.
"""
if self.failed:
return
watchpats = [
self.templates["Pat:RscRemoteOpOK"] % ("stop", self._remote_rsc, self._remote_node),
self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node),
self.templates["Pat:DC_IDLE"]
]
watch = self.create_watch(watchpats, 120)
watch.set_watch()
self.debug("causing dummy rsc to fail.")
self._rsh(node, "rm -f /var/run/resource-agents/Dummy*")
with Timer(self._logger, self.name, "remoteRscFail"):
watch.look_for_all()
if watch.unmatched:
- self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched)
+ self.fail(f"Unmatched patterns during rsc fail: {watch.unmatched}")
def fail_connection(self, node):
"""
Cause the remote connection resource to fail.
Verify that the node is fenced and the connection resource is restarted
on another node.
"""
if self.failed:
return
watchpats = [
self.templates["Pat:Fencing_ok"] % self._remote_node,
self.templates["Pat:NodeFenced"] % self._remote_node
]
watch = self.create_watch(watchpats, 120)
watch.set_watch()
# Freeze the pcmk remote daemon. This will result in fencing.
self.debug("Force stopped active remote node")
self._freeze_pcmk_remote(node)
self.debug("Waiting for remote node to be fenced.")
with Timer(self._logger, self.name, "remoteMetalFence"):
watch.look_for_all()
if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
+ self.fail(f"Unmatched patterns: {watch.unmatched}")
return
self.debug("Waiting for the remote node to come back up")
self._cm.ns.wait_for_node(node, 120)
pats = []
watch = self.create_watch(pats, 240)
watch.set_watch()
pats.append(self.templates["Pat:RscOpOK"] % ("start", self._remote_node))
if self._remote_rsc_added:
pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node))
# Start the remote node again and watch it integrate back into the cluster.
self._start_pcmk_remote(node)
if not self._pcmk_started:
- self.fail("Failed to start pacemaker_remote on node %s" % node)
+ self.fail(f"Failed to start pacemaker_remote on node {node}")
return
self.debug("Waiting for remote node to rejoin cluster after being fenced.")
with Timer(self._logger, self.name, "remoteMetalRestart"):
watch.look_for_all()
if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
+ self.fail(f"Unmatched patterns: {watch.unmatched}")
def _add_dummy_rsc(self, node):
"""Add a dummy resource that runs on the Pacemaker Remote node."""
if self.failed:
return
# verify we can put a resource on the remote node
pats = []
watch = self.create_watch(pats, 120)
watch.set_watch()
pats.extend([
self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node),
self.templates["Pat:DC_IDLE"]
])
# Add a resource that must live on remote-node
self._add_primitive_rsc(node)
# force that rsc to prefer the remote node.
- (rc, _) = self._cm.rsh(node, "crm_resource -M -r %s -N %s -f" % (self._remote_rsc, self._remote_node), verbose=1)
+ (rc, _) = self._cm.rsh(node, f"crm_resource -M -r {self._remote_rsc} -N {self._remote_node} -f", verbose=1)
if rc != 0:
self.fail("Failed to place remote resource on remote node.")
return
with Timer(self._logger, self.name, "remoteMetalRsc"):
watch.look_for_all()
if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
+ self.fail(f"Unmatched patterns: {watch.unmatched}")
def test_attributes(self, node):
"""Verify that attributes can be set on the Pacemaker Remote node."""
if self.failed:
return
# This verifies permanent attributes can be set on a remote-node. It also
# verifies the remote-node can edit its own cib node section remotely.
- (rc, line) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % self._remote_node, verbose=1)
+ (rc, line) = self._cm.rsh(node, f"crm_attribute -l forever -n testattr -v testval -N {self._remote_node}", verbose=1)
if rc != 0:
- self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line))
+ self.fail(f"Failed to set remote-node attribute. rc:{rc} output:{line}")
return
- (rc, _) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % self._remote_node, verbose=1)
+ (rc, _) = self._cm.rsh(node, f"crm_attribute -l forever -n testattr -q -N {self._remote_node}", verbose=1)
if rc != 0:
self.fail("Failed to get remote-node attribute")
return
- (rc, _) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % self._remote_node, verbose=1)
+ (rc, _) = self._cm.rsh(node, f"crm_attribute -l forever -n testattr -D -N {self._remote_node}", verbose=1)
if rc != 0:
self.fail("Failed to delete remote-node attribute")
def cleanup_metal(self, node):
"""
Clean up the Pacemaker Remote node configuration previously created by _start_metal.
Stop and remove dummy resources and connection resources. Stop the
pacemaker_remote service. Remove the remote node itself.
"""
self._enable_services(node)
if not self._pcmk_started:
return
pats = []
watch = self.create_watch(pats, 120)
watch.set_watch()
if self._remote_rsc_added:
pats.append(self.templates["Pat:RscOpOK"] % ("stop", self._remote_rsc))
if self._remote_node_added:
pats.append(self.templates["Pat:RscOpOK"] % ("stop", self._remote_node))
with Timer(self._logger, self.name, "remoteMetalCleanup"):
self._resume_pcmk_remote(node)
if self._remote_rsc_added:
# Remove dummy resource added for remote node tests
self.debug("Cleaning up dummy rsc put on remote node")
- self._rsh(self._get_other_node(node), "crm_resource -U -r %s" % self._remote_rsc)
+ self._rsh(self._get_other_node(node), f"crm_resource -U -r {self._remote_rsc}")
self._del_rsc(node, self._remote_rsc)
if self._remote_node_added:
# Remove remote node's connection resource
self.debug("Cleaning up remote node connection resource")
- self._rsh(self._get_other_node(node), "crm_resource -U -r %s" % self._remote_node)
+ self._rsh(self._get_other_node(node), f"crm_resource -U -r {self._remote_node}")
self._del_rsc(node, self._remote_node)
watch.look_for_all()
if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
+ self.fail(f"Unmatched patterns: {watch.unmatched}")
self._stop_pcmk_remote(node)
self.debug("Waiting for the cluster to recover")
self._cm.cluster_stable()
if self._remote_node_added:
# Remove remote node itself
self.debug("Cleaning up node entry for remote node")
- self._rsh(self._get_other_node(node), "crm_node --force --remove %s" % self._remote_node)
+ self._rsh(self._get_other_node(node), f"crm_node --force --remove {self._remote_node}")
def _setup_env(self, node):
"""
Set up the environment to allow Pacemaker Remote to function.
This involves generating a key and copying it to all nodes in the cluster.
"""
- self._remote_node = "remote-%s" % node
+ self._remote_node = f"remote-{node}"
# We assume that if all nodes have a key, it is the right key. If any
# node doesn't have a remote key, we regenerate it everywhere.
if self._rsh.exists_on_all("/etc/pacemaker/authkey", self._env["nodes"]):
return
# create key locally
(handle, keyfile) = tempfile.mkstemp(".cts")
os.close(handle)
- subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"],
+ subprocess.check_call(["dd", "if=/dev/urandom", f"of={keyfile}", "bs=4096", "count=1"],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# sync key throughout the cluster
for n in self._env["nodes"]:
self._rsh(n, "mkdir -p --mode=0750 /etc/pacemaker")
- self._rsh.copy(keyfile, "root@%s:/etc/pacemaker/authkey" % n)
+ self._rsh.copy(keyfile, f"root@{n}:/etc/pacemaker/authkey")
self._rsh(n, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey")
self._rsh(n, "chmod 0640 /etc/pacemaker/authkey")
os.unlink(keyfile)
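For comparison, the 4096-byte key the dd invocation above produces could be generated in pure Python with the secrets module. A standalone sketch (not what the test runs; it deliberately shells out to dd):

    import os
    import secrets
    import tempfile

    # Create a throwaway key file the same size as the dd call above
    (handle, keyfile) = tempfile.mkstemp(".cts")
    os.write(handle, secrets.token_bytes(4096))
    os.close(handle)
    print(f"generated {keyfile}")
    os.unlink(keyfile)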
def is_applicable(self):
"""Return True if this test is applicable in the current test configuration."""
if not CTSTest.is_applicable(self):
return False
for node in self._env["nodes"]:
(rc, _) = self._rsh(node, "which pacemaker-remoted >/dev/null 2>&1")
if rc != 0:
return False
return True
def start_new_test(self, node):
"""Prepare a remote test for running by setting up its environment and resources."""
self.incr("calls")
self.reset()
ret = self._startall(None)
if not ret:
return self.failure("setup failed: could not start all nodes")
self._setup_env(node)
self._start_metal(node)
self._add_dummy_rsc(node)
return True
def __call__(self, node):
"""Perform this test."""
raise NotImplementedError
@property
def errors_to_ignore(self):
"""Return list of errors which should be ignored."""
return [
r"""is running on remote.*which isn't allowed""",
r"""Connection terminated""",
r"""Could not send remote"""
]
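Nearly every hunk in this patch is the same mechanical conversion from %-interpolation to f-strings. A standalone illustration of the equivalences relied on throughout (values are made up): %s becomes plain interpolation, %r becomes the !r conversion, and %d becomes an interpolated integer.

    node = "remote-rhel9-1"
    failed = ["rhel9-2", "rhel9-3"]

    assert ("Failed to start pacemaker_remote on node %s" % node
            == f"Failed to start pacemaker_remote on node {node}")
    assert ("Could not restart %d nodes: %r" % (len(failed), failed)
            == f"Could not restart {len(failed)} nodes: {failed!r}")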
diff --git a/python/pacemaker/_cts/tests/resourcerecover.py b/python/pacemaker/_cts/tests/resourcerecover.py
index e4c1336ea1..2d25900b8f 100644
--- a/python/pacemaker/_cts/tests/resourcerecover.py
+++ b/python/pacemaker/_cts/tests/resourcerecover.py
@@ -1,170 +1,171 @@
"""Fail a random resource and verify its fail count increases."""
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.audits import AuditResource
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.starttest import StartTest
from pacemaker._cts.timer import Timer
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
class ResourceRecover(CTSTest):
"""Fail a random resource."""
def __init__(self, cm):
"""
Create a new ResourceRecover instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.benchmark = True
self.name = "ResourceRecover"
self._action = "asyncmon"
self._interval = 0
self._rid = None
self._rid_alt = None
self._start = StartTest(cm)
self._startall = SimulStartLite(cm)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
if not self._startall(None):
return self.failure("Setup failed")
# List all resources active on the node (skip test if none)
resourcelist = self._cm.active_resources(node)
if not resourcelist:
- self._logger.log("No active resources on %s" % node)
+ self._logger.log(f"No active resources on {node}")
return self.skipped()
# Choose one resource at random
rsc = self._choose_resource(node, resourcelist)
if rsc is None:
- return self.failure("Could not get details of resource '%s'" % self._rid)
+ return self.failure(f"Could not get details of resource '{self._rid}'")
if rsc.id == rsc.clone_id:
- self.debug("Failing %s" % rsc.id)
+ self.debug(f"Failing {rsc.id}")
else:
- self.debug("Failing %s (also known as %s)" % (rsc.id, rsc.clone_id))
+ self.debug(f"Failing {rsc.id} (also known as {rsc.clone_id})")
# Log patterns to watch for (failure, plus restart if managed)
pats = [
self.templates["Pat:CloneOpFail"] % (self._action, rsc.id, rsc.clone_id)
]
if rsc.managed:
pats.append(self.templates["Pat:RscOpOK"] % ("stop", self._rid))
if rsc.unique:
pats.append(self.templates["Pat:RscOpOK"] % ("start", self._rid))
else:
# Anonymous clones may get restarted with a different clone number
pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
# Fail resource. (Ideally, we'd fail it twice, to ensure the fail count
# is incrementing properly, but it might restart on a different node.
# We'd have to temporarily ban it from all other nodes and ensure the
# migration-threshold hasn't been reached.)
if self._fail_resource(rsc, node, pats) is None:
# self.failure() already called
return None
return self.success()
def _choose_resource(self, node, resourcelist):
"""Choose a random resource to target."""
self._rid = self._env.random_gen.choice(resourcelist)
self._rid_alt = self._rid
(_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
for line in lines:
if line.startswith("Resource: "):
rsc = AuditResource(self._cm, line)
if rsc.id == self._rid:
# Handle anonymous clones that get renamed
self._rid = rsc.clone_id
return rsc
return None
def _get_failcount(self, node):
"""Check the fail count of targeted resource on given node."""
cmd = "crm_failcount --quiet --query --resource %s --operation %s --interval %d --node %s"
(rc, lines) = self._rsh(node, cmd % (self._rid, self._action, self._interval, node),
verbose=1)
if rc != 0 or len(lines) != 1:
lines = [line.strip() for line in lines]
- self._logger.log("crm_failcount on %s failed (%d): %s" % (node, rc, " // ".join(lines)))
+ s = " // ".join(lines)
+ self._logger.log(f"crm_failcount on {node} failed ({rc}): {s}")
return -1
try:
failcount = int(lines[0])
except (IndexError, ValueError):
- self._logger.log("crm_failcount output on %s unparseable: %s" % (node, " ".join(lines)))
+ s = " ".join(lines)
+ self._logger.log(f"crm_failcount output on {node} unparseable: {s}")
return -1
return failcount
def _fail_resource(self, rsc, node, pats):
"""Fail the targeted resource, and verify as expected."""
orig_failcount = self._get_failcount(node)
watch = self.create_watch(pats, 60)
watch.set_watch()
- self._rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self._rid, node))
+ self._rsh(node, f"crm_resource -V -F -r {self._rid} -H {node} &>/dev/null")
with Timer(self._logger, self.name, "recover"):
watch.look_for_all()
self._cm.cluster_stable()
recovered = self._cm.resource_location(self._rid)
if watch.unmatched:
- return self.failure("Patterns not found: %r" % watch.unmatched)
+ return self.failure(f"Patterns not found: {watch.unmatched!r}")
if rsc.unique and len(recovered) > 1:
- return self.failure("%s is now active on more than one node: %r" % (self._rid, recovered))
+ return self.failure(f"{self._rid} is now active on more than one node: {recovered!r}")
if recovered:
- self.debug("%s is running on: %r" % (self._rid, recovered))
+ self.debug(f"{self._rid} is running on: {recovered!r}")
elif rsc.managed:
- return self.failure("%s was not recovered and is inactive" % self._rid)
+ return self.failure(f"{self._rid} was not recovered and is inactive")
new_failcount = self._get_failcount(node)
if new_failcount != orig_failcount + 1:
- return self.failure("%s fail count is %d not %d"
- % (self._rid, new_failcount, orig_failcount + 1))
+ return self.failure(f"{self._rid} fail count is {new_failcount} not {orig_failcount + 1}")
# Anything but None is success
return 0
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
return [
- r"Updating failcount for %s" % self._rid,
- r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self._rid, self._rid_alt),
+ f"Updating failcount for {self._rid}",
+ fr"schedulerd.*: Recover\s+({self._rid}|{self._rid_alt})\s+\(.*\)",
r"Unknown operation: fail",
self.templates["Pat:RscOpOK"] % (self._action, self._rid),
- r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self._rid, self._action, self._interval)
+ f"(ERROR|error).*: Action {self._rid}_{self._action}_{self._interval} .* initiated outside of a transition",
]
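The errors_to_ignore patterns above interpolate resource names into regular expressions, which is what the fr prefix is for: f interpolates the braces while r keeps escapes like \s literal. A standalone sketch with made-up IDs (note that, exactly as in the test, the IDs are not regex-escaped, so this only works for names without regex metacharacters):

    import re

    rid = "dummy"
    rid_alt = "dummy:0"
    pat = fr"schedulerd.*: Recover\s+({rid}|{rid_alt})\s+\(.*\)"
    assert re.search(pat, "schedulerd[123]: Recover  dummy  (rhel9-1)")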
diff --git a/python/pacemaker/_cts/tests/restartonebyone.py b/python/pacemaker/_cts/tests/restartonebyone.py
index 953a4f05af..84a2214538 100644
--- a/python/pacemaker/_cts/tests/restartonebyone.py
+++ b/python/pacemaker/_cts/tests/restartonebyone.py
@@ -1,56 +1,56 @@
"""Restart all nodes in order."""
__all__ = ["RestartOnebyOne"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.restarttest import RestartTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class RestartOnebyOne(CTSTest):
"""Restart all nodes in order."""
def __init__(self, cm):
"""
Create a new RestartOnebyOne instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "RestartOnebyOne"
self._restart = None
self._startall = SimulStartLite(cm)
def __call__(self, dummy):
"""Perform the test."""
self.incr("calls")
ret = self._startall(None)
if not ret:
return self.failure("Setup failed")
did_fail = []
self.set_timer()
self._restart = RestartTest(self._cm)
for node in self._env["nodes"]:
if not self._restart(node):
did_fail.append(node)
if did_fail:
- return self.failure("Could not restart %d nodes: %r" % (len(did_fail), did_fail))
+ return self.failure(f"Could not restart {len(did_fail)} nodes: {did_fail!r}")
return self.success()
diff --git a/python/pacemaker/_cts/tests/restarttest.py b/python/pacemaker/_cts/tests/restarttest.py
index be8c2acb6e..1ad33dcce7 100644
--- a/python/pacemaker/_cts/tests/restarttest.py
+++ b/python/pacemaker/_cts/tests/restarttest.py
@@ -1,47 +1,47 @@
"""Stop and restart a node."""
__all__ = ["RestartTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.starttest import StartTest
from pacemaker._cts.tests.stoptest import StopTest
class RestartTest(CTSTest):
"""Stop and restart a node."""
def __init__(self, cm):
"""
Create a new RestartTest instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.benchmark = True
self.name = "Restart"
self._start = StartTest(cm)
self._stop = StopTest(cm)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
- self.incr("node:%s" % node)
+ self.incr(f"node:{node}")
if self._cm.stat_cm(node):
self.incr("WasStopped")
if not self._start(node):
- return self.failure("start (setup) failure: %s" % node)
+ return self.failure(f"start (setup) failure: {node}")
self.set_timer()
if not self._stop(node):
- return self.failure("stop failure: %s" % node)
+ return self.failure(f"stop failure: {node}")
if not self._start(node):
- return self.failure("start failure: %s" % node)
+ return self.failure(f"start failure: {node}")
return self.success()
diff --git a/python/pacemaker/_cts/tests/resynccib.py b/python/pacemaker/_cts/tests/resynccib.py
index e9fa0e09bd..91b49ad90d 100644
--- a/python/pacemaker/_cts/tests/resynccib.py
+++ b/python/pacemaker/_cts/tests/resynccib.py
@@ -1,70 +1,70 @@
"""Start the cluster without a CIB and verify it gets copied from another node."""
__all__ = ["ResyncCIB"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker import BuildOptions
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.restarttest import RestartTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.simulstoplite import SimulStopLite
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
class ResyncCIB(CTSTest):
"""Start the cluster on a node without a CIB and verify the CIB is copied over later."""
def __init__(self, cm):
"""
Create a new ResyncCIB instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "ResyncCIB"
self._restart1 = RestartTest(cm)
self._startall = SimulStartLite(cm)
self._stopall = SimulStopLite(cm)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
# Shut down all the nodes...
if not self._stopall(None):
return self.failure("Could not stop all nodes")
# Test config recovery when the other nodes come up
- self._rsh(node, "rm -f %s/cib*" % BuildOptions.CIB_DIR)
+ self._rsh(node, f"rm -f {BuildOptions.CIB_DIR}/cib*")
# Start the selected node
if not self._restart1(node):
- return self.failure("Could not start %s" % node)
+ return self.failure(f"Could not start {node}")
# Start all remaining nodes
if not self._startall(None):
return self.failure("Could not start the remaining nodes")
return self.success()
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
# Errors that occur as a result of the CIB being wiped
return [
r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
r"error.*: Resource start-up disabled since no STONITH resources have been defined",
r"error.*: Either configure some or disable STONITH with the stonith-enabled option",
r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity"
]
diff --git a/python/pacemaker/_cts/tests/simulstartlite.py b/python/pacemaker/_cts/tests/simulstartlite.py
index 666b3a1496..a327e39d1b 100644
--- a/python/pacemaker/_cts/tests/simulstartlite.py
+++ b/python/pacemaker/_cts/tests/simulstartlite.py
@@ -1,128 +1,128 @@
"""Simultaneously start stopped nodes."""
__all__ = ["SimulStartLite"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class SimulStartLite(CTSTest):
"""
A pseudo-test that sets up conditions before running some other test.
This class starts any stopped nodes more or less simultaneously. Other test
classes should not use this one as a superclass.
"""
def __init__(self, cm):
"""
Create a new SimulStartLite instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "SimulStartLite"
def __call__(self, dummy):
"""Return whether starting all stopped nodes more or less simultaneously succeeds."""
self.incr("calls")
- self.debug("Setup: %s" % self.name)
+ self.debug(f"Setup: {self.name}")
# We ignore the "node" parameter...
node_list = []
for node in self._env["nodes"]:
if self._cm.expected_status[node] == "down":
self.incr("WasStopped")
node_list.append(node)
self.set_timer()
while len(node_list) > 0:
# Repeat until all nodes come up
uppat = self.templates["Pat:NonDC_started"]
if self._cm.upcount() == 0:
uppat = self.templates["Pat:Local_started"]
watchpats = [
self.templates["Pat:DC_IDLE"]
]
for node in node_list:
watchpats.extend([uppat % node,
self.templates["Pat:InfraUp"] % node,
self.templates["Pat:PacemakerUp"] % node])
# Start all the nodes - at about the same time...
watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
stonith = self._cm.prepare_fencing_watcher()
for node in node_list:
self._cm.start_cm_async(node)
watch.look_for_all()
node_list = self._cm.fencing_cleanup(self.name, stonith)
if node_list is None:
return self.failure("Cluster did not stabilize")
# Remove node_list messages from watch.unmatched
for node in node_list:
- self._logger.debug("Dealing with stonith operations for %s" % node_list)
+ self._logger.debug(f"Dealing with stonith operations for {node_list}")
if watch.unmatched:
try:
watch.unmatched.remove(uppat % node)
except ValueError:
- self.debug("Already matched: %s" % (uppat % node))
+ self.debug(f"Already matched: {uppat % node}")
try:
watch.unmatched.remove(self.templates["Pat:InfraUp"] % node)
except ValueError:
- self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node))
+ self.debug(f"Already matched: {self.templates['Pat:InfraUp'] % node}")
try:
watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node)
except ValueError:
- self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node))
+ self.debug(f"Already matched: {self.templates['Pat:PacemakerUp'] % node}")
if watch.unmatched:
for regex in watch.unmatched:
- self._logger.log("Warn: Startup pattern not found: %s" % regex)
+ self._logger.log(f"Warn: Startup pattern not found: {regex}")
if not self._cm.cluster_stable():
return self.failure("Cluster did not stabilize")
did_fail = False
unstable = []
for node in self._env["nodes"]:
if not self._cm.stat_cm(node):
did_fail = True
unstable.append(node)
if did_fail:
- return self.failure("Unstarted nodes exist: %s" % unstable)
+ return self.failure(f"Unstarted nodes exist: {unstable}")
unstable = []
for node in self._env["nodes"]:
if not self._cm.node_stable(node):
did_fail = True
unstable.append(node)
if did_fail:
- return self.failure("Unstable cluster nodes exist: %s" % unstable)
+ return self.failure(f"Unstable cluster nodes exist: {unstable}")
return self.success()
def is_applicable(self):
"""Return True if this test is applicable in the current test configuration."""
return False
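SimulStartLite, SimulStopLite, and the tests built on them all follow the same log-watching idiom: register the expected patterns, trigger the cluster action, block in look_for_all(), then report whatever is left in watch.unmatched. A toy standalone model of the matching step (not the CTS LogWatcher implementation):

    import re

    class MiniWatch:
        """Toy model: match a set of regexes against a stream of log lines."""

        def __init__(self, patterns):
            self.unmatched = [re.compile(p) for p in patterns]

        def look_for_all(self, lines):
            """Consume lines until every pattern has matched or input runs out."""
            for line in lines:
                self.unmatched = [p for p in self.unmatched if not p.search(line)]
                if not self.unmatched:
                    return True
            return False

    w = MiniWatch([r"rhel9-1 .* started", r"rhel9-2 .* started"])
    assert w.look_for_all(["rhel9-1 pacemaker started", "rhel9-2 pacemaker started"])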
diff --git a/python/pacemaker/_cts/tests/simulstoplite.py b/python/pacemaker/_cts/tests/simulstoplite.py
index 69f1b9c4d2..1bb8ddc5a0 100644
--- a/python/pacemaker/_cts/tests/simulstoplite.py
+++ b/python/pacemaker/_cts/tests/simulstoplite.py
@@ -1,86 +1,86 @@
"""Simultaneously stop running nodes."""
__all__ = ["SimulStopLite"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class SimulStopLite(CTSTest):
"""
A pseudo-test that sets up conditions before running some other test.
This class stops any running nodes more or less simultaneously. It can be
used both to set up a test or to clean up a test. Other test classes
should not use this one as a superclass.
"""
def __init__(self, cm):
"""
Create a new SimulStopLite instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "SimulStopLite"
def __call__(self, dummy):
"""Return whether stopping all running nodes more or less simultaneously succeeds."""
self.incr("calls")
- self.debug("Setup: %s" % self.name)
+ self.debug(f"Setup: {self.name}")
# We ignore the "node" parameter...
watchpats = []
for node in self._env["nodes"]:
if self._cm.expected_status[node] == "up":
self.incr("WasStarted")
watchpats.append(self.templates["Pat:We_stopped"] % node)
if len(watchpats) == 0:
return self.success()
# Stop all the nodes - at about the same time...
watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
self.set_timer()
for node in self._env["nodes"]:
if self._cm.expected_status[node] == "up":
self._cm.stop_cm_async(node)
if watch.look_for_all():
# Make sure they're completely down, with no residue
for node in self._env["nodes"]:
self._rsh(node, self.templates["StopCmd"])
return self.success()
did_fail = False
up_nodes = []
for node in self._env["nodes"]:
if self._cm.stat_cm(node):
did_fail = True
up_nodes.append(node)
if did_fail:
- return self.failure("Active nodes exist: %s" % up_nodes)
+ return self.failure(f"Active nodes exist: {up_nodes}")
- self._logger.log("Warn: All nodes stopped but CTS didn't detect: %s" % watch.unmatched)
- return self.failure("Missing log message: %s " % watch.unmatched)
+ self._logger.log(f"Warn: All nodes stopped but CTS didn't detect: {watch.unmatched}")
+ return self.failure(f"Missing log message: {watch.unmatched}")
def is_applicable(self):
"""Return True if this test is applicable in the current test configuration."""
return False
diff --git a/python/pacemaker/_cts/tests/splitbraintest.py b/python/pacemaker/_cts/tests/splitbraintest.py
index 711e6dad31..e020f4263e 100644
--- a/python/pacemaker/_cts/tests/splitbraintest.py
+++ b/python/pacemaker/_cts/tests/splitbraintest.py
@@ -1,211 +1,211 @@
"""Create a split brain cluster and verify a resource is multiply managed."""
__all__ = ["SplitBrainTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import time
from pacemaker._cts.input import should_continue
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.starttest import StartTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class SplitBrainTest(CTSTest):
"""
Create a split brain cluster.
This test verifies that one node in each partition takes over the
resource, resulting in two nodes running the same resource.
"""
def __init__(self, cm):
"""
Create a new SplitBrainTest instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.is_experimental = True
self.name = "SplitBrain"
self._start = StartTest(cm)
self._startall = SimulStartLite(cm)
def _isolate_partition(self, partition):
"""Create a new partition containing the given nodes."""
other_nodes = self._env["nodes"].copy()
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
- self._logger.log("Node %s not in %r from %r" % (node, self._env["nodes"], partition))
+ self._logger.log(f"Node {node} not in {self._env['nodes']!r} from {partition!r}")
if not other_nodes:
return
- self.debug("Creating partition: %r" % partition)
- self.debug("Everyone else: %r" % other_nodes)
+ self.debug(f"Creating partition: {partition!r}")
+ self.debug(f"Everyone else: {other_nodes!r}")
for node in partition:
if not self._cm.isolate_node(node, other_nodes):
- self._logger.log("Could not isolate %s" % node)
+ self._logger.log(f"Could not isolate {node}")
return
def _heal_partition(self, partition):
"""Move the given nodes out of their own partition back into the cluster."""
other_nodes = self._env["nodes"].copy()
for node in partition:
try:
other_nodes.remove(node)
except ValueError:
- self._logger.log("Node %s not in %r" % (node, self._env["nodes"]))
+ self._logger.log(f"Node {node} not in {self._env['nodes']!r}")
if len(other_nodes) == 0:
return
- self.debug("Healing partition: %r" % partition)
- self.debug("Everyone else: %r" % other_nodes)
+ self.debug(f"Healing partition: {partition!r}")
+ self.debug(f"Everyone else: {other_nodes!r}")
for node in partition:
self._cm.unisolate_node(node, other_nodes)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
self.passed = True
partitions = {}
if not self._startall(None):
return self.failure("Setup failed")
while True:
# Retry until we get multiple partitions
partitions = {}
p_max = len(self._env["nodes"])
for n in self._env["nodes"]:
p = self._env.random_gen.randint(1, p_max)
if p not in partitions:
partitions[p] = []
partitions[p].append(n)
p_max = len(partitions)
if p_max > 1:
break
# else, try again
- self.debug("Created %d partitions" % p_max)
+ self.debug(f"Created {p_max} partitions")
for (key, val) in partitions.items():
- self.debug("Partition[%s]:\t%r" % (key, val))
+ self.debug(f"Partition[{key}]:\t{val!r}")
# Disabling STONITH to reduce test complexity for now
self._rsh(node, "crm_attribute -V -n stonith-enabled -v false")
for val in partitions.values():
self._isolate_partition(val)
count = 30
while count > 0:
if len(self._cm.find_partitions()) != p_max:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Expected partitions were not created")
# Target number of partitions formed - wait for stability
if not self._cm.cluster_stable():
self.failure("Partitioned cluster not stable")
# Now audit the cluster state
self._cm.partitions_expected = p_max
if not self.audit():
self.failure("Audits failed")
self._cm.partitions_expected = 1
# And heal them again
for val in partitions.values():
self._heal_partition(val)
# Wait for a single partition to form
count = 30
while count > 0:
if len(self._cm.find_partitions()) != 1:
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not reform")
# Wait for it to have the right number of members
count = 30
while count > 0:
members = []
partitions = self._cm.find_partitions()
if partitions:
members = partitions[0].split()
if len(members) != len(self._env["nodes"]):
time.sleep(10)
count -= 1
else:
break
else:
self.failure("Cluster did not completely reform")
# Wait up to 20 minutes - this delay is preferable to trying to
# continue in a messed-up state
if not self._cm.cluster_stable(1200):
self.failure("Reformed cluster not stable")
if not should_continue(self._env):
raise ValueError("Reformed cluster not stable")
# Turn fencing back on
if self._env["DoFencing"]:
self._rsh(node, "crm_attribute -V -D -n stonith-enabled")
self._cm.cluster_stable()
if self.passed:
return self.success()
return self.failure("See previous errors")
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
return [
r"Another DC detected:",
r"(ERROR|error).*: .*Application of an update diff failed",
r"pacemaker-controld.*:.*not in our membership list",
r"CRIT:.*node.*returning after partition"
]
def is_applicable(self):
"""Return True if this test is applicable in the current test configuration."""
if not CTSTest.is_applicable(self):
return False
return len(self._env["nodes"]) > 2
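The retry loop at the top of __call__ reduces to: drop every node into a random bucket, and try again until more than one bucket is populated. Standalone, with Python's global random in place of the environment's seeded generator:

    import random

    def random_partitions(nodes):
        """Split nodes (len >= 2) into between 2 and len(nodes) non-empty partitions."""
        while True:
            parts = {}
            for n in nodes:
                parts.setdefault(random.randint(1, len(nodes)), []).append(n)
            if len(parts) > 1:  # retry single-bucket outcomes
                return parts

    print(random_partitions(["rhel9-1", "rhel9-2", "rhel9-3"]))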
diff --git a/python/pacemaker/_cts/tests/standbytest.py b/python/pacemaker/_cts/tests/standbytest.py
index a3e17343a8..7992dd0bc4 100644
--- a/python/pacemaker/_cts/tests/standbytest.py
+++ b/python/pacemaker/_cts/tests/standbytest.py
@@ -1,106 +1,106 @@
"""Put a node into standby mode and check that resources migrate."""
__all__ = ["StandbyTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.starttest import StartTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class StandbyTest(CTSTest):
"""Put a node into standby and check that resources migrate away from it."""
def __init__(self, cm):
"""
Create a new StandbyTest instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.benchmark = True
self.name = "Standby"
self._start = StartTest(cm)
self._startall = SimulStartLite(cm)
# make sure the node is active
# set the node to standby mode
# check resources; no resources should be running on the node
# set the node to active mode
# check resources, resources should have been migrated back (SHOULD THEY?)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
ret = self._startall(None)
if not ret:
return self.failure("Start all nodes failed")
- self.debug("Make sure node %s is active" % node)
+ self.debug(f"Make sure node {node} is active")
if self._cm.in_standby_mode(node):
if not self._cm.set_standby_mode(node, False):
- return self.failure("can't set node %s to active mode" % node)
+ return self.failure(f"can't set node {node} to active mode")
self._cm.cluster_stable()
if self._cm.in_standby_mode(node):
- return self.failure("standby status of %s is [on] but we expect [off]" % node)
+ return self.failure(f"standby status of {node} is [on] but we expect [off]")
watchpats = [
r"State transition .* -> S_POLICY_ENGINE",
]
watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
watch.set_watch()
- self.debug("Setting node %s to standby mode" % node)
+ self.debug(f"Setting node {node} to standby mode")
if not self._cm.set_standby_mode(node, True):
- return self.failure("can't set node %s to standby mode" % node)
+ return self.failure(f"can't set node {node} to standby mode")
self.set_timer("on")
ret = watch.look_for_all()
if not ret:
- self._logger.log("Patterns not found: %r" % watch.unmatched)
+ self._logger.log(f"Patterns not found: {watch.unmatched!r}")
self._cm.set_standby_mode(node, False)
- return self.failure("cluster didn't react to standby change on %s" % node)
+ return self.failure(f"cluster didn't react to standby change on {node}")
self._cm.cluster_stable()
if not self._cm.in_standby_mode(node):
- return self.failure("standby status of %s is [off] but we expect [on]" % node)
+ return self.failure(f"standby status of {node} is [off] but we expect [on]")
self.log_timer("on")
self.debug("Checking resources")
rscs_on_node = self._cm.active_resources(node)
if rscs_on_node:
- rc = self.failure("%s set to standby, %r is still running on it" % (node, rscs_on_node))
- self.debug("Setting node %s to active mode" % node)
+ rc = self.failure(f"{node} set to standby, {rscs_on_node!r} is still running on it")
+ self.debug(f"Setting node {node} to active mode")
self._cm.set_standby_mode(node, False)
return rc
- self.debug("Setting node %s to active mode" % node)
+ self.debug(f"Setting node {node} to active mode")
if not self._cm.set_standby_mode(node, False):
- return self.failure("can't set node %s to active mode" % node)
+ return self.failure(f"can't set node {node} to active mode")
self.set_timer("off")
self._cm.cluster_stable()
if self._cm.in_standby_mode(node):
- return self.failure("standby status of %s is [on] but we expect [off]" % node)
+ return self.failure(f"standby status of {node} is [on] but we expect [off]")
self.log_timer("off")
return self.success()
diff --git a/python/pacemaker/_cts/tests/startonebyone.py b/python/pacemaker/_cts/tests/startonebyone.py
index 550dacd392..879c84780a 100644
--- a/python/pacemaker/_cts/tests/startonebyone.py
+++ b/python/pacemaker/_cts/tests/startonebyone.py
@@ -1,53 +1,53 @@
"""Start all stopped nodes serially."""
__all__ = ["StartOnebyOne"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstoplite import SimulStopLite
from pacemaker._cts.tests.starttest import StartTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class StartOnebyOne(CTSTest):
"""Start all stopped nodes serially."""
def __init__(self, cm):
"""
Create a new StartOnebyOne instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "StartOnebyOne"
self._start = StartTest(cm)
self._stopall = SimulStopLite(cm)
def __call__(self, dummy):
"""Perform this test."""
self.incr("calls")
ret = self._stopall(None)
if not ret:
return self.failure("Test setup failed")
failed = []
self.set_timer()
for node in self._env["nodes"]:
if not self._start(node):
failed.append(node)
if failed:
- return self.failure("Some node failed to start: %r" % failed)
+ return self.failure(f"Some node failed to start: {failed!r}")
return self.success()
diff --git a/python/pacemaker/_cts/tests/starttest.py b/python/pacemaker/_cts/tests/starttest.py
index b8103d4038..dc05531f14 100644
--- a/python/pacemaker/_cts/tests/starttest.py
+++ b/python/pacemaker/_cts/tests/starttest.py
@@ -1,52 +1,51 @@
"""Start the cluster manager on a given node."""
__all__ = ["StartTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class StartTest(CTSTest):
"""
A pseudo-test that sets up conditions before running some other test.
This class starts the cluster manager on a given node. Other test classes
should not use this one as a superclass.
"""
def __init__(self, cm):
"""
Create a new StartTest instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "Start"
def __call__(self, node):
"""Start the given node, returning whether this succeeded or not."""
self.incr("calls")
if self._cm.upcount() == 0:
self.incr("us")
else:
self.incr("them")
if self._cm.expected_status[node] != "down":
return self.skipped()
if self._cm.start_cm(node):
return self.success()
- return self.failure("Startup %s on node %s failed"
- % (self._env["Name"], node))
+ return self.failure(f"Startup {self._env['Name']} on node {node} failed")
diff --git a/python/pacemaker/_cts/tests/stonithdtest.py b/python/pacemaker/_cts/tests/stonithdtest.py
index 7c654592e9..c2e59f80bc 100644
--- a/python/pacemaker/_cts/tests/stonithdtest.py
+++ b/python/pacemaker/_cts/tests/stonithdtest.py
@@ -1,141 +1,141 @@
"""Fence a running node and wait for it to restart."""
__all__ = ["StonithdTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker.exitstatus import ExitStatus
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.timer import Timer
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class StonithdTest(CTSTest):
"""Fence a running node and wait for it to restart."""
def __init__(self, cm):
"""
Create a new StonithdTest instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.benchmark = True
self.name = "Stonithd"
self._startall = SimulStartLite(cm)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
if len(self._env["nodes"]) < 2:
return self.skipped()
ret = self._startall(None)
if not ret:
return self.failure("Setup failed")
watchpats = [
self.templates["Pat:Fencing_ok"] % node,
self.templates["Pat:NodeFenced"] % node,
]
if not self._env["at-boot"]:
- self.debug("Expecting %s to stay down" % node)
+ self.debug(f"Expecting {node} to stay down")
self._cm.expected_status[node] = "down"
else:
- self.debug("Expecting %s to come up again %d" % (node, self._env["at-boot"]))
+ self.debug(f"Expecting {node} to come up again {self._env['at-boot']}")
watchpats.extend([
- "%s.* S_STARTING -> S_PENDING" % node,
- "%s.* S_PENDING -> S_NOT_DC" % node,
+ f"{node}.* S_STARTING -> S_PENDING",
+ f"{node}.* S_PENDING -> S_NOT_DC",
])
watch = self.create_watch(watchpats, 30 + self._env["DeadTime"] + self._env["StableTime"] + self._env["StartTime"])
watch.set_watch()
origin = self._env.random_gen.choice(self._env["nodes"])
- (rc, _) = self._rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node)
+ (rc, _) = self._rsh(origin, f"stonith_admin --reboot {node} -VVVVVV")
if rc == ExitStatus.TIMEOUT:
# Look for the patterns anyway. A timeout usually means the required
# device was running on the node to be fenced - or that the required
# devices were in the process of being loaded and/or moved
#
# Effectively the node committed suicide so there will be
# no confirmation, but pacemaker should be watching and
# fence the node again
- self._logger.log("Fencing command on %s to fence %s timed out" % (origin, node))
+ self._logger.log(f"Fencing command on {origin} to fence {node} timed out")
elif origin != node and rc != 0:
self.debug("Waiting for the cluster to recover")
self._cm.cluster_stable()
self.debug("Waiting for fenced node to come back up")
self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600)
- self._logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc))
+ self._logger.log(f"Fencing command on {origin} failed to fence {node} (rc={rc})")
elif origin == node and rc != 255:
# 255 == broken pipe, ie. the node was fenced as expected
- self._logger.log("Locally originated fencing returned %d" % rc)
+ self._logger.log(f"Locally originated fencing returned {rc}")
with Timer(self._logger, self.name, "fence"):
matched = watch.look_for_all()
self.set_timer("reform")
if watch.unmatched:
- self._logger.log("Patterns not found: %r" % watch.unmatched)
+ self._logger.log(f"Patterns not found: {watch.unmatched!r}")
self.debug("Waiting for the cluster to recover")
self._cm.cluster_stable()
self.debug("Waiting for fenced node to come back up")
self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600)
self.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self._cm.cluster_stable(self._env["StartTime"])
if not matched:
return self.failure("Didn't find all expected patterns")
if not is_stable:
return self.failure("Cluster did not become stable")
self.log_timer("reform")
return self.success()
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
return [
self.templates["Pat:Fencing_start"] % ".*",
self.templates["Pat:Fencing_ok"] % ".*",
self.templates["Pat:Fencing_active"],
r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired"
]
def is_applicable(self):
"""Return True if this test is applicable in the current test configuration."""
if not CTSTest.is_applicable(self):
return False
# pylint gets confused because of EnvFactory here.
# pylint: disable=unsupported-membership-test
if "DoFencing" in self._env:
return self._env["DoFencing"]
return True
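The rc handling in __call__ distinguishes three fencing outcomes. Condensed into a standalone sketch (the timeout value here is illustrative; the test compares against ExitStatus.TIMEOUT):

    def classify_fence_result(origin, node, rc, timeout_rc=124):
        """Model of StonithdTest's stonith_admin return-code handling."""
        if rc == timeout_rc:
            return "timed out: target likely self-fenced, so no confirmation arrives"
        if origin != node and rc != 0:
            return "remote fencing request failed"
        if origin == node and rc != 255:
            return "unexpected local return (255 = broken pipe, i.e. fenced)"
        return "fenced as expected"

    assert classify_fence_result("rhel9-1", "rhel9-1", 255) == "fenced as expected"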
diff --git a/python/pacemaker/_cts/tests/stoponebyone.py b/python/pacemaker/_cts/tests/stoponebyone.py
index 4fdfe5c8a2..17af1c6979 100644
--- a/python/pacemaker/_cts/tests/stoponebyone.py
+++ b/python/pacemaker/_cts/tests/stoponebyone.py
@@ -1,54 +1,54 @@
"""Stop all running nodes serially."""
__all__ = ["StopOnebyOne"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.stoptest import StopTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class StopOnebyOne(CTSTest):
"""Stop all running nodes serially."""
def __init__(self, cm):
"""
Create a new StopOnebyOne instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "StopOnebyOne"
self._startall = SimulStartLite(cm)
self._stop = StopTest(cm)
def __call__(self, dummy):
"""Perform this test."""
self.incr("calls")
ret = self._startall(None)
if not ret:
return self.failure("Setup failed")
failed = []
self.set_timer()
for node in self._env["nodes"]:
if not self._stop(node):
failed.append(node)
if failed:
- return self.failure("Some node failed to stop: %r" % failed)
+ return self.failure(f"Some node failed to stop: {failed!r}")
return self.success()
diff --git a/python/pacemaker/_cts/tests/stoptest.py b/python/pacemaker/_cts/tests/stoptest.py
index 48ef73ac0f..c4d9b559a2 100644
--- a/python/pacemaker/_cts/tests/stoptest.py
+++ b/python/pacemaker/_cts/tests/stoptest.py
@@ -1,97 +1,97 @@
"""Stop the cluster manager on a given node."""
__all__ = ["StopTest"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from pacemaker._cts.tests.ctstest import CTSTest
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
class StopTest(CTSTest):
"""
A pseudo-test that sets up conditions before running some other test.
This class stops the cluster manager on a given node. Other test classes
should not use this one as a superclass.
"""
def __init__(self, cm):
"""
Create a new StopTest instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "Stop"
def __call__(self, node):
"""Stop the given node, returning whether this succeeded or not."""
self.incr("calls")
if self._cm.expected_status[node] != "up":
return self.skipped()
# Technically we should always be able to notice ourselves stopping
patterns = [
self.templates["Pat:We_stopped"] % node,
]
# Any active node needs to notice this one left
# (note that this won't work if we have multiple partitions)
for other in self._env["nodes"]:
if self._cm.expected_status[other] == "up" and other != node:
patterns.append(self.templates["Pat:They_stopped"] % (other, node))
watch = self.create_watch(patterns, self._env["DeadTime"])
watch.set_watch()
if node == self._cm.our_node:
self.incr("us")
else:
if self._cm.upcount() <= 1:
self.incr("all")
else:
self.incr("them")
self._cm.stop_cm(node)
watch.look_for_all()
failreason = None
unmatched_str = "||"
if watch.unmatched:
(_, output) = self._rsh(node, "/bin/ps axf", verbose=1)
for line in output:
self.debug(line)
(_, output) = self._rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", verbose=1)
for line in output:
self.debug(line)
for regex in watch.unmatched:
- self._logger.log("ERROR: Shutdown pattern not found: %s" % regex)
- unmatched_str += "%s||" % regex
+ self._logger.log(f"ERROR: Shutdown pattern not found: {regex}")
+ unmatched_str += f"{regex}||"
failreason = "Missing shutdown pattern"
self._cm.cluster_stable(self._env["DeadTime"])
if not watch.unmatched or self._cm.upcount() == 0:
return self.success()
if len(watch.unmatched) >= self._cm.upcount():
- return self.failure("no match against (%s)" % unmatched_str)
+ return self.failure(f"no match against ({unmatched_str})")
if failreason is None:
return self.success()
return self.failure(failreason)
diff --git a/python/pacemaker/_cts/timer.py b/python/pacemaker/_cts/timer.py
index e31f18b3b3..159a8cb1c3 100644
--- a/python/pacemaker/_cts/timer.py
+++ b/python/pacemaker/_cts/timer.py
@@ -1,63 +1,63 @@
"""Timer-related utilities for CTS."""
__all__ = ["Timer"]
-__copyright__ = "Copyright 2000-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import time
class Timer:
"""
A class for measuring the runtime of some task.
A Timer may be used manually or as a context manager, like so:
with Timer(logger, "SomeTest", "SomeTimer"):
...
A Timer runs from when start() is called until the timer is deleted or
reset() is called. There is no explicit stop method.
"""
def __init__(self, logger, test_name, timer_name):
"""
Create a new Timer instance.
Arguments:
logger -- A Logger instance that can be used to record when
the timer stopped
test_name -- The name of the test this timer is being run for
timer_name -- The name of this timer
"""
self._logger = logger
self._start_time = None
self._test_name = test_name
self._timer_name = timer_name
def __enter__(self):
"""When used as a context manager, start the timer."""
self.start()
return self
def __exit__(self, *args):
"""When used as a context manager, log the elapsed time."""
- self._logger.debug("%s:%s runtime: %.2f" % (self._test_name, self._timer_name, self.elapsed))
+ self._logger.debug(f"{self._test_name}:{self._timer_name} runtime: {self.elapsed:.2f}")
def reset(self):
"""Restart the timer."""
self.start()
def start(self):
"""Start the timer."""
self._start_time = time.time()
@property
def start_time(self):
"""Return when the timer started."""
return self._start_time
@property
def elapsed(self):
"""Return how long the timer has been running for."""
return time.time() - self._start_time
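Since Timer only ever calls its logger's debug() method, trying it out needs nothing more than a stand-in logger. A minimal usage sketch (assuming the pacemaker package is importable):

    import time
    from pacemaker._cts.timer import Timer

    class PrintLogger:
        """Stand-in for the LogFactory logger; Timer only uses debug()."""

        def debug(self, msg):
            print(msg)

    with Timer(PrintLogger(), "SomeTest", "fence"):
        time.sleep(0.1)  # the timed work
    # prints something like: SomeTest:fence runtime: 0.10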
diff --git a/python/pacemaker/_cts/validate.py b/python/pacemaker/_cts/validate.py
index a396a38c2c..804c3608fa 100644
--- a/python/pacemaker/_cts/validate.py
+++ b/python/pacemaker/_cts/validate.py
@@ -1,62 +1,63 @@
"""A module providing XML validation utilities."""
-__copyright__ = "Copyright 2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2024-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+)"
__all__ = ["validate"]
import os
import subprocess
import sys
from pacemaker._cts.errors import XmlValidationError
from pacemaker._cts.process import pipe_communicate
from pacemaker.buildoptions import BuildOptions
def find_validator(rng_file):
"""
Return the command line used to validate XML output.
If no validator is found, return None.
"""
if os.access(BuildOptions.XMLLINT_PATH, os.X_OK):
if rng_file is None:
return [BuildOptions.XMLLINT_PATH, "-"]
return [BuildOptions.XMLLINT_PATH, "--relaxng", rng_file, "-"]
- raise FileNotFoundError("Could not find validator for %s" % rng_file)
+ raise FileNotFoundError(f"Could not find validator for {rng_file}")
def rng_directory():
"""Return the directory containing RNG schema files."""
if os.environ.get("PCMK_schema_directory", "") != "":
return os.environ["PCMK_schema_directory"]
for path in sys.path:
- if os.path.exists("%s/cts-fencing.in" % path):
- return "%s/xml" % path
+ if os.path.exists(f"{path}/cts-fencing.in"):
+ return f"{path}/xml"
return BuildOptions.SCHEMA_DIR
def validate(xml, check_rng=True, verbose=False):
"""Validate the given XML input string against a schema."""
if check_rng:
- rng_file = "%s/api/api-result.rng" % rng_directory()
+ rng_file = f"{rng_directory()}/api/api-result.rng"
else:
rng_file = None
cmd = find_validator(rng_file)
if verbose:
- print("\nRunning: %s" % " ".join(cmd))
+ s = " ".join(cmd)
+ print(f"\nRunning: {s}")
with subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as validator:
output = pipe_communicate(validator, check_stderr=True, stdin=xml)
if validator.returncode != 0:
raise XmlValidationError(output, validator.returncode)
return output
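A minimal use of validate(), assuming xmllint is installed at the path the build configured. With check_rng=False the input is only checked for well-formedness rather than against the api-result.rng schema:

    from pacemaker._cts.errors import XmlValidationError
    from pacemaker._cts.validate import validate

    try:
        validate("<pacemaker-result/>", check_rng=False, verbose=True)
    except XmlValidationError as err:
        print(f"validation failed: {err}")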
diff --git a/python/pacemaker/_cts/watcher.py b/python/pacemaker/_cts/watcher.py
index c9078abc78..eb247c62db 100644
--- a/python/pacemaker/_cts/watcher.py
+++ b/python/pacemaker/_cts/watcher.py
@@ -1,627 +1,621 @@
"""Log searching classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["LogKind", "LogWatcher"]
-__copyright__ = "Copyright 2014-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2014-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
from enum import Enum, auto, unique
import re
import time
import threading
from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.errors import OutputNotFoundError
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
# dateutil >=2.7.0 provides isoparser, but we support older versions
try:
from dateutil.parser import isoparser
except ImportError:
import datetime
import subprocess
# pylint: disable=ungrouped-imports
from pacemaker._cts.process import pipe_communicate
# pylint: disable=invalid-name
class isoparser():
"""Mimic isoparser.isoparse when not available."""
def isoparse(self, timestamp):
"""Mimic isoparser.isoparse when not available."""
with subprocess.Popen(["iso8601", "--output-as", "xml", "-d", timestamp],
stdout=subprocess.PIPE) as result:
output = pipe_communicate(result)
if result.returncode != 0:
raise RuntimeError("Unable to run iso8601 command")
match = re.search(r".*\s*(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)Z",
output)
if match is None:
raise RuntimeError("Unable to parse iso8601 command output")
return datetime.datetime(int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
int(match.group(4)),
int(match.group(5)),
int(match.group(6)))
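# Whichever definition is bound, callers use the same entry point, e.g.
# (illustrative timestamp):
#   isoparser().isoparse("2025-01-01T00:00:00Z")
# Note that the fallback returns a naive datetime, while dateutil's parser
# returns a timezone-aware one.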
-CTS_SUPPORT_BIN = "%s/cts-support" % BuildOptions.DAEMON_DIR
+CTS_SUPPORT_BIN = f"{BuildOptions.DAEMON_DIR}/cts-support"
@unique
class LogKind(Enum):
"""The various kinds of log files that can be watched."""
LOCAL_FILE = auto() # From a local aggregation file on the exerciser
REMOTE_FILE = auto() # From a file on each cluster node
JOURNAL = auto() # From the systemd journal on each cluster node
def __str__(self):
"""Return a printable string for a LogKind value."""
return self.name.lower().replace('_', ' ')
class SearchObj:
"""
The base class for various kinds of log watchers.
Log-specific watchers need to be built on top of this one.
"""
def __init__(self, filename, host=None, name=None):
"""
Create a new SearchObj instance.
Arguments:
filename -- The log to watch
host -- The cluster node on which to watch the log
name -- A unique name to use when logging about this watch
"""
self.filename = filename
self.limit = None
self.logger = LogFactory()
self.name = name
self.offset = "EOF"
self.rsh = RemoteFactory().getInstance()
if host:
self.host = host
else:
self.host = "localhost"
self._cache = []
self._delegate = None
async_task = self.harvest_async()
async_task.join()
def __str__(self):
if self.host:
- return "%s:%s" % (self.host, self.filename)
+ return f"{self.host}:{self.filename}"
return self.filename
def log(self, args):
"""Log a message."""
- message = "lw: %s: %s" % (self, args)
+ message = f"lw: {self}: {args}"
self.logger.log(message)
def debug(self, args):
"""Log a debug message."""
- message = "lw: %s: %s" % (self, args)
+ message = f"lw: {self}: {args}"
self.logger.debug(message)
def harvest_async(self, delegate=None):
"""
Collect lines from a log asynchronously.
Optionally, also call delegate when complete. This method must be
implemented by all subclasses.
"""
raise NotImplementedError
def harvest_cached(self):
"""Return cached logs from before the limit timestamp."""
raise NotImplementedError
def end(self):
"""
Mark that a log is done being watched.
This function also resets internal data structures to the beginning
of the file. Subsequent watches will therefore start from the
beginning again.
"""
self.debug("Clearing cache and unsetting limit")
self._cache = []
self.limit = None
class FileObj(SearchObj):
"""A specialized SearchObj subclass for watching log files."""
def __init__(self, filename, host=None, name=None):
"""
Create a new FileObj instance.
Arguments:
filename -- The file to watch
host -- The cluster node on which to watch the file
name -- A unique name to use when logging about this watch
"""
SearchObj.__init__(self, filename, host, name)
def async_complete(self, pid, returncode, out, err):
"""
Handle completion of an asynchronous log file read.
This function saves the output from that read for look()/look_for_all()
to process and records the current position in the file. Future
reads will pick back up from that spot.
Arguments:
pid -- The ID of the process that did the read
returncode -- The return code of the process that did the read
out -- stdout from the file read
err -- stderr from the file read
"""
messages = []
for line in out:
match = re.search(r"^CTSwatcher:Last read: (\d+)", line)
if match:
self.offset = match.group(1)
- self.debug("Got %d lines, new offset: %s %r" % (len(out), self.offset, self._delegate))
+ self.debug(f"Got {len(out)} lines, new offset: {self.offset} {self._delegate!r}")
elif re.search(r"^CTSwatcher:.*truncated", line):
self.log(line)
elif re.search(r"^CTSwatcher:", line):
- self.debug("Got control line: %s" % line)
+ self.debug(f"Got control line: {line}")
else:
messages.append(line)
if self._delegate:
self._delegate.async_complete(pid, returncode, messages, err)
def harvest_async(self, delegate=None):
"""
Collect lines from the log file on a single host asynchronously.
Optionally, call delegate when complete. This can be called
repeatedly, reading a chunk each time, until the end of the
log file is hit.
"""
self._delegate = delegate
if self.limit and (self.offset == "EOF" or int(self.offset) > self.limit):
if self._delegate:
self._delegate.async_complete(-1, -1, [], [])
return None
- cmd = ("%s watch -p CTSwatcher: -l 200 -f %s -o %s"
- % (CTS_SUPPORT_BIN, self.filename, self.offset))
-
+ cmd = f"{CTS_SUPPORT_BIN} watch -p CTSwatcher: -l 200 -f {self.filename} -o {self.offset}"
return self.rsh.call_async(self.host, cmd, delegate=self)
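# For illustration, with DAEMON_DIR at a common default of
# /usr/libexec/pacemaker, filename /var/log/messages, and offset 1234, the
# command above expands to:
#   /usr/libexec/pacemaker/cts-support watch -p CTSwatcher: -l 200 -f /var/log/messages -o 1234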
def harvest_cached(self):
"""Return cached logs from before the limit timestamp."""
# cts-log-watcher script renders caching unnecessary for FileObj.
# @TODO Caching might be slightly more efficient, if not too complex.
return []
def set_end(self):
"""
Internally record where we expect to find the end of a log file.
Calls to harvest from the log file will not go any farther than
what this function records.
"""
if self.limit:
return
- cmd = ("%s watch -p CTSwatcher: -l 2 -f %s -o EOF"
- % (CTS_SUPPORT_BIN, self.filename))
+ cmd = f"{CTS_SUPPORT_BIN} watch -p CTSwatcher: -l 2 -f {self.filename} -o EOF"
# pylint: disable=not-callable
(_, lines) = self.rsh(self.host, cmd, verbose=0)
for line in lines:
match = re.search(r"^CTSwatcher:Last read: (\d+)", line)
if match:
self.limit = int(match.group(1))
- self.debug("Set limit to: %d" % self.limit)
+ self.debug(f"Set limit to: {self.limit}")
class JournalObj(SearchObj):
"""A specialized SearchObj subclass for watching systemd journals."""
def __init__(self, host=None, name=None):
"""
Create a new JournalObj instance.
Arguments:
host -- The cluster node on which to watch the journal
name -- A unique name to use when logging about this watch
"""
SearchObj.__init__(self, "journal", host, name)
self._parser = isoparser()
def _msg_after_limit(self, msg):
"""
Check whether a message was logged after the limit timestamp.
Arguments:
msg -- Message to check
Returns `True` if `msg` was logged after `self.limit`, or `False`
otherwise.
"""
if not self.limit:
return False
match = re.search(r"^\S+", msg)
if not match:
return False
msg_timestamp = match.group(0)
msg_dt = self._parser.isoparse(msg_timestamp)
return msg_dt > self.limit
def _split_msgs_by_limit(self, msgs):
"""
Split a sorted list of messages relative to the limit timestamp.
Arguments:
msgs -- List of messages to split
Returns a tuple:
(list of messages logged on or before limit timestamp,
list of messages logged after limit timestamp).
"""
# If the last message was logged after the limit, find the index of the
# first message logged after the limit and split the list there; otherwise,
# all messages were logged on or before the limit
if msgs and self._msg_after_limit(msgs[-1]):
for idx, msg in enumerate(msgs):
if self._msg_after_limit(msg):
- self.debug("Got %d lines before passing limit timestamp"
- % idx)
+ self.debug(f"Got {idx} lines before passing limit timestamp")
return msgs[:idx], msgs[idx:]
- self.debug("Got %s lines" % len(msgs))
+ self.debug(f"Got {len(msgs)} lines")
return msgs, []
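# For illustration, with self.limit parsed from "2025-01-01T12:00:00+00:00"
# and short-iso journal lines (timestamp first), the split behaves like:
#   msgs = ["2025-01-01T11:59:59+0000 node1 msg A",
#           "2025-01-01T12:00:01+0000 node1 msg B"]
#   _split_msgs_by_limit(msgs)  ->  (msgs[:1], msgs[1:])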
def async_complete(self, pid, returncode, out, err):
"""
Handle completion of an asynchronous journal read.
This function saves the output from that read for look()/look_for_all()
to process and records the current position in the journal. Future
reads will pick back up from that spot.
Arguments:
pid -- The ID of the process that did the journal read
returncode -- The return code of the process that did the journal read
out -- stdout from the journal read
err -- stderr from the journal read
"""
if out:
# Cursor should always be last line of journalctl output
out, cursor_line = out[:-1], out[-1]
match = re.search(r"^-- cursor: ([^.]+)", cursor_line)
if not match:
- raise OutputNotFoundError('Cursor not found at end of output:'
- + '\n%s' % out)
+ raise OutputNotFoundError(f"Cursor not found at end of output:\n{out}")
self.offset = match.group(1).strip()
- self.debug("Got new cursor: %s" % self.offset)
+ self.debug(f"Got new cursor: {self.offset}")
before, after = self._split_msgs_by_limit(out)
# Save remaining messages after limit for later processing
self._cache.extend(after)
if self._delegate:
self._delegate.async_complete(pid, returncode, before, err)
def harvest_async(self, delegate=None):
"""
Collect lines from the journal on a single host asynchronously.
Optionally, call delegate when complete. This can be called
repeatedly, reading a chunk each time, until the end of the journal
is hit.
"""
self._delegate = delegate
# Use --lines to prevent journalctl from overflowing the Popen input
# buffer
command = "journalctl --quiet --output=short-iso --show-cursor"
if self.offset == "EOF":
command += " --lines 0"
else:
- command += " --after-cursor='%s' --lines=200" % self.offset
+ command += f" --after-cursor='{self.offset}' --lines=200"
return self.rsh.call_async(self.host, command, delegate=self)
def harvest_cached(self):
"""Return cached logs from before the limit timestamp."""
before, self._cache = self._split_msgs_by_limit(self._cache)
return before
def set_end(self):
"""
Internally record where we expect to find the end of a host's journal.
Calls to harvest from the journal will not go any farther than what
this function records.
"""
if self.limit:
return
# --iso-8601=seconds yields YYYY-MM-DDTHH:MM:SSZ, where Z is timezone
# as offset from UTC
# pylint: disable=not-callable
(rc, lines) = self.rsh(self.host, "date --iso-8601=seconds", verbose=0)
if rc == 0 and len(lines) == 1:
self.limit = self._parser.isoparse(lines[0].strip())
- self.debug("Set limit to: %s" % self.limit)
+ self.debug(f"Set limit to: {self.limit}")
else:
- self.debug("Unable to set limit for %s because date returned %d lines with status %d"
- % (self.host, len(lines), rc))
+ self.debug(f"Unable to set limit for {self.host} because date returned "
+ f"{len(lines)} lines with status {rc}")
class LogWatcher:
"""
Watch a single log file or journal across multiple hosts.
Instances of this class look for lines that match given regular
expressions.
The way you use this class is as follows:
- Construct a LogWatcher object
- Call set_watch() when you want to start watching the log
- Call look() to scan the log looking for the patterns
"""
def __init__(self, log, regexes, hosts, kind, name="Anon", timeout=10,
silent=False):
"""
Create a new LogWatcher instance.
Arguments:
log -- The log file to watch
regexes -- A list of regular expressions to match against the log
hosts -- A list of cluster nodes on which to watch the log
kind -- What type of log is this object watching?
name -- A unique name to use when logging about this watch
timeout -- Default number of seconds to watch a log file at a time;
this can be overridden by the timeout= parameter to
self.look on an as-needed basis
silent -- If False, log extra information
"""
self.filename = log
self.hosts = hosts
self.kind = kind
self.name = name
self.regexes = regexes
self.unmatched = None
self.whichmatch = -1
self._cache_lock = threading.Lock()
self._file_list = []
self._line_cache = []
self._logger = LogFactory()
self._timeout = int(timeout)
# Validate our arguments. Better sooner than later ;-)
for regex in regexes:
re.compile(regex)
if not self.hosts:
raise ValueError("LogWatcher requires hosts argument")
if self.kind != LogKind.JOURNAL and not self.filename:
raise ValueError("LogWatcher requires log file name if not journal")
if not silent:
for regex in self.regexes:
- self._debug("Looking for regex: %s" % regex)
+ self._debug(f"Looking for regex: {regex}")
def _debug(self, args):
"""Log a debug message."""
- message = "lw: %s: %s" % (self.name, args)
+ message = f"lw: {self.name}: {args}"
self._logger.debug(message)
def set_watch(self):
"""Mark the place to start watching the log from."""
if self.kind == LogKind.LOCAL_FILE:
self._file_list.append(FileObj(self.filename))
elif self.kind == LogKind.REMOTE_FILE:
for node in self.hosts:
self._file_list.append(FileObj(self.filename, node, self.name))
elif self.kind == LogKind.JOURNAL:
for node in self.hosts:
self._file_list.append(JournalObj(node, self.name))
def async_complete(self, pid, returncode, out, err):
"""
Handle completion of an asynchronous log file read.
This function saves the output from that read for look()/look_for_all()
to process and records the current position. Future reads will pick
back up from that spot.
Arguments:
pid -- The ID of the process that did the read
returncode -- The return code of the process that did the read
out -- stdout from the file read
err -- stderr from the file read
"""
# Called as delegate through {File,Journal}Obj.async_complete()
# pylint: disable=unused-argument
# TODO: Probably need a lock for updating self._line_cache
- self._logger.debug("%s: Got %d lines from %d (total %d)" % (self.name, len(out), pid, len(self._line_cache)))
+ self._logger.debug(f"{self.name}: Got {len(out)} lines from {pid} (total {len(self._line_cache)})")
if out:
with self._cache_lock:
self._line_cache.extend(out)
def __get_lines(self):
"""Iterate over all watched log files and collect new lines from each."""
if not self._file_list:
raise ValueError("No sources to read from")
pending = []
for f in self._file_list:
cached = f.harvest_cached()
if cached:
- self._debug("Got %d lines from %s cache (total %d)"
- % (len(cached), f.name, len(self._line_cache)))
+ self._debug(f"Got {len(cached)} lines from {f.name} cache (total {len(self._line_cache)})")
with self._cache_lock:
self._line_cache.extend(cached)
else:
t = f.harvest_async(self)
if t:
pending.append(t)
for t in pending:
t.join(60.0)
if t.is_alive():
- self._logger.log("%s: Aborting after 20s waiting for %r logging commands" % (self.name, t))
+ self._logger.log(f"{self.name}: Aborting after 20s waiting for {t!r} logging commands")
return
def end(self):
"""
Mark that a log is done being watched.
This function also resets internal data structures to the beginning
of the file. Subsequent watches will therefore start from the
beginning again.
"""
for f in self._file_list:
f.end()
def look(self, timeout=None):
"""
Examine the log looking for the regexes in this object.
It starts looking from the place marked by set_watch(), continuing
through the file in the fashion of `tail -f`. It properly recovers
from log file truncation but not from removing and recreating the log.
Arguments:
timeout -- Number of seconds to watch the log file; defaults to
seconds argument passed when this object was created
Returns the first line which matches any regex, or None if the timeout is reached before a match.
"""
if not timeout:
timeout = self._timeout
lines = 0
begin = time.time()
end = begin + timeout + 1
if not self.regexes:
self._debug("Nothing to look for")
return None
if timeout == 0:
for f in self._file_list:
f.set_end()
while True:
if self._line_cache:
lines += 1
with self._cache_lock:
line = self._line_cache.pop(0)
which = -1
if re.search("CTS:", line):
continue
for regex in self.regexes:
which += 1
matchobj = re.search(regex, line)
if matchobj:
self.whichmatch = which
- self._debug("Matched: %s" % line)
+ self._debug(f"Matched: {line}")
return line
elif timeout > 0 and end < time.time():
timeout = 0
for f in self._file_list:
f.set_end()
else:
self.__get_lines()
if not self._line_cache and end < time.time():
- self._debug("Single search terminated: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), lines))
+ self._debug(f"Single search terminated: start={begin}, end={end}, now={time.time()}, lines={lines}")
return None
- self._debug("Waiting: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), len(self._line_cache)))
+ self._debug(f"Waiting: start={begin}, end={end}, now={time.time()}, lines={len(self._line_cache)}")
time.sleep(1)
def look_for_all(self, allow_multiple_matches=False, silent=False):
"""
Like look(), but looks for matches for multiple regexes.
This function returns when the timeout is reached or all regexes were
matched. As a side effect, self.unmatched will contain regexes that
were not matched. This can be inspected by the caller.
Arguments:
allow_multiple_matches -- If True, allow each regex to match more than
once. If False (the default), once a regex
matches a line, it will no longer be searched
for.
silent -- If False, log extra information
Returns the matching lines if all regexes are matched, or None.
"""
save_regexes = self.regexes
result = []
if not silent:
- self._debug("starting search: timeout=%d" % self._timeout)
+ self._debug(f"starting search: timeout={self._timeout}")
while self.regexes:
one_result = self.look(self._timeout)
if not one_result:
self.unmatched = self.regexes
self.regexes = save_regexes
self.end()
return None
result.append(one_result)
if not allow_multiple_matches:
del self.regexes[self.whichmatch]
else:
# Allow multiple regexes to match a single line
tmp_regexes = self.regexes
self.regexes = []
for regex in tmp_regexes:
matchobj = re.search(regex, one_result)
if not matchobj:
self.regexes.append(regex)
self.unmatched = None
self.regexes = save_regexes
return result
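# A minimal usage sketch of LogWatcher, following the pattern described in the
# class docstring; the node name, log path, and pattern are illustrative.
from pacemaker._cts.watcher import LogKind, LogWatcher

watcher = LogWatcher("/var/log/messages",
                     [r"pacemaker-controld.*Quorum acquired"],
                     ["node1"], LogKind.REMOTE_FILE,
                     name="quorum-watch", timeout=30)
watcher.set_watch()                  # start watching from the current end
# ... trigger the cluster action under test here ...
if watcher.look_for_all() is None:   # None means the timeout was reached
    print(f"unmatched patterns: {watcher.unmatched}")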
diff --git a/python/pacemaker/_library.py b/python/pacemaker/_library.py
index cd6bc4ef44..1cd1bd500d 100644
--- a/python/pacemaker/_library.py
+++ b/python/pacemaker/_library.py
@@ -1,38 +1,38 @@
"""A module providing private library management code."""
__all__ = ["_libcrmcommon"]
-__copyright__ = "Copyright 2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2024-2025 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
import ctypes
from ctypes.util import find_library
from glob import glob
import os
from pacemaker.buildoptions import BuildOptions
def load_library(basename):
"""Find and load the library with the given base name."""
path = find_library(basename)
# If the library was not found anywhere in the default locations, also search
# for it in the build directory
if path is None:
# pylint: disable=protected-access
- for d in glob("%s/lib/*/.libs" % BuildOptions._BUILD_DIR):
- path = "%s/lib%s.so" % (d, basename)
+ for d in glob(f"{BuildOptions._BUILD_DIR}/lib/*/.libs"):
+ path = f"{d}/lib{basename}.so"
if os.path.exists(path):
break
path = None
if path is None:
raise FileNotFoundError(basename)
return ctypes.cdll.LoadLibrary(path)
_libcrmcommon = load_library("crmcommon")
_libcrmcommon.crm_exit_str.restype = ctypes.c_char_p
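# A minimal usage sketch of the loader above: crm_exit_str() is the symbol
# configured on the preceding line, and 0 is assumed here to correspond to
# CRM_EX_OK.
print(_libcrmcommon.crm_exit_str(0).decode())  # expected: "OK"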