diff --git a/python/pacemaker/_cts/CTS.py b/python/pacemaker/_cts/CTS.py
index 81b50ab50e..192618e60d 100644
--- a/python/pacemaker/_cts/CTS.py
+++ b/python/pacemaker/_cts/CTS.py
@@ -1,221 +1,243 @@
"""Main classes for Pacemaker's Cluster Test Suite (CTS)."""
__all__ = ["CtsLab", "NodeStatus", "Process"]
__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import sys
import time
import traceback
from pacemaker.exitstatus import ExitStatus
from pacemaker._cts.environment import EnvFactory
from pacemaker._cts.input import should_continue
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
class CtsLab:
"""
A class that defines the Lab Environment for the Cluster Test System.
It defines those things which are expected to change from test
environment to test environment for the same cluster manager.
This is where you define the set of nodes that are in your test lab,
what kind of reset mechanism you use, etc. All this data is stored
as key/value pairs in an Environment instance constructed from arguments
passed to this class.
The CTS code ignores names it doesn't know about or need. Individual
tests have access to this information, and it is perfectly acceptable
to provide hints, tweaks, fine-tuning directions, or other information
to the tests through this mechanism.
"""
def __init__(self, args=None):
"""
Create a new CtsLab instance.
This class can be treated kind of like a dictionary due to the presence
of typical dict functions like __contains__, __getitem__, and __setitem__.
However, it is not a dictionary so do not rely on standard dictionary
behavior.
Arguments:
args -- A list of command line parameters, minus the program name.
"""
self._env = EnvFactory().getInstance(args)
self._logger = LogFactory()
def dump(self):
"""Print the current environment."""
self._env.dump()
def __contains__(self, key):
"""Return True if the given environment key exists."""
# pylint gets confused because of EnvFactory here.
# pylint: disable=unsupported-membership-test
return key in self._env
def __getitem__(self, key):
"""Return the given environment key, or raise KeyError if it does not exist."""
# Throughout this file, pylint has trouble understanding that EnvFactory
# and RemoteFactory are singleton instances that can be treated as callable
# and subscriptable objects. Various warnings are disabled because of this.
# See also a comment about self._rsh in environment.py.
# pylint: disable=unsubscriptable-object
return self._env[key]
def __setitem__(self, key, value):
"""Set the given environment key to the given value, overriding any previous value."""
# pylint: disable=unsupported-assignment-operation
self._env[key] = value
def run(self, scenario, iterations):
"""
Run the given scenario the given number of times.
Returns ExitStatus.OK on success, or ExitStatus.ERROR on error.
"""
if not scenario:
self._logger.log("No scenario was defined")
return ExitStatus.ERROR
self._logger.log("Cluster nodes: ")
# pylint: disable=unsubscriptable-object
for node in self._env["nodes"]:
self._logger.log(f" * {node}")
if not scenario.setup():
return ExitStatus.ERROR
# We want to alert on any exceptions caused by running a scenario, so
# here it's okay to disable the pylint warning.
# pylint: disable=bare-except
try:
scenario.run(iterations)
except: # noqa: E722
self._logger.log(f"Exception by {sys.exc_info()[0]}")
self._logger.traceback(traceback)
scenario.summarize()
scenario.teardown()
return ExitStatus.ERROR
scenario.teardown()
scenario.summarize()
if scenario.stats["failure"] > 0:
return ExitStatus.ERROR
if scenario.stats["success"] != iterations:
self._logger.log("No failure count but success != requested iterations")
return ExitStatus.ERROR
return ExitStatus.OK
class NodeStatus:
"""
A class for querying the status of cluster nodes.
Are nodes up? Do they respond to SSH connections?
"""
def __init__(self, env):
"""
Create a new NodeStatus instance.
Arguments:
env -- An Environment instance
"""
self._env = env
def _node_booted(self, node):
"""Return True if the given node is booted (responds to pings)."""
# pylint: disable=not-callable
(rc, _) = RemoteFactory().getInstance()("localhost", f"ping -nq -c1 -w1 {node}", verbose=0)
return rc == 0
def _sshd_up(self, node):
"""Return true if sshd responds on the given node."""
# pylint: disable=not-callable
(rc, _) = RemoteFactory().getInstance()(node, "true", verbose=0)
return rc == 0
def wait_for_node(self, node, timeout=300):
"""
Wait for a node to become available.
Should the timeout be reached, the user will be given a choice whether
to continue or not. If not, ValueError will be raised.
Returns True when the node is available, or False if the timeout is
reached.
"""
initial_timeout = timeout
anytimeouts = False
while timeout > 0:
if self._node_booted(node) and self._sshd_up(node):
if anytimeouts:
# Fudge to wait for the system to finish coming up
time.sleep(30)
LogFactory().debug(f"Node {node} now up")
return True
time.sleep(30)
if not anytimeouts:
LogFactory().debug(f"Waiting for node {node} to come up")
anytimeouts = True
timeout -= 1
LogFactory().log(f"{node} did not come up within {initial_timeout} tries")
if not should_continue(self._env["continue"]):
raise ValueError(f"{node} did not come up within {initial_timeout} tries")
return False
def wait_for_all_nodes(self, nodes, timeout=300):
"""Return True when all nodes come up, or False if the timeout is reached."""
for node in nodes:
if not self.wait_for_node(node, timeout):
return False
return True
class Process:
"""A class for managing a Pacemaker daemon."""
# pylint: disable=invalid-name
def __init__(self, cm, name, pats=None, badnews_ignore=None):
"""
Create a new Process instance.
Arguments:
cm -- A ClusterManager instance
name -- The command being run
pats -- Regexes we expect to find in log files
badnews_ignore -- Regexes for lines in the log that can be ignored
"""
self._cm = cm
self.badnews_ignore = badnews_ignore
self.name = name
self.pats = pats
if self.badnews_ignore is None:
self.badnews_ignore = []
if self.pats is None:
self.pats = []
- def kill(self, node):
- """Kill the instance of this process running on the given node."""
- (rc, _) = self._cm.rsh(node, f"killall -9 {self.name}")
-
+ def signal(self, sig, node):
+ """Send a signal to the instance of this process running on the given node."""
+ # Using psutil would be nice but we need a shell command line.
+
+ # Word boundaries. It's not clear how portable \<, \>, \b, and \W are.
+ non_word_char = "[^_[:alnum:]]"
+ word_begin = f"(^|{non_word_char})"
+ word_end = f"($|{non_word_char})"
+
+ # Match this process, possibly running under valgrind
+ search_re = f"({word_begin}valgrind )?.*{word_begin}{self.name}{word_end}"
+
+ if sig in ["SIGKILL", "KILL", 9, "SIGTERM", "TERM", 15]:
+ (rc, _) = self._cm.rsh(node, f"pgrep --full '{search_re}'")
+ if rc == 1:
+ # No matching process, so nothing to kill/terminate
+ return
+ if rc != 0:
+ # 2 or 3: Syntax error or fatal error (like out of memory)
+ self._cm.log(f"ERROR: pgrep for {self.name} failed on node {node}")
+ return
+
+ # 0: One or more processes were successfully signaled.
+ # 1: No processes matched or none of them could be signaled.
+ # This is why we check for no matching process above.
+ (rc, _) = self._cm.rsh(node, f"pkill --signal {sig} --full '{search_re}'")
if rc != 0:
- self._cm.log(f"ERROR: Kill {self.name} failed on node {node}")
+ self._cm.log(f"ERROR: Sending signal {sig} to {self.name} failed on node {node}")
diff --git a/python/pacemaker/_cts/tests/componentfail.py b/python/pacemaker/_cts/tests/componentfail.py
index 7135328488..2edb4aee83 100644
--- a/python/pacemaker/_cts/tests/componentfail.py
+++ b/python/pacemaker/_cts/tests/componentfail.py
@@ -1,161 +1,161 @@
"""Kill a pacemaker daemon and test how the cluster recovers."""
__all__ = ["ComponentFail"]
__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import re
from pacemaker._cts.audits import AuditResource
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
# pylint doesn't understand that self._env is subscriptable.
# pylint: disable=unsubscriptable-object
# @TODO Separate this into a separate test for each component, so the patterns
# can be made specific to each component, investigating failures is a little
# easier, and specific testing can be done for each component (for example,
# set attributes before and after killing pacemaker-attrd and check values).
class ComponentFail(CTSTest):
"""Kill a random pacemaker daemon and wait for the cluster to recover."""
def __init__(self, cm):
"""
Create a new ComponentFail instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.is_unsafe = True
self.name = "ComponentFail"
self._complist = cm.components
self._okerrpatterns = []
self._patterns = []
self._startall = SimulStartLite(cm)
def __call__(self, node):
"""Perform this test."""
self.incr("calls")
self._patterns = []
self._okerrpatterns = []
# start all nodes
ret = self._startall(None)
if not ret:
return self.failure("Setup failed")
if not self._cm.cluster_stable(self._env["stable_time"]):
return self.failure("Setup failed - unstable")
# select a component to kill
chosen = self._env.random_gen.choice(self._complist)
node_is_dc = self._cm.is_node_dc(node, None)
self.debug(f"...component {chosen.name} (dc={node_is_dc})")
self.incr(chosen.name)
if chosen.name != "corosync":
self._patterns.extend([
self._cm.templates["Pat:ChildKilled"] % (node, chosen.name),
self._cm.templates["Pat:ChildRespawn"] % (node, chosen.name),
])
self._patterns.extend(chosen.pats)
# @TODO this should be a flag in the Component
if chosen.name in ["corosync", "pacemaker-based", "pacemaker-fenced"]:
# Ignore actions for fence devices if fencer will respawn
# (their registration will be lost, and probes will fail)
self._okerrpatterns = [
self._cm.templates["Pat:Fencing_active"],
]
(_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
for line in lines:
if re.search("^Resource", line):
r = AuditResource(self._cm, line)
if r.rclass == "stonith":
self._okerrpatterns.extend([
self._cm.templates["Pat:Fencing_recover"] % r.id,
self._cm.templates["Pat:Fencing_probe"] % r.id,
])
# supply a copy so self.patterns doesn't end up empty
tmp_pats = self._patterns.copy()
self._patterns.extend(chosen.badnews_ignore)
# Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
stonith_pats = [
self._cm.templates["Pat:Fencing_ok"] % node
]
stonith = self.create_watch(stonith_pats, 0)
stonith.set_watch()
# set the watch for stable
watch = self.create_watch(
tmp_pats, self._env["dead_time"] + self._env["stable_time"] + self._env["start_time"])
watch.set_watch()
# kill the component
- chosen.kill(node)
+ chosen.signal("KILL", node)
self.debug("Waiting for the cluster to recover")
self._cm.cluster_stable()
self.debug("Waiting for any fenced node to come back up")
self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600)
self.debug("Waiting for the cluster to re-stabilize with all nodes")
self._cm.cluster_stable(self._env["start_time"])
self.debug(f"Checking if {node} was shot")
shot = stonith.look(60)
if shot:
self.debug(f"Found: {shot!r}")
self._okerrpatterns.append(self._cm.templates["Pat:Fencing_start"] % node)
if not self._env["at-boot"]:
self._cm.expected_status[node] = "down"
# If fencing occurred, chances are many (if not all) the expected logs
# will not be sent - or will be lost when the node reboots
return self.success()
# check for logs indicating a graceful recovery
matched = watch.look_for_all(allow_multiple_matches=True)
if watch.unmatched:
self._logger.log(f"Patterns not found: {watch.unmatched!r}")
self.debug("Waiting for the cluster to re-stabilize with all nodes")
is_stable = self._cm.cluster_stable(self._env["start_time"])
if not matched:
return self.failure(f"Didn't find all expected {chosen.name} patterns")
if not is_stable:
return self.failure(f"Cluster did not become stable after killing {chosen.name}")
return self.success()
@property
def errors_to_ignore(self):
"""Return a list of errors which should be ignored."""
# Note that okerrpatterns refers to the last time we ran this test
# The good news is that this works fine for us...
self._okerrpatterns.extend(self._patterns)
return self._okerrpatterns
diff --git a/python/pacemaker/_cts/tests/remotedriver.py b/python/pacemaker/_cts/tests/remotedriver.py
index 535eacf1d7..5cb2335f05 100644
--- a/python/pacemaker/_cts/tests/remotedriver.py
+++ b/python/pacemaker/_cts/tests/remotedriver.py
@@ -1,542 +1,543 @@
"""Base classes for CTS tests."""
__all__ = ["RemoteDriver"]
__copyright__ = "Copyright 2000-2025 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
import os
import time
import subprocess
import tempfile
+from pacemaker._cts.CTS import Process
from pacemaker._cts.tests.ctstest import CTSTest
from pacemaker._cts.tests.simulstartlite import SimulStartLite
from pacemaker._cts.tests.starttest import StartTest
from pacemaker._cts.tests.stoptest import StopTest
from pacemaker._cts.timer import Timer
# Disable various pylint warnings that occur in so many places throughout this
# file it's easiest to just take care of them globally. This does introduce the
# possibility that we'll miss some other cause of the same warning, but we'll
# just have to be careful.
# pylint doesn't understand that self._rsh is callable.
# pylint: disable=not-callable
class RemoteDriver(CTSTest):
"""
A specialized base class for cluster tests that run on Pacemaker Remote nodes.
This builds on top of CTSTest to provide methods for starting and stopping
services and resources, and managing remote nodes. This is still just an
abstract class -- specific tests need to implement their own specialized
behavior.
"""
def __init__(self, cm):
"""
Create a new RemoteDriver instance.
Arguments:
cm -- A ClusterManager instance
"""
CTSTest.__init__(self, cm)
self.name = "RemoteDriver"
self._corosync_enabled = False
self._pacemaker_enabled = False
self._remote_node = None
self._remote_rsc = "remote-rsc"
self._start = StartTest(cm)
self._startall = SimulStartLite(cm)
self._stop = StopTest(cm)
self.reset()
def reset(self):
"""Reset the state of this test back to what it was before the test was run."""
self.failed = False
self.fail_string = ""
self._pcmk_started = False
self._remote_node_added = False
self._remote_rsc_added = False
self._remote_use_reconnect_interval = self._env.random_gen.choice([True, False])
def fail(self, msg):
"""Mark test as failed."""
self.failed = True
# Always log the failure.
self._logger.log(msg)
# Use first failure as test status, as it's likely to be most useful.
if not self.fail_string:
self.fail_string = msg
def _get_other_node(self, node):
"""
Get the first cluster node out of the environment that is not the given node.
Typically, this is used to find some node that will still be active that
we can run cluster commands on.
"""
for othernode in self._env["nodes"]:
if othernode == node:
# we don't want to try and use the cib that we just shutdown.
# find a cluster node that is not our soon to be remote-node.
continue
return othernode
def _del_rsc(self, node, rsc):
"""
Delete the given named resource from the cluster.
The given `node` is the cluster node on which we should *not* run the
delete command.
"""
othernode = self._get_other_node(node)
(rc, _) = self._rsh(othernode, f"crm_resource -D -r {rsc} -t primitive")
if rc != 0:
self.fail(f"Removal of resource '{rsc}' failed")
def _add_rsc(self, node, rsc_xml):
"""
Add a resource given in XML format to the cluster.
The given `node` is the cluster node on which we should *not* run the
add command.
"""
othernode = self._get_other_node(node)
(rc, _) = self._rsh(othernode, f"cibadmin -C -o resources -X '{rsc_xml}'")
if rc != 0:
self.fail("resource creation failed")
def _add_primitive_rsc(self, node):
"""
Add a primitive heartbeat resource for the remote node to the cluster.
The given `node` is the cluster node on which we should *not* run the
add command.
"""
rsc_xml = f"""
"""
self._add_rsc(node, rsc_xml)
if not self.failed:
self._remote_rsc_added = True
def _add_connection_rsc(self, node):
"""
Add a primitive connection resource for the remote node to the cluster.
The given `node` is the cluster node on which we should *not* run the
add command.
"""
rsc_xml = f"""
"""
if self._remote_use_reconnect_interval:
# Set reconnect interval on resource
rsc_xml += f"""
"""
rsc_xml += f"""
"""
self._add_rsc(node, rsc_xml)
if not self.failed:
self._remote_node_added = True
def _disable_services(self, node):
"""Disable the corosync and pacemaker services on the given node."""
self._corosync_enabled = self._env.service_is_enabled(node, "corosync")
if self._corosync_enabled:
self._env.disable_service(node, "corosync")
self._pacemaker_enabled = self._env.service_is_enabled(node, "pacemaker")
if self._pacemaker_enabled:
self._env.disable_service(node, "pacemaker")
def _enable_services(self, node):
"""Enable the corosync and pacemaker services on the given node."""
if self._corosync_enabled:
self._env.enable_service(node, "corosync")
if self._pacemaker_enabled:
self._env.enable_service(node, "pacemaker")
def _stop_pcmk_remote(self, node):
"""Stop the Pacemaker Remote service on the given node."""
for _ in range(10):
(rc, _) = self._rsh(node, "service pacemaker_remote stop")
if rc != 0:
time.sleep(6)
else:
break
def _start_pcmk_remote(self, node):
"""Start the Pacemaker Remote service on the given node."""
for _ in range(10):
(rc, _) = self._rsh(node, "service pacemaker_remote start")
if rc != 0:
time.sleep(6)
else:
self._pcmk_started = True
break
def _freeze_pcmk_remote(self, node):
"""Simulate a Pacemaker Remote daemon failure."""
- self._rsh(node, "killall -STOP pacemaker-remoted")
+ Process(self._cm, "pacemaker-remoted").signal("STOP", node)
def _resume_pcmk_remote(self, node):
"""Simulate the Pacemaker Remote daemon recovering."""
- self._rsh(node, "killall -CONT pacemaker-remoted")
+ Process(self._cm, "pacemaker-remoted").signal("CONT", node)
def _start_metal(self, node):
"""
Set up a Pacemaker Remote configuration.
Remove any existing connection resources or nodes. Start the
pacemaker_remote service. Create a connection resource.
"""
# Cluster nodes are reused as remote nodes in remote tests. If cluster
# services were enabled at boot, in case the remote node got fenced, the
# cluster node would join instead of the expected remote one. Meanwhile
# pacemaker_remote would not be able to start. Depending on the chances,
# the situations might not be able to be orchestrated gracefully any more.
#
    # Temporarily disable any enabled cluster services.
self._disable_services(node)
# make sure the resource doesn't already exist for some reason
self._rsh(node, f"crm_resource -D -r {self._remote_rsc} -t primitive")
self._rsh(node, f"crm_resource -D -r {self._remote_node} -t primitive")
if not self._stop(node):
self.fail(f"Failed to shutdown cluster node {node}")
return
self._start_pcmk_remote(node)
if not self._pcmk_started:
self.fail(f"Failed to start pacemaker_remote on node {node}")
return
# Convert node to baremetal now that it has shutdown the cluster stack
pats = []
watch = self.create_watch(pats, 120)
watch.set_watch()
pats.extend([
self._cm.templates["Pat:RscOpOK"] % ("start", self._remote_node),
self._cm.templates["Pat:DC_IDLE"]
])
self._add_connection_rsc(node)
with Timer(self._logger, self.name, "remoteMetalInit"):
watch.look_for_all()
if watch.unmatched:
self.fail(f"Unmatched patterns: {watch.unmatched}")
def migrate_connection(self, node):
"""Move the remote connection resource to any other available node."""
if self.failed:
return
pats = [
self._cm.templates["Pat:RscOpOK"] % ("migrate_to", self._remote_node),
self._cm.templates["Pat:RscOpOK"] % ("migrate_from", self._remote_node),
self._cm.templates["Pat:DC_IDLE"]
]
watch = self.create_watch(pats, 120)
watch.set_watch()
(rc, _) = self._rsh(node, f"crm_resource -M -r {self._remote_node}", verbose=1)
if rc != 0:
self.fail("failed to move remote node connection resource")
return
with Timer(self._logger, self.name, "remoteMetalMigrate"):
watch.look_for_all()
if watch.unmatched:
self.fail(f"Unmatched patterns: {watch.unmatched}")
def fail_rsc(self, node):
"""
Cause the dummy resource running on a Pacemaker Remote node to fail.
Verify that the failure is logged correctly.
"""
if self.failed:
return
watchpats = [
self._cm.templates["Pat:RscRemoteOpOK"] % ("stop", self._remote_rsc, self._remote_node),
self._cm.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node),
self._cm.templates["Pat:DC_IDLE"]
]
watch = self.create_watch(watchpats, 120)
watch.set_watch()
self.debug("causing dummy rsc to fail.")
self._rsh(node, "rm -f /var/run/resource-agents/Dummy*")
with Timer(self._logger, self.name, "remoteRscFail"):
watch.look_for_all()
if watch.unmatched:
self.fail(f"Unmatched patterns during rsc fail: {watch.unmatched}")
def fail_connection(self, node):
"""
Cause the remote connection resource to fail.
Verify that the node is fenced and the connection resource is restarted
on another node.
"""
if self.failed:
return
watchpats = [
self._cm.templates["Pat:Fencing_ok"] % self._remote_node,
self._cm.templates["Pat:NodeFenced"] % self._remote_node
]
watch = self.create_watch(watchpats, 120)
watch.set_watch()
# freeze the pcmk remote daemon. this will result in fencing
self.debug("Force stopped active remote node")
self._freeze_pcmk_remote(node)
self.debug("Waiting for remote node to be fenced.")
with Timer(self._logger, self.name, "remoteMetalFence"):
watch.look_for_all()
if watch.unmatched:
self.fail(f"Unmatched patterns: {watch.unmatched}")
return
self.debug("Waiting for the remote node to come back up")
self._cm.ns.wait_for_node(node, 120)
pats = []
watch = self.create_watch(pats, 240)
watch.set_watch()
pats.append(self._cm.templates["Pat:RscOpOK"] % ("start", self._remote_node))
if self._remote_rsc_added:
pats.append(self._cm.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node))
# start the remote node again watch it integrate back into cluster.
self._start_pcmk_remote(node)
if not self._pcmk_started:
self.fail(f"Failed to start pacemaker_remote on node {node}")
return
self.debug("Waiting for remote node to rejoin cluster after being fenced.")
with Timer(self._logger, self.name, "remoteMetalRestart"):
watch.look_for_all()
if watch.unmatched:
self.fail(f"Unmatched patterns: {watch.unmatched}")
def _add_dummy_rsc(self, node):
"""Add a dummy resource that runs on the Pacemaker Remote node."""
if self.failed:
return
# verify we can put a resource on the remote node
pats = []
watch = self.create_watch(pats, 120)
watch.set_watch()
pats.extend([
self._cm.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node),
self._cm.templates["Pat:DC_IDLE"]
])
# Add a resource that must live on remote-node
self._add_primitive_rsc(node)
# force that rsc to prefer the remote node.
(rc, _) = self._cm.rsh(node, f"crm_resource -M -r {self._remote_rsc} -N {self._remote_node} -f", verbose=1)
if rc != 0:
self.fail("Failed to place remote resource on remote node.")
return
with Timer(self._logger, self.name, "remoteMetalRsc"):
watch.look_for_all()
if watch.unmatched:
self.fail(f"Unmatched patterns: {watch.unmatched}")
def test_attributes(self, node):
"""Verify that attributes can be set on the Pacemaker Remote node."""
if self.failed:
return
# This verifies permanent attributes can be set on a remote-node. It also
# verifies the remote-node can edit its own cib node section remotely.
(rc, line) = self._cm.rsh(node, f"crm_attribute -l forever -n testattr -v testval -N {self._remote_node}", verbose=1)
if rc != 0:
self.fail(f"Failed to set remote-node attribute. rc:{rc} output:{line}")
return
(rc, _) = self._cm.rsh(node, f"crm_attribute -l forever -n testattr -q -N {self._remote_node}", verbose=1)
if rc != 0:
self.fail("Failed to get remote-node attribute")
return
(rc, _) = self._cm.rsh(node, f"crm_attribute -l forever -n testattr -D -N {self._remote_node}", verbose=1)
if rc != 0:
self.fail("Failed to delete remote-node attribute")
def cleanup_metal(self, node):
"""
Clean up the Pacemaker Remote node configuration previously created by _setup_metal.
Stop and remove dummy resources and connection resources. Stop the
pacemaker_remote service. Remove the remote node itself.
"""
self._enable_services(node)
if not self._pcmk_started:
return
pats = []
watch = self.create_watch(pats, 120)
watch.set_watch()
if self._remote_rsc_added:
pats.append(self._cm.templates["Pat:RscOpOK"] % ("stop", self._remote_rsc))
if self._remote_node_added:
pats.append(self._cm.templates["Pat:RscOpOK"] % ("stop", self._remote_node))
with Timer(self._logger, self.name, "remoteMetalCleanup"):
self._resume_pcmk_remote(node)
if self._remote_rsc_added:
# Remove dummy resource added for remote node tests
self.debug("Cleaning up dummy rsc put on remote node")
self._rsh(self._get_other_node(node), f"crm_resource -U -r {self._remote_rsc}")
self._del_rsc(node, self._remote_rsc)
if self._remote_node_added:
# Remove remote node's connection resource
self.debug("Cleaning up remote node connection resource")
self._rsh(self._get_other_node(node), f"crm_resource -U -r {self._remote_node}")
self._del_rsc(node, self._remote_node)
watch.look_for_all()
if watch.unmatched:
self.fail(f"Unmatched patterns: {watch.unmatched}")
self._stop_pcmk_remote(node)
self.debug("Waiting for the cluster to recover")
self._cm.cluster_stable()
if self._remote_node_added:
# Remove remote node itself
self.debug("Cleaning up node entry for remote node")
self._rsh(self._get_other_node(node), f"crm_node --force --remove {self._remote_node}")
def _setup_env(self, node):
"""
Set up the environment to allow Pacemaker Remote to function.
This involves generating a key and copying it to all nodes in the cluster.
"""
self._remote_node = f"remote-{node}"
# we are assuming if all nodes have a key, that it is
# the right key... If any node doesn't have a remote
# key, we regenerate it everywhere.
if self._rsh.exists_on_all("/etc/pacemaker/authkey", self._env["nodes"]):
return
# create key locally
(handle, keyfile) = tempfile.mkstemp(".cts")
os.close(handle)
subprocess.check_call(["dd", "if=/dev/urandom", f"of={keyfile}", "bs=4096", "count=1"],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# sync key throughout the cluster
for n in self._env["nodes"]:
self._rsh(n, "mkdir -p --mode=0750 /etc/pacemaker")
self._rsh.copy(keyfile, f"root@{n}:/etc/pacemaker/authkey")
self._rsh(n, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey")
self._rsh(n, "chmod 0640 /etc/pacemaker/authkey")
os.unlink(keyfile)
def is_applicable(self):
"""Return True if this test is applicable in the current test configuration."""
if not CTSTest.is_applicable(self):
return False
for node in self._env["nodes"]:
(rc, _) = self._rsh(node, "which pacemaker-remoted >/dev/null 2>&1")
if rc != 0:
return False
return True
def start_new_test(self, node):
"""Prepare a remote test for running by setting up its environment and resources."""
self.incr("calls")
self.reset()
ret = self._startall(None)
if not ret:
return self.failure("setup failed: could not start all nodes")
self._setup_env(node)
self._start_metal(node)
self._add_dummy_rsc(node)
return True
def __call__(self, node):
"""Perform this test."""
raise NotImplementedError
@property
def errors_to_ignore(self):
"""Return list of errors which should be ignored."""
return [
r"""is running on remote.*which isn't allowed""",
r"""Connection terminated""",
r"""Could not send remote"""
]