diff --git a/python/pacemaker/_cts/CTS.py b/python/pacemaker/_cts/CTS.py
index 108dbe7251..3bf0f48870 100644
--- a/python/pacemaker/_cts/CTS.py
+++ b/python/pacemaker/_cts/CTS.py
@@ -1,239 +1,239 @@
 """ Main classes for Pacemaker's Cluster Test Suite (CTS) """
 
 __all__ = ["CtsLab", "NodeStatus", "Process"]
 __copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import sys
 import time
 import traceback
 
 from pacemaker.exitstatus import ExitStatus
 from pacemaker._cts.environment import EnvFactory
 from pacemaker._cts.input import should_continue
 from pacemaker._cts.logging import LogFactory
 from pacemaker._cts.remote import RemoteFactory
 
 class CtsLab:
     """ A class that defines the Lab Environment for the Cluster Test System.
         It defines those things which are expected to change from test
         environment to test environment for the same cluster manager.
 
         This is where you define the set of nodes that are in your test lab,
         what kind of reset mechanism you use, etc.  All this data is stored
         as key/value pairs in an Environment instance constructed from arguments
         passed to this class.
 
         The CTS code ignores names it doesn't know about or need.  Individual
         tests have access to this information, and it is perfectly acceptable
         to provide hints, tweaks, fine-tuning directions, or other information
         to the tests through this mechanism.
     """
 
     def __init__(self, args=None):
         """ Create a new CtsLab instance.  This class can be treated kind
             of like a dictionary due to the presence of typical dict functions
             like __contains__, __getitem__, and __setitem__.  However, it is not a
             dictionary so do not rely on standard dictionary behavior.
 
             Arguments:
 
             args -- A list of command line parameters, minus the program name.
         """
 
         self._env = EnvFactory().getInstance(args)
         self._logger = LogFactory()
 
     def dump(self):
         """ Print the current environment """
 
         self._env.dump()
 
     def __contains__(self, key):
         """ Does the given environment key exist? """
 
         # pylint gets confused because of EnvFactory here.
         # pylint: disable=unsupported-membership-test
         return key in self._env
 
     def __getitem__(self, key):
         """ Return the given environment key, or raise KeyError if it does
             not exist
         """
 
         # Throughout this file, pylint has trouble understanding that EnvFactory
         # and RemoteFactory are singleton instances that can be treated as callable
         # and subscriptable objects.  Various warnings are disabled because of this.
         # See also a comment about self._rsh in environment.py.
         # pylint: disable=unsubscriptable-object
         return self._env[key]
 
     def __setitem__(self, key, value):
         """ Set the given environment key to the given value, overriding any
             previous value
         """
 
         # pylint: disable=unsupported-assignment-operation
         self._env[key] = value
 
     def run(self, scenario, iterations):
         """ Run the given scenario the given number of times.
 
             Returns:
 
             ExitStatus.OK on success, or ExitStatus.ERROR on error
         """
 
         if not scenario:
             self._logger.log("No scenario was defined")
             return ExitStatus.ERROR
 
         self._logger.log("Cluster nodes: ")
         # pylint: disable=unsubscriptable-object
         for node in self._env["nodes"]:
             self._logger.log("    * %s" % (node))
 
-        if not scenario.SetUp():
+        if not scenario.setup():
             return ExitStatus.ERROR
 
         # We want to alert on any exceptions caused by running a scenario, so
         # here it's okay to disable the pylint warning.
         # pylint: disable=bare-except
         try:
             scenario.run(iterations)
         except:
             self._logger.log("Exception by %s" % sys.exc_info()[0])
             self._logger.traceback(traceback)
 
             scenario.summarize()
             scenario.TearDown()
             return ExitStatus.ERROR
 
         scenario.TearDown()
         scenario.summarize()
 
         if scenario.Stats["failure"] > 0:
             return ExitStatus.ERROR
 
         if scenario.Stats["success"] != iterations:
             self._logger.log("No failure count but success != requested iterations")
             return ExitStatus.ERROR
 
         return ExitStatus.OK
 
 
 class NodeStatus:
     """ A class for querying the status of cluster nodes - are nodes up?  Do
         they respond to SSH connections?
     """
 
     def __init__(self, env):
         """ Create a new NodeStatus instance
 
             Arguments:
 
             env -- An Environment instance
         """
         self._env = env
 
     def _node_booted(self, node):
         """ Return True if the given node is booted (responds to pings) """
 
         # pylint: disable=not-callable
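         # ping flags: -n (numeric output only), -q (quiet), -c1 (stop after
         # one packet), -w1 (give up after one second)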
         (rc, _) = RemoteFactory().getInstance()("localhost", "ping -nq -c1 -w1 %s" % node, verbose=0)
         return rc == 0
 
     def _sshd_up(self, node):
         """ Return true if sshd responds on the given node """
 
         # pylint: disable=not-callable
         (rc, _) = RemoteFactory().getInstance()(node, "true", verbose=0)
         return rc == 0
 
     def wait_for_node(self, node, timeout=300):
         """ Wait for a node to become available.  Should the timeout be reached,
             the user will be given a choice whether to continue or not.  If not,
             ValueError will be raised.
 
             Returns:
 
             True when the node is available, or False if the timeout is reached.
         """
 
         initial_timeout = timeout
         anytimeouts = False
 
         while timeout > 0:
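             # Each failed check sleeps 30 seconds and decrements "timeout" by
             # one, so the default of 300 allows 300 tries, not 300 seconds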
             if self._node_booted(node) and self._sshd_up(node):
                 if anytimeouts:
                     # Fudge to wait for the system to finish coming up
                     time.sleep(30)
                     LogFactory().debug("Node %s now up" % node)
 
                 return True
 
             time.sleep(30)
             if not anytimeouts:
                 LogFactory().debug("Waiting for node %s to come up" % node)
 
             anytimeouts = True
             timeout -= 1
 
         LogFactory().log("%s did not come up within %d tries" % (node, initial_timeout))
         if not should_continue(self._env["continue"]):
             raise ValueError("%s did not come up within %d tries" % (node, initial_timeout))
 
         return False
 
     def wait_for_all_nodes(self, nodes, timeout=300):
         """ Return True when all nodes come up, or False if the timeout is reached """
 
         for node in nodes:
             if not self.wait_for_node(node, timeout):
                 return False
 
         return True
 
 
 class Process:
     """ A class for managing a Pacemaker daemon """
 
     # pylint: disable=invalid-name
     def __init__(self, cm, name, dc_only=False, pats=None, dc_pats=None,
                  badnews_ignore=None):
         """ Create a new Process instance.
 
             Arguments:
 
             cm              -- A ClusterManager instance
             name            -- The command being run
             dc_only         -- Should this daemon be killed only on the DC?
             pats            -- Regexes we expect to find in log files
             dc_pats         -- Additional DC-specific regexes we expect to find
                                in log files
             badnews_ignore  -- Regexes for lines in the log that can be ignored
         """
 
         self._cm = cm
         self.badnews_ignore = badnews_ignore
         self.dc_only = dc_only
         self.dc_pats = dc_pats
         self.name = name
         self.pats = pats
 
         if self.badnews_ignore is None:
             self.badnews_ignore = []
 
         if self.dc_pats is None:
             self.dc_pats = []
 
         if self.pats is None:
             self.pats = []
 
     def kill(self, node):
         """ Kill the instance of this process running on the given node """
 
         (rc, _) = self._cm.rsh(node, "killall -9 %s" % self.name)
 
         if rc != 0:
             self._cm.log ("ERROR: Kill %s failed on node %s" % (self.name, node))
diff --git a/python/pacemaker/_cts/scenarios.py b/python/pacemaker/_cts/scenarios.py
index ccfaac18c7..9996f82b58 100644
--- a/python/pacemaker/_cts/scenarios.py
+++ b/python/pacemaker/_cts/scenarios.py
@@ -1,357 +1,358 @@
 """ Test scenario classes for Pacemaker's Cluster Test Suite (CTS)
 """
 
 __all__ = [ "AllOnce", "Boot", "BootCluster", "LeaveBooted", "RandomTests", "Sequence" ]
 __copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import os
 import re
 import sys
 import time
 
 from pacemaker._cts.audits import ClusterAudit
 from pacemaker._cts.input import should_continue
 from pacemaker._cts.tests.ctstest import CTSTest
 from pacemaker._cts.watcher import LogWatcher
 
 class ScenarioComponent(object):
     '''The base class for scenario components.  A component is one step of a
     scenario: it is set up before the tests run and torn down afterward.
     '''
 
     def __init__(self, Env):
         self.Env = Env
 
     def is_applicable(self):
         '''Return True if this ScenarioComponent is applicable to the
         LabEnvironment passed to the constructor.
         '''
 
         raise NotImplementedError
 
-    def SetUp(self, CM):
+    def setup(self, CM):
         '''Set up the given ScenarioComponent'''
-        raise ValueError("Abstract Class member (Setup)")
+
+        raise NotImplementedError
 
     def TearDown(self, CM):
         '''Tear down (undo) the given ScenarioComponent'''
         raise ValueError("Abstract Class member (Setup)")
 
 
 class Scenario(object):
     (
 '''The basic idea of a scenario is that of an ordered list of
-ScenarioComponent objects.  Each ScenarioComponent is SetUp() in turn,
+ScenarioComponent objects.  Each ScenarioComponent is setup() in turn,
 and then after the tests have been run, they are torn down using TearDown()
 (in reverse order).
 
 A Scenario is applicable to a particular cluster manager iff each
 ScenarioComponent is applicable.
 
 A partially set up scenario is torn down if it fails during setup.
 ''')
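     # A sketch of the lifecycle (cm, env, audits, tests, and iterations are
     # placeholders; CtsLab.run in CTS.py drives the same sequence):
     #
     #   scenario = AllOnce(cm, [BootCluster(env)], audits, tests)
     #   if scenario.setup():
     #       scenario.run(iterations)
     #       scenario.TearDown()
     #       scenario.summarize()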
 
     def __init__(self, ClusterManager, Components, Audits, Tests):
 
         "Initialize the Scenario from the list of ScenarioComponents"
 
         self.ClusterManager = ClusterManager
         self.Components = Components
         self.Audits = Audits
         self.Tests = Tests
 
         self.BadNews = None
         self.TestSets = []
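         # Running totals that summarize() reports when the scenario ends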
         self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
         self.Sets = []
 
         #self.ns=CTS.NodeStatus(self.Env)
 
         for comp in Components:
             if not issubclass(comp.__class__, ScenarioComponent):
                 raise ValueError("Init value must be subclass of ScenarioComponent")
 
         for audit in Audits:
             if not issubclass(audit.__class__, ClusterAudit):
                 raise ValueError("Init value must be subclass of ClusterAudit")
 
         for test in Tests:
             if not issubclass(test.__class__, CTSTest):
                 raise ValueError("Init value must be a subclass of CTSTest")
 
     def is_applicable(self):
         (
 '''A Scenario is_applicable() iff each of its ScenarioComponents is_applicable()
 '''
         )
 
         for comp in self.Components:
             if not comp.is_applicable():
                 return False
 
         return True
 
-    def SetUp(self):
+    def setup(self):
         '''Set up the Scenario. Return True on success.'''
 
         self.ClusterManager.prepare()
         self.audit() # Also detects remote/local log config
         self.ClusterManager.ns.wait_for_all_nodes(self.ClusterManager.Env["nodes"])
 
         self.audit()
         self.ClusterManager.install_support()
 
         self.BadNews = LogWatcher(self.ClusterManager.Env["LogFileName"],
                                   self.ClusterManager.templates.get_patterns("BadNews"),
                                   self.ClusterManager.Env["nodes"],
                                   self.ClusterManager.Env["LogWatcher"],
                                   "BadNews", 0)
         self.BadNews.set_watch() # Call after we've figured out what type of log watching to do in LogAudit
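         # audit() periodically drains this watcher and reports any matches
         # that are not on an ignore list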
 
         j = 0
         while j < len(self.Components):
-            if not self.Components[j].SetUp(self.ClusterManager):
+            if not self.Components[j].setup(self.ClusterManager):
                 # OOPS!  We failed.  Tear partial setups down.
                 self.audit()
                 self.ClusterManager.log("Tearing down partial setup")
                 self.TearDown(j)
-                return None
+                return False
             j += 1
 
         self.audit()
-        return 1
+        return True
 
     def TearDown(self, max=None):
 
         '''Tear Down the Scenario - in reverse order.'''
 
         if max is None:
             max = len(self.Components)-1
         j = max
         while j >= 0:
             self.Components[j].TearDown(self.ClusterManager)
             j -= 1
 
         self.audit()
         self.ClusterManager.install_support("uninstall")
 
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if name not in self.Stats:
             self.Stats[name] = 0
         self.Stats[name] += 1
 
     def run(self, Iterations):
         self.ClusterManager.oprofileStart()
         try:
             self.run_loop(Iterations)
             self.ClusterManager.oprofileStop()
         except:
             self.ClusterManager.oprofileStop()
             raise
 
     def run_loop(self, Iterations):
         raise ValueError("Abstract Class member (run_loop)")
 
     def run_test(self, test, testcount):
         nodechoice = self.ClusterManager.Env.random_node()
 
         ret = True
         did_run = 0
 
         self.ClusterManager.instance_errorstoignore_clear()
         self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) + "[" + ("%d" % testcount).rjust(3) + "]")
 
         starttime = test.set_timer()
         if not test.setup(nodechoice):
             self.ClusterManager.log("Setup failed")
             ret = False
 
         elif not test.can_run_now(nodechoice):
             self.ClusterManager.log("Skipped")
             test.skipped()
 
         else:
             did_run = 1
             ret = test(nodechoice)
 
         if not test.teardown(nodechoice):
             self.ClusterManager.log("Teardown failed")
 
             if not should_continue(self.ClusterManager.Env):
                 raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))
 
             ret = False
 
         stoptime = time.time()
         self.ClusterManager.oprofileSave(testcount)
 
         elapsed_time = stoptime - starttime
         test_time = stoptime - test.get_timer()
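         # Accumulate the total elapsed time and track each test's fastest
         # and slowest runs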
         if "min_time" not in test.stats:
             test.stats["elapsed_time"] = elapsed_time
             test.stats["min_time"] = test_time
             test.stats["max_time"] = test_time
         else:
             test.stats["elapsed_time"] += elapsed_time
             if test_time < test.stats["min_time"]:
                 test.stats["min_time"] = test_time
             if test_time > test.stats["max_time"]:
                 test.stats["max_time"] = test_time
 
         if ret:
             self.incr("success")
             test.log_timer()
         else:
             self.incr("failure")
             self.ClusterManager.statall()
             did_run = 1  # Force the test count to be incremented anyway so test extraction works
 
         self.audit(test.errors_to_ignore)
         return did_run
 
     def summarize(self):
         self.ClusterManager.log("****************")
         self.ClusterManager.log("Overall Results:" + repr(self.Stats))
         self.ClusterManager.log("****************")
 
         stat_filter = {
             "calls":0,
             "failure":0,
             "skipped":0,
             "auditfail":0,
             }
         self.ClusterManager.log("Test Summary")
         for test in self.Tests:
             for key in list(stat_filter.keys()):
                 stat_filter[key] = test.stats[key]
             self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
 
         self.ClusterManager.debug("Detailed Results")
         for test in self.Tests:
             self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.stats))
 
         self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
 
     def audit(self, LocalIgnore=None):
         errcount = 0
         # Avoid a mutable default argument; None means no extra patterns
         ignorelist = ["CTS:"]
         if LocalIgnore:
             ignorelist.extend(LocalIgnore)
         ignorelist.extend(self.ClusterManager.errorstoignore())
         ignorelist.extend(self.ClusterManager.instance_errorstoignore())
 
         # This makes sure everything is stabilized before starting...
         failed = 0
         for audit in self.Audits:
             if not audit():
                 self.ClusterManager.log("Audit " + audit.name + " FAILED.")
                 failed += 1
             else:
                 self.ClusterManager.debug("Audit " + audit.name + " passed.")
 
         while errcount < 1000:
             match = None
             if self.BadNews:
                 match = self.BadNews.look(0)
 
             if match:
                 add_err = not any(re.search(ignore, match) for ignore in ignorelist)
                 if add_err:
                     self.ClusterManager.log("BadNews: " + match)
                     self.incr("BadNews")
                     errcount += 1
             else:
                 break
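         # This "else" belongs to the "while", not the "if": it runs only if
         # the loop ended without hitting "break" (a flood of BadNews matches)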
         else:
             print("Big problems")
             if not should_continue(self.ClusterManager.Env):
                 self.ClusterManager.log("Shutting down.")
                 self.summarize()
                 self.TearDown()
                 raise ValueError("Looks like we hit a BadNews jackpot!")
 
         if self.BadNews:
             self.BadNews.end()
         return failed
 
 
 class AllOnce(Scenario):
     '''Every Test Once''' # Accessible as __doc__
     def run_loop(self, Iterations):
         testcount = 1
         for test in self.Tests:
             self.run_test(test, testcount)
             testcount += 1
 
 
 class RandomTests(Scenario):
     '''Random Test Execution'''
     def run_loop(self, Iterations):
         testcount = 1
         while testcount <= Iterations:
             test = self.ClusterManager.Env.random_gen.choice(self.Tests)
             self.run_test(test, testcount)
             testcount += 1
 
 
 class Sequence(Scenario):
     '''Named Tests in Sequence'''
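     # Note: the while condition is only rechecked after a full pass over
     # self.Tests, so the total test count can overshoot Iterations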
     def run_loop(self, Iterations):
         testcount = 1
         while testcount <= Iterations:
             for test in self.Tests:
                 self.run_test(test, testcount)
                 testcount += 1
 
 
 class Boot(Scenario):
     '''Start the Cluster'''
     def run_loop(self, Iterations):
         testcount = 0
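         # Nothing to run; booting happens when the scenario's components are
         # set up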
 
 
 class BootCluster(ScenarioComponent):
     (
 '''BootCluster is the most basic of ScenarioComponents.
 This ScenarioComponent simply starts the cluster manager on all the nodes.
 It is fairly robust as it waits for all nodes to come up before starting
 as they might have been rebooted or crashed for some reason beforehand.
 ''')
     def __init__(self, Env):
         pass
 
     def is_applicable(self):
         '''BootCluster is so generic it is always applicable'''
         return True
 
-    def SetUp(self, CM):
+    def setup(self, CM):
         '''Basic Cluster Manager startup.  Start everything'''
 
         CM.prepare()
 
         # Clear out the cobwebs ;-)
         CM.stopall(verbose=True, force=True)
 
         # Now start the Cluster Manager on all the nodes.
         CM.log("Starting Cluster Manager on all nodes.")
         return CM.startall(verbose=True, quick=True)
 
     def TearDown(self, CM, force=False):
         '''Stop the cluster manager on all nodes'''
 
         # Stop the cluster manager everywhere
 
         CM.log("Stopping Cluster Manager on all nodes")
         return CM.stopall(verbose=True, force=force)
 
 
 class LeaveBooted(BootCluster):
     def TearDown(self, CM):
         '''Leave the cluster manager running on all nodes'''
 
         CM.log("Leaving Cluster running on all nodes")
         return 1