diff --git a/python/pacemaker/_cts/clustermanager.py b/python/pacemaker/_cts/clustermanager.py
index f1683daf7d..b9211eba9a 100644
--- a/python/pacemaker/_cts/clustermanager.py
+++ b/python/pacemaker/_cts/clustermanager.py
@@ -1,900 +1,901 @@
 """ClusterManager class for Pacemaker's Cluster Test Suite (CTS)."""
 
 __all__ = ["ClusterManager"]
-__copyright__ = """Copyright 2000-2024 the Pacemaker project contributors.
+__copyright__ = """Copyright 2000-2025 the Pacemaker project contributors.
 Certain portions by Huang Zhen <zhenhltc@cn.ibm.com> are copyright 2004
 International Business Machines. The version control history for this file
 may have further details."""
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import os
 import re
 import time
 
 from collections import UserDict
 
 from pacemaker.buildoptions import BuildOptions
 from pacemaker.exitstatus import ExitStatus
 from pacemaker._cts.CTS import NodeStatus
 from pacemaker._cts.audits import AuditResource
 from pacemaker._cts.cib import ConfigFactory
 from pacemaker._cts.environment import EnvFactory
 from pacemaker._cts.logging import LogFactory
 from pacemaker._cts.patterns import PatternSelector
 from pacemaker._cts.remote import RemoteFactory
 from pacemaker._cts.watcher import LogWatcher
 
 # pylint doesn't understand that self._rsh is callable (it stores the
 # singleton instance of RemoteExec, as returned by the getInstance method
-# of RemoteFactory).  It's possible we could fix this with type annotations,
-# but those were introduced with python 3.5 and we only support python 3.4.
+# of RemoteFactory).
+# @TODO See if type annotations fix this.
+
 # I think we could also fix this by getting rid of the getInstance methods,
 # but that's a project for another day.  For now, just disable the warning.
 # pylint: disable=not-callable
 
 # ClusterManager has a lot of methods.
 # pylint: disable=too-many-public-methods
 
 
 class ClusterManager(UserDict):
     """
     An abstract base class for managing the cluster.
 
     This class implements high-level operations on the cluster and/or its cluster
     managers.  Actual cluster-specific management classes should be subclassed
     from this one.
 
     Among other things, this class tracks the state every node is expected to be in.
     """
 
     def _final_conditions(self):
         """Check all keys to make sure they have a non-None value."""
         for (key, val) in self._data.items():
             if val is None:
                 raise ValueError("Improper derivation: self[%s] must be overridden by subclass." % key)
 
     def __init__(self):
         """
         Create a new ClusterManager instance.
 
         This class can be treated kind of like a dictionary due to the presence
         of certain dict functions like __getitem__ and __setitem__.  This is
         because it contains a lot of name/value pairs.  However, it is not
         actually a dictionary so do not rely on standard dictionary behavior.
         """
         # Eventually, ClusterManager should not be a UserDict subclass.  Until
         # that point...
         # pylint: disable=super-init-not-called
         self.__instance_errors_to_ignore = []
 
         self._cib_installed = False
         self._data = {}
         self._logger = LogFactory()
 
         self.env = EnvFactory().getInstance()
         self.expected_status = {}
         self.name = self.env["Name"]
         # pylint: disable=invalid-name
         self.ns = NodeStatus(self.env)
         self.our_node = os.uname()[1].lower()
         self.partitions_expected = 1
         self.rsh = RemoteFactory().getInstance()
         self.templates = PatternSelector(self.env["Name"])
 
         self._final_conditions()
 
         self._cib_factory = ConfigFactory(self)
         self._cib = self._cib_factory.create_config(self.env["Schema"])
         self._cib_sync = {}
 
     def __getitem__(self, key):
         """
         Return the given key, checking for it in several places.
 
         If key is "Name", return the name of the cluster manager.  If the key
         was previously added to the dictionary via __setitem__, return that.
         Otherwise, return the template pattern for the key.
 
         This method should not be used and may be removed in the future.
         """
         if key == "Name":
             return self.name
 
         print("FIXME: Getting %s from %r" % (key, self))
         if key in self._data:
             return self._data[key]
 
         return self.templates.get_patterns(key)
 
     def __setitem__(self, key, value):
         """
         Set the given key to the given value, overriding any previous value.
 
         This method should not be used and may be removed in the future.
         """
         print("FIXME: Setting %s=%s on %r" % (key, value, self))
         self._data[key] = value
 
     def clear_instance_errors_to_ignore(self):
         """Reset instance-specific errors to ignore on each iteration."""
         self.__instance_errors_to_ignore = []
 
     @property
     def instance_errors_to_ignore(self):
         """Return a list of known errors that should be ignored for a specific test instance."""
         return self.__instance_errors_to_ignore
 
     @property
     def errors_to_ignore(self):
         """Return a list of known error messages that should be ignored."""
         return self.templates.get_patterns("BadNewsIgnore")
 
     def log(self, args):
         """Log a message."""
         self._logger.log(args)
 
     def debug(self, args):
         """Log a debug message."""
         self._logger.debug(args)
 
     def upcount(self):
         """Return how many nodes are up."""
         count = 0
 
         for node in self.env["nodes"]:
             if self.expected_status[node] == "up":
                 count += 1
 
         return count
 
     def install_support(self, command="install"):
         """
         Install or uninstall the CTS support files.
 
         This includes various init scripts and data, daemons, fencing agents, etc.
         """
         for node in self.env["nodes"]:
             self.rsh(node, "%s/cts-support %s" % (BuildOptions.DAEMON_DIR, command))
 
     def prepare_fencing_watcher(self):
         """Return a LogWatcher object that watches for fencing log messages."""
         # If we don't have quorum now but get it as a result of starting this node,
         # then a bunch of nodes might get fenced
         if self.has_quorum(None):
             self.debug("Have quorum")
             return None
 
         if not self.templates["Pat:Fencing_start"]:
             print("No start pattern")
             return None
 
         if not self.templates["Pat:Fencing_ok"]:
             print("No ok pattern")
             return None
 
         stonith = None
         stonith_pats = []
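         # Watch for fencing of every node we don't currently expect to be up,
         # since those are the ones that could get shot once quorum is regained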
         for peer in self.env["nodes"]:
             if self.expected_status[peer] == "up":
                 continue
 
             stonith_pats.extend([
                 self.templates["Pat:Fencing_ok"] % peer,
                 self.templates["Pat:Fencing_start"] % peer,
             ])
 
         stonith = LogWatcher(self.env["LogFileName"], stonith_pats, self.env["nodes"],
                              self.env["log_kind"], "StartupFencing", 0)
         stonith.set_watch()
         return stonith
 
     def fencing_cleanup(self, node, stonith):
         """Wait for a previously fenced node to return to the cluster."""
         peer_list = []
         peer_state = {}
 
         self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
 
         # If we just started a node, we may now have quorum (and permission to fence)
         if not stonith:
             self.debug("Nothing to do")
             return peer_list
 
         q = self.has_quorum(None)
         if not q and len(self.env["nodes"]) > 2:
             # We didn't gain quorum - we shouldn't have shot anyone
             self.debug("Quorum: %s Len: %d" % (q, len(self.env["nodes"])))
             return peer_list
 
         for n in self.env["nodes"]:
             peer_state[n] = "unknown"
 
         # Now see if any states need to be updated
         self.debug("looking for: %r" % stonith.regexes)
         shot = stonith.look(0)
 
         while shot:
             self.debug("Found: %r" % shot)
             del stonith.regexes[stonith.whichmatch]
 
             # Extract node name (start with no peer so a line that matches no
             # node is logged as unknown below)
             peer = None
             for n in self.env["nodes"]:
                 if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
                     peer = n
                     peer_state[peer] = "complete"
                     self.__instance_errors_to_ignore.append(self.templates["Pat:Fencing_ok"] % peer)
 
                 elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
                     # TODO: Correctly detect multiple fencing operations for the same host
                     peer = n
                     peer_state[peer] = "in-progress"
                     self.__instance_errors_to_ignore.append(self.templates["Pat:Fencing_start"] % peer)
 
             if not peer:
                 self._logger.log("ERROR: Unknown stonith match: %r" % shot)
 
             elif peer not in peer_list:
                 self.debug("Found peer: %s" % peer)
                 peer_list.append(peer)
 
             # Get the next one
             shot = stonith.look(60)
 
         for peer in peer_list:
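             # Update the peer's expected state, wait for any in-progress
             # fencing of it to finish, and wait for the node itself to return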
             self.debug("   Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
             if self.env["at-boot"]:
                 self.expected_status[peer] = "up"
             else:
                 self.expected_status[peer] = "down"
 
             if peer_state[peer] == "in-progress":
                 # Wait for any in-progress operations to complete
                 shot = stonith.look(60)
 
                 while stonith.regexes and shot:
                     self.debug("Found: %r" % shot)
                     del stonith.regexes[stonith.whichmatch]
                     shot = stonith.look(60)
 
             # Now make sure the node is alive too
             self.ns.wait_for_node(peer, self.env["DeadTime"])
 
             # Poll until it comes up
             if self.env["at-boot"]:
                 if not self.stat_cm(peer):
                     time.sleep(self.env["StartTime"])
 
                 if not self.stat_cm(peer):
                     self._logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
                     return None
 
         return peer_list
 
     def start_cm(self, node, verbose=False):
         """Start up the cluster manager on a given node."""
         if verbose:
             self._logger.log("Starting %s on node %s" % (self.templates["Name"], node))
         else:
             self.debug("Starting %s on node %s" % (self.templates["Name"], node))
 
         if node not in self.expected_status:
             self.expected_status[node] = "down"
 
         if self.expected_status[node] != "down":
             return True
 
         # Technically we should always be able to notice ourselves starting
         patterns = [
             self.templates["Pat:Local_started"] % node,
         ]
 
         if self.upcount() == 0:
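             # This is the first node up, so it should also be elected DC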
             patterns.append(self.templates["Pat:DC_started"] % node)
         else:
             patterns.append(self.templates["Pat:NonDC_started"] % node)
 
         watch = LogWatcher(self.env["LogFileName"], patterns,
                            self.env["nodes"], self.env["log_kind"],
                            "StartaCM", self.env["StartTime"] + 10)
 
         self.install_config(node)
 
         self.expected_status[node] = "any"
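         # "any" keeps the stat_cm() check below from logging a state mismatch
         # whichever way the probe turns out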
 
         if self.stat_cm(node) and self.cluster_stable(self.env["DeadTime"]):
             self._logger.log("%s was already started" % node)
             return True
 
         stonith = self.prepare_fencing_watcher()
         watch.set_watch()
 
         (rc, _) = self.rsh(node, self.templates["StartCmd"])
         if rc != 0:
             self._logger.log("Warn: Start command failed on node %s" % node)
             self.fencing_cleanup(node, stonith)
             return False
 
         self.expected_status[node] = "up"
         watch_result = watch.look_for_all()
 
         if watch.unmatched:
             for regex in watch.unmatched:
                 self._logger.log("Warn: Startup pattern not found: %s" % regex)
 
         if watch_result and self.cluster_stable(self.env["DeadTime"]):
             self.fencing_cleanup(node, stonith)
             return True
 
         if self.stat_cm(node) and self.cluster_stable(self.env["DeadTime"]):
             self.fencing_cleanup(node, stonith)
             return True
 
         self._logger.log("Warn: Start failed for node %s" % node)
         return False
 
     def start_cm_async(self, node, verbose=False):
         """Start up the cluster manager on a given node without blocking."""
         if verbose:
             self._logger.log("Starting %s on node %s" % (self["Name"], node))
         else:
             self.debug("Starting %s on node %s" % (self["Name"], node))
 
         self.install_config(node)
         self.rsh(node, self.templates["StartCmd"], synchronous=False)
         self.expected_status[node] = "up"
 
     def stop_cm(self, node, verbose=False, force=False):
         """Stop the cluster manager on a given node."""
         if verbose:
             self._logger.log("Stopping %s on node %s" % (self["Name"], node))
         else:
             self.debug("Stopping %s on node %s" % (self["Name"], node))
 
         if self.expected_status[node] != "up" and not force:
             return True
 
         (rc, _) = self.rsh(node, self.templates["StopCmd"])
         if rc == 0:
             # Make sure we can continue even if corosync leaks
             self.expected_status[node] = "down"
             self.cluster_stable(self.env["DeadTime"])
             return True
 
         self._logger.log("ERROR: Could not stop %s on node %s" % (self["Name"], node))
         return False
 
     def stop_cm_async(self, node):
         """Stop the cluster manager on a given node without blocking."""
         self.debug("Stopping %s on node %s" % (self["Name"], node))
 
         self.rsh(node, self.templates["StopCmd"], synchronous=False)
         self.expected_status[node] = "down"
 
     def startall(self, nodelist=None, verbose=False, quick=False):
         """Start the cluster manager on every node in the cluster, or on every node in nodelist."""
         if not nodelist:
             nodelist = self.env["nodes"]
 
         for node in nodelist:
             if self.expected_status[node] == "down":
                 self.ns.wait_for_all_nodes(nodelist, 300)
 
         if not quick:
             # This is used for "basic sanity checks", so only start one node ...
             return self.start_cm(nodelist[0], verbose=verbose)
 
         # Approximation of SimulStartList for --boot
         watchpats = [
             self.templates["Pat:DC_IDLE"],
         ]
         for node in nodelist:
             watchpats.extend([
                 self.templates["Pat:InfraUp"] % node,
                 self.templates["Pat:PacemakerUp"] % node,
                 self.templates["Pat:Local_started"] % node,
                 self.templates["Pat:They_up"] % (nodelist[0], node),
             ])
 
         #   Start all the nodes - at about the same time...
         watch = LogWatcher(self.env["LogFileName"], watchpats, self.env["nodes"],
                            self.env["log_kind"], "fast-start",
                            self.env["DeadTime"] + 10)
         watch.set_watch()
 
         if not self.start_cm(nodelist[0], verbose=verbose):
             return False
 
         for node in nodelist:
             self.start_cm_async(node, verbose=verbose)
 
         watch.look_for_all()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self._logger.log("Warn: Startup pattern not found: %s" % regex)
 
         if not self.cluster_stable():
             self._logger.log("Cluster did not stabilize")
             return False
 
         return True
 
     def stopall(self, nodelist=None, verbose=False, force=False):
         """Stop the cluster manager on every node in the cluster, or on every node in nodelist."""
         ret = True
 
         if not nodelist:
             nodelist = self.env["nodes"]
 
         for node in self.env["nodes"]:
             if self.expected_status[node] == "up" or force:
                 if not self.stop_cm(node, verbose=verbose, force=force):
                     ret = False
 
         return ret
 
     def statall(self, nodelist=None):
         """Return the status of the cluster manager on every node in the cluster, or on every node in nodelist."""
         result = {}
 
         if not nodelist:
             nodelist = self.env["nodes"]
 
         for node in nodelist:
             if self.stat_cm(node):
                 result[node] = "up"
             else:
                 result[node] = "down"
 
         return result
 
     def isolate_node(self, target, nodes=None):
         """Break communication between the target node and all other nodes in the cluster, or nodes."""
         if not nodes:
             nodes = self.env["nodes"]
 
         for node in nodes:
             if node == target:
                 continue
 
             (rc, _) = self.rsh(target, self.templates["BreakCommCmd"] % node)
             if rc != 0:
                 self._logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
                 return False
 
             self.debug("Communication cut between %s and %s" % (target, node))
 
         return True
 
     def unisolate_node(self, target, nodes=None):
         """Re-establish communication between the target node and all other nodes in the cluster, or nodes."""
         if not nodes:
             nodes = self.env["nodes"]
 
         for node in nodes:
             if node == target:
                 continue
 
             # Limit the amount of time we have asynchronous connectivity for
             # Restore both sides as simultaneously as possible
             self.rsh(target, self.templates["FixCommCmd"] % node, synchronous=False)
             self.rsh(node, self.templates["FixCommCmd"] % target, synchronous=False)
             self.debug("Communication restored between %s and %s" % (target, node))
 
     def oprofile_start(self, node=None):
         """Start profiling on the given node, or all nodes in the cluster."""
         if not node:
             for n in self.env["oprofile"]:
                 self.oprofile_start(n)
 
         elif node in self.env["oprofile"]:
             self.debug("Enabling oprofile on %s" % node)
             self.rsh(node, "opcontrol --init")
             self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
             self.rsh(node, "opcontrol --start")
             self.rsh(node, "opcontrol --reset")
 
     def oprofile_save(self, test, node=None):
         """Save profiling data and restart profiling on the given node, or all nodes in the cluster."""
         if not node:
             for n in self.env["oprofile"]:
                 self.oprofile_save(test, n)
 
         elif node in self.env["oprofile"]:
             self.rsh(node, "opcontrol --dump")
             self.rsh(node, "opcontrol --save=cts.%d" % test)
             # Read back with: opreport -l session:cts.0 image:<directory>/c*
             self.oprofile_stop(node)
             self.oprofile_start(node)
 
     def oprofile_stop(self, node=None):
         """
         Stop profiling on the given node, or all nodes in the cluster.
 
         This does not save profiling data, so call oprofile_save first if needed.
         """
         if not node:
             for n in self.env["oprofile"]:
                 self.oprofile_stop(n)
 
         elif node in self.env["oprofile"]:
             self.debug("Stopping oprofile on %s" % node)
             self.rsh(node, "opcontrol --reset")
             self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
 
     def install_config(self, node):
         """Remove and re-install the CIB on the first node in the cluster."""
         if not self.ns.wait_for_node(node):
             self.log("Node %s is not up." % node)
             return
 
         if node in self._cib_sync or not self.env["ClobberCIB"]:
             return
 
         self._cib_sync[node] = True
         self.rsh(node, "rm -f %s/cib*" % BuildOptions.CIB_DIR)
 
         # Only install the CIB on the first node, all the other ones will pick it up from there
         if self._cib_installed:
             return
 
         self._cib_installed = True
         if self.env["CIBfilename"] is None:
             self.log("Installing Generated CIB on node %s" % node)
             self._cib.install(node)
 
         else:
             self.log("Installing CIB (%s) on node %s" % (self.env["CIBfilename"], node))
 
             rc = self.rsh.copy(self.env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node))
 
             if rc != 0:
                 raise ValueError("Can not scp file to %s %d" % (node, rc))
 
         self.rsh(node, "chown %s %s/cib.xml" % (BuildOptions.DAEMON_USER, BuildOptions.CIB_DIR))
 
     def prepare(self):
         """
         Finish initialization.
 
         Clear out the expected status and record the current status of every
         node in the cluster.
         """
         self.partitions_expected = 1
         for node in self.env["nodes"]:
             self.expected_status[node] = ""
 
             if self.env["experimental-tests"]:
                 self.unisolate_node(node)
 
             self.stat_cm(node)
 
     def test_node_cm(self, node):
         """
         Check the status of a given node.
 
         Returns 0 if the node is down, 1 if the node is up but unstable, and 2
         if the node is up and stable.
         """
         watchpats = [
             "Current ping state: (S_IDLE|S_NOT_DC)",
             self.templates["Pat:NonDC_started"] % node,
             self.templates["Pat:DC_started"] % node,
         ]
 
         idle_watch = LogWatcher(self.env["LogFileName"], watchpats, [node],
                                 self.env["log_kind"], "ClusterIdle")
         idle_watch.set_watch()
 
         (_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
 
         if not out:
             out = ""
         else:
             out = out[0].strip()
 
         self.debug("Node %s status: '%s'" % (node, out))
 
         if out.find('ok') < 0:
             if self.expected_status[node] == "up":
                 self.log("Node status for %s is %s but we think it should be %s"
                          % (node, "down", self.expected_status[node]))
 
             self.expected_status[node] = "down"
             return 0
 
         if self.expected_status[node] == "down":
             self.log("Node status for %s is %s but we think it should be %s: %s"
                      % (node, "up", self.expected_status[node], out))
 
         self.expected_status[node] = "up"
 
         # check the output first - because syslog-ng loses messages
         if out.find('S_NOT_DC') != -1:
             # Up and stable
             return 2
 
         if out.find('S_IDLE') != -1:
             # Up and stable
             return 2
 
         # fall back to syslog-ng and wait
         if not idle_watch.look():
             # just up
             self.debug("Warn: Node %s is unstable: %s" % (node, out))
             return 1
 
         # Up and stable
         return 2
 
     def stat_cm(self, node):
         """Report the status of the cluster manager on a given node."""
         return self.test_node_cm(node) > 0
 
     # Being up and being stable is not the same question...
     def node_stable(self, node):
         """Return whether or not the given node is stable."""
         if self.test_node_cm(node) == 2:
             return True
 
         self.log("Warn: Node %s not stable" % node)
         return False
 
     def partition_stable(self, nodes, timeout=None):
         """Return whether or not all nodes in the given partition are stable."""
         watchpats = [
             "Current ping state: S_IDLE",
             self.templates["Pat:DC_IDLE"],
         ]
 
         self.debug("Waiting for cluster stability...")
 
         if timeout is None:
             timeout = self.env["DeadTime"]
 
         if len(nodes) < 3:
             self.debug("Cluster is inactive")
             return True
 
         idle_watch = LogWatcher(self.env["LogFileName"], watchpats, nodes.split(),
                                 self.env["log_kind"], "ClusterStable", timeout)
         idle_watch.set_watch()
 
         for node in nodes.split():
             # have each node dump its current state
             self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
 
         ret = idle_watch.look()
 
         while ret:
             self.debug(ret)
 
             for node in nodes.split():
                 if re.search(node, ret):
                     return True
 
             ret = idle_watch.look()
 
         self.debug("Warn: Partition %r not IDLE after %ds" % (nodes, timeout))
         return False
 
     def cluster_stable(self, timeout=None, double_check=False):
         """Return whether or not all nodes in the cluster are stable."""
         partitions = self.find_partitions()
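         # Each partition is a single whitespace-separated string of sorted
         # node names, as built by find_partitions()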
 
         for partition in partitions:
             if not self.partition_stable(partition, timeout):
                 return False
 
         if not double_check:
             return True
 
         # Make sure we are really stable and that all resources,
         # including those that depend on transient node attributes,
         # are started if they were going to be
         time.sleep(5)
         for partition in partitions:
             if not self.partition_stable(partition, timeout):
                 return False
 
         return True
 
     def is_node_dc(self, node, status_line=None):
         """
         Return whether or not the given node is the cluster DC.
 
         Check the given status_line, or query the cluster if None.
         """
         if not status_line:
             (_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
 
             if out:
                 status_line = out[0].strip()
 
         if not status_line:
             return False
 
         if status_line.find('S_IDLE') != -1:
             return True
 
         if status_line.find('S_INTEGRATION') != -1:
             return True
 
         if status_line.find('S_FINALIZE_JOIN') != -1:
             return True
 
         if status_line.find('S_POLICY_ENGINE') != -1:
             return True
 
         if status_line.find('S_TRANSITION_ENGINE') != -1:
             return True
 
         return False
 
     def active_resources(self, node):
         """Return a list of primitive resources active on the given node."""
         (_, output) = self.rsh(node, "crm_resource -c", verbose=1)
         resources = []
         for line in output:
             if not re.search("^Resource", line):
                 continue
 
             tmp = AuditResource(self, line)
             if tmp.type == "primitive" and tmp.host == node:
                 resources.append(tmp.id)
 
         return resources
 
     def resource_location(self, rid):
         """Return a list of nodes on which the given resource is running."""
         resource_nodes = []
         for node in self.env["nodes"]:
             if self.expected_status[node] != "up":
                 continue
 
             cmd = self.templates["RscRunning"] % rid
             (rc, lines) = self.rsh(node, cmd)
 
             if rc == 127:
                 self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
                 for line in lines:
                     self.log("Output: %s " % line)
 
             elif rc == 0:
                 resource_nodes.append(node)
 
         return resource_nodes
 
     def find_partitions(self):
         """
         Return a list of all partitions in the cluster.
 
         Each element of the list is itself a list of all active nodes in that
         partition.
         """
         ccm_partitions = []
 
         for node in self.env["nodes"]:
             if self.expected_status[node] != "up":
                 self.debug("Node %s is down... skipping" % node)
                 continue
 
             (_, out) = self.rsh(node, self.templates["PartitionCmd"], verbose=1)
 
             if not out:
                 self.log("no partition details for %s" % node)
                 continue
 
             partition = out[0].strip()
 
             if len(partition) <= 2:
                 self.log("bad partition details for %s" % node)
                 continue
 
             nodes = partition.split()
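             # Sort the membership so the same partition reported by different
             # nodes compares equal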
             nodes.sort()
             partition = ' '.join(nodes)
 
             found = 0
             for a_partition in ccm_partitions:
                 if partition == a_partition:
                     found = 1
 
             if found == 0:
                 self.debug("Adding partition from %s: %s" % (node, partition))
                 ccm_partitions.append(partition)
             else:
                 self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
 
         self.debug("Found partitions: %r" % ccm_partitions)
         return ccm_partitions
 
     def has_quorum(self, node_list):
         """Return whether or not the cluster has quorum."""
         # If we are auditing a partition, then one side will
         #   have quorum and the other not.
         # So the caller needs to tell us which we are checking
         # If no value for node_list is specified... assume all nodes
         if not node_list:
             node_list = self.env["nodes"]
 
         for node in node_list:
             if self.expected_status[node] != "up":
                 continue
 
             (rc, quorum) = self.rsh(node, self.templates["QuorumCmd"], verbose=1)
             if rc != ExitStatus.OK:
                 self.debug("WARN: Quorum check on %s returned error (%d)" % (node, rc))
                 continue
 
             quorum = quorum[0].strip()
             if quorum.find("1") != -1:
                 return True
             if quorum.find("0") != -1:
                 return False
             self.debug("WARN: Unexpected quorum test result from %s:%s" % (node, quorum))
 
         return False
 
     @property
     def components(self):
         """
         Return a list of all patterns that should be ignored for the cluster's components.
 
         This must be provided by all subclasses.
         """
         raise NotImplementedError
 
     def in_standby_mode(self, node):
         """Return whether or not the node is in Standby."""
         (_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1)
 
         if not out:
             return False
 
         out = out[0].strip()
         self.debug("Standby result: %s" % out)
         return out == "on"
 
     def set_standby_mode(self, node, status):
         """
         Set node to Standby if status is True, or Active if status is False.
 
         Return whether the node is now in the requested status.
         """
         current_status = self.in_standby_mode(node)
 
         if current_status == status:
             return True
 
         if status:
             cmd = self.templates["StandbyCmd"] % (node, "on")
         else:
             cmd = self.templates["StandbyCmd"] % (node, "off")
 
         (rc, _) = self.rsh(node, cmd)
         return rc == 0
 
     def add_dummy_rsc(self, node, rid):
         """Add a dummy resource with the given ID to the given node."""
         rsc_xml = """ '<resources>
                 <primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
                     <operations>
                         <op id=\"%s-interval-10s\" interval=\"10s\" name=\"monitor\"/>
                     </operations>
                 </primitive>
             </resources>'""" % (rid, rid)
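         # A location constraint pins the dummy resource to the requested node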
         constraint_xml = """ '<constraints>
                 <rsc_location id=\"location-%s-%s\" node=\"%s\" rsc=\"%s\" score=\"INFINITY\"/>
             </constraints>'
             """ % (rid, node, node, rid)
 
         self.rsh(node, self.templates['CibAddXml'] % rsc_xml)
         self.rsh(node, self.templates['CibAddXml'] % constraint_xml)
 
     def remove_dummy_rsc(self, node, rid):
         """Remove the previously added dummy resource given by rid on the given node."""
         constraint = "\"//rsc_location[@rsc='%s']\"" % rid
         rsc = "\"//primitive[@id='%s']\"" % rid
 
         self.rsh(node, self.templates['CibDelXpath'] % constraint)
         self.rsh(node, self.templates['CibDelXpath'] % rsc)
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index 7b5f47df33..1315e45265 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -1,644 +1,645 @@
 """Test environment classes for Pacemaker's Cluster Test Suite (CTS)."""
 
 __all__ = ["EnvFactory"]
-__copyright__ = "Copyright 2014-2024 the Pacemaker project contributors"
+__copyright__ = "Copyright 2014-2025 the Pacemaker project contributors"
 __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
 
 import argparse
 from contextlib import suppress
 import os
 import random
 import socket
 import sys
 import time
 
 from pacemaker.buildoptions import BuildOptions
 from pacemaker._cts.logging import LogFactory
 from pacemaker._cts.remote import RemoteFactory
 from pacemaker._cts.watcher import LogKind
 
 
 class Environment:
     """
     A class for managing the CTS environment.
 
     This consists largely of processing and storing command line parameters.
     """
 
     # pylint doesn't understand that self._rsh is callable (it stores the
     # singleton instance of RemoteExec, as returned by the getInstance method
-    # of RemoteFactory).  It's possible we could fix this with type annotations,
-    # but those were introduced with python 3.5 and we only support python 3.4.
+    # of RemoteFactory).
+    # @TODO See if type annotations fix this.
+
     # I think we could also fix this by getting rid of the getInstance methods,
     # but that's a project for another day.  For now, just disable the warning.
     # pylint: disable=not-callable
 
     def __init__(self, args):
         """
         Create a new Environment instance.
 
         This class can be treated kind of like a dictionary due to the presence
         of typical dict functions like __contains__, __getitem__, and __setitem__.
         However, it is not a dictionary so do not rely on standard dictionary
         behavior.
 
         Arguments:
         args -- A list of command line parameters, minus the program name.
                 If None, sys.argv will be used.
         """
         self.data = {}
         self._nodes = []
 
         # Set some defaults before processing command line arguments.  These are
         # either not set by any command line parameter, or they need a default
         # that can't be set in add_argument.
         self["DeadTime"] = 300
         self["StartTime"] = 300
         self["StableTime"] = 30
         self["tests"] = []
         self["IPagent"] = "IPaddr2"
         self["DoFencing"] = True
         self["ClobberCIB"] = False
         self["CIBfilename"] = None
         self["CIBResource"] = False
         self["log_kind"] = None
         self["node-limit"] = 0
         self["scenario"] = "random"
 
         self.random_gen = random.Random()
 
         self._logger = LogFactory()
         self._rsh = RemoteFactory().getInstance()
         self._target = "localhost"
 
         self._seed_random()
         self._parse_args(args)
 
         if not self["ListTests"]:
             self._validate()
             self._discover()
 
     def _seed_random(self, seed=None):
         """
         Initialize the random number generator.
 
         Arguments:
         seed -- Use this to seed the random number generator, or use the
                 current time if None.
         """
         if not seed:
             seed = int(time.time())
 
         self["RandSeed"] = seed
         self.random_gen.seed(str(seed))
 
     def dump(self):
         """Print the current environment."""
         keys = []
         for key in list(self.data.keys()):
             keys.append(key)
 
         keys.sort()
         for key in keys:
             s = "Environment[%s]" % key
             self._logger.debug("{key:35}: {val}".format(key=s, val=str(self[key])))
 
     def keys(self):
         """Return a list of all environment keys stored in this instance."""
         return list(self.data.keys())
 
     def __contains__(self, key):
         """Return True if the given key exists in the environment."""
         if key == "nodes":
             return True
 
         return key in self.data
 
     def __getitem__(self, key):
         """Return the given environment key, or None if it does not exist."""
         if str(key) == "0":
             raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead")
 
         if key == "nodes":
             return self._nodes
 
         if key == "Name":
             return self._get_stack_short()
 
         return self.data.get(key)
 
     def __setitem__(self, key, value):
         """Set the given environment key to the given value, overriding any previous value."""
         if key == "Stack":
             self._set_stack(value)
 
         elif key == "node-limit":
             self.data[key] = value
             self._filter_nodes()
 
         elif key == "nodes":
             self._nodes = []
             for node in value:
                 # I don't think I need the IP address, etc. but this validates
                 # the node name against /etc/hosts and/or DNS, so it's a
                 # GoodThing(tm).
                 try:
                     n = node.strip()
                     # @TODO This only handles IPv4, use getaddrinfo() instead
                     # (here and in _discover())
                     socket.gethostbyname_ex(n)
                     self._nodes.append(n)
                 except socket.herror:
                     self._logger.log("%s not found in DNS... aborting" % node)
                     raise
 
             self._filter_nodes()
 
         else:
             self.data[key] = value
 
     def random_node(self):
         """Choose a random node from the cluster."""
         return self.random_gen.choice(self["nodes"])
 
     def get(self, key, default=None):
         """Return the value for key if key is in the environment, else default."""
         if key == "nodes":
             return self._nodes
 
         return self.data.get(key, default)
 
     def _set_stack(self, name):
         """Normalize the given cluster stack name."""
         if name in ["corosync", "cs", "mcp"]:
             self.data["Stack"] = "corosync 2+"
 
         else:
             raise ValueError("Unknown stack: %s" % name)
 
     def _get_stack_short(self):
         """Return the short name for the currently set cluster stack."""
         if "Stack" not in self.data:
             return "unknown"
 
         if self.data["Stack"] == "corosync 2+":
             return "crm-corosync"
 
         LogFactory().log("Unknown stack: %s" % self.data["Stack"])
         raise ValueError("Unknown stack: %s" % self.data["Stack"])
 
     def _detect_systemd(self):
         """Detect whether systemd is in use on the target node."""
         if "have_systemd" not in self.data:
             (rc, _) = self._rsh(self._target, "systemctl list-units", verbose=0)
             self["have_systemd"] = rc == 0
 
     def _detect_syslog(self):
         """Detect the syslog variant in use on the target node (if any)."""
         if "syslogd" in self.data:
             return
 
         if self["have_systemd"]:
             # Systemd
             (_, lines) = self._rsh(self._target, r"systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1)
         else:
             # SYS-V
             (_, lines) = self._rsh(self._target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1)
 
         with suppress(IndexError):
             self["syslogd"] = lines[0].strip()
 
     def disable_service(self, node, service):
         """Disable the given service on the given node."""
         if self["have_systemd"]:
             # Systemd
             (rc, _) = self._rsh(node, "systemctl disable %s" % service)
             return rc
 
         # SYS-V
         (rc, _) = self._rsh(node, "chkconfig %s off" % service)
         return rc
 
     def enable_service(self, node, service):
         """Enable the given service on the given node."""
         if self["have_systemd"]:
             # Systemd
             (rc, _) = self._rsh(node, "systemctl enable %s" % service)
             return rc
 
         # SYS-V
         (rc, _) = self._rsh(node, "chkconfig %s on" % service)
         return rc
 
     def service_is_enabled(self, node, service):
         """Return True if the given service is enabled on the given node."""
         if self["have_systemd"]:
             # Systemd
 
             # With "systemctl is-enabled", we should check whether the service
             # is explicitly "enabled" rather than relying on the return code.
             # For example, it returns 0 if the service is "static" or
             # "indirect", but those states don't really count as "enabled".
             (rc, _) = self._rsh(node, "systemctl is-enabled %s | grep enabled" % service)
             return rc == 0
 
         # SYS-V
         (rc, _) = self._rsh(node, "chkconfig --list | grep -e %s.*on" % service)
         return rc == 0
 
     def _detect_at_boot(self):
         """Detect if the cluster starts at boot."""
         if "at-boot" not in self.data:
             self["at-boot"] = self.service_is_enabled(self._target, "corosync") \
                 or self.service_is_enabled(self._target, "pacemaker")
 
     def _detect_ip_offset(self):
         """Detect the offset for IPaddr resources."""
         if self["CIBResource"] and "IPBase" not in self.data:
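             # Find this node's IPv4 network, then use nmap to locate the
             # highest address currently in use on it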
             (_, lines) = self._rsh(self._target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0)
             network = lines[0].strip()
 
             (_, lines) = self._rsh(self._target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0)
 
             try:
                 self["IPBase"] = lines[0].strip()
             except (IndexError, TypeError):
                 self["IPBase"] = None
 
             if not self["IPBase"]:
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self._logger.log("Could not determine an offset for IPaddr resources.  Perhaps nmap is not installed on the nodes.")
                 self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
                 return
 
             # pylint thinks self["IPBase"] is a list, not a string, which causes it
             # to error out because a list doesn't have split().
             # pylint: disable=no-member
             if int(self["IPBase"].split('.')[3]) >= 240:
                 self._logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
                                  % (self["IPBase"], self["IPBase"].split('.')[3]))
                 self["IPBase"] = " fe80::1234:56:7890:1000"
                 self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
 
     def _filter_nodes(self):
         """
         Filter the list of cluster nodes.
 
         If --limit-nodes is given, keep that many nodes from the front of the
         list of cluster nodes and drop the rest.
         """
         if self["node-limit"] > 0:
             if len(self["nodes"]) > self["node-limit"]:
                 # pylint thinks self["node-limit"] is a list even though we initialize
                 # it as an int in __init__ and treat it as an int everywhere.
                 # pylint: disable=bad-string-format-type
                 self._logger.log("Limiting the number of nodes configured=%d (max=%d)"
                                  % (len(self["nodes"]), self["node-limit"]))
 
                 while len(self["nodes"]) > self["node-limit"]:
                     self["nodes"].pop(len(self["nodes"]) - 1)
 
     def _validate(self):
         """Check that we were given all required command line parameters."""
         if not self["nodes"]:
             raise ValueError("No nodes specified!")
 
     def _discover(self):
         """Probe cluster nodes to figure out how to log and manage services."""
         self._target = random.Random().choice(self["nodes"])
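         # Settings probed on this randomly chosen node are assumed to apply
         # to the whole cluster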
 
         exerciser = socket.gethostname()
 
         # Use the IP where possible to avoid name lookup failures
         for ip in socket.gethostbyname_ex(exerciser)[2]:
             if ip != "127.0.0.1":
                 exerciser = ip
                 break
 
         self["cts-exerciser"] = exerciser
 
         self._detect_systemd()
         self._detect_syslog()
         self._detect_at_boot()
         self._detect_ip_offset()
 
     def _parse_args(self, argv):
         """
         Parse and validate command line parameters.
 
         Set the appropriate values in the environment dictionary.  If argv is
         None, use sys.argv instead.
         """
         if not argv:
             argv = sys.argv[1:]
 
         parser = argparse.ArgumentParser(epilog="%s -g virt1 -r --stonith ssh --schema pacemaker-2.0 500" % sys.argv[0])
 
         grp1 = parser.add_argument_group("Common options")
         grp1.add_argument("-g", "--dsh-group", "--group",
                           metavar="GROUP", dest="group",
                           help="Use the nodes listed in the named DSH group (~/.dsh/groups/$name)")
         grp1.add_argument("-l", "--limit-nodes",
                           type=int, default=0,
                           metavar="MAX",
                           help="Only use the first MAX cluster nodes supplied with --nodes")
         grp1.add_argument("--benchmark",
                           action="store_true",
                           help="Add timing information")
         grp1.add_argument("--list", "--list-tests",
                           action="store_true", dest="list_tests",
                           help="List the valid tests")
         grp1.add_argument("--nodes",
                           metavar="NODES",
                           help="List of cluster nodes separated by whitespace")
         grp1.add_argument("--stack",
                           default="corosync",
                           metavar="STACK",
                           help="Which cluster stack is installed")
 
         grp2 = parser.add_argument_group("Options that CTS will usually auto-detect correctly")
         grp2.add_argument("-L", "--logfile",
                           metavar="PATH",
                           help="Where to look for logs from cluster nodes (or 'journal' for systemd journal)")
         grp2.add_argument("--at-boot", "--cluster-starts-at-boot",
                           choices=["1", "0", "yes", "no"],
                           help="Does the cluster software start at boot time?")
         grp2.add_argument("--facility", "--syslog-facility",
                           default="daemon",
                           metavar="NAME",
                           help="Which syslog facility to log to")
         grp2.add_argument("--ip", "--test-ip-base",
                           metavar="IP",
                           help="Offset for generated IP address resources")
 
         grp3 = parser.add_argument_group("Options for release testing")
         grp3.add_argument("-r", "--populate-resources",
                           action="store_true",
                           help="Generate a sample configuration")
         grp3.add_argument("--choose",
                           metavar="NAME",
                           help="Run only the named tests, separated by whitespace")
         grp3.add_argument("--fencing", "--stonith",
                           choices=["1", "0", "yes", "no", "lha", "openstack", "rhcs", "rhevm", "scsi", "ssh", "virt", "xvm"],
                           default="1",
                           help="What fencing agent to use")
         grp3.add_argument("--once",
                           action="store_true",
                           help="Run all valid tests once")
 
         grp4 = parser.add_argument_group("Additional (less common) options")
         grp4.add_argument("-c", "--clobber-cib",
                           action="store_true",
                           help="Erase any existing configuration")
         grp4.add_argument("-y", "--yes",
                           action="store_true", dest="always_continue",
                           help="Continue to run whenever prompted")
         grp4.add_argument("--boot",
                           action="store_true",
                           help="")
         grp4.add_argument("--cib-filename",
                           metavar="PATH",
                           help="Install the given CIB file to the cluster")
         grp4.add_argument("--experimental-tests",
                           action="store_true",
                           help="Include experimental tests")
         grp4.add_argument("--loop-minutes",
                           type=int, default=60,
                           help="")
         grp4.add_argument("--no-loop-tests",
                           action="store_true",
                           help="Don't run looping/time-based tests")
         grp4.add_argument("--no-unsafe-tests",
                           action="store_true",
                           help="Don't run tests that are unsafe for use with ocfs2/drbd")
         grp4.add_argument("--notification-agent",
                           metavar="PATH",
                           default="/var/lib/pacemaker/notify.sh",
                           help="Script to configure for Pacemaker alerts")
         grp4.add_argument("--notification-recipient",
                           metavar="R",
                           default="/var/lib/pacemaker/notify.log",
                           help="Recipient to pass to alert script")
         grp4.add_argument("--oprofile",
                           metavar="NODES",
                           help="List of cluster nodes to run oprofile on")
         grp4.add_argument("--outputfile",
                           metavar="PATH",
                           help="Location to write logs to")
         grp4.add_argument("--qarsh",
                           action="store_true",
                           help="Use QARSH to access nodes instead of SSH")
         grp4.add_argument("--schema",
                           metavar="SCHEMA",
                           default="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION,
                           help="Create a CIB conforming to the given schema")
         grp4.add_argument("--seed",
                           metavar="SEED",
                           help="Use the given string as the random number seed")
         grp4.add_argument("--set",
                           action="append",
                           metavar="ARG",
                           default=[],
                           help="Set key=value pairs (can be specified multiple times)")
         grp4.add_argument("--stonith-args",
                           metavar="ARGS",
                           default="hostlist=all,livedangerously=yes",
                           help="")
         grp4.add_argument("--stonith-type",
                           metavar="TYPE",
                           default="external/ssh",
                           help="")
         grp4.add_argument("--trunc",
                           action="store_true", dest="truncate",
                           help="Truncate log file before starting")
         grp4.add_argument("--valgrind-procs",
                           metavar="PROCS",
                           default="pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd",
                           help="Run valgrind against the given space-separated list of processes")
         grp4.add_argument("--valgrind-tests",
                           action="store_true",
                           help="Include tests using valgrind")
         grp4.add_argument("--warn-inactive",
                           action="store_true",
                           help="Warn if a resource is assigned to an inactive node")
 
         parser.add_argument("iterations",
                             nargs='?',
                             type=int, default=1,
                             help="Number of tests to run")
 
         args = parser.parse_args(args=argv)
 
         # Set values on this object based on what happened with command line
         # processing.  This has to be done in several blocks.
 
         # These values can always be set.  They get a default from the add_argument
         # calls, only do one thing, and they do not have any side effects.
         self["ClobberCIB"] = args.clobber_cib
         self["ListTests"] = args.list_tests
         self["Schema"] = args.schema
         self["Stack"] = args.stack
         self["SyslogFacility"] = args.facility
         self["TruncateLog"] = args.truncate
         self["at-boot"] = args.at_boot in ["1", "yes"]
         self["benchmark"] = args.benchmark
         self["continue"] = args.always_continue
         self["experimental-tests"] = args.experimental_tests
         self["iterations"] = args.iterations
         self["loop-minutes"] = args.loop_minutes
         self["loop-tests"] = not args.no_loop_tests
         self["notification-agent"] = args.notification_agent
         self["notification-recipient"] = args.notification_recipient
         self["node-limit"] = args.limit_nodes
         self["stonith-params"] = args.stonith_args
         self["stonith-type"] = args.stonith_type
         self["unsafe-tests"] = not args.no_unsafe_tests
         self["valgrind-procs"] = args.valgrind_procs
         self["valgrind-tests"] = args.valgrind_tests
         self["warn-inactive"] = args.warn_inactive
 
         # Nodes and groups are mutually exclusive, so their defaults cannot be
         # set in their add_argument calls.  Additionally, groups does more than
         # just set a value.  Here, set nodes first and then if a group is
         # specified, override the previous nodes value.
         if args.nodes:
             self["nodes"] = args.nodes.split(" ")
         else:
             self["nodes"] = []
 
         if args.group:
             self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args.group)
             LogFactory().add_file(self["OutputFile"], "CTS")
 
             dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args.group)
 
             if os.path.isfile(dsh_file):
                 self["nodes"] = []
 
                 with open(dsh_file, "r", encoding="utf-8") as f:
                     for line in f:
                         stripped = line.strip()
 
                         if not stripped.startswith('#'):
                             self["nodes"].append(stripped)
             else:
                 print("Unknown DSH group: %s" % args.group)
 
         # Everything else either can't have a default set in an add_argument
         # call (likely because we don't want to always have a value set for it)
         # or it does something fancier than just set a single value.  However,
         # order does not matter for these as long as the user doesn't provide
         # conflicting arguments on the command line.  So just do everything
         # alphabetically.
         if args.boot:
             self["scenario"] = "boot"
 
         if args.cib_filename:
             self["CIBfilename"] = args.cib_filename
         else:
             self["CIBfilename"] = None
 
         if args.choose:
             self["scenario"] = "sequence"
             self["tests"].extend(args.choose.split())
             self["iterations"] = len(self["tests"])
 
         if args.fencing:
             if args.fencing in ["0", "no"]:
                 self["DoFencing"] = False
             else:
                 self["DoFencing"] = True
 
                 if args.fencing in ["rhcs", "virt", "xvm"]:
                     self["stonith-type"] = "fence_xvm"
 
                 elif args.fencing == "scsi":
                     self["stonith-type"] = "fence_scsi"
 
                 elif args.fencing in ["lha", "ssh"]:
                     self["stonith-params"] = "hostlist=all,livedangerously=yes"
                     self["stonith-type"] = "external/ssh"
 
                 elif args.fencing == "openstack":
                     self["stonith-type"] = "fence_openstack"
 
                     print("Obtaining OpenStack credentials from the current environment")
                     self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % (
                         os.environ['OS_REGION_NAME'],
                         os.environ['OS_TENANT_NAME'],
                         os.environ['OS_AUTH_URL'],
                         os.environ['OS_USERNAME'],
                         os.environ['OS_PASSWORD']
                     )
 
                 elif args.fencing == "rhevm":
                     self["stonith-type"] = "fence_rhevm"
 
                     print("Obtaining RHEV-M credentials from the current environment")
                     self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % (
                         os.environ['RHEVM_USERNAME'],
                         os.environ['RHEVM_PASSWORD'],
                         os.environ['RHEVM_SERVER'],
                         os.environ['RHEVM_PORT'],
                     )
 
         if args.ip:
             self["CIBResource"] = True
             self["ClobberCIB"] = True
             self["IPBase"] = args.ip
 
         if args.logfile == "journal":
             self["LogAuditDisabled"] = True
             self["log_kind"] = LogKind.JOURNAL
         elif args.logfile:
             self["LogAuditDisabled"] = True
             self["LogFileName"] = args.logfile
             self["log_kind"] = LogKind.REMOTE_FILE
         else:
             # We can't set this as the default on the parser.add_argument call
             # for this option because then args.logfile will be set, which means
             # the above branch will be taken and those other values will also be
             # set.
             self["LogFileName"] = "/var/log/messages"
 
         if args.once:
             self["scenario"] = "all-once"
 
         if args.oprofile:
             self["oprofile"] = args.oprofile.split(" ")
         else:
             self["oprofile"] = []
 
         if args.outputfile:
             self["OutputFile"] = args.outputfile
             LogFactory().add_file(self["OutputFile"])
 
         if args.populate_resources:
             self["CIBResource"] = True
             self["ClobberCIB"] = True
 
         if args.qarsh:
             self._rsh.enable_qarsh()
 
         for kv in args.set:
             (name, value) = kv.split("=")
             self[name] = value
             print("Setting %s = %s" % (name, value))
 
 
 class EnvFactory:
     """A class for constructing a singleton instance of an Environment object."""
 
     instance = None
 
     # pylint: disable=invalid-name
     def getInstance(self, args=None):
         """
         Return the previously created instance of Environment.
 
         If no instance exists, create a new instance and return that.
         """
         if not EnvFactory.instance:
             EnvFactory.instance = Environment(args)
 
         return EnvFactory.instance