diff --git a/python/pacemaker/_cts/CTS.py b/python/pacemaker/_cts/CTS.py index cf9f5e35a1..3dc36a2253 100644 --- a/python/pacemaker/_cts/CTS.py +++ b/python/pacemaker/_cts/CTS.py @@ -1,229 +1,230 @@ """Main classes for Pacemaker's Cluster Test Suite (CTS).""" __all__ = ["CtsLab", "NodeStatus", "Process"] __copyright__ = "Copyright 2000-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import sys import time import traceback from pacemaker.exitstatus import ExitStatus from pacemaker._cts.environment import EnvFactory from pacemaker._cts.input import should_continue from pacemaker._cts.logging import LogFactory from pacemaker._cts.remote import RemoteFactory + class CtsLab: """ A class that defines the Lab Environment for the Cluster Test System. It defines those things which are expected to change from test environment to test environment for the same cluster manager. This is where you define the set of nodes that are in your test lab, what kind of reset mechanism you use, etc. All this data is stored as key/value pairs in an Environment instance constructed from arguments passed to this class. The CTS code ignores names it doesn't know about or need. Individual tests have access to this information, and it is perfectly acceptable to provide hints, tweaks, fine-tuning directions, or other information to the tests through this mechanism. """ def __init__(self, args=None): """ Create a new CtsLab instance. This class can be treated kind of like a dictionary due to the presence of typical dict functions like __contains__, __getitem__, and __setitem__. However, it is not a dictionary so do not rely on standard dictionary behavior. Arguments: args -- A list of command line parameters, minus the program name. """ self._env = EnvFactory().getInstance(args) self._logger = LogFactory() def dump(self): """Print the current environment.""" self._env.dump() def __contains__(self, key): """Return True if the given environment key exists.""" # pylint gets confused because of EnvFactory here. # pylint: disable=unsupported-membership-test return key in self._env def __getitem__(self, key): """Return the given environment key, or raise KeyError if it does not exist.""" # Throughout this file, pylint has trouble understanding that EnvFactory # and RemoteFactory are singleton instances that can be treated as callable # and subscriptable objects. Various warnings are disabled because of this. # See also a comment about self._rsh in environment.py. # pylint: disable=unsubscriptable-object return self._env[key] def __setitem__(self, key, value): """Set the given environment key to the given value, overriding any previous value.""" # pylint: disable=unsupported-assignment-operation self._env[key] = value def run(self, scenario, iterations): """ Run the given scenario the given number of times. Returns ExitStatus.OK on success, or ExitStatus.ERROR on error. """ if not scenario: self._logger.log("No scenario was defined") return ExitStatus.ERROR self._logger.log("Cluster nodes: ") # pylint: disable=unsubscriptable-object for node in self._env["nodes"]: self._logger.log(" * %s" % (node)) if not scenario.setup(): return ExitStatus.ERROR # We want to alert on any exceptions caused by running a scenario, so # here it's okay to disable the pylint warning. 
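# As an illustrative sketch (the scenario object and the argument list
# shown are hypothetical, not taken from this diff), a caller might drive
# run() and simply propagate the ExitStatus it returns:
#
#     lab = CtsLab(["--nodes", "node1 node2 node3"])
#     rc = lab.run(scenario, 500)
#     sys.exit(rc)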
# pylint: disable=bare-except try: scenario.run(iterations) except: self._logger.log("Exception by %s" % sys.exc_info()[0]) self._logger.traceback(traceback) scenario.summarize() scenario.teardown() return ExitStatus.ERROR scenario.teardown() scenario.summarize() if scenario.stats["failure"] > 0: return ExitStatus.ERROR if scenario.stats["success"] != iterations: self._logger.log("No failure count but success != requested iterations") return ExitStatus.ERROR return ExitStatus.OK class NodeStatus: """ A class for querying the status of cluster nodes. Are nodes up? Do they respond to SSH connections? """ def __init__(self, env): """ Create a new NodeStatus instance. Arguments: env -- An Environment instance """ self._env = env def _node_booted(self, node): """Return True if the given node is booted (responds to pings).""" # pylint: disable=not-callable (rc, _) = RemoteFactory().getInstance()("localhost", "ping -nq -c1 -w1 %s" % node, verbose=0) return rc == 0 def _sshd_up(self, node): """Return true if sshd responds on the given node.""" # pylint: disable=not-callable (rc, _) = RemoteFactory().getInstance()(node, "true", verbose=0) return rc == 0 def wait_for_node(self, node, timeout=300): """ Wait for a node to become available. Should the timeout be reached, the user will be given a choice whether to continue or not. If not, ValueError will be raised. Returns True when the node is available, or False if the timeout is reached. """ initial_timeout = timeout anytimeouts = False while timeout > 0: if self._node_booted(node) and self._sshd_up(node): if anytimeouts: # Fudge to wait for the system to finish coming up time.sleep(30) LogFactory().debug("Node %s now up" % node) return True time.sleep(30) if not anytimeouts: LogFactory().debug("Waiting for node %s to come up" % node) anytimeouts = True timeout -= 1 LogFactory().log("%s did not come up within %d tries" % (node, initial_timeout)) if not should_continue(self._env["continue"]): raise ValueError("%s did not come up within %d tries" % (node, initial_timeout)) return False def wait_for_all_nodes(self, nodes, timeout=300): """Return True when all nodes come up, or False if the timeout is reached.""" for node in nodes: if not self.wait_for_node(node, timeout): return False return True class Process: """A class for managing a Pacemaker daemon.""" # pylint: disable=invalid-name def __init__(self, cm, name, dc_only=False, pats=None, dc_pats=None, badnews_ignore=None): """ Create a new Process instance. Arguments: cm -- A ClusterManager instance name -- The command being run dc_only -- Should this daemon be killed only on the DC? 
pats -- Regexes we expect to find in log files dc_pats -- Additional DC-specific regexes we expect to find in log files badnews_ignore -- Regexes for lines in the log that can be ignored """ self._cm = cm self.badnews_ignore = badnews_ignore self.dc_only = dc_only self.dc_pats = dc_pats self.name = name self.pats = pats if self.badnews_ignore is None: self.badnews_ignore = [] if self.dc_pats is None: self.dc_pats = [] if self.pats is None: self.pats = [] def kill(self, node): """Kill the instance of this process running on the given node.""" (rc, _) = self._cm.rsh(node, "killall -9 %s" % self.name) if rc != 0: self._cm.log("ERROR: Kill %s failed on node %s" % (self.name, node)) diff --git a/python/pacemaker/_cts/clustermanager.py b/python/pacemaker/_cts/clustermanager.py index d18a019671..7fa016be17 100644 --- a/python/pacemaker/_cts/clustermanager.py +++ b/python/pacemaker/_cts/clustermanager.py @@ -1,895 +1,896 @@ """ClusterManager class for Pacemaker's Cluster Test Suite (CTS).""" __all__ = ["ClusterManager"] __copyright__ = """Copyright 2000-2024 the Pacemaker project contributors. Certain portions by Huang Zhen are copyright 2004 International Business Machines. The version control history for this file may have further details.""" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import os import re import time from collections import UserDict from pacemaker.buildoptions import BuildOptions from pacemaker._cts.CTS import NodeStatus from pacemaker._cts.audits import AuditResource from pacemaker._cts.cib import ConfigFactory from pacemaker._cts.environment import EnvFactory from pacemaker._cts.logging import LogFactory from pacemaker._cts.patterns import PatternSelector from pacemaker._cts.remote import RemoteFactory from pacemaker._cts.watcher import LogWatcher # pylint doesn't understand that self._rsh is callable (it stores the # singleton instance of RemoteExec, as returned by the getInstance method # of RemoteFactory). It's possible we could fix this with type annotations, # but those were introduced with python 3.5 and we only support python 3.4. # I think we could also fix this by getting rid of the getInstance methods, # but that's a project for another day. For now, just disable the warning. # pylint: disable=not-callable # ClusterManager has a lot of methods. # pylint: disable=too-many-public-methods + class ClusterManager(UserDict): """ An abstract base class for managing the cluster. This class implements high-level operations on the cluster and/or its cluster managers. Actual cluster-specific management classes should be subclassed from this one. Among other things, this class tracks the state every node is expected to be in. """ def _final_conditions(self): """Check all keys to make sure they have a non-None value.""" for (key, val) in self._data.items(): if val is None: raise ValueError("Improper derivation: self[%s] must be overridden by subclass." % key) def __init__(self): """ Create a new ClusterManager instance. This class can be treated kind of like a dictionary due to the presence of certain dict functions like __getitem__ and __setitem__. This is because it contains a lot of name/value pairs. However, it is not actually a dictionary so do not rely on standard dictionary behavior. """ # Eventually, ClusterManager should not be a UserDict subclass. Until # that point...
# pylint: disable=super-init-not-called self.__instance_errors_to_ignore = [] self._cib_installed = False self._data = {} self._logger = LogFactory() self.env = EnvFactory().getInstance() self.expected_status = {} self.name = self.env["Name"] # pylint: disable=invalid-name self.ns = NodeStatus(self.env) self.our_node = os.uname()[1].lower() self.partitions_expected = 1 self.rsh = RemoteFactory().getInstance() self.templates = PatternSelector(self.env["Name"]) self._final_conditions() self._cib_factory = ConfigFactory(self) self._cib = self._cib_factory.create_config(self.env["Schema"]) self._cib_sync = {} def __getitem__(self, key): """ Return the given key, checking for it in several places. If key is "Name", return the name of the cluster manager. If the key was previously added to the dictionary via __setitem__, return that. Otherwise, return the template pattern for the key. This method should not be used and may be removed in the future. """ if key == "Name": return self.name print("FIXME: Getting %s from %r" % (key, self)) if key in self._data: return self._data[key] return self.templates.get_patterns(key) def __setitem__(self, key, value): """ Set the given key to the given value, overriding any previous value. This method should not be used and may be removed in the future. """ print("FIXME: Setting %s=%s on %r" % (key, value, self)) self._data[key] = value def clear_instance_errors_to_ignore(self): """Reset instance-specific errors to ignore on each iteration.""" self.__instance_errors_to_ignore = [] @property def instance_errors_to_ignore(self): """Return a list of known errors that should be ignored for a specific test instance.""" return self.__instance_errors_to_ignore @property def errors_to_ignore(self): """Return a list of known error messages that should be ignored.""" return self.templates.get_patterns("BadNewsIgnore") def log(self, args): """Log a message.""" self._logger.log(args) def debug(self, args): """Log a debug message.""" self._logger.debug(args) def upcount(self): """Return how many nodes are up.""" count = 0 for node in self.env["nodes"]: if self.expected_status[node] == "up": count += 1 return count def install_support(self, command="install"): """ Install or uninstall the CTS support files. This includes various init scripts and data, daemons, fencing agents, etc. 
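        A usage sketch (Corosync2 is defined in cmcorosync.py later in this
        diff; the "uninstall" argument is an assumption about what the
        cts-support script accepts):

            cm = Corosync2()
            cm.install_support("install")    # push support files to all nodes
            ...                              # run tests
            cm.install_support("uninstall")  # clean up afterwards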
""" for node in self.env["nodes"]: self.rsh(node, "%s/cts-support %s" % (BuildOptions.DAEMON_DIR, command)) def prepare_fencing_watcher(self): """Return a LogWatcher object that watches for fencing log messages.""" # If we don't have quorum now but get it as a result of starting this node, # then a bunch of nodes might get fenced if self.has_quorum(None): self.debug("Have quorum") return None if not self.templates["Pat:Fencing_start"]: print("No start pattern") return None if not self.templates["Pat:Fencing_ok"]: print("No ok pattern") return None stonith = None stonith_pats = [] for peer in self.env["nodes"]: if self.expected_status[peer] == "up": continue stonith_pats.extend([ self.templates["Pat:Fencing_ok"] % peer, self.templates["Pat:Fencing_start"] % peer, ]) stonith = LogWatcher(self.env["LogFileName"], stonith_pats, self.env["nodes"], self.env["LogWatcher"], "StartupFencing", 0) stonith.set_watch() return stonith def fencing_cleanup(self, node, stonith): """Wait for a previously fenced node to return to the cluster.""" peer_list = [] peer_state = {} self.debug("Looking for nodes that were fenced as a result of %s starting" % node) # If we just started a node, we may now have quorum (and permission to fence) if not stonith: self.debug("Nothing to do") return peer_list q = self.has_quorum(None) if not q and len(self.env["nodes"]) > 2: # We didn't gain quorum - we shouldn't have shot anyone self.debug("Quorum: %s Len: %d" % (q, len(self.env["nodes"]))) return peer_list for n in self.env["nodes"]: peer_state[n] = "unknown" # Now see if any states need to be updated self.debug("looking for: %r" % stonith.regexes) shot = stonith.look(0) while shot: self.debug("Found: %r" % shot) del stonith.regexes[stonith.whichmatch] # Extract node name for n in self.env["nodes"]: if re.search(self.templates["Pat:Fencing_ok"] % n, shot): peer = n peer_state[peer] = "complete" self.__instance_errors_to_ignore.append(self.templates["Pat:Fencing_ok"] % peer) elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot): # TODO: Correctly detect multiple fencing operations for the same host peer = n peer_state[peer] = "in-progress" self.__instance_errors_to_ignore.append(self.templates["Pat:Fencing_start"] % peer) if not peer: self._logger.log("ERROR: Unknown stonith match: %r" % shot) elif not peer in peer_list: self.debug("Found peer: %s" % peer) peer_list.append(peer) # Get the next one shot = stonith.look(60) for peer in peer_list: self.debug(" Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer])) if self.env["at-boot"]: self.expected_status[peer] = "up" else: self.expected_status[peer] = "down" if peer_state[peer] == "in-progress": # Wait for any in-progress operations to complete shot = stonith.look(60) while stonith.regexes and shot: self.debug("Found: %r" % shot) del stonith.regexes[stonith.whichmatch] shot = stonith.look(60) # Now make sure the node is alive too self.ns.wait_for_node(peer, self.env["DeadTime"]) # Poll until it comes up if self.env["at-boot"]: if not self.stat_cm(peer): time.sleep(self.env["StartTime"]) if not self.stat_cm(peer): self._logger.log("ERROR: Peer %s failed to restart after being fenced" % peer) return None return peer_list def start_cm(self, node, verbose=False): """Start up the cluster manager on a given node.""" if verbose: self._logger.log("Starting %s on node %s" % (self.templates["Name"], node)) else: self.debug("Starting %s on node %s" % (self.templates["Name"], node)) if not node in 
self.expected_status: self.expected_status[node] = "down" if self.expected_status[node] != "down": return True # Technically we should always be able to notice ourselves starting patterns = [ self.templates["Pat:Local_started"] % node, ] if self.upcount() == 0: patterns.append(self.templates["Pat:DC_started"] % node) else: patterns.append(self.templates["Pat:NonDC_started"] % node) watch = LogWatcher(self.env["LogFileName"], patterns, self.env["nodes"], self.env["LogWatcher"], "StartaCM", self.env["StartTime"] + 10) self.install_config(node) self.expected_status[node] = "any" if self.stat_cm(node) and self.cluster_stable(self.env["DeadTime"]): self._logger.log("%s was already started" % node) return True stonith = self.prepare_fencing_watcher() watch.set_watch() (rc, _) = self.rsh(node, self.templates["StartCmd"]) if rc != 0: self._logger.log("Warn: Start command failed on node %s" % node) self.fencing_cleanup(node, stonith) return False self.expected_status[node] = "up" watch_result = watch.look_for_all() if watch.unmatched: for regex in watch.unmatched: self._logger.log("Warn: Startup pattern not found: %s" % regex) if watch_result and self.cluster_stable(self.env["DeadTime"]): self.fencing_cleanup(node, stonith) return True if self.stat_cm(node) and self.cluster_stable(self.env["DeadTime"]): self.fencing_cleanup(node, stonith) return True self._logger.log("Warn: Start failed for node %s" % node) return False def start_cm_async(self, node, verbose=False): """Start up the cluster manager on a given node without blocking.""" if verbose: self._logger.log("Starting %s on node %s" % (self["Name"], node)) else: self.debug("Starting %s on node %s" % (self["Name"], node)) self.install_config(node) self.rsh(node, self.templates["StartCmd"], synchronous=False) self.expected_status[node] = "up" def stop_cm(self, node, verbose=False, force=False): """Stop the cluster manager on a given node.""" if verbose: self._logger.log("Stopping %s on node %s" % (self["Name"], node)) else: self.debug("Stopping %s on node %s" % (self["Name"], node)) if self.expected_status[node] != "up" and not force: return True (rc, _) = self.rsh(node, self.templates["StopCmd"]) if rc == 0: # Make sure we can continue even if corosync leaks self.expected_status[node] = "down" self.cluster_stable(self.env["DeadTime"]) return True self._logger.log("ERROR: Could not stop %s on node %s" % (self["Name"], node)) return False def stop_cm_async(self, node): """Stop the cluster manager on a given node without blocking.""" self.debug("Stopping %s on node %s" % (self["Name"], node)) self.rsh(node, self.templates["StopCmd"], synchronous=False) self.expected_status[node] = "down" def startall(self, nodelist=None, verbose=False, quick=False): """Start the cluster manager on every node in the cluster, or on every node in nodelist.""" if not nodelist: nodelist = self.env["nodes"] for node in nodelist: if self.expected_status[node] == "down": self.ns.wait_for_all_nodes(nodelist, 300) if not quick: # This is used for "basic sanity checks", so only start one node ... return self.start_cm(nodelist[0], verbose=verbose) # Approximation of SimulStartList for --boot watchpats = [ self.templates["Pat:DC_IDLE"], ] for node in nodelist: watchpats.extend([ self.templates["Pat:InfraUp"] % node, self.templates["Pat:PacemakerUp"] % node, self.templates["Pat:Local_started"] % node, self.templates["Pat:They_up"] % (nodelist[0], node), ]) # Start all the nodes - at about the same time... 
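        # For a hypothetical nodelist of ["node1", "node2"], the watchpats
        # built above expand to Pat:DC_IDLE once, plus one InfraUp,
        # PacemakerUp and Local_started pattern per node, plus a They_up
        # pattern pairing nodelist[0] with each node, e.g.:
        #
        #     self.templates["Pat:They_up"] % ("node1", "node2")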
watch = LogWatcher(self.env["LogFileName"], watchpats, self.env["nodes"], self.env["LogWatcher"], "fast-start", self.env["DeadTime"] + 10) watch.set_watch() if not self.start_cm(nodelist[0], verbose=verbose): return False for node in nodelist: self.start_cm_async(node, verbose=verbose) watch.look_for_all() if watch.unmatched: for regex in watch.unmatched: self._logger.log("Warn: Startup pattern not found: %s" % regex) if not self.cluster_stable(): self._logger.log("Cluster did not stabilize") return False return True def stopall(self, nodelist=None, verbose=False, force=False): """Stop the cluster manager on every node in the cluster, or on every node in nodelist.""" ret = True if not nodelist: nodelist = self.env["nodes"] for node in self.env["nodes"]: if self.expected_status[node] == "up" or force: if not self.stop_cm(node, verbose=verbose, force=force): ret = False return ret def statall(self, nodelist=None): """Return the status of the cluster manager on every node in the cluster, or on every node in nodelist.""" result = {} if not nodelist: nodelist = self.env["nodes"] for node in nodelist: if self.stat_cm(node): result[node] = "up" else: result[node] = "down" return result def isolate_node(self, target, nodes=None): """Break communication between the target node and all other nodes in the cluster, or nodes.""" if not nodes: nodes = self.env["nodes"] for node in nodes: if node == target: continue (rc, _) = self.rsh(target, self.templates["BreakCommCmd"] % node) if rc != 0: self._logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc)) return False self.debug("Communication cut between %s and %s" % (target, node)) return True def unisolate_node(self, target, nodes=None): """Re-establish communication between the target node and all other nodes in the cluster, or nodes.""" if not nodes: nodes = self.env["nodes"] for node in nodes: if node == target: continue # Limit the amount of time we have asynchronous connectivity for # Restore both sides as simultaneously as possible self.rsh(target, self.templates["FixCommCmd"] % node, synchronous=False) self.rsh(node, self.templates["FixCommCmd"] % target, synchronous=False) self.debug("Communication restored between %s and %s" % (target, node)) def oprofile_start(self, node=None): """Start profiling on the given node, or all nodes in the cluster.""" if not node: for n in self.env["oprofile"]: self.oprofile_start(n) elif node in self.env["oprofile"]: self.debug("Enabling oprofile on %s" % node) self.rsh(node, "opcontrol --init") self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all") self.rsh(node, "opcontrol --start") self.rsh(node, "opcontrol --reset") def oprofile_save(self, test, node=None): """Save profiling data and restart profiling on the given node, or all nodes in the cluster.""" if not node: for n in self.env["oprofile"]: self.oprofile_save(test, n) elif node in self.env["oprofile"]: self.rsh(node, "opcontrol --dump") self.rsh(node, "opcontrol --save=cts.%d" % test) # Read back with: opreport -l session:cts.0 image:/c* self.oprofile_stop(node) self.oprofile_start(node) def oprofile_stop(self, node=None): """ Stop profiling on the given node, or all nodes in the cluster. This does not save profiling data, so call oprofile_save first if needed.
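        An illustrative sketch of the intended workflow (node name and
        session number are hypothetical):

            cm.oprofile_start("node1")    # begin profiling
            ...                           # run a test
            cm.oprofile_save(0, "node1")  # dump and save session cts.0,
                                          # which also restarts profiling
            cm.oprofile_stop("node1")     # shut profiling down entirely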
""" if not node: for n in self.env["oprofile"]: self.oprofile_stop(n) elif node in self.env["oprofile"]: self.debug("Stopping oprofile on %s" % node) self.rsh(node, "opcontrol --reset") self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null") def install_config(self, node): """Remove and re-install the CIB on the first node in the cluster.""" if not self.ns.wait_for_node(node): self.log("Node %s is not up." % node) return if node in self._cib_sync or not self.env["ClobberCIB"]: return self._cib_sync[node] = True self.rsh(node, "rm -f %s/cib*" % BuildOptions.CIB_DIR) # Only install the CIB on the first node, all the other ones will pick it up from there if self._cib_installed: return self._cib_installed = True if self.env["CIBfilename"] is None: self.log("Installing Generated CIB on node %s" % node) self._cib.install(node) else: self.log("Installing CIB (%s) on node %s" % (self.env["CIBfilename"], node)) rc = self.rsh.copy(self.env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)) if rc != 0: raise ValueError("Can not scp file to %s %d" % (node, rc)) self.rsh(node, "chown %s %s/cib.xml" % (BuildOptions.DAEMON_USER, BuildOptions.CIB_DIR)) def prepare(self): """ Finish initialization. Clear out the expected status and record the current status of every node in the cluster. """ self.partitions_expected = 1 for node in self.env["nodes"]: self.expected_status[node] = "" if self.env["experimental-tests"]: self.unisolate_node(node) self.stat_cm(node) def test_node_cm(self, node): """ Check the status of a given node. Returns 0 if the node is down, 1 if the node is up but unstable, and 2 if the node is up and stable. """ watchpats = [ "Current ping state: (S_IDLE|S_NOT_DC)", self.templates["Pat:NonDC_started"] % node, self.templates["Pat:DC_started"] % node, ] idle_watch = LogWatcher(self.env["LogFileName"], watchpats, [node], self.env["LogWatcher"], "ClusterIdle") idle_watch.set_watch() (_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1) if not out: out = "" else: out = out[0].strip() self.debug("Node %s status: '%s'" % (node, out)) if out.find('ok') < 0: if self.expected_status[node] == "up": self.log("Node status for %s is %s but we think it should be %s" % (node, "down", self.expected_status[node])) self.expected_status[node] = "down" return 0 if self.expected_status[node] == "down": self.log("Node status for %s is %s but we think it should be %s: %s" % (node, "up", self.expected_status[node], out)) self.expected_status[node] = "up" # check the output first - because syslog-ng loses messages if out.find('S_NOT_DC') != -1: # Up and stable return 2 if out.find('S_IDLE') != -1: # Up and stable return 2 # fall back to syslog-ng and wait if not idle_watch.look(): # just up self.debug("Warn: Node %s is unstable: %s" % (node, out)) return 1 # Up and stable return 2 def stat_cm(self, node): """Report the status of the cluster manager on a given node.""" return self.test_node_cm(node) > 0 # Being up and being stable is not the same question... 
def node_stable(self, node): """Return whether or not the given node is stable.""" if self.test_node_cm(node) == 2: return True self.log("Warn: Node %s not stable" % node) return False def partition_stable(self, nodes, timeout=None): """Return whether or not all nodes in the given partition are stable.""" watchpats = [ "Current ping state: S_IDLE", self.templates["Pat:DC_IDLE"], ] self.debug("Waiting for cluster stability...") if timeout is None: timeout = self.env["DeadTime"] if len(nodes) < 3: self.debug("Cluster is inactive") return True idle_watch = LogWatcher(self.env["LogFileName"], watchpats, nodes.split(), self.env["LogWatcher"], "ClusterStable", timeout) idle_watch.set_watch() for node in nodes.split(): # have each node dump its current state self.rsh(node, self.templates["StatusCmd"] % node, verbose=1) ret = idle_watch.look() while ret: self.debug(ret) for node in nodes.split(): if re.search(node, ret): return True ret = idle_watch.look() self.debug("Warn: Partition %r not IDLE after %ds" % (nodes, timeout)) return False def cluster_stable(self, timeout=None, double_check=False): """Return whether or not all nodes in the cluster are stable.""" partitions = self.find_partitions() for partition in partitions: if not self.partition_stable(partition, timeout): return False if not double_check: return True # Make sure we are really stable and that all resources, # including those that depend on transient node attributes, # are started if they were going to be time.sleep(5) for partition in partitions: if not self.partition_stable(partition, timeout): return False return True def is_node_dc(self, node, status_line=None): """ Return whether or not the given node is the cluster DC. Check the given status_line, or query the cluster if None. """ if not status_line: (_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1) if out: status_line = out[0].strip() if not status_line: return False if status_line.find('S_IDLE') != -1: return True if status_line.find('S_INTEGRATION') != -1: return True if status_line.find('S_FINALIZE_JOIN') != -1: return True if status_line.find('S_POLICY_ENGINE') != -1: return True if status_line.find('S_TRANSITION_ENGINE') != -1: return True return False def active_resources(self, node): """Return a list of primitive resources active on the given node.""" (_, output) = self.rsh(node, "crm_resource -c", verbose=1) resources = [] for line in output: if not re.search("^Resource", line): continue tmp = AuditResource(self, line) if tmp.type == "primitive" and tmp.host == node: resources.append(tmp.id) return resources def resource_location(self, rid): """Return a list of nodes on which the given resource is running.""" resource_nodes = [] for node in self.env["nodes"]: if self.expected_status[node] != "up": continue cmd = self.templates["RscRunning"] % rid (rc, lines) = self.rsh(node, cmd) if rc == 127: self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd) for line in lines: self.log("Output: %s " % line) elif rc == 0: resource_nodes.append(node) return resource_nodes def find_partitions(self): """ Return a list of all partitions in the cluster. Each element of the list is itself a list of all active nodes in that partition. """ ccm_partitions = [] for node in self.env["nodes"]: if self.expected_status[node] != "up": self.debug("Node %s is down... 
skipping" % node) continue (_, out) = self.rsh(node, self.templates["PartitionCmd"], verbose=1) if not out: self.log("no partition details for %s" % node) continue partition = out[0].strip() if len(partition) <= 2: self.log("bad partition details for %s" % node) continue nodes = partition.split() nodes.sort() partition = ' '.join(nodes) found = 0 for a_partition in ccm_partitions: if partition == a_partition: found = 1 if found == 0: self.debug("Adding partition from %s: %s" % (node, partition)) ccm_partitions.append(partition) else: self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node)) self.debug("Found partitions: %r" % ccm_partitions) return ccm_partitions def has_quorum(self, node_list): """Return whether or not the cluster has quorum.""" # If we are auditing a partition, then one side will # have quorum and the other not. # So the caller needs to tell us which we are checking # If no value for node_list is specified... assume all nodes if not node_list: node_list = self.env["nodes"] for node in node_list: if self.expected_status[node] != "up": continue (_, quorum) = self.rsh(node, self.templates["QuorumCmd"], verbose=1) quorum = quorum[0].strip() if quorum.find("1") != -1: return True if quorum.find("0") != -1: return False self.debug("WARN: Unexpected quorum test result from %s:%s" % (node, quorum)) return False @property def components(self): """ Return a list of all patterns that should be ignored for the cluster's components. This must be provided by all subclasses. """ raise NotImplementedError def in_standby_mode(self, node): """Return whether or not the node is in Standby.""" (_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1) if not out: return False out = out[0].strip() self.debug("Standby result: %s" % out) return out == "on" def set_standby_mode(self, node, status): """ Set node to Standby if status is True, or Active if status is False. Return whether the node is now in the requested status. 
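        An illustrative sketch (node name hypothetical):

            if cm.set_standby_mode("node1", True):
                ...                              # node1 is now in standby
            cm.set_standby_mode("node1", False)  # and back to active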
""" current_status = self.in_standby_mode(node) if current_status == status: return True if status: cmd = self.templates["StandbyCmd"] % (node, "on") else: cmd = self.templates["StandbyCmd"] % (node, "off") (rc, _) = self.rsh(node, cmd) return rc == 0 def add_dummy_rsc(self, node, rid): """Add a dummy resource with the given ID to the given node.""" rsc_xml = """ ' '""" % (rid, rid) constraint_xml = """ ' ' """ % (rid, node, node, rid) self.rsh(node, self.templates['CibAddXml'] % rsc_xml) self.rsh(node, self.templates['CibAddXml'] % constraint_xml) def remove_dummy_rsc(self, node, rid): """Remove the previously added dummy resource given by rid on the given node.""" constraint = "\"//rsc_location[@rsc='%s']\"" % rid rsc = "\"//primitive[@id='%s']\"" % rid self.rsh(node, self.templates['CibDelXpath'] % constraint) self.rsh(node, self.templates['CibDelXpath'] % rsc) diff --git a/python/pacemaker/_cts/cmcorosync.py b/python/pacemaker/_cts/cmcorosync.py index a9d9872ed0..f64b811b94 100644 --- a/python/pacemaker/_cts/cmcorosync.py +++ b/python/pacemaker/_cts/cmcorosync.py @@ -1,74 +1,75 @@ """Corosync-specific class for Pacemaker's Cluster Test Suite (CTS).""" __all__ = ["Corosync2"] __copyright__ = "Copyright 2007-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" from pacemaker._cts.CTS import Process from pacemaker._cts.clustermanager import ClusterManager from pacemaker._cts.patterns import PatternSelector # Throughout this file, pylint has trouble understanding that EnvFactory # is a singleton instance that can be treated as a subscriptable object. # Various warnings are disabled because of this. See also a comment about # self._rsh in environment.py. # pylint: disable=unsubscriptable-object + class Corosync2(ClusterManager): """A subclass of ClusterManager specialized to handle corosync2 and later based clusters.""" def __init__(self): """Create a new Corosync2 instance.""" ClusterManager.__init__(self) self._fullcomplist = {} self.templates = PatternSelector(self.name) @property def components(self): """Return a list of patterns that should be ignored for the cluster's components.""" complist = [] if not self._fullcomplist: common_ignore = self.templates.get_component("common-ignore") daemons = [ "pacemaker-based", "pacemaker-controld", "pacemaker-attrd", "pacemaker-execd", "pacemaker-fenced" ] for c in daemons: badnews = self.templates.get_component("%s-ignore" % c) + common_ignore proc = Process(self, c, pats=self.templates.get_component(c), badnews_ignore=badnews) self._fullcomplist[c] = proc # the scheduler uses dc_pats instead of pats badnews = self.templates.get_component("pacemaker-schedulerd-ignore") + common_ignore proc = Process(self, "pacemaker-schedulerd", dc_pats=self.templates.get_component("pacemaker-schedulerd"), badnews_ignore=badnews) self._fullcomplist["pacemaker-schedulerd"] = proc # add (or replace) extra components badnews = self.templates.get_component("corosync-ignore") + common_ignore proc = Process(self, "corosync", pats=self.templates.get_component("corosync"), badnews_ignore=badnews) self._fullcomplist["corosync"] = proc # Processes running under valgrind can't be shot with "killall -9 processname", # so don't include them in the returned list vgrind = self.env["valgrind-procs"].split() for (key, val) in self._fullcomplist.items(): if self.env["valgrind-tests"] and key in vgrind: self.log("Filtering %s from the component list as it is being profiled by valgrind" % key) continue 
if key == "pacemaker-fenced" and not self.env["DoFencing"]: continue complist.append(val) return complist diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py index 021fb4261a..d983b9bbc6 100644 --- a/python/pacemaker/_cts/environment.py +++ b/python/pacemaker/_cts/environment.py @@ -1,636 +1,638 @@ """Test environment classes for Pacemaker's Cluster Test Suite (CTS).""" __all__ = ["EnvFactory"] __copyright__ = "Copyright 2014-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import argparse import os import random import socket import sys import time from pacemaker.buildoptions import BuildOptions from pacemaker._cts.logging import LogFactory from pacemaker._cts.remote import RemoteFactory from pacemaker._cts.watcher import LogKind + class Environment: """ A class for managing the CTS environment. This consists largely of processing and storing command line parameters. """ # pylint doesn't understand that self._rsh is callable (it stores the # singleton instance of RemoteExec, as returned by the getInstance method # of RemoteFactory). It's possible we could fix this with type annotations, # but those were introduced with python 3.5 and we only support python 3.4. # I think we could also fix this by getting rid of the getInstance methods, # but that's a project for another day. For now, just disable the warning. # pylint: disable=not-callable def __init__(self, args): """ Create a new Environment instance. This class can be treated kind of like a dictionary due to the presence of typical dict functions like __contains__, __getitem__, and __setitem__. However, it is not a dictionary so do not rely on standard dictionary behavior. Arguments: args -- A list of command line parameters, minus the program name. If None, sys.argv will be used. """ self.data = {} self._nodes = [] # Set some defaults before processing command line arguments. These are # either not set by any command line parameter, or they need a default # that can't be set in add_argument. self["DeadTime"] = 300 self["StartTime"] = 300 self["StableTime"] = 30 self["tests"] = [] self["IPagent"] = "IPaddr2" self["DoFencing"] = True self["ClobberCIB"] = False self["CIBfilename"] = None self["CIBResource"] = False self["LogWatcher"] = LogKind.ANY self["node-limit"] = 0 self["scenario"] = "random" self.random_gen = random.Random() self._logger = LogFactory() self._rsh = RemoteFactory().getInstance() self._target = "localhost" self._seed_random() self._parse_args(args) if not self["ListTests"]: self._validate() self._discover() def _seed_random(self, seed=None): """ Initialize the random number generator. Arguments: seed -- Use this to seed the random number generator, or use the current time if None.
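        An illustrative sketch of reproducible runs (the env instance and
        seed value are hypothetical):

            env._seed_random(42)
            env["RandSeed"]          # -> 42
            env.random_gen.random()  # same sequence on every run seeded 42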
""" if not seed: seed = int(time.time()) self["RandSeed"] = seed self.random_gen.seed(str(seed)) def dump(self): """Print the current environment.""" keys = [] for key in list(self.data.keys()): keys.append(key) keys.sort() for key in keys: s = "Environment[%s]" % key self._logger.debug("{key:35}: {val}".format(key=s, val=str(self[key]))) def keys(self): """Return a list of all environment keys stored in this instance.""" return list(self.data.keys()) def __contains__(self, key): """Return True if the given key exists in the environment.""" if key == "nodes": return True return key in self.data def __getitem__(self, key): """Return the given environment key, or None if it does not exist.""" if str(key) == "0": raise ValueError("Bad call to 'foo in X', should reference 'foo in X.keys()' instead") if key == "nodes": return self._nodes if key == "Name": return self._get_stack_short() return self.data.get(key) def __setitem__(self, key, value): """Set the given environment key to the given value, overriding any previous value.""" if key == "Stack": self._set_stack(value) elif key == "node-limit": self.data[key] = value self._filter_nodes() elif key == "nodes": self._nodes = [] for node in value: # I don't think I need the IP address, etc. but this validates # the node name against /etc/hosts and/or DNS, so it's a # GoodThing(tm). try: n = node.strip() socket.gethostbyname_ex(n) self._nodes.append(n) except: self._logger.log("%s not found in DNS... aborting" % node) raise self._filter_nodes() else: self.data[key] = value def random_node(self): """Choose a random node from the cluster.""" return self.random_gen.choice(self["nodes"]) def get(self, key, default=None): """Return the value for key if key is in the environment, else default.""" if key == "nodes": return self._nodes return self.data.get(key, default) def _set_stack(self, name): """Normalize the given cluster stack name.""" if name in ["corosync", "cs", "mcp"]: self.data["Stack"] = "corosync 2+" else: raise ValueError("Unknown stack: %s" % name) def _get_stack_short(self): """Return the short name for the currently set cluster stack.""" if "Stack" not in self.data: return "unknown" if self.data["Stack"] == "corosync 2+": return "crm-corosync" LogFactory().log("Unknown stack: %s" % self["stack"]) raise ValueError("Unknown stack: %s" % self["stack"]) def _detect_systemd(self): """Detect whether systemd is in use on the target node.""" if "have_systemd" not in self.data: (rc, _) = self._rsh(self._target, "systemctl list-units", verbose=0) self["have_systemd"] = rc == 0 def _detect_syslog(self): """Detect the syslog variant in use on the target node.""" if "syslogd" not in self.data: if self["have_systemd"]: # Systemd (_, lines) = self._rsh(self._target, r"systemctl list-units | grep syslog.*\.service.*active.*running | sed 's:.service.*::'", verbose=1) self["syslogd"] = lines[0].strip() else: # SYS-V (_, lines) = self._rsh(self._target, "chkconfig --list | grep syslog.*on | awk '{print $1}' | head -n 1", verbose=1) self["syslogd"] = lines[0].strip() if "syslogd" not in self.data or not self["syslogd"]: # default self["syslogd"] = "rsyslog" def disable_service(self, node, service): """Disable the given service on the given node.""" if self["have_systemd"]: # Systemd (rc, _) = self._rsh(node, "systemctl disable %s" % service) return rc # SYS-V (rc, _) = self._rsh(node, "chkconfig %s off" % service) return rc def enable_service(self, node, service): """Enable the given service on the given node.""" if self["have_systemd"]: # Systemd (rc, _) 
= self._rsh(node, "systemctl enable %s" % service) return rc # SYS-V (rc, _) = self._rsh(node, "chkconfig %s on" % service) return rc def service_is_enabled(self, node, service): """Return True if the given service is enabled on the given node.""" if self["have_systemd"]: # Systemd # With "systemctl is-enabled", we should check if the service is # explicitly "enabled" instead of the return code. For example it returns # 0 if the service is "static" or "indirect", but they don't really count # as "enabled". (rc, _) = self._rsh(node, "systemctl is-enabled %s | grep enabled" % service) return rc == 0 # SYS-V (rc, _) = self._rsh(node, "chkconfig --list | grep -e %s.*on" % service) return rc == 0 def _detect_at_boot(self): """Detect if the cluster starts at boot.""" if "at-boot" not in self.data: self["at-boot"] = self.service_is_enabled(self._target, "corosync") \ or self.service_is_enabled(self._target, "pacemaker") def _detect_ip_offset(self): """Detect the offset for IPaddr resources.""" if self["CIBResource"] and "IPBase" not in self.data: (_, lines) = self._rsh(self._target, "ip addr | grep inet | grep -v -e link -e inet6 -e '/32' -e ' lo' | awk '{print $2}'", verbose=0) network = lines[0].strip() (_, lines) = self._rsh(self._target, "nmap -sn -n %s | grep 'scan report' | awk '{print $NF}' | sed 's:(::' | sed 's:)::' | sort -V | tail -n 1" % network, verbose=0) try: self["IPBase"] = lines[0].strip() except (IndexError, TypeError): self["IPBase"] = None if not self["IPBase"]: self["IPBase"] = " fe80::1234:56:7890:1000" self._logger.log("Could not determine an offset for IPaddr resources. Perhaps nmap is not installed on the nodes.") self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"]) return # pylint thinks self["IPBase"] is a list, not a string, which causes it # to error out because a list doesn't have split(). # pylint: disable=no-member if int(self["IPBase"].split('.')[3]) >= 240: self._logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s" % (self["IPBase"], self["IPBase"].split('.')[3])) self["IPBase"] = " fe80::1234:56:7890:1000" self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"]) def _filter_nodes(self): """ Filter the list of cluster nodes. If --limit-nodes is given, keep that many nodes from the front of the list of cluster nodes and drop the rest. """ if self["node-limit"] > 0: if len(self["nodes"]) > self["node-limit"]: # pylint thinks self["node-limit"] is a list even though we initialize # it as an int in __init__ and treat it as an int everywhere. # pylint: disable=bad-string-format-type self._logger.log("Limiting the number of nodes configured=%d (max=%d)" % (len(self["nodes"]), self["node-limit"])) while len(self["nodes"]) > self["node-limit"]: self["nodes"].pop(len(self["nodes"])-1) def _validate(self): """Check that we were given all required command line parameters.""" if not self["nodes"]: raise ValueError("No nodes specified!") def _discover(self): """Probe cluster nodes to figure out how to log and manage services.""" self._target = random.Random().choice(self["nodes"]) exerciser = socket.gethostname() # Use the IP where possible to avoid name lookup failures for ip in socket.gethostbyname_ex(exerciser)[2]: if ip != "127.0.0.1": exerciser = ip break self["cts-exerciser"] = exerciser self._detect_systemd() self._detect_syslog() self._detect_at_boot() self._detect_ip_offset() def _parse_args(self, argv): """ Parse and validate command line parameters. 
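        (For illustration: a hypothetical call such as
        _parse_args(["--nodes", "node1 node2", "--once", "25"]) would leave
        self["nodes"] == ["node1", "node2"], self["scenario"] == "all-once"
        and self["iterations"] == 25 after the blocks below run.)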
Set the appropriate values in the environment dictionary. If argv is None, use sys.argv instead. """ if not argv: argv = sys.argv[1:] parser = argparse.ArgumentParser(epilog="%s -g virt1 -r --stonith ssh --schema pacemaker-2.0 500" % sys.argv[0]) grp1 = parser.add_argument_group("Common options") grp1.add_argument("-g", "--dsh-group", "--group", metavar="GROUP", dest="group", help="Use the nodes listed in the named DSH group (~/.dsh/groups/$name)") grp1.add_argument("-l", "--limit-nodes", type=int, default=0, metavar="MAX", help="Only use the first MAX cluster nodes supplied with --nodes") grp1.add_argument("--benchmark", action="store_true", help="Add timing information") grp1.add_argument("--list", "--list-tests", action="store_true", dest="list_tests", help="List the valid tests") grp1.add_argument("--nodes", metavar="NODES", help="List of cluster nodes separated by whitespace") grp1.add_argument("--stack", default="corosync", metavar="STACK", help="Which cluster stack is installed") grp2 = parser.add_argument_group("Options that CTS will usually auto-detect correctly") grp2.add_argument("-L", "--logfile", metavar="PATH", help="Where to look for logs from cluster nodes") grp2.add_argument("--at-boot", "--cluster-starts-at-boot", choices=["1", "0", "yes", "no"], help="Does the cluster software start at boot time?") grp2.add_argument("--facility", "--syslog-facility", default="daemon", metavar="NAME", help="Which syslog facility to log to") grp2.add_argument("--ip", "--test-ip-base", metavar="IP", help="Offset for generated IP address resources") grp3 = parser.add_argument_group("Options for release testing") grp3.add_argument("-r", "--populate-resources", action="store_true", help="Generate a sample configuration") grp3.add_argument("--choose", metavar="NAME", help="Run only the named test") grp3.add_argument("--fencing", "--stonith", choices=["1", "0", "yes", "no", "lha", "openstack", "rhcs", "rhevm", "scsi", "ssh", "virt", "xvm"], default="1", help="What fencing agent to use") grp3.add_argument("--once", action="store_true", help="Run all valid tests once") grp4 = parser.add_argument_group("Additional (less common) options") grp4.add_argument("-c", "--clobber-cib", action="store_true", help="Erase any existing configuration") grp4.add_argument("-y", "--yes", action="store_true", dest="always_continue", help="Continue to run whenever prompted") grp4.add_argument("--boot", action="store_true", help="") grp4.add_argument("--cib-filename", metavar="PATH", help="Install the given CIB file to the cluster") grp4.add_argument("--experimental-tests", action="store_true", help="Include experimental tests") grp4.add_argument("--loop-minutes", type=int, default=60, help="") grp4.add_argument("--no-loop-tests", action="store_true", help="Don't run looping/time-based tests") grp4.add_argument("--no-unsafe-tests", action="store_true", help="Don't run tests that are unsafe for use with ocfs2/drbd") grp4.add_argument("--notification-agent", metavar="PATH", default="/var/lib/pacemaker/notify.sh", help="Script to configure for Pacemaker alerts") grp4.add_argument("--notification-recipient", metavar="R", default="/var/lib/pacemaker/notify.log", help="Recipient to pass to alert script") grp4.add_argument("--oprofile", metavar="NODES", help="List of cluster nodes to run oprofile on") grp4.add_argument("--outputfile", metavar="PATH", help="Location to write logs to") grp4.add_argument("--qarsh", action="store_true", help="Use QARSH to access nodes instead of SSH") grp4.add_argument("--schema", 
metavar="SCHEMA", default="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION, help="Create a CIB conforming to the given schema") grp4.add_argument("--seed", metavar="SEED", help="Use the given string as the random number seed") grp4.add_argument("--set", action="append", metavar="ARG", default=[], help="Set key=value pairs (can be specified multiple times)") grp4.add_argument("--stonith-args", metavar="ARGS", default="hostlist=all,livedangerously=yes", help="") grp4.add_argument("--stonith-type", metavar="TYPE", default="external/ssh", help="") grp4.add_argument("--trunc", action="store_true", dest="truncate", help="Truncate log file before starting") grp4.add_argument("--valgrind-procs", metavar="PROCS", default="pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-fenced pacemaker-schedulerd", help="Run valgrind against the given space-separated list of processes") grp4.add_argument("--valgrind-tests", action="store_true", help="Include tests using valgrind") grp4.add_argument("--warn-inactive", action="store_true", help="Warn if a resource is assigned to an inactive node") parser.add_argument("iterations", nargs='?', type=int, default=1, help="Number of tests to run") args = parser.parse_args(args=argv) # Set values on this object based on what happened with command line # processing. This has to be done in several blocks. # These values can always be set. They get a default from the add_argument # calls, only do one thing, and they do not have any side effects. self["ClobberCIB"] = args.clobber_cib self["ListTests"] = args.list_tests self["Schema"] = args.schema self["Stack"] = args.stack self["SyslogFacility"] = args.facility self["TruncateLog"] = args.truncate self["at-boot"] = args.at_boot in ["1", "yes"] self["benchmark"] = args.benchmark self["continue"] = args.always_continue self["experimental-tests"] = args.experimental_tests self["iterations"] = args.iterations self["loop-minutes"] = args.loop_minutes self["loop-tests"] = not args.no_loop_tests self["notification-agent"] = args.notification_agent self["notification-recipient"] = args.notification_recipient self["node-limit"] = args.limit_nodes self["stonith-params"] = args.stonith_args self["stonith-type"] = args.stonith_type self["unsafe-tests"] = not args.no_unsafe_tests self["valgrind-procs"] = args.valgrind_procs self["valgrind-tests"] = args.valgrind_tests self["warn-inactive"] = args.warn_inactive # Nodes and groups are mutually exclusive, so their defaults cannot be # set in their add_argument calls. Additionally, groups does more than # just set a value. Here, set nodes first and then if a group is # specified, override the previous nodes value. if args.nodes: self["nodes"] = args.nodes.split(" ") else: self["nodes"] = [] if args.group: self["OutputFile"] = "%s/cluster-%s.log" % (os.environ['HOME'], args.dsh_group) LogFactory().add_file(self["OutputFile"], "CTS") dsh_file = "%s/.dsh/group/%s" % (os.environ['HOME'], args.dsh_group) if os.path.isfile(dsh_file): self["nodes"] = [] with open(dsh_file, "r", encoding="utf-8") as f: for line in f: l = line.strip() if not l.startswith('#'): self["nodes"].append(l) else: print("Unknown DSH group: %s" % args.dsh_group) # Everything else either can't have a default set in an add_argument # call (likely because we don't want to always have a value set for it) # or it does something fancier than just set a single value. However, # order does not matter for these as long as the user doesn't provide # conflicting arguments on the command line. 
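        # (For example, a hypothetical "--fencing rhcs --stonith-type
        # fence_scsi" command line would have self["stonith-type"] set from
        # args.stonith_type above, then silently rewritten to "fence_xvm"
        # by the fencing branch below.)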
So just do everything alphabetically. if args.boot: self["scenario"] = "boot" if args.cib_filename: self["CIBfilename"] = args.cib_filename else: self["CIBfilename"] = None if args.choose: self["scenario"] = "sequence" self["tests"].append(args.choose) if args.fencing: if args.fencing in ["0", "no"]: self["DoFencing"] = False else: self["DoFencing"] = True if args.fencing in ["rhcs", "virt", "xvm"]: self["stonith-type"] = "fence_xvm" elif args.fencing == "scsi": self["stonith-type"] = "fence_scsi" elif args.fencing in ["lha", "ssh"]: self["stonith-params"] = "hostlist=all,livedangerously=yes" self["stonith-type"] = "external/ssh" elif args.fencing == "openstack": self["stonith-type"] = "fence_openstack" print("Obtaining OpenStack credentials from the current environment") self["stonith-params"] = "region=%s,tenant=%s,auth=%s,user=%s,password=%s" % ( os.environ['OS_REGION_NAME'], os.environ['OS_TENANT_NAME'], os.environ['OS_AUTH_URL'], os.environ['OS_USERNAME'], os.environ['OS_PASSWORD'] ) elif args.fencing == "rhevm": self["stonith-type"] = "fence_rhevm" print("Obtaining RHEV-M credentials from the current environment") self["stonith-params"] = "login=%s,passwd=%s,ipaddr=%s,ipport=%s,ssl=1,shell_timeout=10" % ( os.environ['RHEVM_USERNAME'], os.environ['RHEVM_PASSWORD'], os.environ['RHEVM_SERVER'], os.environ['RHEVM_PORT'], ) if args.ip: self["CIBResource"] = True self["ClobberCIB"] = True self["IPBase"] = args.ip if args.logfile: self["LogAuditDisabled"] = True self["LogFileName"] = args.logfile self["LogWatcher"] = LogKind.REMOTE_FILE else: # We can't set this as the default on the parser.add_argument call # for this option because then args.logfile will be set, which means # the above branch will be taken and those other values will also be # set. self["LogFileName"] = "/var/log/messages" if args.once: self["scenario"] = "all-once" if args.oprofile: self["oprofile"] = args.oprofile.split(" ") else: self["oprofile"] = [] if args.outputfile: self["OutputFile"] = args.outputfile LogFactory().add_file(self["OutputFile"]) if args.populate_resources: self["CIBResource"] = True self["ClobberCIB"] = True if args.qarsh: self._rsh.enable_qarsh() for kv in args.set: (name, value) = kv.split("=") self[name] = value print("Setting %s = %s" % (name, value)) + class EnvFactory: """A class for constructing a singleton instance of an Environment object.""" instance = None # pylint: disable=invalid-name def getInstance(self, args=None): """ Return the previously created instance of Environment. If no instance exists, create a new instance and return that. """ if not EnvFactory.instance: EnvFactory.instance = Environment(args) return EnvFactory.instance diff --git a/python/pacemaker/_cts/input.py b/python/pacemaker/_cts/input.py index 86afcdb3cc..739d3718f3 100644 --- a/python/pacemaker/_cts/input.py +++ b/python/pacemaker/_cts/input.py @@ -1,17 +1,18 @@ """User input related utilities for CTS.""" __all__ = ["should_continue"] __copyright__ = "Copyright 2000-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" + def should_continue(env): """On failure, prompt the user to see if we should continue.""" if env["continue"]: return True try: answer = input("Continue?
[yN]") except EOFError: answer = "n" return answer in ["y", "Y"] diff --git a/python/pacemaker/_cts/network.py b/python/pacemaker/_cts/network.py index 0f92f71e8b..6ba776c97d 100644 --- a/python/pacemaker/_cts/network.py +++ b/python/pacemaker/_cts/network.py @@ -1,58 +1,59 @@ """Network related utilities for CTS.""" __all__ = ["next_ip"] __copyright__ = "Copyright 2000-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" # pylint: disable=global-statement CURRENT_IP = None + def next_ip(ip_base=None, reset=False): """ Return the next available IP address. This function only increments the last portion of the IP address. Once it has hit the upper limit, ValueError will be raised. Arguments: ip_base -- The initial IP address to start from. The first call to next_ip will return the next IP address from this base. Each subsequent call will return the next address from the previous call, so you can just omit this argument for subsequent calls. reset -- Force next_ip to start from ip_base again. This requires also passing the ip_base argument. (Mostly useful for unit testing, but may be useful elsewhere). """ global CURRENT_IP if CURRENT_IP is None or reset: CURRENT_IP = ip_base new_ip = None # Split the existing IP address up into a tuple of: # (everything except the last part of the addr, the separator, the last part of the addr). # For instance, "192.168.1.2" becomes ("192.168.1", ".", "2"). Then, # increment the last part of the address and paste everything back # together. if ":" in CURRENT_IP: # This is an IPv6 address fields = CURRENT_IP.rpartition(":") new_ip = int(fields[2], 16) + 1 if new_ip > 0xffff: raise ValueError("No more available IP addresses") # hex() puts "0x" at the front of the string, so strip it off. new_ip = hex(new_ip)[2:] else: # This is an IPv4 address fields = CURRENT_IP.rpartition(".") new_ip = int(fields[2]) + 1 if new_ip > 255: raise ValueError("No more available IP addresses") CURRENT_IP = "%s%s%s" % (fields[0], fields[1], new_ip) return CURRENT_IP diff --git a/python/pacemaker/_cts/patterns.py b/python/pacemaker/_cts/patterns.py index 9fbde5d33f..5d77c71429 100644 --- a/python/pacemaker/_cts/patterns.py +++ b/python/pacemaker/_cts/patterns.py @@ -1,395 +1,396 @@ """Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS).""" __all__ = ["PatternSelector"] __copyright__ = "Copyright 2008-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+)" import argparse from pacemaker.buildoptions import BuildOptions + class BasePatterns: """ The base class for holding a stack-specific set of command and log file/stdout patterns. Stack-specific classes need to be built on top of this one. """ def __init__(self): """Create a new BasePatterns instance which holds a very minimal set of basic patterns.""" self._bad_news = [] self._components = {} self._name = "crm-base" self._ignore = [ "avoid confusing Valgrind", # Logging bug in some versions of libvirtd r"libvirtd.*: internal error: Failed to parse PCI config address", # pcs can log this when node is fenced, but fencing is OK in some # tests (and we will catch it in pacemaker logs when not OK) r"pcs.daemon:No response from: .* request: get_configs, error:", # This is overbroad, but there's no way to say that only certain # transition errors are acceptable. We have to rely on causes of a # transition error logging their own error message, which should # always be the case. 
r"pacemaker-schedulerd.* Calculated transition .*/pe-error", ] self._commands = { "StatusCmd" : "crmadmin -t 60 -S %s 2>/dev/null", "CibQuery" : "cibadmin -Ql", "CibAddXml" : "cibadmin --modify -c --xml-text %s", "CibDelXpath" : "cibadmin --delete --xpath %s", "RscRunning" : BuildOptions.DAEMON_DIR + "/cts-exec-helper -R -r %s", "CIBfile" : "%s:" + BuildOptions.CIB_DIR + "/cib.xml", "TmpDir" : "/tmp", "BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1", "FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1", "MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''", "MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"", "StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null", "StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null", } self._search = { "Pat:DC_IDLE" : r"pacemaker-controld.*State transition.*-> S_IDLE", # This won't work if we have multiple partitions "Pat:Local_started" : r"%s\W.*controller successfully started", "Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC", "Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE", "Pat:We_stopped" : r"%s\W.*OVERRIDE THIS PATTERN", "Pat:They_stopped" : r"%s\W.*LOST:.* %s ", "Pat:They_dead" : r"node %s.*: is dead", "Pat:They_up" : r"%s %s\W.*OVERRIDE THIS PATTERN", "Pat:TransitionComplete" : "Transition status: Complete: complete", "Pat:Fencing_start" : r"Requesting peer fencing .* targeting %s", "Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* for .*@.*: OK", "Pat:Fencing_recover" : r"pacemaker-schedulerd.*: Recover\s+%s", "Pat:Fencing_active" : r"stonith resource .* is active on 2 nodes (attempting recovery)", "Pat:Fencing_probe" : r"pacemaker-controld.* Result of probe operation for %s on .*: Error", "Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok", "Pat:RscOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ", "Pat:CloneOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ", "Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok", "Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK", } def get_component(self, key): """ Return the patterns for a single component as a list, given by key. This is typically the name of some subprogram (pacemaker-based, pacemaker-fenced, etc.) or various special purpose keys. If key is unknown, return an empty list. """ if key in self._components: return self._components[key] print("Unknown component '%s' for %s" % (key, self._name)) return [] def get_patterns(self, key): """ Return various patterns supported by this object, given by key. Depending on the key, this could either be a list or a hash. If key is unknown, return None. 
""" if key == "BadNews": return self._bad_news if key == "BadNewsIgnore": return self._ignore if key == "Commands": return self._commands if key == "Search": return self._search if key == "Components": return self._components print("Unknown pattern '%s' for %s" % (key, self._name)) return None def __getitem__(self, key): if key == "Name": return self._name if key in self._commands: return self._commands[key] if key in self._search: return self._search[key] print("Unknown template '%s' for %s" % (key, self._name)) return None class Corosync2Patterns(BasePatterns): """Patterns for Corosync version 2 cluster manager class.""" def __init__(self): BasePatterns.__init__(self) self._name = "crm-corosync" self._commands.update({ "StartCmd" : "service corosync start && service pacemaker start", "StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop", "EpochCmd" : "crm_node -e", "QuorumCmd" : "crm_node -q", "PartitionCmd" : "crm_node -p", }) self._search.update({ # Close enough ... "Corosync Cluster Engine exiting normally" isn't # printed reliably. "Pat:We_stopped" : r"%s\W.*Unloading all Corosync service engines", "Pat:They_stopped" : r"%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost", "Pat:They_dead" : r"pacemaker-controld.*Node %s(\[|\s).*state is now lost", "Pat:They_up" : r"\W%s\W.*pacemaker-controld.*Node %s state is now member", "Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(", # "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes() "Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)", "Pat:ChildRespawn" : r"%s\W.*pacemakerd.*Respawning %s subdaemon after unexpected exit", "Pat:InfraUp" : r"%s\W.*corosync.*Initializing transport", "Pat:PacemakerUp" : r"%s\W.*pacemakerd.*Starting Pacemaker", }) self._ignore += [ r"crm_mon:", r"crmadmin:", r"update_trace_data", r"async_notify:.*strange, client not found", r"Parse error: Ignoring unknown option .*nodename", r"error.*: Operation 'reboot' .* using FencingFail returned ", r"getinfo response error: 1$", r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE", r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)", ] self._bad_news = [ r"[^(]error:", r"crit:", r"ERROR:", r"CRIT:", r"Shutting down...NOW", r"Timer I_TERMINATE just popped", r"input=I_ERROR", r"input=I_FAIL", r"input=I_INTEGRATED cause=C_TIMER_POPPED", r"input=I_FINALIZED cause=C_TIMER_POPPED", r"input=I_ERROR", r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting", r"schedulerd.*Attempting recovery of resource", r"is taking more than 2x its timeout", r"Confirm not received from", r"Welcome reply not received from", r"Attempting to schedule .* after a stop", r"Resource .* was active at shutdown", r"duplicate entries for call_id", r"Search terminated:", r":global_timer_callback", r"Faking parameter digest creation", r"Parameters to .* action changed:", r"Parameters to .* changed", r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)", r"pacemaker-schedulerd.*Recover\s+.*\(.* -\> .*\)", r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting", r"Peer is not part of our cluster", r"We appear to be in an election loop", r"Unknown node -> we will not deliver message", r"(Blackbox dump requested|Problem detected)", r"pacemakerd.*Could not connect to Cluster Configuration Database API", r"Receiving messages from a node we think is dead", r"share the same cluster nodeid", r"share the same 
name", r"pacemaker-controld:.*Transition failed: terminated", r"Local CIB .* differs from .*:", r"warn.*:\s*Continuing but .* will NOT be used", r"warn.*:\s*Cluster configuration file .* is corrupt", r"Election storm", r"stalled the FSA with pending inputs", ] self._components["common-ignore"] = [ r"Pending action:", r"resource( was|s were) active at shutdown", r"pending LRM operations at shutdown", r"Lost connection to the CIB manager", r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported", r"pacemaker-controld.*:\s*Exiting now due to errors", r".*:\s*Requesting fencing \([^)]+\) targeting node ", r"(Blackbox dump requested|Problem detected)", ] self._components["corosync-ignore"] = [ r"Could not connect to Corosync CFG: CS_ERR_LIBRARY", r"error:.*Connection to the CPG API failed: Library error", r"\[[0-9]+\] exited with status [0-9]+ \(", r"\[[0-9]+\] terminated with signal 15", r"pacemaker-based.*error:.*Corosync connection lost", r"pacemaker-fenced.*error:.*Corosync connection terminated", r"pacemaker-controld.*State transition .* S_RECOVERY", r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state", r"pacemaker-controld.*error:.*Could not recover from internal error", r"error:.*Connection to cib_(shm|rw).* (failed|closed)", r"error:.*cib_(shm|rw) IPC provider disconnected while waiting", r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"error: Lost fencer connection", ] self._components["corosync"] = [ # We expect each daemon to lose its cluster connection. # However, if the CIB manager loses its connection first, # it's possible for another daemon to lose that connection and # exit before losing the cluster connection. r"pacemakerd.*:\s*warning:.*Lost connection to cluster layer", r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (Corosync process group|the CIB manager)", r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer", r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)", r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)", r"schedulerd.*Scheduling node .* for fencing", r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK", ] self._components["pacemaker-based"] = [ r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102", r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1", r"pacemakerd.* Respawning pacemaker-attrd subdaemon after unexpected exit", r"pacemakerd.* Respawning pacemaker-based subdaemon after unexpected exit", r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit", r"pacemakerd.* Respawning pacemaker-fenced subdaemon after unexpected exit", r"pacemaker-.* Connection to cib_.* (failed|closed)", r"pacemaker-attrd.*:.*Lost connection to the CIB manager", r"pacemaker-controld.*:.*Lost connection to the CIB manager", r"pacemaker-controld.*I_ERROR.*handle_cib_disconnect", r"pacemaker-controld.* State transition .* S_RECOVERY", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*Could not recover from internal error", ] self._components["pacemaker-based-ignore"] = [ r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)", r"pacemaker-controld.*:Could not connect to attrd: Connection refused", ] self._components["pacemaker-execd"] = [ r"pacemaker-controld.*Lost connection to local executor", 
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy", r"pacemaker-controld.*State transition .* S_RECOVERY", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*Could not recover from internal error", r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1", r"pacemakerd.* Respawning pacemaker-execd subdaemon after unexpected exit", r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit", ] self._components["pacemaker-execd-ignore"] = [ r"pacemaker-(attrd|controld).*Connection to lrmd.* (failed|closed)", r"pacemaker-(attrd|controld).*Could not execute alert", ] self._components["pacemaker-controld"] = [ r"State transition .* -> S_IDLE", ] self._components["pacemaker-controld-ignore"] = [] self._components["pacemaker-attrd"] = [] self._components["pacemaker-attrd-ignore"] = [] self._components["pacemaker-schedulerd"] = [ r"State transition .* S_RECOVERY", r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit", r"pacemaker-controld\[[0-9]+\] exited with status 1 \(", r"pacemaker-controld.*Lost connection to the scheduler", r"pacemaker-controld.*I_ERROR.*save_cib_contents", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*Could not recover from internal error", ] self._components["pacemaker-schedulerd-ignore"] = [ r"Connection to pengine.* (failed|closed)", ] self._components["pacemaker-fenced"] = [ r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"Lost fencer connection", r"pacemaker-controld.*Fencer successfully connected", ] self._components["pacemaker-fenced-ignore"] = [ r"(error|warning):.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"error:.*Lost fencer connection", r"error:.*Fencer connection failed \(will retry\)", r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)", ] self._components["pacemaker-fenced-ignore"].extend(self._components["common-ignore"]) patternVariants = { "crm-base": BasePatterns, "crm-corosync": Corosync2Patterns } class PatternSelector: """Choose from among several Pattern objects and return the information from that object.""" def __init__(self, name="crm-corosync"): """ Create a new PatternSelector object. Instantiate whatever class is given by name. Defaults to Corosync2Patterns for "crm-corosync" or None. While other objects could be supported in the future, only this and the base object are supported at this time. """ self._name = name # If no name was given, use the default. Otherwise, look up the appropriate # class in patternVariants, instantiate it, and use that. if not name: self._base = Corosync2Patterns() else: self._base = patternVariants[name]() def get_patterns(self, kind): """Call get_patterns on the previously instantiated pattern object.""" return self._base.get_patterns(kind) def get_template(self, key): """ Return a single pattern from the previously instantiated pattern object. If no pattern exists for the given key, return None. 
""" return self._base[key] def get_component(self, kind): """Call get_component on the previously instantiated pattern object.""" return self._base.get_component(kind) def __getitem__(self, key): """Return the pattern for the given key, or None if it does not exist.""" return self.get_template(key) # PYTHONPATH=python python python/pacemaker/_cts/patterns.py -k crm-corosync -t StartCmd if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-k", "--kind", metavar="KIND") parser.add_argument("-t", "--template", metavar="TEMPLATE") args = parser.parse_args() print(PatternSelector(args.kind)[args.template]) diff --git a/python/pacemaker/_cts/process.py b/python/pacemaker/_cts/process.py index 4405dc9fa3..c25ce33ea7 100644 --- a/python/pacemaker/_cts/process.py +++ b/python/pacemaker/_cts/process.py @@ -1,71 +1,72 @@ """A module for managing and communicating with external processes.""" __all__ = ["killall", "exit_if_proc_running", "pipe_communicate", "stdout_from_command"] __copyright__ = "Copyright 2009-2023 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+)" import subprocess import sys import psutil from pacemaker.exitstatus import ExitStatus + def killall(process_names, terminate=False): """Kill all instances of every process in a list.""" if not process_names: return if not isinstance(process_names, list): process_names = [process_names] procs = [] for proc in psutil.process_iter(["name"]): if proc.info["name"] in process_names: procs.append(proc) if terminate: for proc in procs: proc.terminate() _, alive = psutil.wait_procs(procs, timeout=3) procs = alive for proc in procs: proc.kill() def is_proc_running(process_name): """Check whether a process with a given name is running.""" for proc in psutil.process_iter(["name"]): if proc.info["name"] == process_name: return True return False def exit_if_proc_running(process_name): """Exit with error if a given process is running.""" if is_proc_running(process_name): print("Error: %s is already running!" % process_name) print("Run %s only when the cluster is stopped." % sys.argv[0]) sys.exit(ExitStatus.ERROR) def pipe_communicate(pipes, check_stderr=False, stdin=None): """Get text output from pipes.""" if stdin is not None: pipe_outputs = pipes.communicate(input=stdin.encode()) else: pipe_outputs = pipes.communicate() output = pipe_outputs[0].decode(sys.stdout.encoding) if check_stderr: output += pipe_outputs[1].decode(sys.stderr.encoding) return output def stdout_from_command(args): """Execute command and return its standard output.""" with subprocess.Popen(args, stdout=subprocess.PIPE) as p: p.wait() return pipe_communicate(p).split("\n") diff --git a/python/pacemaker/_cts/remote.py b/python/pacemaker/_cts/remote.py index b216998e3b..ba5b878f13 100644 --- a/python/pacemaker/_cts/remote.py +++ b/python/pacemaker/_cts/remote.py @@ -1,276 +1,279 @@ """Remote command runner for Pacemaker's Cluster Test Suite (CTS).""" __all__ = ["RemoteExec", "RemoteFactory"] __copyright__ = "Copyright 2014-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import re import os from subprocess import Popen, PIPE from threading import Thread from pacemaker._cts.logging import LogFactory + def convert2string(lines): """ Convert byte strings to UTF-8 strings. Lists of byte strings are converted to a list of UTF-8 strings. All other text formats are passed through. 
""" if isinstance(lines, bytes): return lines.decode("utf-8") if isinstance(lines, list): lst = [] for line in lines: if isinstance(line, bytes): line = line.decode("utf-8") lst.append(line) return lst return lines + class AsyncCmd(Thread): """A class for doing the hard work of running a command on another machine.""" def __init__(self, node, command, proc=None, delegate=None): """ Create a new AsyncCmd instance. Arguments: node -- The remote machine to run on command -- The ssh command string to use for remote execution proc -- If not None, a process object previously created with Popen. Instead of spawning a new process, we will then wait on this process to finish and handle its output. delegate -- When the command completes, call the async_complete method on this object """ self._command = command self._delegate = delegate self._logger = LogFactory() self._node = node self._proc = proc Thread.__init__(self) def run(self): """Run the previously instantiated AsyncCmd object.""" out = None err = None if not self._proc: # pylint: disable=consider-using-with self._proc = Popen(self._command, stdout=PIPE, stderr=PIPE, close_fds=True, shell=True) self._logger.debug("cmd: async: target=%s, pid=%d: %s" % (self._node, self._proc.pid, self._command)) self._proc.wait() if self._delegate: self._logger.debug("cmd: pid %d returned %d to %r" % (self._proc.pid, self._proc.returncode, self._delegate)) else: self._logger.debug("cmd: pid %d returned %d" % (self._proc.pid, self._proc.returncode)) if self._proc.stderr: err = self._proc.stderr.readlines() self._proc.stderr.close() for line in err: self._logger.debug("cmd: stderr[%d]: %s" % (self._proc.pid, line)) err = convert2string(err) if self._proc.stdout: out = self._proc.stdout.readlines() self._proc.stdout.close() out = convert2string(out) if self._delegate: self._delegate.async_complete(self._proc.pid, self._proc.returncode, out, err) + class RemoteExec: """ An abstract class for remote execution. It runs a command on another machine using ssh and scp. """ def __init__(self, command, cp_command, silent=False): """ Create a new RemoteExec instance. Arguments: command -- The ssh command string to use for remote execution cp_command -- The scp command string to use for copying files silent -- Should we log command status? """ self._command = command self._cp_command = cp_command self._logger = LogFactory() self._silent = silent self._our_node = os.uname()[1].lower() def _fixcmd(self, cmd): """Perform shell escapes on certain characters in the input cmd string.""" return re.sub("\'", "'\\''", cmd) def _cmd(self, args): """Given a list of arguments, return the string that will be run on the remote system.""" sysname = args[0] command = args[1] if sysname is None or sysname.lower() in [self._our_node, "localhost"]: ret = command else: ret = "%s %s '%s'" % (self._command, sysname, self._fixcmd(command)) return ret def _log(self, args): """Log a message.""" if not self._silent: self._logger.log(args) def _debug(self, args): """Log a message at the debug level.""" if not self._silent: self._logger.debug(args) def call_async(self, node, command, delegate=None): """ Run the given command on the given remote system and do not wait for it to complete. Arguments: node -- The remote machine to run on command -- The command to run, as a string delegate -- When the command completes, call the async_complete method on this object Returns the running process object. 
""" aproc = AsyncCmd(node, self._cmd([node, command]), delegate=delegate) aproc.start() return aproc def __call__(self, node, command, synchronous=True, verbose=2): """ Run the given command on the given remote system. If you call this class like a function, this is what gets called. It's approximately the same as a system() call on the remote machine. Arguments: node -- The remote machine to run on command -- The command to run, as a string synchronous -- Should we wait for the command to complete? verbose -- If 0, do not lo:g anything. If 1, log the command and its return code but not its output. If 2, additionally log command output. Returns a tuple of (return code, command output). """ rc = 0 result = None # pylint: disable=consider-using-with proc = Popen(self._cmd([node, command]), stdout=PIPE, stderr=PIPE, close_fds=True, shell=True) if not synchronous and proc.pid > 0 and not self._silent: aproc = AsyncCmd(node, command, proc=proc) aproc.start() return (rc, result) if proc.stdout: result = proc.stdout.readlines() proc.stdout.close() else: self._log("No stdout stream") rc = proc.wait() if verbose > 0: self._debug("cmd: target=%s, rc=%d: %s" % (node, rc, command)) result = convert2string(result) if proc.stderr: errors = proc.stderr.readlines() proc.stderr.close() for err in errors: self._debug("cmd: stderr: %s" % err) if verbose == 2: for line in result: self._debug("cmd: stdout: %s" % line) return (rc, result) def copy(self, source, target, silent=False): """ Perform a copy of the source file to the remote target. This function uses the cp_command provided when the RemoteExec object was created. Returns the return code of the cp_command. """ cmd = "%s '%s' '%s'" % (self._cp_command, source, target) rc = os.system(cmd) if not silent: self._debug("cmd: rc=%d: %s" % (rc, cmd)) return rc def exists_on_all(self, filename, hosts): """Return True if specified file exists on all specified hosts.""" for host in hosts: rc = self(host, "test -r %s" % filename) if rc != 0: return False return True class RemoteFactory: """A class for constructing a singleton instance of a RemoteExec object.""" # Class variables # -n: no stdin, -x: no X11, # -o ServerAliveInterval=5: disconnect after 3*5s if the server # stops responding command = ("ssh -l root -n -x -o ServerAliveInterval=5 " "-o ConnectTimeout=10 -o TCPKeepAlive=yes " "-o ServerAliveCountMax=3 ") # -B: batch mode, -q: no stats (quiet) cp_command = "scp -B -q" instance = None # pylint: disable=invalid-name def getInstance(self): """ Return the previously created instance of RemoteExec. If no instance exists, create one and then return that. 
""" if not RemoteFactory.instance: RemoteFactory.instance = RemoteExec(RemoteFactory.command, RemoteFactory.cp_command, False) return RemoteFactory.instance def enable_qarsh(self): """Enable the QA remote shell.""" # http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/ print("Using QARSH for connections to cluster nodes") RemoteFactory.command = "qarsh -t 300 -l root" RemoteFactory.cp_command = "qacp -q" diff --git a/python/pacemaker/_cts/scenarios.py b/python/pacemaker/_cts/scenarios.py index 0d7302e020..0da004ced1 100644 --- a/python/pacemaker/_cts/scenarios.py +++ b/python/pacemaker/_cts/scenarios.py @@ -1,423 +1,424 @@ """Test scenario classes for Pacemaker's Cluster Test Suite (CTS).""" __all__ = [ "AllOnce", "Boot", "BootCluster", "LeaveBooted", "RandomTests", "Sequence", ] __copyright__ = "Copyright 2000-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import re import time from pacemaker._cts.audits import ClusterAudit from pacemaker._cts.input import should_continue from pacemaker._cts.tests.ctstest import CTSTest from pacemaker._cts.watcher import LogWatcher + class ScenarioComponent: """ The base class for all scenario components. A scenario component is one single step in a scenario. Each component is basically just a setup and teardown method. """ def __init__(self, cm, env): """ Create a new ScenarioComponent instance. Arguments: cm -- A ClusterManager instance env -- An Environment instance """ # pylint: disable=invalid-name self._cm = cm self._env = env def is_applicable(self): """ Return True if this component is applicable in the given Environment. This method must be provided by all subclasses. """ raise NotImplementedError def setup(self): """ Set up the component, returning True on success. This method must be provided by all subclasses. """ raise NotImplementedError def teardown(self): """ Tear down the given component. This method must be provided by all subclasses. """ raise NotImplementedError class Scenario: """ The base class for scenarios. A scenario is an ordered list of ScenarioComponent objects. A scenario proceeds by setting up all its components in sequence, running a list of tests and audits, and then tearing down its components in reverse. """ def __init__(self, cm, components, audits, tests): """ Create a new Scenario instance. Arguments: cm -- A ClusterManager instance components -- A list of ScenarioComponents comprising this Scenario audits -- A list of ClusterAudits that will be performed as part of this Scenario tests -- A list of CTSTests that will be run """ # pylint: disable=invalid-name self.stats = { "success": 0, "failure": 0, "BadNews": 0, "skipped": 0 } self.tests = tests self._audits = audits self._bad_news = None self._cm = cm self._components = components for comp in components: if not issubclass(comp.__class__, ScenarioComponent): raise ValueError("Init value must be subclass of ScenarioComponent") for audit in audits: if not issubclass(audit.__class__, ClusterAudit): raise ValueError("Init value must be subclass of ClusterAudit") for test in tests: if not issubclass(test.__class__, CTSTest): raise ValueError("Init value must be a subclass of CTSTest") def is_applicable(self): """Return True if all ScenarioComponents are applicable.""" for comp in self._components: if not comp.is_applicable(): return False return True def setup(self): """ Set up the scenario, returning True on success. 
If setup fails at some point, tear down those components that did successfully set up. """ self._cm.prepare() self.audit() # Also detects remote/local log config self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"]) self.audit() self._cm.install_support() self._bad_news = LogWatcher(self._cm.env["LogFileName"], self._cm.templates.get_patterns("BadNews"), self._cm.env["nodes"], self._cm.env["LogWatcher"], "BadNews", 0) self._bad_news.set_watch() # Call after we've figured out what type of log watching to do in LogAudit j = 0 while j < len(self._components): if not self._components[j].setup(): # OOPS! We failed. Tear partial setups down. self.audit() self._cm.log("Tearing down partial setup") self.teardown(j) return False j += 1 self.audit() return True def teardown(self, n_components=None): """ Tear down the scenario in the reverse order it was set up. If n_components is not None, only tear down that many components. """ if n_components is None: n_components = len(self._components)-1 j = n_components while j >= 0: self._components[j].teardown() j -= 1 self.audit() self._cm.install_support("uninstall") def incr(self, name): """Increment the given stats key.""" if name not in self.stats: self.stats[name] = 0 self.stats[name] += 1 def run(self, iterations): """Run all tests in the scenario the given number of times.""" self._cm.oprofile_start() try: self._run_loop(iterations) self._cm.oprofile_stop() except: self._cm.oprofile_stop() raise def _run_loop(self, iterations): """Run all the tests the given number of times.""" raise NotImplementedError def run_test(self, test, testcount): """ Run the given test. testcount is the number of tests (including this one) that have been run across all iterations. """ nodechoice = self._cm.env.random_node() ret = True did_run = False self._cm.clear_instance_errors_to_ignore() choice = "(%s)" % nodechoice self._cm.log("Running test {:<22} {:<15} [{:>3}]".format(test.name, choice, testcount)) starttime = test.set_timer() if not test.setup(nodechoice): self._cm.log("Setup failed") ret = False else: did_run = True ret = test(nodechoice) if not test.teardown(nodechoice): self._cm.log("Teardown failed") if not should_continue(self._cm.env): raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice)) ret = False stoptime = time.time() self._cm.oprofile_save(testcount) elapsed_time = stoptime - starttime test_time = stoptime - test.get_timer() if "min_time" not in test.stats: test.stats["elapsed_time"] = elapsed_time test.stats["min_time"] = test_time test.stats["max_time"] = test_time else: test.stats["elapsed_time"] += elapsed_time if test_time < test.stats["min_time"]: test.stats["min_time"] = test_time if test_time > test.stats["max_time"]: test.stats["max_time"] = test_time if ret: self.incr("success") test.log_timer() else: self.incr("failure") self._cm.statall() did_run = True # Force the test count to be incremented anyway so test extraction works self.audit(test.errors_to_ignore) return did_run def summarize(self): """Output scenario results.""" self._cm.log("****************") self._cm.log("Overall Results:%r" % self.stats) self._cm.log("****************") stat_filter = { "calls": 0, "failure": 0, "skipped": 0, "auditfail": 0, } self._cm.log("Test Summary") for test in self.tests: for key in stat_filter: stat_filter[key] = test.stats[key] name = "Test %s:" % test.name self._cm.log("{:<25} {!r}".format(name, stat_filter)) self._cm.debug("Detailed Results") for test in self.tests: name = "Test %s:" % test.name self._cm.debug("{:<25} 
{!r}".format(name, test.stats)) self._cm.log("<<<<<<<<<<<<<<<< TESTS COMPLETED") def audit(self, local_ignore=None): """ Perform all scenario audits and log results. If there are too many failures, prompt the user to confirm that the scenario should continue running. """ errcount = 0 ignorelist = ["CTS:"] if local_ignore: ignorelist.extend(local_ignore) ignorelist.extend(self._cm.errors_to_ignore) ignorelist.extend(self._cm.instance_errors_to_ignore) # This makes sure everything is stabilized before starting... failed = 0 for audit in self._audits: if not audit(): self._cm.log("Audit %s FAILED." % audit.name) failed += 1 else: self._cm.debug("Audit %s passed." % audit.name) while errcount < 1000: match = None if self._bad_news: match = self._bad_news.look(0) if match: add_err = True for ignore in ignorelist: if add_err and re.search(ignore, match): add_err = False if add_err: self._cm.log("BadNews: %s" % match) self.incr("BadNews") errcount += 1 else: break else: print("Big problems") if not should_continue(self._cm.env): self._cm.log("Shutting down.") self.summarize() self.teardown() raise ValueError("Looks like we hit a BadNews jackpot!") if self._bad_news: self._bad_news.end() return failed class AllOnce(Scenario): """Every Test Once.""" def _run_loop(self, iterations): testcount = 1 for test in self.tests: self.run_test(test, testcount) testcount += 1 class RandomTests(Scenario): """Random Test Execution.""" def _run_loop(self, iterations): testcount = 1 while testcount <= iterations: test = self._cm.env.random_gen.choice(self.tests) self.run_test(test, testcount) testcount += 1 class Sequence(Scenario): """Named Tests in Sequence.""" def _run_loop(self, iterations): testcount = 1 while testcount <= iterations: for test in self.tests: self.run_test(test, testcount) testcount += 1 class Boot(Scenario): """Start the Cluster.""" def _run_loop(self, iterations): return class BootCluster(ScenarioComponent): """ Start the cluster manager on all nodes. Wait for each to come up before starting in order to account for the possibility that a given node might have been rebooted or crashed beforehand. """ def is_applicable(self): """Return whether this scenario is applicable.""" return True def setup(self): """Set up the component, returning True on success.""" self._cm.prepare() # Clear out the cobwebs ;-) self._cm.stopall(verbose=True, force=True) # Now start the Cluster Manager on all the nodes. self._cm.log("Starting Cluster Manager on all nodes.") return self._cm.startall(verbose=True, quick=True) def teardown(self): """Tear down the component.""" self._cm.log("Stopping Cluster Manager on all nodes") self._cm.stopall(verbose=True, force=False) class LeaveBooted(BootCluster): """Leave all nodes up when the scenario is complete.""" def teardown(self): """Tear down the component.""" self._cm.log("Leaving Cluster running on all nodes") diff --git a/python/pacemaker/_cts/test.py b/python/pacemaker/_cts/test.py index a2f0310884..4f94f50edd 100644 --- a/python/pacemaker/_cts/test.py +++ b/python/pacemaker/_cts/test.py @@ -1,594 +1,594 @@ """ A module providing base classes. These classes are used for defining regression tests and groups of regression tests. Everything exported here should be considered an abstract class that needs to be subclassed in order to do anything useful. Various functions will raise NotImplementedError if not overridden by a subclass. 
""" __copyright__ = "Copyright 2009-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+)" __all__ = ["Test", "Tests"] import io import os import re import shlex import signal import subprocess import sys import time from pacemaker._cts.errors import ExitCodeError, OutputFoundError, OutputNotFoundError, XmlValidationError from pacemaker._cts.process import pipe_communicate from pacemaker.buildoptions import BuildOptions from pacemaker.exitstatus import ExitStatus + def find_validator(rng_file): """ Return the command line used to validate XML output. If no validator is found, return None. """ if os.access("/usr/bin/xmllint", os.X_OK): if rng_file is None: return ["xmllint", "-"] return ["xmllint", "--relaxng", rng_file, "-"] return None def rng_directory(): """Return the directory containing RNG schema files.""" if "PCMK_schema_directory" in os.environ: return os.environ["PCMK_schema_directory"] if os.path.exists("%s/cts-fencing.in" % sys.path[0]): return "xml" return BuildOptions.SCHEMA_DIR class Pattern: """A class for checking log files for a given pattern.""" def __init__(self, pat, negative=False, regex=False): """ Create a new Pattern instance. Arguments: pat -- The string to search for negative -- If True, pat must not be found in any input regex -- If True, pat is a regex and not a substring """ self._pat = pat self.negative = negative self.regex = regex def __str__(self): return self._pat def match(self, line): """Return True if this pattern is found in the given line.""" if self.regex: return re.search(self._pat, line) is not None return self._pat in line class Test: """ The base class for a single regression test. A single regression test may still run multiple commands as part of its execution. """ def __init__(self, name, description, **kwargs): """ Create a new Test instance. This method must be provided by all subclasses, which must call Test.__init__ first. Arguments: description -- A user-readable description of the test, helpful in identifying what test is running or has failed. name -- The name of the test. Command line tools use this attribute to allow running only tests with the exact name, or tests whose name matches a given pattern. This should be unique among all tests. Keyword arguments: force_wait -- logdir -- The base directory under which to create a directory to store output and temporary data. timeout -- How long to wait for the test to complete. verbose -- Whether to print additional information, including verbose command output and daemon log files. """ self.description = description self.executed = False self.name = name self.force_wait = kwargs.get("force_wait", False) self.logdir = kwargs.get("logdir", "/tmp") self.timeout = kwargs.get("timeout", 2) self.verbose = kwargs.get("verbose", False) self._cmds = [] self._patterns = [] self._daemon_location = None self._daemon_output = "" self._daemon_process = None self._result_exitcode = ExitStatus.OK self._result_txt = "" ### ### PROPERTIES ### @property def exitcode(self): """ Return the final exitcode of the Test. If all commands pass, this property will be ExitStatus.OK. Otherwise, this property will be the exitcode of the first command to fail. """ return self._result_exitcode @exitcode.setter def exitcode(self, value): self._result_exitcode = value @property def logpath(self): """ Return the path to the log for whatever daemon is being tested. 
Note that this requires all subclasses to set self._daemon_location before accessing this property or an exception will be raised. """ return os.path.join(self.logdir, "%s.log" % self._daemon_location) ### ### PRIVATE METHODS ### def _kill_daemons(self): """Kill any running daemons in preparation for executing the test.""" raise NotImplementedError("_kill_daemons not provided by subclass") def _match_log_patterns(self): """ Check test output for expected patterns. Set self.exitcode and self._result_txt as appropriate. Not all subclasses will need to do this. """ if len(self._patterns) == 0: return n_failed_matches = 0 n_negative_matches = 0 output = self._daemon_output.split("\n") for pat in self._patterns: positive_match = False for line in output: if pat.match(line): if pat.negative: n_negative_matches += 1 if self.verbose: print("This pattern should not have matched = '%s'" % pat) break positive_match = True break if not pat.negative and not positive_match: n_failed_matches += 1 print("Pattern Not Matched = '%s'" % pat) if n_failed_matches > 0 or n_negative_matches > 0: msg = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." self._result_txt = msg % (self.name, n_failed_matches, len(self._patterns), n_negative_matches) self.exitcode = ExitStatus.ERROR - def _new_cmd(self, cmd, args, exitcode, **kwargs): """ Add a command to be executed as part of this test. Arguments: cmd -- The program to run. args -- Command line arguments to pass to cmd, as a string. exitcode -- The expected exit code of cmd. This can be used to run a command that is expected to fail. Keyword arguments: stdout_match -- If not None, a string that is expected to be present in the stdout of cmd. This can be a regular expression. no_wait -- Do not wait for cmd to complete. stdout_negative_match -- If not None, a string that is expected to be missing in the stdout of cmd. This can be a regular expression. kill -- A command to be run after cmd, typically in order to kill a failed process. This should be the entire command line including arguments as a single string. validate -- If True, the output of cmd will be passed to xmllint for validation. If validation fails, XmlValidationError will be raised. check_rng -- If True and validate is True, command output will additionally be checked against the api-result.rng file. check_stderr -- If True, the stderr of cmd will be included in output. 
env -- If not None, variables to set in the environment """ self._cmds.append( { "args": args, "check_rng": kwargs.get("check_rng", True), "check_stderr": kwargs.get("check_stderr", True), "cmd": cmd, "expected_exitcode": exitcode, "kill": kwargs.get("kill", None), "no_wait": kwargs.get("no_wait", False), "stdout_match": kwargs.get("stdout_match", None), "stdout_negative_match": kwargs.get("stdout_negative_match", None), "validate": kwargs.get("validate", True), "env": kwargs.get("env", None), } ) def _start_daemons(self): """Start any necessary daemons in preparation for executing the test.""" raise NotImplementedError("_start_daemons not provided by subclass") ### ### PUBLIC METHODS ### def add_cmd(self, cmd, args, validate=True, check_rng=True, check_stderr=True, env=None): """Add a simple command to be executed as part of this test.""" self._new_cmd(cmd, args, ExitStatus.OK, validate=validate, check_rng=check_rng, check_stderr=check_stderr, env=env) def add_cmd_and_kill(self, cmd, args, kill_proc): """Add a command and system command to be executed as part of this test.""" self._new_cmd(cmd, args, ExitStatus.OK, kill=kill_proc) def add_cmd_check_stdout(self, cmd, args, match, no_match=None, env=None): """Add a simple command with expected output to be executed as part of this test.""" self._new_cmd(cmd, args, ExitStatus.OK, stdout_match=match, stdout_negative_match=no_match, env=env) def add_cmd_expected_fail(self, cmd, args, exitcode=ExitStatus.ERROR): """Add a command that is expected to fail to be executed as part of this test.""" self._new_cmd(cmd, args, exitcode) def add_cmd_no_wait(self, cmd, args): """Add a simple command to be executed (without waiting) as part of this test.""" self._new_cmd(cmd, args, ExitStatus.OK, no_wait=True) def add_log_pattern(self, pattern, negative=False, regex=False): """Add a pattern that should appear in the test's logs.""" self._patterns.append(Pattern(pattern, negative=negative, regex=regex)) def _signal_dict(self): """Return a dictionary mapping signal numbers to their names.""" # FIXME: When we support python >= 3.5, this function can be replaced with: # signal.Signals(self.daemon_process.returncode).name return { getattr(signal, _signame): _signame for _signame in dir(signal) if _signame.startswith("SIG") and not _signame.startswith("SIG_") } def clean_environment(self): """Clean up the host after executing a test.""" if self._daemon_process: if self._daemon_process.poll() is None: self._daemon_process.terminate() self._daemon_process.wait() else: rc = self._daemon_process.returncode signame = self._signal_dict().get(-rc, "RET=%s" % rc) msg = "FAILURE - '%s' failed. %s abnormally exited during test (%s)." self._result_txt = msg % (self.name, self._daemon_location, signame) self.exitcode = ExitStatus.ERROR self._daemon_process = None self._daemon_output = "" # Read the log with ISO-8859-1 rather than the default utf-8 encoding:
# if e.g. memory corruption makes fenced output arbitrary 8-bit values, utf-8 # decoding would error out, while that output is still interesting for # debugging and we'd still like the regression test to run the full set of # test cases. with open(self.logpath, 'rt', encoding="ISO-8859-1") as logfile: for line in logfile.readlines(): self._daemon_output += line if self.verbose: print("Daemon Output Start") print(self._daemon_output) print("Daemon Output End") def print_result(self, filler): """Print the result of the last test execution.""" print("%s%s" % (filler, self._result_txt)) def run(self): """Execute this test.""" i = 1 self.start_environment() if self.verbose: print("\n--- START TEST - %s" % self.name) self._result_txt = "SUCCESS - '%s'" % (self.name) self.exitcode = ExitStatus.OK for cmd in self._cmds: try: self.run_cmd(cmd) except ExitCodeError as e: print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode'])) self.set_error(i, cmd) break except OutputNotFoundError as e: print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e)) self.set_error(i, cmd) break except OutputFoundError as e: print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_negative_match'], e)) self.set_error(i, cmd) break except XmlValidationError as e: print("Step %d FAILED - xmllint failed: %s" % (i, e)) self.set_error(i, cmd) break if self.verbose: print("Step %d SUCCESS" % (i)) i += 1 self.clean_environment() if self.exitcode == ExitStatus.OK: self._match_log_patterns() print(self._result_txt) if self.verbose: print("--- END TEST - %s\n" % self.name) self.executed = True def run_cmd(self, args): """Execute a command as part of this test.""" cmd = shlex.split(args['args']) cmd.insert(0, args['cmd']) if self.verbose: print("\n\nRunning: %s" % " ".join(cmd)) # FIXME: Using "with" here breaks fencing merge tests. # pylint: disable=consider-using-with if args['env']: new_env = os.environ.copy() new_env.update(args['env']) test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=new_env) else: test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if args['kill']: if self.verbose: print("Also running: %s" % args['kill']) ### Typically, the kill argument is used to detect some sort of ### failure. Without yielding for a few seconds here, the process ### launched earlier that is listening for the failure may not have ### time to connect to pacemaker-execd. 
time.sleep(2) subprocess.Popen(shlex.split(args['kill'])) if not args['no_wait']: test.wait() else: return ExitStatus.OK output = pipe_communicate(test, check_stderr=args['check_stderr']) if self.verbose: print(output) if test.returncode != args['expected_exitcode']: raise ExitCodeError(test.returncode) if args['stdout_match'] is not None and \ re.search(args['stdout_match'], output) is None: raise OutputNotFoundError(output) if args['stdout_negative_match'] is not None and \ re.search(args['stdout_negative_match'], output) is not None: raise OutputFoundError(output) if args['validate']: if args['check_rng']: rng_file = "%s/api/api-result.rng" % rng_directory() else: rng_file = None cmd = find_validator(rng_file) if not cmd: raise XmlValidationError("Could not find validator for %s" % rng_file) if self.verbose: print("\nRunning: %s" % " ".join(cmd)) with subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as validator: output = pipe_communicate(validator, check_stderr=True, stdin=output) if self.verbose: print(output) if validator.returncode != 0: raise XmlValidationError(output) return ExitStatus.OK def set_error(self, step, cmd): """Record failure of this test.""" msg = "FAILURE - '%s' failed at step %d. Command: %s %s" self._result_txt = msg % (self.name, step, cmd['cmd'], cmd['args']) self.exitcode = ExitStatus.ERROR def start_environment(self): """Prepare the host for executing a test.""" if os.path.exists(self.logpath): os.remove(self.logpath) self._kill_daemons() self._start_daemons() logfile = None init_time = time.time() update_time = init_time while True: # FIXME: Eventually use 'with' here, which seems complicated given # everything happens in a loop. # pylint: disable=consider-using-with time.sleep(0.1) if not self.force_wait and logfile is None \ and os.path.exists(self.logpath): logfile = io.open(self.logpath, 'rt', encoding="ISO-8859-1") if not self.force_wait and logfile is not None: for line in logfile.readlines(): if "successfully started" in line: return now = time.time() if self.timeout > 0 and (now - init_time) >= self.timeout: if not self.force_wait: print("\tDaemon %s doesn't seem to have been initialized within %fs." "\n\tConsider specifying a longer '--timeout' value." %(self._daemon_location, self.timeout)) return if self.verbose and (now - update_time) >= 5: print("Waiting for %s to be initialized: %fs ..." %(self._daemon_location, now - init_time)) update_time = now class Tests: """The base class for a collection of regression tests.""" def __init__(self, **kwargs): """ Create a new Tests instance. This method must be provided by all subclasses, which must call Tests.__init__ first. Keyword arguments: force_wait -- logdir -- The base directory under which to create a directory to store output and temporary data. timeout -- How long to wait for the test to complete. verbose -- Whether to print additional information, including verbose command output and daemon log files. 
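A typical driver, sketched under the assumption that MyTests is a concrete subclass that populates self._tests:

    tests = MyTests(verbose=True, timeout=10)
    tests.run_tests()
    tests.print_results()
    tests.exit()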
""" self.force_wait = kwargs.get("force_wait", False) self.logdir = kwargs.get("logdir", "/tmp") self.timeout = kwargs.get("timeout", 2) self.verbose = kwargs.get("verbose", False) self._tests = [] def exit(self): """Exit (with error status code if any test failed).""" for test in self._tests: if not test.executed: continue if test.exitcode != ExitStatus.OK: sys.exit(ExitStatus.ERROR) sys.exit(ExitStatus.OK) def print_list(self): """List all registered tests.""" print("\n==== %d TESTS FOUND ====" % len(self._tests)) print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")) print("%35s - %s" % ("--------------------", "--------------------")) for test in self._tests: print("%35s - %s" % (test.name, test.description)) print("==== END OF LIST ====\n") def print_results(self): """Print summary of results of executed tests.""" failures = 0 success = 0 print("\n\n======= FINAL RESULTS ==========") print("\n--- FAILURE RESULTS:") for test in self._tests: if not test.executed: continue if test.exitcode != ExitStatus.OK: failures += 1 test.print_result(" ") else: success += 1 if failures == 0: print(" None") print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)) def run_single(self, name): """Run a single named test.""" for test in self._tests: if test.name == name: test.run() break def run_tests(self): """Run all tests.""" for test in self._tests: test.run() def run_tests_matching(self, pattern): """Run all tests whose name matches a pattern.""" for test in self._tests: if test.name.count(pattern) != 0: test.run() diff --git a/python/pacemaker/_cts/tests/__init__.py b/python/pacemaker/_cts/tests/__init__.py index 1d6179ee68..27bef12fdf 100644 --- a/python/pacemaker/_cts/tests/__init__.py +++ b/python/pacemaker/_cts/tests/__init__.py @@ -1,86 +1,87 @@ """Test classes for the `pacemaker._cts` package.""" __copyright__ = "Copyright 2023-2024 the Pacemaker project contributors" __license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)" from pacemaker._cts.tests.componentfail import ComponentFail from pacemaker._cts.tests.ctstest import CTSTest from pacemaker._cts.tests.fliptest import FlipTest from pacemaker._cts.tests.maintenancemode import MaintenanceMode from pacemaker._cts.tests.nearquorumpointtest import NearQuorumPointTest from pacemaker._cts.tests.partialstart import PartialStart from pacemaker._cts.tests.reattach import Reattach from pacemaker._cts.tests.restartonebyone import RestartOnebyOne from pacemaker._cts.tests.resourcerecover import ResourceRecover from pacemaker._cts.tests.restarttest import RestartTest from pacemaker._cts.tests.resynccib import ResyncCIB from pacemaker._cts.tests.remotebasic import RemoteBasic from pacemaker._cts.tests.remotedriver import RemoteDriver from pacemaker._cts.tests.remotemigrate import RemoteMigrate from pacemaker._cts.tests.remoterscfailure import RemoteRscFailure from pacemaker._cts.tests.remotestonithd import RemoteStonithd from pacemaker._cts.tests.simulstart import SimulStart from pacemaker._cts.tests.simulstop import SimulStop from pacemaker._cts.tests.simulstartlite import SimulStartLite from pacemaker._cts.tests.simulstoplite import SimulStopLite from pacemaker._cts.tests.splitbraintest import SplitBrainTest from pacemaker._cts.tests.standbytest import StandbyTest from pacemaker._cts.tests.starttest import StartTest from pacemaker._cts.tests.startonebyone import StartOnebyOne from pacemaker._cts.tests.stonithdtest import StonithdTest from pacemaker._cts.tests.stoponebyone import StopOnebyOne from 
pacemaker._cts.tests.stoptest import StopTest + def test_list(cm, audits): """ Return a list of runnable test class objects. These are objects that are enabled and whose is_applicable methods return True. """ # cm is a reasonable name here. # pylint: disable=invalid-name # A list of all enabled test classes, in the order that they should # be run (if we're doing --once). There are various other ways of # specifying which tests should be run, in which case the order here # will not matter. # # Note that just because a test is listed here doesn't mean it will # definitely be run - is_applicable is still taken into consideration. # Also note that there are other tests that are excluded from this # list for various reasons. enabled_test_classes = [ FlipTest, RestartTest, StonithdTest, StartOnebyOne, SimulStart, SimulStop, StopOnebyOne, RestartOnebyOne, PartialStart, StandbyTest, MaintenanceMode, ResourceRecover, ComponentFail, SplitBrainTest, Reattach, ResyncCIB, NearQuorumPointTest, RemoteBasic, RemoteStonithd, RemoteMigrate, RemoteRscFailure, ] result = [] for testclass in enabled_test_classes: bound_test = testclass(cm) if bound_test.is_applicable(): bound_test.audits = audits result.append(bound_test) return result diff --git a/python/pacemaker/_cts/timer.py b/python/pacemaker/_cts/timer.py index b3438a5b39..e31f18b3b3 100644 --- a/python/pacemaker/_cts/timer.py +++ b/python/pacemaker/_cts/timer.py @@ -1,62 +1,63 @@ """Timer-related utilities for CTS.""" __all__ = ["Timer"] __copyright__ = "Copyright 2000-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import time + class Timer: """ A class for measuring the runtime of some task. A Timer may be used manually or as a context manager, like so: with Timer(logger, "SomeTest", "SomeTimer"): ... A Timer runs from when start() is called until the timer is deleted or reset() is called. There is no explicit stop method. """ def __init__(self, logger, test_name, timer_name): """ Create a new Timer instance. 
Arguments: logger -- A Logger instance that can be used to record when the timer stopped test_name -- The name of the test this timer is being run for timer_name -- The name of this timer """ self._logger = logger self._start_time = None self._test_name = test_name self._timer_name = timer_name def __enter__(self): """When used as a context manager, start the timer.""" self.start() return self def __exit__(self, *args): """When used as a context manager, log the elapsed time.""" self._logger.debug("%s:%s runtime: %.2f" % (self._test_name, self._timer_name, self.elapsed)) def reset(self): """Restart the timer.""" self.start() def start(self): """Start the timer.""" self._start_time = time.time() @property def start_time(self): """Return when the timer started.""" return self._start_time @property def elapsed(self): """Return how long the timer has been running for.""" return time.time() - self._start_time diff --git a/python/pacemaker/_cts/watcher.py b/python/pacemaker/_cts/watcher.py index 250d0ee0ac..da3b5f12a7 100644 --- a/python/pacemaker/_cts/watcher.py +++ b/python/pacemaker/_cts/watcher.py @@ -1,550 +1,555 @@ """Log searching classes for Pacemaker's Cluster Test Suite (CTS).""" __all__ = ["LogKind", "LogWatcher"] __copyright__ = "Copyright 2014-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" from enum import Enum, unique import re import time import threading from pacemaker.buildoptions import BuildOptions from pacemaker._cts.logging import LogFactory from pacemaker._cts.remote import RemoteFactory LOG_WATCHER_BIN = "%s/cts-log-watcher" % BuildOptions.DAEMON_DIR + @unique class LogKind(Enum): """The various kinds of log files that can be watched.""" ANY = 0 FILE = 1 REMOTE_FILE = 2 JOURNAL = 3 def __str__(self): """Return a printable string for a LogKind value.""" if self.value == 0: return "any" if self.value == 1: return "combined syslog" if self.value == 2: return "remote" return "journal" + class SearchObj: """ The base class for various kinds of log watchers. Log-specific watchers need to be built on top of this one. """ def __init__(self, filename, host=None, name=None): """ Create a new SearchObj instance. Arguments: filename -- The log to watch host -- The cluster node on which to watch the log name -- A unique name to use when logging about this watch """ self.cache = [] self.filename = filename self.limit = None self.logger = LogFactory() self.name = name self.offset = "EOF" self.rsh = RemoteFactory().getInstance() if host: self.host = host else: self.host = "localhost" def __str__(self): if self.host: return "%s:%s" % (self.host, self.filename) return self.filename def log(self, args): """Log a message.""" message = "lw: %s: %s" % (self, args) self.logger.log(message) def debug(self, args): """Log a debug message.""" message = "lw: %s: %s" % (self, args) self.logger.debug(message) def harvest(self, delegate=None): """Collect lines from a log, optionally calling delegate when complete.""" async_task = self.harvest_async(delegate) async_task.join() def harvest_async(self, delegate=None): """ Collect lines from a log asynchronously. Optionally, also call delegate when complete. This method must be implemented by all subclasses. """ raise NotImplementedError def end(self): """ Mark that a log is done being watched. This function also resets internal data structures to the beginning of the file. Subsequent watches will therefore start from the beginning again. 
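A sketch of the usual lifecycle (set_end() is provided by the subclasses below; harvest() is defined above on this class):

    obj.set_end()   # record where the log currently ends
    obj.harvest()   # collect lines up to that limit
    obj.end()       # drop the limit so a later watch starts over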
""" self.debug("Unsetting the limit") self.limit = None + class FileObj(SearchObj): """A specialized SearchObj subclass for watching log files.""" def __init__(self, filename, host=None, name=None): """ Create a new FileObj instance. Arguments: filename -- The file to watch host -- The cluster node on which to watch the file name -- A unique name to use when logging about this watch """ SearchObj.__init__(self, filename, host, name) self._delegate = None self.harvest() def async_complete(self, pid, returncode, out, err): """ Handle completion of an asynchronous log file read. This function saves the output from that read for look()/look_for_all() to process and records the current position in the journal. Future reads will pick back up from that spot. Arguments: pid -- The ID of the process that did the read returncode -- The return code of the process that did the read out -- stdout from the file read err -- stderr from the file read """ for line in out: match = re.search(r"^CTSwatcher:Last read: (\d+)", line) if match: self.offset = match.group(1) self.debug("Got %d lines, new offset: %s %r" % (len(out), self.offset, self._delegate)) elif re.search(r"^CTSwatcher:.*truncated", line): self.log(line) elif re.search(r"^CTSwatcher:", line): self.debug("Got control line: %s" % line) else: self.cache.append(line) if self._delegate: self._delegate.async_complete(pid, returncode, self.cache, err) def harvest_async(self, delegate=None): """ Collect lines from the log file on a single host asynchronously. Optionally, call delegate when complete. This can be called repeatedly, reading a chunk each time or until the end of the log file is hit. """ self._delegate = delegate self.cache = [] if self.limit and (self.offset == "EOF" or int(self.offset) > self.limit): if self._delegate: self._delegate.async_complete(-1, -1, [], []) return None return self.rsh.call_async(self.host, "%s -t %s -p CTSwatcher: -l 200 -f %s -o %s" % (LOG_WATCHER_BIN, self.name, self.filename, self.offset), delegate=self) def set_end(self): """ Internally record where we expect to find the end of a log file. Calls to harvest from the log file will not go any farther than what this function records. """ if self.limit: return # pylint: disable=not-callable (_, lines) = self.rsh(self.host, "%s -t %s -p CTSwatcher: -l 2 -f %s -o %s" % (LOG_WATCHER_BIN, self.name, self.filename, "EOF"), verbose=0) for line in lines: match = re.search(r"^CTSwatcher:Last read: (\d+)", line) if match: self.limit = int(match.group(1)) self.debug("Set limit to: %d" % self.limit) + class JournalObj(SearchObj): """A specialized SearchObj subclass for watching systemd journals.""" def __init__(self, host=None, name=None): """ Create a new JournalObj instance. Arguments: host -- The cluster node on which to watch the journal name -- A unique name to use when logging about this watch """ SearchObj.__init__(self, name, host, name) self._delegate = None self._hit_limit = False self.harvest() def async_complete(self, pid, returncode, out, err): """ Handle completion of an asynchronous journal read. This function saves the output from that read for look()/look_for_all() to process and records the current position in the journal. Future reads will pick back up from that spot. 
+
class FileObj(SearchObj):
    """A specialized SearchObj subclass for watching log files."""

    def __init__(self, filename, host=None, name=None):
        """
        Create a new FileObj instance.

        Arguments:

        filename -- The file to watch
        host     -- The cluster node on which to watch the file
        name     -- A unique name to use when logging about this watch
        """
        SearchObj.__init__(self, filename, host, name)
        self._delegate = None

        self.harvest()

    def async_complete(self, pid, returncode, out, err):
        """
        Handle completion of an asynchronous log file read.

        This function saves the output from that read for look()/look_for_all()
        to process and records the current position in the log file. Future
        reads will pick back up from that spot.

        Arguments:

        pid        -- The ID of the process that did the read
        returncode -- The return code of the process that did the read
        out        -- stdout from the file read
        err        -- stderr from the file read
        """
        for line in out:
            match = re.search(r"^CTSwatcher:Last read: (\d+)", line)

            if match:
                self.offset = match.group(1)
                self.debug("Got %d lines, new offset: %s %r" % (len(out), self.offset, self._delegate))
            elif re.search(r"^CTSwatcher:.*truncated", line):
                self.log(line)
            elif re.search(r"^CTSwatcher:", line):
                self.debug("Got control line: %s" % line)
            else:
                self.cache.append(line)

        if self._delegate:
            self._delegate.async_complete(pid, returncode, self.cache, err)

    def harvest_async(self, delegate=None):
        """
        Collect lines from the log file on a single host asynchronously.

        Optionally, call delegate when complete. This can be called
        repeatedly, reading a chunk each time or until the end of the log
        file is hit.
        """
        self._delegate = delegate
        self.cache = []

        if self.limit and (self.offset == "EOF" or int(self.offset) > self.limit):
            if self._delegate:
                self._delegate.async_complete(-1, -1, [], [])

            return None

        return self.rsh.call_async(self.host,
                                   "%s -t %s -p CTSwatcher: -l 200 -f %s -o %s" % (LOG_WATCHER_BIN, self.name, self.filename, self.offset),
                                   delegate=self)

    def set_end(self):
        """
        Internally record where we expect to find the end of a log file.

        Calls to harvest from the log file will not go any farther than
        what this function records.
        """
        if self.limit:
            return

        # pylint: disable=not-callable
        (_, lines) = self.rsh(self.host,
                              "%s -t %s -p CTSwatcher: -l 2 -f %s -o %s" % (LOG_WATCHER_BIN, self.name, self.filename, "EOF"),
                              verbose=0)

        for line in lines:
            match = re.search(r"^CTSwatcher:Last read: (\d+)", line)

            if match:
                self.limit = int(match.group(1))
                self.debug("Set limit to: %d" % self.limit)
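# FileObj.async_complete() above demultiplexes cts-log-watcher output by
# prefix: "CTSwatcher:Last read: N" carries the new offset, other
# "CTSwatcher:" lines are control messages, and everything else is log
# content. The sample output below is made up, but the regexes are the ones
# used above.

sample_out = [
    "CTSwatcher:Last read: 4096",
    "CTSwatcher:log truncated",
    "Jan 01 00:00:00 node1 pacemakerd[123]: notice: Starting",
]

for sample_line in sample_out:
    m = re.search(r"^CTSwatcher:Last read: (\d+)", sample_line)
    if m:
        print("new offset: %s" % m.group(1))
    elif re.search(r"^CTSwatcher:.*truncated", sample_line):
        print("log was truncated: %s" % sample_line)
    elif re.search(r"^CTSwatcher:", sample_line):
        print("control line: %s" % sample_line)
    else:
        print("log line: %s" % sample_line)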
+
class JournalObj(SearchObj):
    """A specialized SearchObj subclass for watching systemd journals."""

    def __init__(self, host=None, name=None):
        """
        Create a new JournalObj instance.

        Arguments:

        host -- The cluster node on which to watch the journal
        name -- A unique name to use when logging about this watch
        """
        SearchObj.__init__(self, name, host, name)
        self._delegate = None
        self._hit_limit = False

        self.harvest()

    def async_complete(self, pid, returncode, out, err):
        """
        Handle completion of an asynchronous journal read.

        This function saves the output from that read for look()/look_for_all()
        to process and records the current position in the journal. Future
        reads will pick back up from that spot.

        Arguments:

        pid        -- The ID of the process that did the journal read
        returncode -- The return code of the process that did the journal read
        out        -- stdout from the journal read
        err        -- stderr from the journal read
        """
        found_cursor = False
        for line in out:
            match = re.search(r"^-- cursor: ([^.]+)", line)

            if match:
                found_cursor = True
                self.offset = match.group(1).strip()
                self.debug("Got %d lines, new cursor: %s" % (len(out), self.offset))
            else:
                self.cache.append(line)

        if self.limit and not found_cursor:
            self._hit_limit = True
            self.debug("Got %d lines but no cursor: %s" % (len(out), self.offset))

            # Get the current cursor
            # pylint: disable=not-callable
            (_, out) = self.rsh(self.host, "journalctl -q -n 0 --show-cursor", verbose=0)
            for line in out:
                match = re.search(r"^-- cursor: ([^.]+)", line)

                if match:
                    self.offset = match.group(1).strip()
                    self.debug("Got %d lines, new cursor: %s" % (len(out), self.offset))
                else:
                    self.log("Not a new cursor: %s" % line)
                    self.cache.append(line)

        if self._delegate:
            self._delegate.async_complete(pid, returncode, self.cache, err)

    def harvest_async(self, delegate=None):
        """
        Collect lines from the journal on a single host asynchronously.

        Optionally, call delegate when complete. This can be called
        repeatedly, reading a chunk each time or until the end of the
        journal is hit.
        """
        self._delegate = delegate
        self.cache = []

        # Use --lines to prevent journalctl from overflowing the Popen input buffer
        if self.limit and self._hit_limit:
            return None

        if self.offset == "EOF":
            command = "journalctl -q -n 0 --show-cursor"
        elif self.limit:
            command = "journalctl -q --after-cursor='%s' --until '%s' --lines=200 --show-cursor" % (self.offset, self.limit)
        else:
            command = "journalctl -q --after-cursor='%s' --lines=200 --show-cursor" % (self.offset)

        return self.rsh.call_async(self.host, command, delegate=self)

    def set_end(self):
        """
        Internally record where we expect to find the end of a host's journal.

        Calls to harvest from the journal will not go any farther than
        what this function records.
        """
        if self.limit:
            return

        self._hit_limit = False
        # pylint: disable=not-callable
        (rc, lines) = self.rsh(self.host, "date +'%Y-%m-%d %H:%M:%S'", verbose=0)

        if rc == 0 and len(lines) == 1:
            self.limit = lines[0].strip()
            self.debug("Set limit to: %s" % self.limit)
        else:
            self.debug("Unable to set limit for %s because date returned %d lines with status %d"
                       % (self.host, len(lines), rc))
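# JournalObj pages through the journal with --show-cursor and --after-cursor,
# exactly as built in harvest_async() above. This sketch shows the command
# progression; the cursor value is made up, since real journal cursors are
# opaque strings that the watcher only ever echoes back to journalctl.

offset = "EOF"

# First harvest: just learn the cursor at the current end of the journal.
command = "journalctl -q -n 0 --show-cursor"
print(command)

# journalctl's final output line looks like "-- cursor: s=0123abcd", which
# async_complete() above captures with r"^-- cursor: ([^.]+)". Subsequent
# harvests page forward from that cursor, 200 lines at a time; if set_end()
# recorded a limit, --until caps the read at that timestamp.
offset = "s=0123abcd"  # hypothetical cursor value
command = "journalctl -q --after-cursor='%s' --lines=200 --show-cursor" % offset
print(command)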
+
class LogWatcher:
    """
    Watch a single log file or journal across multiple hosts.

    Instances of this class look for lines that match given regular
    expressions.

    The way you use this class is as follows:
        - Construct a LogWatcher object
        - Call set_watch() when you want to start watching the log
        - Call look() to scan the log looking for the patterns
    """

    def __init__(self, log, regexes, hosts, kind=LogKind.ANY, name="Anon",
                 timeout=10, silent=False):
        """
        Create a new LogWatcher instance.

        Arguments:

        log     -- The log file to watch
        regexes -- A list of regular expressions to match against the log
        hosts   -- A list of cluster nodes on which to watch the log
        kind    -- What type of log is this object watching?
        name    -- A unique name to use when logging about this watch
        timeout -- Default number of seconds to watch a log file at a time;
                   this can be overridden by the timeout= parameter to
                   self.look on an as-needed basis
        silent  -- If False, log extra information
        """
        self.filename = log
        self.hosts = hosts
        self.kind = kind
        self.name = name
        self.regexes = regexes
        self.unmatched = None
        self.whichmatch = -1

        self._cache_lock = threading.Lock()
        self._file_list = []
        self._line_cache = []
        self._logger = LogFactory()
        self._timeout = int(timeout)

        # Validate our arguments. Better sooner than later ;-)
        for regex in regexes:
            re.compile(regex)

        if not self.hosts:
            raise ValueError("LogWatcher requires hosts argument")

        if not self.filename:
            raise ValueError("LogWatcher requires log argument")

        if not silent:
            for regex in self.regexes:
                self._debug("Looking for regex: %s" % regex)

    def _debug(self, args):
        """Log a debug message."""
        message = "lw: %s: %s" % (self.name, args)
        self._logger.debug(message)

    def set_watch(self):
        """Mark the place to start watching the log from."""
        if self.kind == LogKind.REMOTE_FILE:
            for node in self.hosts:
                self._file_list.append(FileObj(self.filename, node, self.name))
        elif self.kind == LogKind.JOURNAL:
            for node in self.hosts:
                self._file_list.append(JournalObj(node, self.name))
        else:
            self._file_list.append(FileObj(self.filename))

    def async_complete(self, pid, returncode, out, err):
        """
        Handle completion of an asynchronous log file read.

        This function saves the output from that read for look()/look_for_all()
        to process and records the current position. Future reads will pick
        back up from that spot.

        Arguments:

        pid        -- The ID of the process that did the read
        returncode -- The return code of the process that did the read
        out        -- stdout from the file read
        err        -- stderr from the file read
        """
        # It's not clear to me whether this function ever gets called as
        # delegate somewhere, which is what would pass returncode and err
        # as parameters. Just disable the warning for now.
        # pylint: disable=unused-argument

        # TODO: Probably need a lock for updating self._line_cache
        self._logger.debug("%s: Got %d lines from %d (total %d)" % (self.name, len(out), pid, len(self._line_cache)))

        if out:
            with self._cache_lock:
                self._line_cache.extend(out)

    def __get_lines(self):
        """Iterate over all watched log files and collect new lines from each."""
        if not self._file_list:
            raise ValueError("No sources to read from")

        pending = []

        for f in self._file_list:
            t = f.harvest_async(self)
            if t:
                pending.append(t)

        for t in pending:
            t.join(60.0)
            if t.is_alive():
                self._logger.log("%s: Aborting after 60s waiting for %r logging commands" % (self.name, t))
                return

    def end(self):
        """
        Mark that a log is done being watched.

        This function also resets internal data structures to the beginning
        of the file. Subsequent watches will therefore start from the
        beginning again.
        """
        for f in self._file_list:
            f.end()

    def look(self, timeout=None):
        """
        Examine the log looking for the regexes in this object.

        It starts looking from the place marked by set_watch(), continuing
        through the file in the fashion of `tail -f`. It properly recovers
        from log file truncation but not from removing and recreating the
        log.

        Arguments:

        timeout -- Number of seconds to watch the log file; defaults to the
                   timeout given when this object was created

        Returns the first line which matches any regex
        """
        if not timeout:
            timeout = self._timeout

        lines = 0
        begin = time.time()
        end = begin + timeout + 1

        if not self.regexes:
            self._debug("Nothing to look for")
            return None

        if timeout == 0:
            for f in self._file_list:
                f.set_end()

        while True:
            if self._line_cache:
                lines += 1

                with self._cache_lock:
                    line = self._line_cache[0]
                    self._line_cache.remove(line)

                which = -1

                if re.search("CTS:", line):
                    continue

                for regex in self.regexes:
                    which += 1

                    matchobj = re.search(regex, line)

                    if matchobj:
                        self.whichmatch = which
                        self._debug("Matched: %s" % line)
                        return line

            elif timeout > 0 and end < time.time():
                timeout = 0
                for f in self._file_list:
                    f.set_end()

            else:
                self.__get_lines()

                if not self._line_cache and end < time.time():
                    self._debug("Single search terminated: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), lines))
                    return None

                self._debug("Waiting: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), len(self._line_cache)))
                time.sleep(1)

        self._debug("How did we get here")
        return None

    def look_for_all(self, allow_multiple_matches=False, silent=False):
        """
        Like look(), but looks for matches for multiple regexes.

        This function returns when the timeout is reached or all regexes were
        matched. As a side effect, self.unmatched will contain regexes that
        were not matched. This can be inspected by the caller.

        Arguments:

        allow_multiple_matches -- If True, allow each regex to match more than
                                  once. If False (the default), once a regex
                                  matches a line, it will no longer be searched
                                  for.
        silent                 -- If False, log extra information

        Returns the matching lines if all regexes are matched, or None.
        """
        save_regexes = self.regexes
        result = []

        if not silent:
            self._debug("starting search: timeout=%d" % self._timeout)

        while self.regexes:
            one_result = self.look(self._timeout)
            if not one_result:
                self.unmatched = self.regexes
                self.regexes = save_regexes
                self.end()
                return None

            result.append(one_result)

            if not allow_multiple_matches:
                del self.regexes[self.whichmatch]

            else:
                # Allow multiple regexes to match a single line
                tmp_regexes = self.regexes
                self.regexes = []

                for regex in tmp_regexes:
                    matchobj = re.search(regex, one_result)

                    if not matchobj:
                        self.regexes.append(regex)

        self.unmatched = None
        self.regexes = save_regexes
        return result
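# Typical LogWatcher usage, following the three steps from the class
# docstring above. The host names, log path, and patterns are hypothetical,
# and a real run would need reachable cluster nodes behind RemoteFactory.

watcher = LogWatcher("/var/log/messages",
                     [r"pacemakerd.*starting", r"error:"],
                     ["node1", "node2"],
                     kind=LogKind.REMOTE_FILE,
                     name="startup-check",
                     timeout=60)
watcher.set_watch()

# ... trigger the cluster activity being tested here ...

matches = watcher.look_for_all()
if matches is None:
    print("Unmatched patterns: %s" % watcher.unmatched)
else:
    for matched_line in matches:
        print("Matched: %s" % matched_line)
watcher.end()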
diff --git a/python/pacemaker/buildoptions.py.in b/python/pacemaker/buildoptions.py.in
index aeb6dc26b6..a97640cd32 100644
--- a/python/pacemaker/buildoptions.py.in
+++ b/python/pacemaker/buildoptions.py.in
@@ -1,64 +1,65 @@
"""A module providing information on build-time configuration of pacemaker."""

__all__ = ["BuildOptions"]
__copyright__ = "Copyright 2023-2024 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"

+
class BuildOptions:
    """
    Variables generated as part of the ./configure && make process.

    These affect how pacemaker was configured and where its various parts
    get installed.
    """

    BASH_PATH = "@BASH_PATH@"
    """Path to the bash shell."""

    _BUILD_DIR = "@abs_top_builddir@"
    """
    Top-level build directory.

    NOTE: This is not especially useful on installed systems, but is useful for
    running various programs from a source checkout
    """

    CIB_DIR = "@CRM_CONFIG_DIR@"
    """Where CIB files are stored."""

    CIB_SCHEMA_VERSION = "@CIB_VERSION@"
    """Latest supported CIB schema version number."""

    COROSYNC_CONFIG_FILE = "@PCMK__COROSYNC_CONF@"
    """Path to the corosync config file."""

    DAEMON_DIR = "@CRM_DAEMON_DIR@"
    """Where Pacemaker daemons are installed."""

    DAEMON_USER = "@CRM_DAEMON_USER@"
    """User to run Pacemaker daemons as."""

    LOCAL_STATE_DIR = "@localstatedir@"
    """Where miscellaneous temporary state files are stored."""

    LOG_DIR = "@CRM_LOG_DIR@"
    """Where Pacemaker log files are stored."""

    OCF_RA_INSTALL_DIR = "@OCF_RA_INSTALL_DIR@"
    """Where resource agents are installed."""

    OCF_ROOT_DIR = "@OCF_ROOT_DIR@"
    """Root directory for OCF resource agents and libraries."""

    RSC_TMP_DIR = "@CRM_RSCTMP_DIR@"
    """Where resource agents should keep state files."""

    # pylint: disable=comparison-of-constants
    REMOTE_ENABLED = "@PC_NAME_GNUTLS@" != ""
    """True if Pacemaker Remote support is enabled."""

    SBIN_DIR = "@sbindir@"
    """Where administrative programs are installed."""

    SCHEMA_DIR = "@CRM_SCHEMA_DIRECTORY@"
    """Where Relax-NG schema files are stored."""
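# The @...@ tokens above are substituted by ./configure, so installed code
# builds paths from BuildOptions instead of hard-coding them. This is how
# watcher.py derives LOG_WATCHER_BIN; the tool name here just mirrors that
# example.

from pacemaker.buildoptions import BuildOptions

log_watcher_bin = "%s/cts-log-watcher" % BuildOptions.DAEMON_DIR
print(log_watcher_bin)

if BuildOptions.REMOTE_ENABLED:
    print("Pacemaker Remote support is built in")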
diff --git a/python/pacemaker/exitstatus.py b/python/pacemaker/exitstatus.py
index 1dba1d5c68..b2078519b7 100644
--- a/python/pacemaker/exitstatus.py
+++ b/python/pacemaker/exitstatus.py
@@ -1,61 +1,62 @@
"""A module providing constants relating to why a process or function exited."""

__all__ = ["ExitStatus"]
__copyright__ = "Copyright 2023-2024 the Pacemaker project contributors"
__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"

from enum import IntEnum, unique

+
# These values must be kept in sync with include/crm/common/results.h
@unique
class ExitStatus(IntEnum):
    """
    Exit status codes for a function or process.

    These constants describe both success and failure conditions.
    """

    OK = 0
    ERROR = 1
    INVALID_PARAM = 2
    UNIMPLEMENT_FEATURE = 3
    INSUFFICIENT_PRIV = 4
    NOT_INSTALLED = 5
    NOT_CONFIGURED = 6
    NOT_RUNNING = 7
    PROMOTED = 8
    FAILED_PROMOTED = 9
    USAGE = 64
    DATAERR = 65
    NOINPUT = 66
    NOUSER = 67
    NOHOST = 68
    UNAVAILABLE = 69
    SOFTWARE = 70
    OSERR = 71
    OSFILE = 72
    CANTCREAT = 73
    IOERR = 74
    TEMPFAIL = 75
    PROTOCOL = 76
    NOPERM = 77
    CONFIG = 78
    FATAL = 100
    PANIC = 101
    DISCONNECT = 102
    OLD = 103
    DIGEST = 104
    NOSUCH = 105
    QUORUM = 106
    UNSAFE = 107
    EXISTS = 108
    MULTIPLE = 109
    EXPIRED = 110
    NOT_YET_IN_EFFECT = 111
    INDETERMINATE = 112
    UNSATISFIED = 113
    TIMEOUT = 124
    DEGRADED = 190
    DEGRADED_PROMOTED = 191
    NONE = 193
    MAX = 255

diff --git a/python/tests/test_cts_network.py b/python/tests/test_cts_network.py
index 4aea8b91a7..3dec000cc4 100644
--- a/python/tests/test_cts_network.py
+++ b/python/tests/test_cts_network.py
@@ -1,37 +1,38 @@
# These warnings are not useful in unit tests.
# pylint: disable=missing-class-docstring,missing-function-docstring,missing-module-docstring

__copyright__ = "Copyright 2023 the Pacemaker project contributors"
__license__ = "GPLv2+"

import unittest

from pacemaker._cts.network import next_ip

+
# next_ip makes a bunch of assumptions that we are not going to test here:
#
# * The env argument actually contains an "IPBase" key with a string in it
# * The string is a properly formatted IPv4 or IPv6 address, with no extra
#   leading or trailing whitespace
class NextIPTestCase(unittest.TestCase):
    def test_ipv4(self):
        # The first time next_ip is called, it will read the IPBase out of
        # the environment.  After that, it just goes off whatever it
        # previously returned, so the environment value doesn't matter.
        self.assertEqual(next_ip("1.1.1.1"), "1.1.1.2")
        self.assertEqual(next_ip(), "1.1.1.3")

        # Passing reset=True will force it to re-read the environment.  Here,
        # we use that to ask it for a value outside of the available range.
        self.assertRaises(ValueError, next_ip, "1.1.1.255", reset=True)

    def test_ipv6(self):
        # Same comments as for the test_ipv4 function, plus we need to reset
        # here because otherwise it will remember what happened in that
        # function.
        self.assertEqual(next_ip("fe80::fc54:ff:fe09:101", reset=True), "fe80::fc54:ff:fe09:102")
        self.assertEqual(next_ip(), "fe80::fc54:ff:fe09:103")

        self.assertRaises(ValueError, next_ip, "fe80::fc54:ff:fe09:ffff", reset=True)

diff --git a/python/tests/test_exitstatus.py b/python/tests/test_exitstatus.py
index 571f6b4778..b8543aa8fa 100644
--- a/python/tests/test_exitstatus.py
+++ b/python/tests/test_exitstatus.py
@@ -1,14 +1,15 @@
# These warnings are not useful in unit tests.
# pylint: disable=missing-class-docstring,missing-function-docstring,missing-module-docstring

__copyright__ = "Copyright 2023 the Pacemaker project contributors"
__license__ = "GPLv2+"

import unittest

from pacemaker.exitstatus import ExitStatus

+
class ExitStatusTestCase(unittest.TestCase):
    def test_min_max(self):
        self.assertEqual(ExitStatus.OK, 0)
        self.assertEqual(ExitStatus.MAX, 255)
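# Because ExitStatus is an IntEnum, its members compare equal to plain
# integers, which is what test_min_max above relies on. The same property
# lets a raw process exit code be mapped back to a symbolic name; the
# return code below is a made-up example.

from pacemaker.exitstatus import ExitStatus

rc = 124  # hypothetical exit code from a timed-out command

status = ExitStatus(rc)
print(status is ExitStatus.TIMEOUT)  # True
print(status == 124)                 # True -- IntEnum members compare to int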