diff --git a/python/pacemaker/__init__.py b/python/pacemaker/__init__.py index 712368d217..e5d992e8f3 100644 --- a/python/pacemaker/__init__.py +++ b/python/pacemaker/__init__.py @@ -1,9 +1,9 @@ """ API reference documentation for the `pacemaker` package. """ __copyright__ = "Copyright 2023 the Pacemaker project contributors" -__license__ = "LGPLv2.1+" +__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)" from pacemaker.buildoptions import BuildOptions from pacemaker.exitstatus import ExitStatus diff --git a/python/pacemaker/_cts/__init__.py b/python/pacemaker/_cts/__init__.py index c4ad2265f8..dfc05ad534 100644 --- a/python/pacemaker/_cts/__init__.py +++ b/python/pacemaker/_cts/__init__.py @@ -1,6 +1,6 @@ """ Internal Python API for the `pacemaker` package. """ __copyright__ = "Copyright 2023 the Pacemaker project contributors" -__license__ = "LGPLv2.1+" +__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)" diff --git a/python/pacemaker/_cts/corosync.py b/python/pacemaker/_cts/corosync.py index 9ffa3ebd19..aabaecd755 100644 --- a/python/pacemaker/_cts/corosync.py +++ b/python/pacemaker/_cts/corosync.py @@ -1,169 +1,169 @@ """ A module providing functions for manipulating corosync """ __all__ = ["Corosync", "localname"] __copyright__ = "Copyright 2009-2023 the Pacemaker project contributors" -__license__ = "LGPLv2.1+" +__license__ = "GNU General Public License version 2 or later (GPLv2+)" import os import subprocess import time from pacemaker.buildoptions import BuildOptions from pacemaker._cts.process import killall, stdout_from_command AUTOGEN_COROSYNC_TEMPLATE = """ totem { version: 2 cluster_name: %s crypto_cipher: none crypto_hash: none transport: udp } nodelist { node { nodeid: 1 name: %s ring0_addr: 127.0.0.1 } } logging { debug: off to_syslog: no to_stderr: no to_logfile: yes logfile: %s } """ def corosync_cfg_exists(): """ Does the corosync config file exist? 
""" return os.path.exists(BuildOptions.COROSYNC_CONFIG_FILE) def corosync_log_file(cfgfile): """ Where does corosync log to? """ with open(cfgfile, "r", encoding="utf-8") as f: for line in f.readlines(): # "to_logfile:" could also be in the config file, so check for a # slash to make sure it's a path we're looking at. if "logfile: /" in line: return line.split()[-1] return None def generate_corosync_cfg(logdir, cluster_name, node_name): """ Generate the corosync config file, if it does not already exist """ if corosync_cfg_exists(): return False logfile = os.path.join(logdir, "corosync.log") with open(BuildOptions.COROSYNC_CONFIG_FILE, "w", encoding="utf-8") as corosync_cfg: corosync_cfg.write(AUTOGEN_COROSYNC_TEMPLATE % (cluster_name, node_name, logfile)) return True def localname(): """ Return the uname of the local host """ our_uname = stdout_from_command(["uname", "-n"]) if our_uname: our_uname = our_uname[0] else: our_uname = "localhost" return our_uname class Corosync: """ A class for managing corosync processes and config files """ def __init__(self, verbose, logdir, cluster_name): """ Create a new Corosync instance. Arguments: verbose -- Whether to print the corosync log file logdir -- The base directory under which to store log files cluster_name -- The name of the cluster """ self.verbose = verbose self.logdir = logdir self.cluster_name = cluster_name self._generated_cfg_file = False def _ready(self, logfile, timeout=10): """ Is corosync ready to go? """ i = 0 while i < timeout: with open(logfile, "r", encoding="utf-8") as corosync_log: for line in corosync_log.readlines(): if line.endswith("ready to provide service.\n"): # Even once the line is in the log file, we may still need to wait just # a little bit longer before corosync is really ready to go. 
time.sleep(1) return time.sleep(1) i += 1 raise TimeoutError def start(self, kill_first=False, timeout=10): """ Start the corosync process Arguments: kill_first -- Whether to kill any pre-existing corosync processes before starting a new one timeout -- If corosync does not start within this many seconds, raise TimeoutError """ if kill_first: killall(["corosync"]) self._generated_cfg_file = generate_corosync_cfg(self.logdir, self.cluster_name, localname()) logfile = corosync_log_file(BuildOptions.COROSYNC_CONFIG_FILE) if self.verbose: print("Starting corosync") with subprocess.Popen("corosync", stdout=subprocess.PIPE) as test: test.wait() # Wait for corosync to be ready before returning self._ready(logfile, timeout=timeout) def stop(self): """ Stop the corosync process """ killall(["corosync"]) # If we did not write out the corosync config file, don't do anything else. if not self._generated_cfg_file: return if self.verbose: print("Corosync output") logfile = corosync_log_file(BuildOptions.COROSYNC_CONFIG_FILE) with open(logfile, "r", encoding="utf-8") as corosync_log: for line in corosync_log.readlines(): print(line.strip()) os.remove(BuildOptions.COROSYNC_CONFIG_FILE) diff --git a/python/pacemaker/_cts/errors.py b/python/pacemaker/_cts/errors.py index 64ea31255e..2e245e71ad 100644 --- a/python/pacemaker/_cts/errors.py +++ b/python/pacemaker/_cts/errors.py @@ -1,53 +1,53 @@ """ A module providing custom exception classes used throughout the pacemaker library """ __all__ = ["ExitCodeError", "OutputFoundError", "OutputNotFoundError", "XmlValidationError"] __copyright__ = "Copyright 2009-2023 the Pacemaker project contributors" -__license__ = "LGPLv2.1+" +__license__ = "GNU General Public License version 2 or later (GPLv2+)" class TestError(Exception): """ Base class for exceptions in this module """ class ExitCodeError(TestError): """ Exception raised when command exit status is unexpected """ def __init__(self, exit_code): TestError.__init__(self) self.exit_code = 
exit_code def __str__(self): return repr(self.exit_code) class OutputNotFoundError(TestError): """ Exception raised when command output does not contain wanted string """ def __init__(self, output): TestError.__init__(self) self.output = output def __str__(self): return repr(self.output) class OutputFoundError(TestError): """ Exception raised when command output contains unwanted string """ def __init__(self, output): TestError.__init__(self) self.output = output def __str__(self): return repr(self.output) class XmlValidationError(TestError): """ Exception raised when xmllint fails """ def __init__(self, output): TestError.__init__(self) self.output = output def __str__(self): return repr(self.output) diff --git a/python/pacemaker/_cts/patterns.py b/python/pacemaker/_cts/patterns.py index 197b5dbcb1..c3130ad859 100644 --- a/python/pacemaker/_cts/patterns.py +++ b/python/pacemaker/_cts/patterns.py @@ -1,411 +1,411 @@ """ Pattern-holding classes for Pacemaker's Cluster Test Suite (CTS) """ __all__ = ["PatternSelector"] __copyright__ = "Copyright 2008-2023 the Pacemaker project contributors" -__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" +__license__ = "GNU General Public License version 2 or later (GPLv2+)" import argparse from pacemaker.buildoptions import BuildOptions class BasePatterns: """ The base class for holding a stack-specific set of command and log file/stdout patterns. Stack-specific classes need to be built on top of this one. """ def __init__(self): """ Create a new BasePatterns instance which holds a very minimal set of basic patterns. 
""" self._bad_news = [] self._components = {} self._name = "crm-base" self._ignore = [ "avoid confusing Valgrind", # Logging bug in some versions of libvirtd r"libvirtd.*: internal error: Failed to parse PCI config address", # pcs can log this when node is fenced, but fencing is OK in some # tests (and we will catch it in pacemaker logs when not OK) r"pcs.daemon:No response from: .* request: get_configs, error:", ] self._commands = { "StatusCmd" : "crmadmin -t 60 -S %s 2>/dev/null", "CibQuery" : "cibadmin -Ql", "CibAddXml" : "cibadmin --modify -c --xml-text %s", "CibDelXpath" : "cibadmin --delete --xpath %s", "RscRunning" : BuildOptions.DAEMON_DIR + "/cts-exec-helper -R -r %s", "CIBfile" : "%s:" + BuildOptions.CIB_DIR + "/cib.xml", "TmpDir" : "/tmp", "BreakCommCmd" : "iptables -A INPUT -s %s -j DROP >/dev/null 2>&1", "FixCommCmd" : "iptables -D INPUT -s %s -j DROP >/dev/null 2>&1", "ReduceCommCmd" : "", "RestoreCommCmd" : "tc qdisc del dev lo root", "MaintenanceModeOn" : "cibadmin --modify -c --xml-text ''", "MaintenanceModeOff" : "cibadmin --delete --xpath \"//nvpair[@name='maintenance-mode']\"", "StandbyCmd" : "crm_attribute -Vq -U %s -n standby -l forever -v %s 2>/dev/null", "StandbyQueryCmd" : "crm_attribute -qG -U %s -n standby -l forever -d off 2>/dev/null", } self._search = { "Pat:DC_IDLE" : r"pacemaker-controld.*State transition.*-> S_IDLE", # This won't work if we have multiple partitions "Pat:Local_started" : r"%s\W.*controller successfully started", "Pat:NonDC_started" : r"%s\W.*State transition.*-> S_NOT_DC", "Pat:DC_started" : r"%s\W.*State transition.*-> S_IDLE", "Pat:We_stopped" : r"%s\W.*OVERRIDE THIS PATTERN", "Pat:They_stopped" : r"%s\W.*LOST:.* %s ", "Pat:They_dead" : r"node %s.*: is dead", "Pat:They_up" : r"%s %s\W.*OVERRIDE THIS PATTERN", "Pat:TransitionComplete" : "Transition status: Complete: complete", "Pat:Fencing_start" : r"Requesting peer fencing .* targeting %s", "Pat:Fencing_ok" : r"pacemaker-fenced.*:\s*Operation .* targeting %s by .* 
for .*@.*: OK", "Pat:Fencing_recover" : r"pacemaker-schedulerd.*: Recover\s+%s", "Pat:Fencing_active" : r"stonith resource .* is active on 2 nodes (attempting recovery)", "Pat:Fencing_probe" : r"pacemaker-controld.* Result of probe operation for %s on .*: Error", "Pat:RscOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s.*: (0 \()?ok", "Pat:RscOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of %s ", "Pat:CloneOpFail" : r"pacemaker-schedulerd.*:.*Unexpected result .* recorded for %s of (%s|%s) ", "Pat:RscRemoteOpOK" : r"pacemaker-controld.*:\s+Result of %s operation for %s on %s: (0 \()?ok", "Pat:NodeFenced" : r"pacemaker-controld.*:\s* Peer %s was terminated \(.*\) by .* on behalf of .*: OK", } def get_component(self, key): """ Return the patterns for a single component as a list, given by key. This is typically the name of some subprogram (pacemaker-based, pacemaker-fenced, etc.) or various special purpose keys. If key is unknown, return an empty list. """ if key in self._components: return self._components[key] print("Unknown component '%s' for %s" % (key, self._name)) return [] def get_patterns(self, key): """ Return various patterns supported by this object, given by key. Depending on the key, this could either be a list or a hash. If key is unknown, return None. 
""" if key == "BadNews": return self._bad_news if key == "BadNewsIgnore": return self._ignore if key == "Commands": return self._commands if key == "Search": return self._search if key == "Components": return self._components print("Unknown pattern '%s' for %s" % (key, self._name)) return None def __getitem__(self, key): if key == "Name": return self._name if key in self._commands: return self._commands[key] if key in self._search: return self._search[key] print("Unknown template '%s' for %s" % (key, self._name)) return None class Corosync2Patterns(BasePatterns): """ Patterns for Corosync version 2 cluster manager class """ def __init__(self): BasePatterns.__init__(self) self._name = "crm-corosync" self._commands.update({ "StartCmd" : "service corosync start && service pacemaker start", "StopCmd" : "service pacemaker stop; [ ! -e /usr/sbin/pacemaker-remoted ] || service pacemaker_remote stop; service corosync stop", "EpochCmd" : "crm_node -e", "QuorumCmd" : "crm_node -q", "PartitionCmd" : "crm_node -p", }) self._search.update({ # Close enough ... "Corosync Cluster Engine exiting normally" isn't # printed reliably. 
"Pat:We_stopped" : r"%s\W.*Unloading all Corosync service engines", "Pat:They_stopped" : r"%s\W.*pacemaker-controld.*Node %s(\[|\s).*state is now lost", "Pat:They_dead" : r"pacemaker-controld.*Node %s(\[|\s).*state is now lost", "Pat:They_up" : r"\W%s\W.*pacemaker-controld.*Node %s state is now member", "Pat:ChildExit" : r"\[[0-9]+\] exited with status [0-9]+ \(", # "with signal 9" == pcmk_child_exit(), "$" == check_active_before_startup_processes() "Pat:ChildKilled" : r"%s\W.*pacemakerd.*%s\[[0-9]+\] terminated( with signal 9|$)", "Pat:ChildRespawn" : r"%s\W.*pacemakerd.*Respawning %s subdaemon after unexpected exit", "Pat:InfraUp" : r"%s\W.*corosync.*Initializing transport", "Pat:PacemakerUp" : r"%s\W.*pacemakerd.*Starting Pacemaker", }) self._ignore += [ r"crm_mon:", r"crmadmin:", r"update_trace_data", r"async_notify:.*strange, client not found", r"Parse error: Ignoring unknown option .*nodename", r"error.*: Operation 'reboot' .* using FencingFail returned ", r"getinfo response error: 1$", r"sbd.* error: inquisitor_child: DEBUG MODE IS ACTIVE", r"sbd.* pcmk:\s*error:.*Connection to cib_ro.* (failed|closed)", ] self._bad_news = [ r"[^(]error:", r"crit:", r"ERROR:", r"CRIT:", r"Shutting down...NOW", r"Timer I_TERMINATE just popped", r"input=I_ERROR", r"input=I_FAIL", r"input=I_INTEGRATED cause=C_TIMER_POPPED", r"input=I_FINALIZED cause=C_TIMER_POPPED", r"input=I_ERROR", r"(pacemakerd|pacemaker-execd|pacemaker-controld):.*, exiting", r"schedulerd.*Attempting recovery of resource", r"is taking more than 2x its timeout", r"Confirm not received from", r"Welcome reply not received from", r"Attempting to schedule .* after a stop", r"Resource .* was active at shutdown", r"duplicate entries for call_id", r"Search terminated:", r":global_timer_callback", r"Faking parameter digest creation", r"Parameters to .* action changed:", r"Parameters to .* changed", r"pacemakerd.*\[[0-9]+\] terminated( with signal| as IPC server|$)", r"pacemaker-schedulerd.*Recover\s+.*\(.* -\> 
.*\)", r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting", r"Peer is not part of our cluster", r"We appear to be in an election loop", r"Unknown node -> we will not deliver message", r"(Blackbox dump requested|Problem detected)", r"pacemakerd.*Could not connect to Cluster Configuration Database API", r"Receiving messages from a node we think is dead", r"share the same cluster nodeid", r"share the same name", r"pacemaker-controld:.*Transition failed: terminated", r"Local CIB .* differs from .*:", r"warn.*:\s*Continuing but .* will NOT be used", r"warn.*:\s*Cluster configuration file .* is corrupt", r"Election storm", r"stalled the FSA with pending inputs", ] self._components["common-ignore"] = [ r"Pending action:", r"resource( was|s were) active at shutdown", r"pending LRM operations at shutdown", r"Lost connection to the CIB manager", r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported", r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ", r".*:\s*Requesting fencing \([^)]+\) of node ", r"(Blackbox dump requested|Problem detected)", ] self._components["corosync-ignore"] = [ r"Could not connect to Corosync CFG: CS_ERR_LIBRARY", r"error:.*Connection to the CPG API failed: Library error", r"\[[0-9]+\] exited with status [0-9]+ \(", r"\[[0-9]+\] terminated with signal 15", r"pacemaker-based.*error:.*Corosync connection lost", r"pacemaker-fenced.*error:.*Corosync connection terminated", r"pacemaker-controld.*State transition .* S_RECOVERY", r"pacemaker-controld.*error:.*Input (I_ERROR|I_TERMINATE ) .*received in state", r"pacemaker-controld.*error:.*Could not recover from internal error", r"error:.*Connection to cib_(shm|rw).* (failed|closed)", r"error:.*cib_(shm|rw) IPC provider disconnected while waiting", r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"crit: Fencing daemon connection failed", # This is overbroad, but we don't have a way to say that only # certain transition errors are acceptable 
(if the fencer respawns, # fence devices may appear multiply active). We have to rely on # other causes of a transition error logging their own error # message, which is the usual practice. r"pacemaker-schedulerd.* Calculated transition .*/pe-error", ] self._components["corosync"] = [ # We expect each daemon to lose its cluster connection. # However, if the CIB manager loses its connection first, # it's possible for another daemon to lose that connection and # exit before losing the cluster connection. r"pacemakerd.*:\s*warning:.*Lost connection to cluster layer", r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)", r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer", r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)", r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)", r"schedulerd.*Scheduling node .* for fencing", r"pacemaker-controld.*:\s*Peer .* was terminated \(.*\) by .* on behalf of .*:\s*OK", ] self._components["pacemaker-based"] = [ r"pacemakerd.* pacemaker-attrd\[[0-9]+\] exited with status 102", r"pacemakerd.* pacemaker-controld\[[0-9]+\] exited with status 1", r"pacemakerd.* Respawning pacemaker-attrd subdaemon after unexpected exit", r"pacemakerd.* Respawning pacemaker-based subdaemon after unexpected exit", r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit", r"pacemakerd.* Respawning pacemaker-fenced subdaemon after unexpected exit", r"pacemaker-.* Connection to cib_.* (failed|closed)", r"pacemaker-attrd.*:.*Lost connection to the CIB manager", r"pacemaker-controld.*:.*Lost connection to the CIB manager", r"pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy", r"pacemaker-controld.* State transition .* S_RECOVERY", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*Could not recover from internal error", ] self._components["pacemaker-based-ignore"] = 
[ r"pacemaker-execd.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)", r"pacemaker-controld.*:Could not connect to attrd: Connection refused", # This is overbroad, but we don't have a way to say that only # certain transition errors are acceptable (if the fencer respawns, # fence devices may appear multiply active). We have to rely on # other causes of a transition error logging their own error # message, which is the usual practice. r"pacemaker-schedulerd.* Calculated transition .*/pe-error", ] self._components["pacemaker-execd"] = [ r"pacemaker-controld.*Connection to executor failed", r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy", r"pacemaker-controld.*State transition .* S_RECOVERY", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*Could not recover from internal error", r"pacemakerd.*pacemaker-controld\[[0-9]+\] exited with status 1", r"pacemakerd.* Respawning pacemaker-execd subdaemon after unexpected exit", r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit", ] self._components["pacemaker-execd-ignore"] = [ r"pacemaker-(attrd|controld).*Connection to lrmd.* (failed|closed)", r"pacemaker-(attrd|controld).*Could not execute alert", ] self._components["pacemaker-controld"] = [ r"State transition .* -> S_IDLE", ] self._components["pacemaker-controld-ignore"] = [] self._components["pacemaker-attrd"] = [] self._components["pacemaker-attrd-ignore"] = [] self._components["pacemaker-schedulerd"] = [ r"State transition .* S_RECOVERY", r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit", r"pacemaker-controld\[[0-9]+\] exited with status 1 \(", r"Connection to the scheduler failed", r"pacemaker-controld.*I_ERROR.*save_cib_contents", r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover", r"pacemaker-controld.*Could not recover from internal error", ] 
self._components["pacemaker-schedulerd-ignore"] = [ r"Connection to pengine.* (failed|closed)", ] self._components["pacemaker-fenced"] = [ r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"Fencing daemon connection failed", r"pacemaker-controld.*Fencer successfully connected", ] self._components["pacemaker-fenced-ignore"] = [ r"(error|warning):.*Connection to (fencer|stonith-ng).* (closed|failed|lost)", r"crit:.*Fencing daemon connection failed", r"error:.*Fencer connection failed \(will retry\)", r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)", # This is overbroad, but we don't have a way to say that only # certain transition errors are acceptable (if the fencer respawns, # fence devices may appear multiply active). We have to rely on # other causes of a transition error logging their own error # message, which is the usual practice. r"pacemaker-schedulerd.* Calculated transition .*/pe-error", ] self._components["pacemaker-fenced-ignore"].extend(self._components["common-ignore"]) patternVariants = { "crm-base": BasePatterns, "crm-corosync": Corosync2Patterns } class PatternSelector: """ A class for choosing one of several Pattern objects and then extracting various pieces of information from that object """ def __init__(self, name="crm-corosync"): """ Create a new PatternSelector object by instantiating whatever class is given by name. Defaults to Corosync2Patterns for "crm-corosync" or None. While other objects could be supported in the future, only this and the base object are supported at this time. """ self._name = name # If no name was given, use the default. Otherwise, look up the appropriate # class in patternVariants, instantiate it, and use that. 
if not name: self._base = Corosync2Patterns() else: self._base = patternVariants[name]() def get_patterns(self, kind): """ Call get_patterns on the previously instantiated pattern object """ return self._base.get_patterns(kind) def get_template(self, key): """ Return a single pattern from the previously instantiated pattern object as a string, or None if no pattern exists for the given key. """ return self._base[key] def get_component(self, kind): """ Call get_component on the previously instantiated pattern object """ return self._base.get_component(kind) def __getitem__(self, key): return self.get_template(key) # PYTHONPATH=python python python/pacemaker/_cts/patterns.py -k crm-corosync -t StartCmd if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-k", "--kind", metavar="KIND") parser.add_argument("-t", "--template", metavar="TEMPLATE") args = parser.parse_args() print(PatternSelector(args.kind)[args.template]) diff --git a/python/pacemaker/_cts/process.py b/python/pacemaker/_cts/process.py index 961a2be716..2940b71736 100644 --- a/python/pacemaker/_cts/process.py +++ b/python/pacemaker/_cts/process.py @@ -1,76 +1,76 @@ """ A module for managing and communicating with external processes """ __all__ = ["killall", "exit_if_proc_running", "pipe_communicate", "stdout_from_command"] __copyright__ = "Copyright 2009-2023 the Pacemaker project contributors" -__license__ = "LGPLv2.1+" +__license__ = "GNU General Public License version 2 or later (GPLv2+)" import subprocess import sys import psutil from pacemaker.exitstatus import ExitStatus def killall(process_names, terminate=False): """ Kill all instances of every process in a list """ if not process_names: return if not isinstance(process_names, list): process_names = [process_names] procs = [] for proc in psutil.process_iter(["name"]): if proc.info["name"] in process_names: procs.append(proc) if terminate: for proc in procs: proc.terminate() _, alive = psutil.wait_procs(procs, 
timeout=3) procs = alive for proc in procs: proc.kill() def is_proc_running(process_name): """ Check whether a process with a given name is running """ for proc in psutil.process_iter(["name"]): if proc.info["name"] == process_name: return True return False def exit_if_proc_running(process_name): """ Exit with error if a given process is running """ if is_proc_running(process_name): print("Error: %s is already running!" % process_name) print("Run %s only when the cluster is stopped." % sys.argv[0]) sys.exit(ExitStatus.ERROR) def pipe_communicate(pipes, check_stderr=False, stdin=None): """ Get text output from pipes """ if stdin is not None: pipe_outputs = pipes.communicate(input=stdin.encode()) else: pipe_outputs = pipes.communicate() output = pipe_outputs[0].decode(sys.stdout.encoding) if check_stderr: output = output + pipe_outputs[1].decode(sys.stderr.encoding) return output def stdout_from_command(args): """ Execute command and return its standard output """ with subprocess.Popen(args, stdout=subprocess.PIPE) as p: p.wait() return pipe_communicate(p).split("\n") diff --git a/python/pacemaker/_cts/test.py b/python/pacemaker/_cts/test.py index c86e55e22f..fb809a9284 100644 --- a/python/pacemaker/_cts/test.py +++ b/python/pacemaker/_cts/test.py @@ -1,594 +1,594 @@ """ A module providing base classes for defining regression tests and groups of regression tests. Everything exported here should be considered an abstract class that needs to be subclassed in order to do anything useful. Various functions will raise NotImplementedError if not overridden by a subclass. 
""" __copyright__ = "Copyright 2009-2023 the Pacemaker project contributors" -__license__ = "LGPLv2.1+" +__license__ = "GNU General Public License version 2 or later (GPLv2+)" __all__ = ["Test", "Tests"] import io import os import re import shlex import signal import subprocess import sys import time from pacemaker._cts.errors import ExitCodeError, OutputFoundError, OutputNotFoundError, XmlValidationError from pacemaker._cts.process import pipe_communicate from pacemaker.buildoptions import BuildOptions from pacemaker.exitstatus import ExitStatus def find_validator(rng_file): """ Return the command line used to validate XML output, or None if the validator is not installed. """ if os.access("/usr/bin/xmllint", os.X_OK): if rng_file is None: return ["xmllint", "-"] return ["xmllint", "--relaxng", rng_file, "-"] return None def rng_directory(): """ Which directory contains the RNG schema files? """ if "PCMK_schema_directory" in os.environ: return os.environ["PCMK_schema_directory"] if os.path.exists("%s/cts-fencing.in" % sys.path[0]): return "xml" return BuildOptions.SCHEMA_DIR class Pattern: """ A class for checking log files for a given pattern """ def __init__(self, pat, negative=False, regex=False): """ Create a new Pattern instance Arguments: pat -- The string to search for negative -- If True, pat must not be found in any input regex -- If True, pat is a regex and not a substring """ self._pat = pat self.negative = negative self.regex = regex def __str__(self): return self._pat def match(self, line): """ Is this pattern found in the given line? """ if self.regex: return re.search(self._pat, line) is not None return self._pat in line class Test: """ The base class for a single regression test. A single regression test may still run multiple commands as part of its execution. """ def __init__(self, name, description, **kwargs): """ Create a new Test instance. This method must be provided by all subclasses, which must call Test.__init__ first. 
Arguments: description -- A user-readable description of the test, helpful in identifying what test is running or has failed. name -- The name of the test. Command line tools use this attribute to allow running only tests with the exact name, or tests whose name matches a given pattern. This should be unique among all tests. Keyword arguments: force_wait -- logdir -- The base directory under which to create a directory to store output and temporary data. timeout -- How long to wait for the test to complete. verbose -- Whether to print additional information, including verbose command output and daemon log files. """ self.description = description self.executed = False self.name = name self.force_wait = kwargs.get("force_wait", False) self.logdir = kwargs.get("logdir", "/tmp") self.timeout = kwargs.get("timeout", 2) self.verbose = kwargs.get("verbose", False) self._cmds = [] self._patterns = [] self._daemon_location = None self._daemon_output = "" self._daemon_process = None self._result_exitcode = ExitStatus.OK self._result_txt = "" ### ### PROPERTIES ### @property def exitcode(self): """ The final exitcode of the Test. If all commands pass, this property will be ExitStatus.OK. Otherwise, this property will be the exitcode of the first command to fail. """ return self._result_exitcode @exitcode.setter def exitcode(self, value): self._result_exitcode = value @property def logpath(self): """ The path to the log for whatever daemon is being tested. Note that this requires all subclasses to set self._daemon_location before accessing this property or an exception will be raised. """ return os.path.join(self.logdir, self._daemon_location + ".log") ### ### PRIVATE METHODS ### def _kill_daemons(self): """ Kill any running daemons in preparation for executing the test """ raise NotImplementedError("_kill_daemons not provided by subclass") def _match_log_patterns(self): """ Check test output for expected patterns, setting self.exitcode and self._result_txt as appropriate. 
Not all subclass will need to do this. """ if len(self._patterns) == 0: return n_failed_matches = 0 n_negative_matches = 0 output = self._daemon_output.split("\n") for pat in self._patterns: positive_match = False for line in output: if pat.match(line): if pat.negative: n_negative_matches += 1 if self.verbose: print("This pattern should not have matched = '%s" % pat) break positive_match = True break if not pat.negative and not positive_match: n_failed_matches += 1 print("Pattern Not Matched = '%s'" % pat) if n_failed_matches > 0 or n_negative_matches > 0: msg = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." self._result_txt = msg % (self.name, n_failed_matches, len(self._patterns), n_negative_matches) self.exitcode = ExitStatus.ERROR def _new_cmd(self, cmd, args, exitcode, **kwargs): """ Add a command to be executed as part of this test. Arguments: cmd -- The program to run. args -- Commands line arguments to pass to cmd, as a string. exitcode -- The expected exit code of cmd. This can be used to run a command that is expected to fail. Keyword arguments: stdout_match -- If not None, a string that is expected to be present in the stdout of cmd. This can be a regular expression. no_wait -- Do not wait for cmd to complete. stdout_negative_match -- If not None, a string that is expected to be missing in the stdout of cmd. This can be a regualr expression. kill -- A command to be run after cmd, typically in order to kill a failed process. This should be the entire command line including arguments as a single string. validate -- If True, the output of cmd will be passed to xmllint for validation. If validation fails, XmlValidationError will be raised. check_rng -- If True and validate is True, command output will additionally be checked against the api-result.rng file. check_stderr -- If True, the stderr of cmd will be included in output. 
env -- If not None, variables to set in the environment """ self._cmds.append( { "args": args, "check_rng": kwargs.get("check_rng", True), "check_stderr": kwargs.get("check_stderr", True), "cmd": cmd, "expected_exitcode": exitcode, "kill": kwargs.get("kill", None), "no_wait": kwargs.get("no_wait", False), "stdout_match": kwargs.get("stdout_match", None), "stdout_negative_match": kwargs.get("stdout_negative_match", None), "validate": kwargs.get("validate", True), "env": kwargs.get("env", None), } ) def _start_daemons(self): """ Start any necessary daemons in preparation for executing the test """ raise NotImplementedError("_start_daemons not provided by subclass") ### ### PUBLIC METHODS ### def add_cmd(self, cmd, args, validate=True, check_rng=True, check_stderr=True, env=None): """ Add a simple command to be executed as part of this test """ self._new_cmd(cmd, args, ExitStatus.OK, validate=validate, check_rng=check_rng, check_stderr=check_stderr, env=env) def add_cmd_and_kill(self, cmd, args, kill_proc): """ Add a command and system command to be executed as part of this test """ self._new_cmd(cmd, args, ExitStatus.OK, kill=kill_proc) def add_cmd_check_stdout(self, cmd, args, match, no_match=None, env=None): """ Add a simple command with expected output to be executed as part of this test """ self._new_cmd(cmd, args, ExitStatus.OK, stdout_match=match, stdout_negative_match=no_match, env=env) def add_cmd_expected_fail(self, cmd, args, exitcode=ExitStatus.ERROR): """ Add a command that is expected to fail to be executed as part of this test """ self._new_cmd(cmd, args, exitcode) def add_cmd_no_wait(self, cmd, args): """ Add a simple command to be executed (without waiting) as part of this test """ self._new_cmd(cmd, args, ExitStatus.OK, no_wait=True) def add_log_pattern(self, pattern, negative=False, regex=False): """ Add a pattern that should appear in the test's logs """ self._patterns.append(Pattern(pattern, negative=negative, regex=regex)) def 
# NOTE(review): methods of the regression-test "Test" class, recovered from a
# whitespace-mangled dump and written as module-level functions taking `self`
# explicitly so they can be exercised in isolation.  Re-indent them under the
# class when restoring the file.

def clean_environment(self):
    """ Clean up the host after executing a test """
    if self._daemon_process:
        if self._daemon_process.poll() is None:
            self._daemon_process.terminate()
            self._daemon_process.wait()
        else:
            # Map the (negated) return code back to a signal name, falling
            # back to the raw code if it does not correspond to a signal.
            return_code = {
                getattr(signal, _signame): _signame
                for _signame in dir(signal)
                if _signame.startswith('SIG') and not _signame.startswith("SIG_")
            }.get(-self._daemon_process.returncode,
                  "RET=%d" % (self._daemon_process.returncode))
            msg = "FAILURE - '%s' failed. %s abnormally exited during test (%s)."
            self._result_txt = msg % (self.name, self._daemon_location, return_code)
            self.exitcode = ExitStatus.ERROR

    self._daemon_process = None
    self._daemon_output = ""

    # the default for utf-8 encoding would error out if e.g. memory corruption
    # makes fenced output any kind of 8 bit value - while still interesting
    # for debugging and we'd still like the regression-test to go over the
    # full set of test-cases
    with open(self.logpath, 'rt', encoding="ISO-8859-1") as logfile:
        # read() yields the same string the original line-by-line += loop
        # built, without quadratic string concatenation.
        self._daemon_output = logfile.read()

    if self.verbose:
        print("Daemon Output Start")
        print(self._daemon_output)
        print("Daemon Output End")


def print_result(self, filler):
    """ Print the result of the last test execution """
    print("%s%s" % (filler, self._result_txt))


def run(self):
    """ Execute this test """
    i = 1

    self.start_environment()

    if self.verbose:
        print("\n--- START TEST - %s" % self.name)

    self._result_txt = "SUCCESS - '%s'" % (self.name)
    self.exitcode = ExitStatus.OK

    for cmd in self._cmds:
        try:
            self.run_cmd(cmd)
        except ExitCodeError as e:
            print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode']))
            self.set_error(i, cmd)
            break
        except OutputNotFoundError as e:
            print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e))
            self.set_error(i, cmd)
            break
        except OutputFoundError as e:
            print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_negative_match'], e))
            self.set_error(i, cmd)
            break
        except XmlValidationError as e:
            print("Step %d FAILED - xmllint failed: %s" % (i, e))
            self.set_error(i, cmd)
            break

        if self.verbose:
            print("Step %d SUCCESS" % (i))

        i += 1

    self.clean_environment()

    # Only bother matching log patterns if every command step succeeded.
    if self.exitcode == ExitStatus.OK:
        self._match_log_patterns()

    print(self._result_txt)

    if self.verbose:
        print("--- END TEST - %s\n" % self.name)

    self.executed = True


def run_cmd(self, args):
    """ Execute a command as part of this test """
    cmd = shlex.split(args['args'])
    cmd.insert(0, args['cmd'])

    if self.verbose:
        print("\n\nRunning: %s" % " ".join(cmd))

    # FIXME: Using "with" here breaks fencing merge tests.
    # pylint: disable=consider-using-with
    if args['env']:
        new_env = os.environ.copy()
        new_env.update(args['env'])
        test = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, env=new_env)
    else:
        test = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)

    if args['kill']:
        if self.verbose:
            print("Also running: %s" % args['kill'])

        ### Typically, the kill argument is used to detect some sort of
        ### failure. Without yielding for a few seconds here, the process
        ### launched earlier that is listening for the failure may not have
        ### time to connect to pacemaker-execd.
        time.sleep(2)
        subprocess.Popen(shlex.split(args['kill']))

    if not args['no_wait']:
        test.wait()
    else:
        return ExitStatus.OK

    output = pipe_communicate(test, check_stderr=args['check_stderr'])

    if self.verbose:
        print(output)

    if test.returncode != args['expected_exitcode']:
        raise ExitCodeError(test.returncode)

    if args['stdout_match'] is not None and \
       re.search(args['stdout_match'], output) is None:
        raise OutputNotFoundError(output)

    if args['stdout_negative_match'] is not None and \
       re.search(args['stdout_negative_match'], output) is not None:
        raise OutputFoundError(output)

    if args['validate']:
        if args['check_rng']:
            rng_file = rng_directory() + "/api/api-result.rng"
        else:
            rng_file = None

        cmd = find_validator(rng_file)
        if not cmd:
            raise XmlValidationError("Could not find validator for %s" % rng_file)

        if self.verbose:
            print("\nRunning: %s" % " ".join(cmd))

        with subprocess.Popen(cmd, stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as validator:
            output = pipe_communicate(validator, check_stderr=True, stdin=output)

            if self.verbose:
                print(output)

            if validator.returncode != 0:
                raise XmlValidationError(output)

    return ExitStatus.OK


def set_error(self, step, cmd):
    """ Record failure of this test """
    msg = "FAILURE - '%s' failed at step %d. Command: %s %s"
    self._result_txt = msg % (self.name, step, cmd['cmd'], cmd['args'])
    self.exitcode = ExitStatus.ERROR


def start_environment(self):
    """ Prepare the host for executing a test """
    if os.path.exists(self.logpath):
        os.remove(self.logpath)

    self._kill_daemons()
    self._start_daemons()

    logfile = None

    init_time = time.time()
    update_time = init_time

    try:
        while True:
            # FIXME: Eventually use 'with' here, which seems complicated given
            # everything happens in a loop.
            # pylint: disable=consider-using-with
            time.sleep(0.1)

            # Open the daemon's log lazily, once it appears; keep it open
            # across iterations so readlines() tails newly appended lines.
            if not self.force_wait and logfile is None \
               and os.path.exists(self.logpath):
                logfile = open(self.logpath, 'rt', encoding="ISO-8859-1")

            if not self.force_wait and logfile is not None:
                for line in logfile.readlines():
                    if "successfully started" in line:
                        return

            now = time.time()

            if self.timeout > 0 and (now - init_time) >= self.timeout:
                if not self.force_wait:
                    print("\tDaemon %s doesn't seem to have been initialized within %fs."
                          "\n\tConsider specifying a longer '--timeout' value."
                          % (self._daemon_location, self.timeout))
                return

            if self.verbose and (now - update_time) >= 5:
                print("Waiting for %s to be initialized: %fs ..."
                      % (self._daemon_location, now - init_time))
                update_time = now
    finally:
        # Fix: the log file handle used to leak on every exit path; close it
        # explicitly since a 'with' cannot easily wrap the lazy open above.
        if logfile is not None:
            logfile.close()


# ---- class Tests (the collection class) begins here; its __init__ and
# ---- remaining methods continue on the following lines of the dump.
class Tests:
    """ The base class for a collection of regression tests """
""" self.force_wait = kwargs.get("force_wait", False) self.logdir = kwargs.get("logdir", "/tmp") self.timeout = kwargs.get("timeout", 2) self.verbose = kwargs.get("verbose", False) self._tests = [] def exit(self): """ Exit (with error status code if any test failed) """ for test in self._tests: if not test.executed: continue if test.exitcode != ExitStatus.OK: sys.exit(ExitStatus.ERROR) sys.exit(ExitStatus.OK) def print_list(self): """ List all registered tests """ print("\n==== %d TESTS FOUND ====" % len(self._tests)) print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")) print("%35s - %s" % ("--------------------", "--------------------")) for test in self._tests: print("%35s - %s" % (test.name, test.description)) print("==== END OF LIST ====\n") def print_results(self): """ Print summary of results of executed tests """ failures = 0 success = 0 print("\n\n======= FINAL RESULTS ==========") print("\n--- FAILURE RESULTS:") for test in self._tests: if not test.executed: continue if test.exitcode != ExitStatus.OK: failures = failures + 1 test.print_result(" ") else: success = success + 1 if failures == 0: print(" None") print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)) def run_single(self, name): """ Run a single named test """ for test in self._tests: if test.name == name: test.run() break def run_tests(self): """ Run all tests """ for test in self._tests: test.run() def run_tests_matching(self, pattern): """ Run all tests whose name matches a pattern """ for test in self._tests: if test.name.count(pattern) != 0: test.run() diff --git a/python/pacemaker/buildoptions.py.in b/python/pacemaker/buildoptions.py.in index 2889485538..53b492b04f 100644 --- a/python/pacemaker/buildoptions.py.in +++ b/python/pacemaker/buildoptions.py.in @@ -1,57 +1,57 @@ """ A module providing information on build-time configuration of pacemaker """ __all__ = ["BuildOptions"] __copyright__ = "Copyright 2023 the Pacemaker project contributors" -__license__ = "LGPLv2.1+" 
+__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)" class BuildOptions: """ Variables generated as part of the ./configure && make process. These affect how pacemaker was configured and where its various parts get installed. """ BASH_PATH = "@BASH_PATH@" """ Path to the bash shell """ _BUILD_DIR = "@abs_top_builddir@" """ Top-level build directory NOTE: This is not especially useful on installed systems, but is useful for running various programs from a source checkout """ CIB_DIR = "@CRM_CONFIG_DIR@" """ Where CIB files are stored """ COROSYNC_CONFIG_FILE = "@PCMK__COROSYNC_CONF@" """ Path to the corosync config file """ DAEMON_DIR = "@CRM_DAEMON_DIR@" """ Where Pacemaker daemons are installed """ DAEMON_USER = "@CRM_DAEMON_USER@" """ User to run Pacemaker daemons as """ LOCAL_STATE_DIR = "@localstatedir@" """ Where miscellaneous temporary state files are stored """ LOG_DIR = "@CRM_LOG_DIR@" """ Where Pacemaker log files are stored """ OCF_RA_INSTALL_DIR = "@OCF_RA_INSTALL_DIR@" """ Where resource agents are installed """ OCF_ROOT_DIR = "@OCF_ROOT_DIR@" """ Root directory for OCF resource agents and libraries """ RSC_TMP_DIR = "@CRM_RSCTMP_DIR@" """ Where resource agents should keep state files """ # pylint: disable=comparison-of-constants REMOTE_ENABLED = "@PC_NAME_GNUTLS@" != "" """ Was Pacemaker Remote support built? 
""" SBIN_DIR = "@sbindir@" """ Where administrative programs are installed """ SCHEMA_DIR = "@CRM_SCHEMA_DIRECTORY@" """ Where Relax-NG schema files are stored """ diff --git a/python/pacemaker/exitstatus.py b/python/pacemaker/exitstatus.py index e41d37ccf6..f74f9ec224 100644 --- a/python/pacemaker/exitstatus.py +++ b/python/pacemaker/exitstatus.py @@ -1,59 +1,59 @@ """ A module providing constants relating to why a process or function exited """ __all__ = ["ExitStatus"] __copyright__ = "Copyright 2023 the Pacemaker project contributors" -__license__ = "LGPLv2.1+" +__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)" from enum import IntEnum, unique # These values must be kept in sync with include/crm/common/results.h @unique class ExitStatus(IntEnum): """ Why did a function or process exit? These constants describe both success and failure conditions. """ OK = 0 ERROR = 1 INVALID_PARAM = 2 UNIMPLEMENT_FEATURE = 3 INSUFFICIENT_PRIV = 4 NOT_INSTALLED = 5 NOT_CONFIGURED = 6 NOT_RUNNING = 7 PROMOTED = 8 FAILED_PROMOTED = 9 USAGE = 64 DATAERR = 65 NOINPUT = 66 NOUSER = 67 NOHOST = 68 UNAVAILABLE = 69 SOFTWARE = 70 OSERR = 71 OSFILE = 72 CANTCREAT = 73 IOERR = 74 TEMPFAIL = 75 PROTOCOL = 76 NOPERM = 77 CONFIG = 78 FATAL = 100 PANIC = 101 DISCONNECT = 102 OLD = 103 DIGEST = 104 NOSUCH = 105 QUORUM = 106 UNSAFE = 107 EXISTS = 108 MULTIPLE = 109 EXPIRED = 110 NOT_YET_IN_EFFECT = 111 INDETERMINATE = 112 UNSATISFIED = 113 TIMEOUT = 124 DEGRADED = 190 DEGRADED_PROMOTED = 191 NONE = 193 MAX = 255