diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index 9b8da46a14..cc635314a6 100644
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,3425 +1,3425 @@
#!@PYTHON@
"""Regression tests for Pacemaker's command line tools."""

# pylint doesn't like the module name "cts-cli" which is an invalid complaint for this file
# but probably something we want to continue warning about elsewhere
# pylint: disable=invalid-name
# pacemaker imports need to come after we modify sys.path, which pylint will complain about.
# pylint: disable=wrong-import-position
# We know this is a very long file.
# pylint: disable=too-many-lines

__copyright__ = "Copyright 2024 the Pacemaker project contributors"
__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"

import argparse
from contextlib import contextmanager
from datetime import datetime, timedelta
import fileinput
from functools import partial
from gettext import ngettext
from multiprocessing import Pool, cpu_count
import os
import pathlib
import re
from shutil import copyfile
import signal
from string import Formatter
import subprocess
import sys
from tempfile import NamedTemporaryFile, TemporaryDirectory, mkstemp
import types

# These imports allow running from a source checkout after running `make`.
if os.path.exists("@abs_top_srcdir@/python"):
    sys.path.insert(0, "@abs_top_srcdir@/python")

# pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant
if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@":
    sys.path.insert(0, "@abs_top_builddir@/python")

from pacemaker._cts.errors import XmlValidationError
from pacemaker._cts.validate import validate
from pacemaker.buildoptions import BuildOptions
from pacemaker.exitstatus import ExitStatus

# Individual tool tests are split out, but can also be accessed as a group with "tools"
tools_tests = ["cibadmin", "crm_attribute", "crm_standby", "crm_resource",
               "crm_ticket", "crmadmin", "crm_shadow", "crm_verify"]

# The default list of tests to run, in the order they should be run
default_tests = ["access_render", "daemons", "dates", "error_codes"] + tools_tests + \
                ["crm_mon", "acls", "validity", "upgrade", "rules", "feature_set"]

other_tests = ["agents"]

# The directory containing this program
test_home = os.path.dirname(os.path.realpath(__file__))

# The name of the shadow CIB
SHADOW_NAME = "cts-cli"

# Arguments to pass to valgrind
VALGRIND_ARGS = ["-q", "--gen-suppressions=all", "--show-reachable=no",
                 "--leak-check=full", "--trace-children=no", "--time-stamp=yes",
                 "--num-callers=20",
                 "--suppressions=%s/valgrind-pcmk.suppressions" % test_home]


class PluralFormatter(Formatter):
    """
    Special string formatting class for selecting singular vs. plurals.
    Use like so:

        fmt = PluralFormatter()
        print(fmt.format("{0} {0:plural,test,tests} succeeded", n_tests))
    """

    def format_field(self, value, format_spec):
        """Convert a value to a formatted representation."""
        if format_spec.startswith("plural,"):
            eles = format_spec.split(',')

            if len(eles) == 2:
                singular = eles[1]
                plural = singular + "s"
            else:
                singular = eles[1]
                plural = eles[2]

            return ngettext(singular, plural, value)

        return super().format_field(value, format_spec)


def apply_substitutions(s, extra=None):
    """Apply text substitutions to an input string and return it."""
    substitutions = {
        "cts_cli_data": "%s/cli" % test_home,
        "shadow": SHADOW_NAME,
        "test_home": test_home,
    }

    if extra is not None:
        substitutions.update(extra)

    return s.format(**substitutions)


def cleanup_shadow_dir():
    """Remove any previously created shadow CIB directory."""
    subprocess.run(["crm_shadow", "--force", "--delete", SHADOW_NAME],
                   stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
                   check=True)


def copy_existing_cib(existing):
    """
    Generate a CIB by copying an existing one to a temporary location.

    This is suitable for use with the cib_gen= parameter to the TestGroup class.
    """
    (fp, new) = mkstemp(prefix="cts-cli.cib.xml.")
    os.close(fp)
    copyfile(apply_substitutions(existing), new)
    return new


def current_cib():
    """Return the complete current CIB."""
    with environ({"CIB_user": "root"}):
        return subprocess.check_output(["cibadmin", "-Q"], encoding="utf-8")


def make_test_group(desc, cmd, classes, **kwargs):
    """
    Create a TestGroup that replicates the same test for multiple classes.

    The given description, cmd, and kwargs will be passed as arguments to each
    Test subclass in the classes parameter.  The resulting objects will then be
    added to a TestGroup and returned.

    The main purpose of this function is to be able to run the same test for
    both text and XML formats without having to duplicate everything.  Thus,
    the cmd string may contain "{fmt}", which will have any --output-as= class
    variable substituted in.
    """
    tests = []

    for c in classes:
        obj = c(desc, apply_substitutions(cmd, extra={"fmt": c.format_args}), **kwargs)
        tests.append(obj)

    return TestGroup(tests)
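
# A minimal sketch (illustrative only, never executed by this file) of how
# make_test_group() above expands one description/command pair into both
# output formats; the command shown is one used later in this file:
#
#     group = make_test_group("Query ticket state",
#                             "crm_ticket -t ticketA -q {fmt}",
#                             [Test, ValidatingTest])
#     # Test runs:           crm_ticket -t ticketA -q
#     # ValidatingTest runs: crm_ticket -t ticketA -q --output-as=xml
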
subprocess.run(["cibadmin", "--delete", "--xml-text", ""], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) # The above command might or might not bump the CIB version, so reset it # to ensure future changes result in the same version for comparison. reset_shadow_cib_version() def reset_shadow_cib_version(): """Set various version numbers in a shadow CIB file back to 0.""" with fileinput.input(files=[shadow_path()], inplace=True) as f: for line in f: line = re.sub('epoch="[0-9]*"', 'epoch="1"', line) line = re.sub('num_updates="[0-9]*"', 'num_updates="0"', line) line = re.sub('admin_epoch="[0-9]*"', 'admin_epoch="0"', line) print(line, end='') def run_cmd_list(cmds): """ Run one or more shell commands. cmds can be: * A string * A Python function * A list of the above Raises subprocess.CalledProcessError on error. """ if cmds is None: return if isinstance(cmds, (str, types.FunctionType)): cmds = [cmds] for c in cmds: if isinstance(c, types.FunctionType): c() else: subprocess.run(apply_substitutions(c), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, universal_newlines=True, check=True) def sanitize_output(s): """ Replace content in the output expected to change between test runs. This is stuff like version numbers, timestamps, source line numbers, build options, system names and messages, etc. """ # A list of tuples of regular expressions and their replacements. replacements = [ (r'Created new pacemaker-.* configuration', r'Created new pacemaker configuration'), (r'Device not configured', r'No such device or address'), (r'^Entity: line [0-9]+: ', r''), (r'Last change: .*', r'Last change:'), (r'Last updated: .*', r'Last updated:'), (r'^Migration will take effect until: .*', r'Migration will take effect until:'), (r'(\* Possible values.*: .*)\(default: [^)]*\)', r'\1(default: )'), (r"""-X '.*'""", r"""-X '...'"""), (r' api-version="[^"]*"', r' api-version="X"'), (r'\(apply_upgrade@.*\.c:[0-9]+\)', r'apply_upgrade'), (r'(', r'\1/>'), (r'(', r'\1>'), (r'crm_feature_set="[^"]*" ', r''), (r'@crm_feature_set=[0-9.]+, ', r''), (r'\(crm_time_parse_duration@.*\.c:[0-9]+\)', r'crm_time_parse_duration'), (r'\(crm_time_parse_period@.*\.c:[0-9]+\)', r'crm_time_parse_period'), (r'\(crm_time_parse_sec@.*\.c:[0-9]+\)', r'crm_time_parse_sec'), (r' default="[^"]*"', r' default=""'), (r' end="[0-9][-+: 0-9]*Z*"', r' end=""'), (r'last_change time=".*"', r'last_change time=""'), (r'last_update time=".*"', r'last_update time=""'), (r' last-rc-change=[\'"][-+A-Za-z0-9: ]*[\'"],?', r''), (r'\(parse_date@.*\.c:[0-9]+\)', r'parse_date'), (r'\((pcmk__.*)@.*\.c:[0-9]+\)', r'\1'), (r'request=".*(crm_?[a-zA-Z0-9]+) ', r'request="\1 '), (r'request=".*iso8601', r'request="iso8601'), (r' start="[0-9][-+: 0-9]*Z*"', r' start=""'), (r'/tmp/cts-cli\.[^/]*/shadow.cts-cli', r'/tmp/cts-cli.shadow/shadow.cts-cli'), (r'^/tmp/cts-cli\.xmllint\.[^:]*:', r'/tmp/cts-cli.xmllint:'), (r'^/tmp/cts-cli\.xmllint\.[^ ]* fails to validate', r'/tmp/cts-cli.xmllint fails to validate'), (r'.*\((unpack_.*)@.*\.c:[0-9]+\)', r'\1'), (r'validate-with="[^"]*" ', r''), (r'(@validate-with=pacemaker-)[0-9.]+,', r'\1X,'), (r' version="[^"]*"', r' version=""'), (r'\(version .*\)', r'(version)'), - (r'--xml-file .*cts/cli/(.*)\.xml', r'--xml-file \1.xml'), + (r'--xml-file .*cli/(.*)\.xml', r'--xml-file \1.xml'), ] new_output = [] for line in s: # @TODO Add a way to suppress this message within cibadmin, and then drop # the handling here. 
if line.startswith("The supplied command can provide skewed result"): continue for (pattern, repl) in replacements: line = re.sub(pattern, repl, line) new_output.append(line) return new_output def shadow_path(): """Return the current shadow CIB path.""" p = subprocess.check_output(["crm_shadow", "--file"], encoding="utf-8") return p.strip() def write_cib(s): """ Generate a CIB by writing a string to a temporary location. This is suitable for use with the cib_gen= parameter to the TestGroup class. """ (fp, new) = mkstemp(prefix="cts-cli.cib.xml.") os.write(fp, s.encode()) os.close(fp) return new @contextmanager def environ(env): """ Run code in an environment modified with the provided dict. This context manager augments the current process environment with the provided dict, allowing code to be constructed like so: e = {"CIB_user": "xyx"} with environ(e): ... When the context manager exits, the previous environment will be restored. It is possible to remove an environment key (whether it was in the environment by default, or given with a nested call to this context) by passing None for the value. Additionally, this context manager accepts None for the env parameter, in which case nothing will be done. Finally, note that values in env will be passed to apply_substitutions before being set in the environment. """ if env is None: env = {} original_env = {} else: original_env = os.environ.copy() for k, v in env.items(): if v is None: os.environ.pop(k) else: os.environ[k] = apply_substitutions(v) try: yield finally: for k, v in original_env.items(): if v is None: os.environ.pop(k) else: os.environ[k] = v class StdinCmd: """ A class for defining a command that should be run later. subprocess.Popen (and its various helper functions) start running the command immediately, which doesn't work if we want to provide the command when a Test is created, but delay its execution until the environment is defined when the Test is run. This class allows us to do that. """ def __init__(self, cmd): """Create a new StdinCmd instance. Arguments: cmd -- The command string to run later. This string will be passed to apply_substitutions before being executed. """ self._cmd = cmd def run(self): """Run this command, returning a subprocess.Popen object.""" return subprocess.Popen(apply_substitutions(self._cmd), shell=True, encoding="utf-8", stdout=subprocess.PIPE) class Test: """A base class for defining a single command line regression test.""" format_args = "" def __init__(self, desc, cmd, expected_rc=ExitStatus.OK, update_cib=False, setup=None, teardown=None, stdin=None, env=None): """ Create a new Test instance. Arguments: desc -- A short human-readable description of this test cmd -- The command to run for this test, as a string. This string will be passed to apply_substitutions before being executed. Keyword arguments: expected_rc -- The expected return value of cmd update_cib -- If True, the resulting CIB will be printed after performing the test setup -- A shell command to be run in the same environment as cmd, immediately before the test. Valid types are: a string, a Python function, or a list of the above teardown -- Like setup, but runs immediately after the test stdin -- If not None, the text to feed to cmd as its stdin env -- If not None, a dict of values to be added to the test environment. This will be added when the test is run and will override anything given to the TestGroup. 
""" self.desc = desc self.cmd = cmd self.expected_rc = expected_rc self.update_cib = update_cib self._setup = setup self._teardown = teardown self._stdin = stdin if env is None: self._env = {} else: self._env = env self._output = None @property def output(self): """Return the test's detailed output.""" return self._output def _log_end_test(self, rc): """Log a message when a test ends.""" if isinstance(rc, ExitStatus): rc_str = str(rc) else: if rc < 0: rc = abs(rc) rc_str = signal.strsignal(rc) else: rc = ExitStatus(rc) rc_str = str(rc) self._output.append("=#=#=#= End test: %s - %s (%d) =#=#=#=" % (self.desc, rc_str, rc)) def _log_start_test(self): """Log a message when a test starts.""" self._output.append("=#=#=#= Begin test: %s =#=#=#=" % self.desc) def _log_test_failed(self, app, rc): """Log a message when a test fails.""" self._output.append("* Failed (rc=%.3d): %-23s - %s" % (rc, app, self.desc)) def _log_test_passed(self, app): """Log a message when a test passes.""" self._output.append("* Passed: %-21s - %s" % (app, self.desc)) # pylint: disable=unused-argument def _validate_hook(self, rc, _stdout, _stderr, valgrind=False): """Validate test output.""" self._log_end_test(rc) return rc def _run_setup_teardown(self, cmd, app): """ Run any setup or teardown command required by this test. On success (or if no command is present), return True. On failure, return False and log the stdout/stderr of the command for debugging. Arguments: cmd -- The setup/teardown command(s) to run app -- The base name of the test command, for logging purposes """ try: run_cmd_list(cmd) return True except subprocess.CalledProcessError as exn: rc = exn.returncode self._output.extend(exn.stderr.splitlines()) self._output.extend(exn.stdout.splitlines()) self._log_test_failed(app, rc) return False def run(self, group, env=None, valgrind=False): """ Run this test. Basic output is printed to stdout, while detailed output is available in the self.output property after this function has been run. Return True if the return code matches self.expected_rc, and False otherwise. Arguments: group -- The name of the group this test is a part of, for logging purposes Keyword arguments: env -- If not None, a dict of values to be added to the test environment """ self._output = [] cmd = apply_substitutions(self.cmd) app = cmd.split(" ")[0] test_id = "%s(%s)" % (app, group) print("* Running: %-31s - %s" % (test_id, self.desc)) self._log_start_test() # Add any environment variables specified in Test.__init__ if env is None: env = self._env else: env = env.update(self._env) with environ(env): # Run the setup hook, if any if not self._run_setup_teardown(self._setup, app): return False # Define basic arguments for all forms of running this test. kwargs = {"stdout": subprocess.PIPE, "stderr": subprocess.PIPE, "shell": True, "universal_newlines": True, "check": False} stdin_p = None # Handle the stdin= parameter. if isinstance(self._stdin, StdinCmd): stdin_p = self._stdin.run() kwargs["stdin"] = stdin_p.stdout elif isinstance(self._stdin, pathlib.Path): kwargs["input"] = self._stdin.read_text() else: kwargs["input"] = self._stdin if valgrind: cmd = "valgrind %s %s" % (" ".join(VALGRIND_ARGS), cmd) # Run the test command # We handle the "check" argument above in the kwargs dict. 
            # pylint: disable-msg=subprocess-run-check
            cmd_p = subprocess.run(cmd, **kwargs)
            rc = cmd_p.returncode

            if stdin_p is not None:
                stdin_p.stdout.close()

            self._output.extend(cmd_p.stderr.splitlines())
            self._output.extend(cmd_p.stdout.splitlines())

            # Run the teardown hook, if any
            if not self._run_setup_teardown(self._teardown, app):
                return False

            if self.update_cib:
                self._output.append("=#=#=#= Current cib after: %s =#=#=#=" % self.desc)
                self._output.extend(current_cib().splitlines())

            self._validate_hook(rc, cmd_p.stdout, cmd_p.stderr, valgrind=valgrind)

            if rc == self.expected_rc:
                self._log_test_passed(app)
                return True

            self._log_test_failed(app, rc)
            return False


class AclTest(Test):
    """
    A Test subclass specialized for running certain ACL tests.

    Differences from the Test class:

    * Does not use the stdin= parameter.
    * Runs the setup and teardown hooks in an environment that also sets
      CIB_user=root, CIB_shadow="", and CIB_file to a temporary file.
    * The setup hooks should construct a new CIB and write it to that
      temporary file.
    * Prints the input CIB before running the test.
    """

    def run(self, group, env=None, valgrind=False):
        """
        Run this test.

        Basic output is printed to stdout, while detailed output is available
        in the self.output property after this function has been run.  Return
        True if the return code matches self.expected_rc, and False otherwise.

        Arguments:
        group -- The name of the group this test is a part of, for logging
                 purposes

        Keyword arguments:
        env -- If not None, a dict of values to be added to the test
               environment
        """
        self._output = []

        cmd = apply_substitutions(self.cmd)
        app = cmd.split(" ")[0]

        test_id = "%s(%s)" % (app, group)
        print("* Running: %-31s - %s" % (test_id, self.desc))

        # Add any environment variables specified in Test.__init__
        # (dict.update returns None, so do not reassign env here)
        if env is None:
            env = self._env
        else:
            env.update(self._env)

        with environ(env):
            with NamedTemporaryFile(prefix="cts-cli.cib.") as fp:
                fp.write(current_cib().encode())
                fp.flush()

                # Run the setup hook, if any.  Typically, this is something
                # that modifies the existing CIB.  We need to do these
                # modifications in a different environment from what the test
                # will run in, since the test may not have the permissions
                # necessary to do the modifications.
                with environ({"CIB_user": "root", "CIB_file": fp.name, "CIB_shadow": None}):
                    if not self._run_setup_teardown(self._setup, app):
                        return False

                # At the least, print the CIB that will be the test input.
                self._output.extend(current_cib().splitlines())

                # Note: This is positioned differently from where it is in Test.run.
                self._log_start_test()

                # Define basic arguments for running this test.
                kwargs = {"stdout": subprocess.PIPE,
                          "stderr": subprocess.PIPE,
                          "shell": True,
                          "universal_newlines": True,
                          "check": False}

                # Read in the potentially modified CIB as the stdin for the test.
                fp.seek(0)
                kwargs["input"] = fp.read().decode(encoding="utf-8")

                if valgrind:
                    cmd = "valgrind %s %s" % (" ".join(VALGRIND_ARGS), cmd)

                # Run the test command
                # We handle the "check" argument above in the kwargs dict.
                # pylint: disable-msg=subprocess-run-check
                cmd_p = subprocess.run(cmd, **kwargs)
                rc = cmd_p.returncode

                self._output.extend(cmd_p.stderr.splitlines())
                self._output.extend(cmd_p.stdout.splitlines())

                # Run the teardown hook, if any
                with environ({"CIB_user": "root", "CIB_file": fp.name, "CIB_shadow": None}):
                    if not self._run_setup_teardown(self._teardown, app):
                        return False

                if self.update_cib:
                    self._output.append("=#=#=#= Current cib after: %s =#=#=#=" % self.desc)
                    self._output.extend(current_cib().splitlines())

                self._validate_hook(rc, cmd_p.stdout, cmd_p.stderr, valgrind=valgrind)

                if rc == self.expected_rc:
                    self._log_test_passed(app)
                    return True

                self._log_test_failed(app, rc)
                return False


class ValidatingTest(Test):
    """A Test subclass that additionally runs test results through xmllint."""

    format_args = "--output-as=xml"

    def __init__(self, desc, cmd, **kwargs):
        """Create a new ValidatingTest instance."""
        Test.__init__(self, desc + " (XML)", cmd, **kwargs)

    def _validate_hook(self, rc, stdout, stderr, valgrind=False):
        """Validate test output with xmllint."""
        # Do not validate if running under valgrind, even if told to do so.
        # Valgrind will output a lot more stuff that is not XML, so it
        # wouldn't validate anyway.
        if valgrind:
            return Test._validate_hook(self, rc, stdout, stderr, valgrind=valgrind)

        try:
            validate(stdout)

            # We only care about the return code from validation if there was
            # an error, which will be dealt with below.  Here, we want to log
            # the original return code from the test itself.
            self._log_end_test(rc)
            return 0
        except XmlValidationError as e:
            self._output.append("=#=#=#= End test: %s - Failed to validate (%d) =#=#=#="
                                % (self.desc, e.exit_code))
            self._output.extend(e.output.splitlines())
            return e.exit_code


class TestGroup:
    """A base class for a group of related tests."""

    def __init__(self, tests, cib_gen=None, env=None, setup=None, teardown=None):
        """
        Create a new TestGroup instance.

        Arguments:
        tests -- A list of Test instances

        Keyword arguments:
        cib_gen  -- If not None, a function that generates a CIB file and
                    returns the name of that CIB.  This will be added to the
                    test environment as CIB_file and used for all tests in
                    this group.  The file will then be deleted after all
                    tests have been run.
        env      -- If not None, a dict of values to be added to the test
                    environment
        setup    -- A command string, python function, or list of the
                    previous types to run immediately before the test.  This
                    will be run in the same environment as cmd.
        teardown -- Like setup, but runs immediately after the tests
        """
        self.tests = tests

        self._cib_gen = cib_gen
        self._env = env
        self._setup = setup
        self._teardown = teardown

        self._successes = None
        self._failures = None
        self._output = None

    @property
    def failures(self):
        """Return the number of member tests that failed."""
        return self._failures

    @property
    def output(self):
        """Return the test's detailed output."""
        return self._output

    @property
    def successes(self):
        """Return the number of member tests that succeeded."""
        return self._successes

    def _run_setup_teardown(self, cmd):
        """
        Run any setup or teardown command required by this test group.

        On success (or if no command is present), return True.  On failure,
        return False and log the stdout/stderr of the command for debugging.
        Arguments:
        cmd -- The setup/teardown command(s) to run
        """
        try:
            run_cmd_list(cmd)
            return True
        except subprocess.CalledProcessError as exn:
            self._output.extend(exn.stderr.splitlines())
            self._output.extend(exn.stdout.splitlines())
            return False

    def run(self, group, valgrind=False):
        """
        Run all Test instances that are a part of this regression test.

        Additionally, record their stdout and stderr in the self.output
        property and the total number of tests that passed and failed.

        Arguments:
        group -- The name of the group this test is a part of, for logging
                 purposes
        """
        self._failures = 0
        self._successes = 0
        self._output = []

        cib_file = None

        with environ(self._env):
            # If we were given a way to generate a CIB, do that now and add
            # it to the environment.
            if self._cib_gen is not None:
                cib_file = self._cib_gen()
                os.environ.update({"CIB_file": cib_file})

            # Run the setup hook, if any
            if not self._run_setup_teardown(self._setup):
                return False

            # Run the tests
            for t in self.tests:
                rc = t.run(group, valgrind=valgrind)

                if isinstance(t, TestGroup):
                    self._successes += t.successes
                    self._failures += t.failures
                else:
                    if rc:
                        self._successes += 1
                    else:
                        self._failures += 1

                self._output.extend(t.output)

            if cib_file is not None:
                os.environ.pop("CIB_file")
                os.unlink(cib_file)

            # Run the teardown hook, if any
            if not self._run_setup_teardown(self._teardown):
                return False

        return True


class ShadowTestGroup(TestGroup):
    """A group of related tests that require a shadow CIB."""

    def __init__(self, tests, **kwargs):
        """
        Create a new ShadowTestGroup instance.

        Arguments:
        tests -- A list of Test instances

        Keyword arguments:
        create        -- If True, create a shadow CIB file (see
                         create_empty).  Otherwise, just create a temp
                         directory and set environment variables.
        create_empty  -- If True, the shadow CIB will be empty.  Otherwise,
                         the shadow CIB will be a copy of the currently
                         active cluster configuration.
        validate_with -- If not None, the schema version to validate the CIB
                         against
        """
        self._create = kwargs.pop("create", True)
        self._create_empty = kwargs.pop("create_empty", True)
        self._validate_with = kwargs.pop("validate_with", None)

        TestGroup.__init__(self, tests, **kwargs)

    def run(self, group, valgrind=False):
        """
        Run all Test instances that are a part of this regression test.

        Additionally, record their stdout and stderr in the self.output
        property and the total number of tests that passed and failed.

        Arguments:
        group -- The name of the group this test is a part of, for logging
                 purposes
        """
        with TemporaryDirectory(prefix="cts-cli.shadow.") as shadow_dir:
            if self._create:
                create_shadow_cib(shadow_dir, create_empty=self._create_empty,
                                  validate_with=self._validate_with,
                                  valgrind=valgrind)
            else:
                os.environ["CIB_shadow_dir"] = shadow_dir
                os.environ["CIB_shadow"] = SHADOW_NAME

            rc = TestGroup.run(self, group, valgrind=valgrind)

            if self._create:
                cleanup_shadow_dir()

            os.environ.pop("CIB_shadow_dir")
            os.environ.pop("CIB_shadow")
            return rc


class RegressionTest:
    """A base class for testing a single command line tool."""

    def __init__(self):
        """Create a new RegressionTest instance."""
        self._identical = None
        self._successes = None
        self._failures = None
        self._tempfile = None
        self._output = None

    @property
    def failures(self):
        """Return the number of member tests that failed."""
        return self._failures

    @property
    def identical(self):
        """Return whether the expected output matches the actual output."""
        return self._identical

    @property
    def name(self):
        """
        Return the name of this regression test.
        This should be a unique, very short, single word name without any
        special characters.  It must match the name of some word in the
        default_tests list because it may be given with the -r option on the
        command line to select only certain tests to run.

        All subclasses must define this property.
        """
        raise NotImplementedError

    @property
    def results_file(self):
        """Return the location where the regression test results are stored."""
        return self._tempfile

    @property
    def successes(self):
        """Return the number of member tests that succeeded."""
        return self._successes

    @property
    def summary(self):
        """Return a list of all Passed/Failed lines for tests in this regression test."""
        retval = []

        for line in self._output:
            if line.startswith("* Failed") or line.startswith("* Passed"):
                retval.append(line)

        return retval

    @property
    def tests(self):
        """A list of Test and TestGroup instances to be run as part of this regression test."""
        return []

    def cleanup(self):
        """Remove the temp file where test output is stored."""
        os.remove(self._tempfile)
        self._tempfile = None

    def diff(self, verbose=False):
        """
        Compare the results of this regression test to the expected results.

        Arguments:
        verbose -- If True, the diff will be written to stdout
        """
        args = ["diff", "-wu",
                "%s/cli/regression.%s.exp" % (test_home, self.name),
                self.results_file]

        try:
            if verbose:
                subprocess.run(args, check=True)
            else:
                subprocess.run(args, stdout=subprocess.DEVNULL,
                               stderr=subprocess.DEVNULL, check=True)

            self._identical = True
        except subprocess.CalledProcessError:
            self._identical = False

    def process_results(self, verbose):
        """If actual output differs from expected output, print the actual output."""
        if self.identical:
            self.cleanup()
            return

        print(" %s" % self.results_file)

        if verbose:
            print("======================================================")

            with open(self.results_file, encoding="utf-8") as f:
                print(f.read())

            print("======================================================")

    def run(self, valgrind=False):
        """
        Run all Test and TestGroup instances that are a part of this regression test.

        Additionally, record their stdout and stderr in the self.output
        property and the total number of tests that passed and failed.
        """
        self._failures = 0
        self._successes = 0
        self._output = []

        for t in self.tests:
            rc = t.run(self.name, valgrind=valgrind)

            if isinstance(t, TestGroup):
                self._successes += t.successes
                self._failures += t.failures
            else:
                if rc:
                    self._successes += 1
                else:
                    self._failures += 1

            self._output.extend(t.output)

        self._output = sanitize_output(self._output)

    def write(self):
        """
        Write test results to a temporary file and set self.results_file to its location.

        If self.run() has not yet been called, or there is otherwise no
        output, self.results_file will be None.
        """
        if not self._output:
            self._tempfile = None
            return

        s = "\n".join(self._output).encode()
        s += b"\n"

        (fp, self._tempfile) = mkstemp(prefix="cts-cli.%s." % self.name)
        os.write(fp, s)
        os.close(fp)
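
# A minimal sketch (illustrative only, never executed by this file) of the
# shape a RegressionTest subclass takes; "example" and the test below are
# hypothetical:
#
#     class ExampleRegressionTest(RegressionTest):
#         @property
#         def name(self):
#             return "example"
#
#         @property
#         def tests(self):
#             return [Test("Check something", "crm_ticket --help")]
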
class AccessRenderRegressionTest(RegressionTest):
    """A class for testing rendering of ACLs."""

    @property
    def name(self):
        """Return the name of this regression test."""
        return "access_render"

    @property
    def tests(self):
        """A list of Test instances to be run as part of this regression test."""
        # Create a test CIB that has ACL roles
        acl_cib = """ """

        basic_tests = [
            Test("Configure some ACLs", "cibadmin -M -o acls -p",
                 update_cib=True, stdin=acl_cib),
            Test("Enable ACLs", "crm_attribute -n enable-acl -v true",
                 update_cib=True),
            # Run cibadmin --show-access on the test CIB as an ACL-restricted user
            Test("An instance of ACLs render (into color)",
                 "cibadmin --force --show-access=color -Q --user tony"),
            Test("An instance of ACLs render (into namespacing)",
                 "cibadmin --force --show-access=namespace -Q --user tony"),
            Test("An instance of ACLs render (into text)",
                 "cibadmin --force --show-access=text -Q --user tony"),
        ]

        return [
            ShadowTestGroup(basic_tests),
        ]


class DaemonsRegressionTest(RegressionTest):
    """A class for testing command line options of pacemaker daemons."""

    @property
    def name(self):
        """Return the name of this regression test."""
        return "daemons"

    @property
    def tests(self):
        """A list of Test instances to be run as part of this regression test."""
        return [
            Test("Get CIB manager metadata", "pacemaker-based metadata"),
            Test("Get controller metadata", "pacemaker-controld metadata"),
            Test("Get fencer metadata", "pacemaker-fenced metadata"),
            Test("Get scheduler metadata", "pacemaker-schedulerd metadata"),
        ]
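
# For reference (illustrative only): a well-formed period for the date tests
# below pairs a start with an end or a duration, e.g.
# "2019-01-01 00:00:00Z/P1M"; each invalid_periods entry in the next class
# breaks exactly one such rule.
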
class DatesRegressionTest(RegressionTest):
    """A class for testing handling of ISO8601 dates."""

    @property
    def name(self):
        """Return the name of this regression test."""
        return "dates"

    @property
    def tests(self):
        """A list of Test instances to be run as part of this regression test."""
        invalid_periods = [
            "",
            "2019-01-01 00:00:00Z",             # Start with no end
            "2019-01-01 00:00:00Z/",            # Start with only a trailing slash
            "PT2S/P1M",                         # Two durations
            "2019-13-01 00:00:00Z/P1M",         # Out-of-range month
            "20191077T15/P1M",                  # Out-of-range day
            "2019-10-01T25:00:00Z/P1M",         # Out-of-range hour
            "2019-10-01T24:00:01Z/P1M",         # Hour 24 with anything but :00:00
            "PT5H/20191001T007000Z",            # Out-of-range minute
            "2019-10-01 00:00:80Z/P1M",         # Out-of-range second
            "2019-10-01 00:00:10 +25:00/P1M",   # Out-of-range offset hour
            "20191001T000010 -00:61/P1M",       # Out-of-range offset minute
            "P1Y/2019-02-29 00:00:00Z",         # Feb. 29 in non-leap-year
            "2019-01-01 00:00:00Z/P",           # Duration with no values
            "P1Z/2019-02-20 00:00:00Z",         # Invalid duration unit
            "P1YM/2019-02-20 00:00:00Z",        # No number for duration unit
        ]

        # Ensure invalid period specifications are rejected
        invalid_period_tests = []
        for p in invalid_periods:
            invalid_period_tests.append(Test("Invalid period - [%s]" % p,
                                             "iso8601 -p '%s'" % p,
                                             expected_rc=ExitStatus.INVALID_PARAM))

        year_tests = []
        for y in ["06", "07", "08", "09", "10", "11", "12", "13", "14", "15",
                  "16", "17", "18", "40"]:
            year_tests.extend([
                Test("20%s-W01-7" % y,
                     "iso8601 -d '20%s-W01-7 00Z'" % y),
                Test("20%s-W01-7 - round-trip" % y,
                     "iso8601 -d '20%s-W01-7 00Z' -W -E '20%s-W01-7 00:00:00Z'" % (y, y)),
                Test("20%s-W01-1" % y,
                     "iso8601 -d '20%s-W01-1 00Z'" % y),
                Test("20%s-W01-1 - round-trip" % y,
                     "iso8601 -d '20%s-W01-1 00Z' -W -E '20%s-W01-1 00:00:00Z'" % (y, y))
            ])

        return invalid_period_tests + [
            make_test_group("'2005-040/2005-043' period",
                            "iso8601 {fmt} -p '2005-040/2005-043'",
                            [Test, ValidatingTest]),
            Test("2014-01-01 00:30:00 - 1 Hour",
                 "iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"),
            Test("Valid date - Feb 29 in leap year",
                 "iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"),
            Test("Valid date - using 'T' and offset",
                 "iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"),
            Test("24:00:00 equivalent to 00:00:00 of next day",
                 "iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"),
        ] + year_tests + [
            make_test_group("2009-W53-07",
                            "iso8601 {fmt} -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'",
                            [Test, ValidatingTest]),
            Test("epoch + 2 Years 5 Months 6 Minutes",
                 "iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"),
            Test("2009-01-31 + 1 Month",
                 "iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"),
            Test("2009-01-31 + 2 Months",
                 "iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"),
            Test("2009-01-31 + 3 Months",
                 "iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"),
            make_test_group("2009-03-31 - 1 Month",
                            "iso8601 {fmt} -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'",
                            [Test, ValidatingTest]),
            make_test_group("2038-01-01 + 3 Months",
                            "iso8601 {fmt} -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'",
                            [Test, ValidatingTest]),
        ]


class ErrorCodeRegressionTest(RegressionTest):
    """A class for testing error code reporting."""

    @property
    def name(self):
        """Return the name of this regression test."""
        return "error_codes"

    @property
    def tests(self):
        """A list of Test instances to be run as part of this regression test."""
        # Legacy return codes
        #
        # Don't test unknown legacy code.  FreeBSD includes a colon in
        # strerror(), while other distros do not.
        legacy_tests = [
            make_test_group("Get legacy return code",
                            "crm_error {fmt} 201",
                            [Test, ValidatingTest]),
            make_test_group("Get legacy return code (with name)",
                            "crm_error -n {fmt} 201",
                            [Test, ValidatingTest]),
            make_test_group("Get multiple legacy return codes",
                            "crm_error {fmt} 201 202",
                            [Test, ValidatingTest]),
            make_test_group("Get multiple legacy return codes (with names)",
                            "crm_error -n {fmt} 201 202",
                            [Test, ValidatingTest]),
            # We can only rely on our custom codes, so we'll spot-check codes 201-209
            Test("List legacy return codes (spot check)",
                 "crm_error -l | grep 20[1-9]"),
            ValidatingTest("List legacy return codes (spot check)",
                           "crm_error -l --output-as=xml | grep -Ev '&1"),
            Test("Require --force for CIB erasure",
                 "cibadmin -E", expected_rc=ExitStatus.UNSAFE, update_cib=True),
            Test("Allow CIB erasure with --force",
                 "cibadmin -E --force"),
            # Verify the output after erasure
            Test("Query CIB", "cibadmin -Q",
                 setup=delete_shadow_resource_defaults, update_cib=True),
        ]

        # Add some stuff to the empty CIB so we know that erasing it did something.
        basic_tests_setup = [
            """cibadmin -C -o nodes --xml-text ''""",
            """cibadmin -C -o crm_config --xml-text ''""",
            """cibadmin -C -o resources --xml-text ''"""
        ]

        return [
            ShadowTestGroup(basic_tests, setup=basic_tests_setup),
        ]


class CrmAttributeRegressionTest(RegressionTest):
    """A class for testing crm_attribute."""

    @property
    def name(self):
        """Return the name of this regression test."""
        return "crm_attribute"

    @property
    def tests(self):
        """A list of Test instances to be run as part of this regression test."""
        options_tests = [
            make_test_group("List all available options (invalid type)",
                            "crm_attribute --list-options=asdf {fmt}",
                            [Test, ValidatingTest], expected_rc=ExitStatus.USAGE),
            make_test_group("List non-advanced cluster options",
                            "crm_attribute --list-options=cluster {fmt}",
                            [Test, ValidatingTest]),
            make_test_group("List all available cluster options",
                            "crm_attribute --list-options=cluster --all {fmt}",
                            [Test, ValidatingTest]),
            Test("Return usage error if both -p and OCF_RESOURCE_INSTANCE are empty strings",
                 "crm_attribute -N cluster01 -p '' -G", expected_rc=ExitStatus.USAGE),
        ]

        value_update_tests = [
            Test("Query the value of an attribute that does not exist",
                 "crm_attribute -n ABCD --query --quiet",
                 expected_rc=ExitStatus.NOSUCH),
            Test("Configure something before erasing",
                 "crm_attribute -n test_attr -v 5", update_cib=True),
            Test("Test '++' XML attribute update syntax",
                 """cibadmin -M --score --xml-text=''""", update_cib=True),
            Test("Test '+=' XML attribute update syntax",
                 """cibadmin -M --score --xml-text=''""", update_cib=True),
            make_test_group("Test '++' nvpair value update syntax",
                            "crm_attribute -n test_attr -v 'value++' --score {fmt}",
                            [Test, ValidatingTest], update_cib=True),
            make_test_group("Test '+=' nvpair value update syntax",
                            "crm_attribute -n test_attr -v 'value+=2' --score {fmt}",
                            [Test, ValidatingTest], update_cib=True),
            Test("Test '++' XML attribute update syntax (--score not set)",
                 """cibadmin -M --xml-text=''""", update_cib=True),
            Test("Test '+=' XML attribute update syntax (--score not set)",
                 """cibadmin -M --xml-text=''""", update_cib=True),
            make_test_group("Test '++' nvpair value update syntax (--score not set)",
                            "crm_attribute -n test_attr -v 'value++' {fmt}",
                            [Test, ValidatingTest], update_cib=True),
            make_test_group("Test '+=' nvpair value update syntax (--score not set)",
                            "crm_attribute -n test_attr -v 'value+=2' {fmt}",
                            [Test, ValidatingTest], update_cib=True),
        ]

        query_set_tests = [
            Test("Set cluster option",
"crm_attribute -n cluster-delay -v 60s", update_cib=True), Test("Query new cluster option", "cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay"), Test("Set no-quorum policy", "crm_attribute -n no-quorum-policy -v ignore", update_cib=True), Test("Delete nvpair", """cibadmin -D -o crm_config --xml-text ''""", update_cib=True), Test("Create operation should fail", """cibadmin -C -o crm_config --xml-text ''""", expected_rc=ExitStatus.EXISTS, update_cib=True), Test("Modify cluster options section", """cibadmin -M -o crm_config --xml-text ''""", update_cib=True), Test("Query updated cluster option", "cibadmin -Q -o crm_config | grep cib-bootstrap-options-cluster-delay", update_cib=True), Test("Set duplicate cluster option", "crm_attribute -n cluster-delay -v 40s -s duplicate", update_cib=True), Test("Setting multiply defined cluster option should fail", "crm_attribute -n cluster-delay -v 30s", expected_rc=ExitStatus.MULTIPLE, update_cib=True), Test("Set cluster option with -s", "crm_attribute -n cluster-delay -v 30s -s duplicate", update_cib=True), Test("Delete cluster option with -i", "crm_attribute -n cluster-delay -D -i cib-bootstrap-options-cluster-delay", update_cib=True), Test("Create node1 and bring it online", "crm_simulate --live-check --in-place --node-up=node1", update_cib=True), Test("Create node attribute", "crm_attribute -n ram -v 1024M -N node1 -t nodes", update_cib=True), Test("Query new node attribute", "cibadmin -Q -o nodes | grep node1-ram", update_cib=True), Test("Create second node attribute", "crm_attribute -n rattr -v XYZ -N node1 -t nodes", update_cib=True), Test("Query node attributes by pattern", "crm_attribute -t nodes -P 'ra.*' -N node1 --query"), Test("Update node attributes by pattern", "crm_attribute -t nodes -P 'rat.*' -N node1 -v 10", update_cib=True), Test("Delete node attributes by pattern", "crm_attribute -t nodes -P 'rat.*' -N node1 -D", update_cib=True), Test("Set a transient (fail-count) node attribute", "crm_attribute -n fail-count-foo -v 3 -N node1 -t status", update_cib=True), Test("Query a fail count", "crm_failcount --query -r foo -N node1", update_cib=True), Test("Show node attributes with crm_simulate", "crm_simulate --live-check --show-attrs"), Test("Set a second transient node attribute", "crm_attribute -n fail-count-bar -v 5 -N node1 -t status", update_cib=True), Test("Query transient node attributes by pattern", "crm_attribute -t status -P fail-count -N node1 --query"), Test("Update transient node attributes by pattern", "crm_attribute -t status -P fail-count -N node1 -v 10", update_cib=True), Test("Delete transient node attributes by pattern", "crm_attribute -t status -P fail-count -N node1 -D", update_cib=True), Test("crm_attribute given invalid delete usage", "crm_attribute -t nodes -N node1 -D", expected_rc=ExitStatus.USAGE), Test("Set a utilization node attribute", "crm_attribute -n cpu -v 1 -N node1 -z", update_cib=True), Test("Query utilization node attribute", "crm_attribute --query -n cpu -N node1 -z"), # This update will fail because it has version numbers Test("Replace operation should fail", """cibadmin -Q | sed -e 's/epoch="[^"]*"/epoch="1"/' | cibadmin -R -p""", expected_rc=ExitStatus.OLD), ] promotable_tests = [ make_test_group("Query a nonexistent promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Delete a nonexistent promotable score attribute", "crm_attribute -N cluster01 -p promotable-rsc -D {fmt}", 
                            [Test, ValidatingTest]),
            make_test_group("Query after deleting a nonexistent promotable score attribute",
                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
            make_test_group("Update a nonexistent promotable score attribute",
                            "crm_attribute -N cluster01 -p promotable-rsc -v 1 {fmt}",
                            [Test, ValidatingTest]),
            make_test_group("Query after updating a nonexistent promotable score attribute",
                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
                            [Test, ValidatingTest]),
            make_test_group("Update an existing promotable score attribute",
                            "crm_attribute -N cluster01 -p promotable-rsc -v 5 {fmt}",
                            [Test, ValidatingTest]),
            make_test_group("Query after updating an existing promotable score attribute",
                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
                            [Test, ValidatingTest]),
            make_test_group("Delete an existing promotable score attribute",
                            "crm_attribute -N cluster01 -p promotable-rsc -D {fmt}",
                            [Test, ValidatingTest]),
            make_test_group("Query after deleting an existing promotable score attribute",
                            "crm_attribute -N cluster01 -p promotable-rsc -G {fmt}",
                            [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH),
        ]

        # Test for an issue with legacy command line parsing when the
        # resource is specified in the environment (CLBZ#5509)
        ocf_rsc_instance_tests = [
            make_test_group("Update a promotable score attribute to -INFINITY",
                            "crm_attribute -N cluster01 -p -v -INFINITY {fmt}",
                            [Test, ValidatingTest],
                            env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
            make_test_group("Query after updating a promotable score attribute to -INFINITY",
                            "crm_attribute -N cluster01 -p -G {fmt}",
                            [Test, ValidatingTest],
                            env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
            Test("Try OCF_RESOURCE_INSTANCE if -p is specified with an empty string",
                 "crm_attribute -N cluster01 -p '' -G",
                 env={"OCF_RESOURCE_INSTANCE": "promotable-rsc"}),
        ]
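
        # A minimal sketch (illustrative only, never executed) of the
        # CLBZ#5509 fallback the group above exercises: with -p given no or
        # empty value, crm_attribute takes the resource name from
        # OCF_RESOURCE_INSTANCE.  Shell equivalent:
        #
        #     OCF_RESOURCE_INSTANCE=promotable-rsc crm_attribute -N cluster01 -p '' -G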
--list-options=primitive {fmt}", [Test, ValidatingTest]), make_test_group("List all available primitive meta-attributes", "crm_resource --list-options=primitive --all {fmt}", [Test, ValidatingTest]), make_test_group("List non-advanced fencing parameters", "crm_resource --list-options=fencing {fmt}", [Test, ValidatingTest]), make_test_group("List all available fencing parameters", "crm_resource --list-options=fencing --all {fmt}", [Test, ValidatingTest]), ] basic_tests = [ Test("Create a resource", """cibadmin -C -o resources --xml-text ''""", update_cib=True), Test("crm_resource given both -r and resource config", "crm_resource -r xyz --class ocf --provider pacemaker --agent Dummy", expected_rc=ExitStatus.USAGE), Test("crm_resource given resource config with invalid action", "crm_resource --class ocf --provider pacemaker --agent Dummy -D", expected_rc=ExitStatus.USAGE), Test("Create a resource meta attribute", "crm_resource -r dummy --meta -p is-managed -v false", update_cib=True), Test("Query a resource meta attribute", "crm_resource -r dummy --meta -g is-managed", update_cib=True), Test("Remove a resource meta attribute", "crm_resource -r dummy --meta -d is-managed", update_cib=True), ValidatingTest("Create another resource meta attribute", "crm_resource -r dummy --meta -p target-role -v Stopped --output-as=xml"), ValidatingTest("Show why a resource is not running", "crm_resource -Y -r dummy --output-as=xml"), ValidatingTest("Remove another resource meta attribute", "crm_resource -r dummy --meta -d target-role --output-as=xml"), ValidatingTest("Get a non-existent attribute from a resource element", "crm_resource -r dummy --get-parameter nonexistent --element --output-as=xml"), make_test_group("Get a non-existent attribute from a resource element", "crm_resource -r dummy --get-parameter nonexistent --element {fmt}", [Test, ValidatingTest], update_cib=True), Test("Get an existent attribute from a resource element", "crm_resource -r dummy --get-parameter class --element", update_cib=True), ValidatingTest("Set a non-existent attribute for a resource element", "crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml", update_cib=True), ValidatingTest("Set an existent attribute for a resource element", "crm_resource -r dummy --set-parameter=description -v test_description --element --output-as=xml", update_cib=True), ValidatingTest("Delete an existent attribute for a resource element", "crm_resource -r dummy -d description --element --output-as=xml", update_cib=True), ValidatingTest("Delete a non-existent attribute for a resource element", "crm_resource -r dummy -d description --element --output-as=xml", update_cib=True), Test("Set a non-existent attribute for a resource element", "crm_resource -r dummy --set-parameter=description -v test_description --element", update_cib=True), Test("Set an existent attribute for a resource element", "crm_resource -r dummy --set-parameter=description -v test_description --element", update_cib=True), Test("Delete an existent attribute for a resource element", "crm_resource -r dummy -d description --element", update_cib=True), Test("Delete a non-existent attribute for a resource element", "crm_resource -r dummy -d description --element", update_cib=True), Test("Create a resource attribute", "crm_resource -r dummy -p delay -v 10s", update_cib=True), make_test_group("List the configured resources", "crm_resource -L {fmt}", [Test, ValidatingTest], update_cib=True), Test("Implicitly list the configured resources", 
"crm_resource"), Test("List IDs of instantiated resources", "crm_resource -l"), make_test_group("Show XML configuration of resource", "crm_resource -q -r dummy {fmt}", [Test, ValidatingTest]), Test("Require a destination when migrating a resource that is stopped", "crm_resource -r dummy -M", update_cib=True, expected_rc=ExitStatus.USAGE), Test("Don't support migration to non-existent locations", "crm_resource -r dummy -M -N i.do.not.exist", update_cib=True, expected_rc=ExitStatus.NOSUCH), Test("Create a fencing resource", """cibadmin -C -o resources --xml-text ''""", update_cib=True), Test("Bring resources online", "crm_simulate --live-check --in-place", update_cib=True), Test("Try to move a resource to its existing location", "crm_resource -r dummy --move --node node1", update_cib=True, expected_rc=ExitStatus.EXISTS), Test("Try to move a resource that doesn't exist", "crm_resource -r xyz --move --node node1", expected_rc=ExitStatus.NOSUCH), Test("Move a resource from its existing location", "crm_resource -r dummy --move", update_cib=True), Test("Clear out constraints generated by --move", "crm_resource -r dummy --clear", update_cib=True), Test("Ban a resource on unknown node", "crm_resource -r dummy -B -N host1", expected_rc=ExitStatus.NOSUCH), Test("Create two more nodes and bring them online", "crm_simulate --live-check --in-place --node-up=node2 --node-up=node3", update_cib=True), Test("Ban dummy from node1", "crm_resource -r dummy -B -N node1", update_cib=True), Test("Show where a resource is running", "crm_resource -r dummy -W"), Test("Show constraints on a resource", "crm_resource -a -r dummy"), ValidatingTest("Ban dummy from node2", "crm_resource -r dummy -B -N node2 --output-as=xml", update_cib=True), Test("Relocate resources due to ban", "crm_simulate --live-check --in-place -S", update_cib=True), ValidatingTest("Move dummy to node1", "crm_resource -r dummy -M -N node1 --output-as=xml", update_cib=True), Test("Clear implicit constraints for dummy on node2", "crm_resource -r dummy -U -N node2", update_cib=True), Test("Drop the status section", "cibadmin -R -o status --xml-text ''"), Test("Create a clone", """cibadmin -C -o resources --xml-text ''"""), Test("Create a resource meta attribute", "crm_resource -r test-primitive --meta -p is-managed -v false", update_cib=True), Test("Create a resource meta attribute in the primitive", "crm_resource -r test-primitive --meta -p is-managed -v false --force", update_cib=True), Test("Update resource meta attribute with duplicates", "crm_resource -r test-clone --meta -p is-managed -v true", update_cib=True), Test("Update resource meta attribute with duplicates (force clone)", "crm_resource -r test-clone --meta -p is-managed -v true --force", update_cib=True), Test("Update child resource meta attribute with duplicates", "crm_resource -r test-primitive --meta -p is-managed -v false", update_cib=True), Test("Delete resource meta attribute with duplicates", "crm_resource -r test-clone --meta -d is-managed", update_cib=True), Test("Delete resource meta attribute in parent", "crm_resource -r test-primitive --meta -d is-managed", update_cib=True), Test("Create a resource meta attribute in the primitive", "crm_resource -r test-primitive --meta -p is-managed -v false --force", update_cib=True), Test("Update existing resource meta attribute", "crm_resource -r test-clone --meta -p is-managed -v true", update_cib=True), Test("Create a resource meta attribute in the parent", "crm_resource -r test-clone --meta -p is-managed -v true --force", 
                 update_cib=True),
            Test("Delete resource parent meta attribute (force)",
                 "crm_resource -r test-clone --meta -d is-managed --force",
                 update_cib=True),
            # Restore meta-attributes before running this test
            Test("Delete resource child meta attribute",
                 "crm_resource -r test-primitive --meta -d is-managed",
                 setup=["crm_resource -r test-primitive --meta -p is-managed -v true --force",
                        "crm_resource -r test-clone --meta -p is-managed -v true --force"],
                 update_cib=True),
            Test("Create the dummy-group resource group",
                 """cibadmin -C -o resources --xml-text '"""
                 """"""
                 """"""
                 """'""",
                 update_cib=True),
            Test("Create a resource meta attribute in dummy1",
                 "crm_resource -r dummy1 --meta -p is-managed -v true",
                 update_cib=True),
            Test("Create a resource meta attribute in dummy-group",
                 "crm_resource -r dummy-group --meta -p is-managed -v false",
                 update_cib=True),
            Test("Delete the dummy-group resource group",
                 "cibadmin -D -o resources --xml-text ''", update_cib=True),
            Test("Specify a lifetime when moving a resource",
                 "crm_resource -r dummy --move --node node2 --lifetime=PT1H",
                 update_cib=True),
            Test("Try to move a resource previously moved with a lifetime",
                 "crm_resource -r dummy --move --node node1", update_cib=True),
            Test("Ban dummy from node1 for a short time",
                 "crm_resource -r dummy -B -N node1 --lifetime=PT1S",
                 update_cib=True),
            Test("Remove expired constraints",
                 "sleep 2 && crm_resource --clear --expired", update_cib=True),
            # Clear has already been tested elsewhere, but we need to get rid
            # of the constraints so testing delete works.  It won't delete if
            # there's still a reference to the resource somewhere.
            Test("Clear all implicit constraints for dummy",
                 "crm_resource -r dummy -U", update_cib=True),
            Test("Set a node health strategy",
                 "crm_attribute -n node-health-strategy -v migrate-on-red",
                 update_cib=True),
            Test("Set a node health attribute",
                 "crm_attribute -N node3 -n '#health-cts-cli' -v red",
                 update_cib=True),
            ValidatingTest("Show why a resource is not running on an unhealthy node",
                           "crm_resource -N node3 -Y -r dummy --output-as=xml"),
            Test("Delete a resource",
                 "crm_resource -D -r dummy -t primitive", update_cib=True),
        ]

        constraint_tests = []
        for rsc in ["prim1", "prim2", "prim3", "prim4", "prim5", "prim6",
                    "prim7", "prim8", "prim9", "prim10", "prim11", "prim12",
                    "prim13", "group", "clone"]:
            constraint_tests.extend([
                make_test_group("Check locations and constraints for %s" % rsc,
                                "crm_resource -a -r %s {fmt}" % rsc,
                                [Test, ValidatingTest]),
                make_test_group("Recursively check locations and constraints for %s" % rsc,
                                "crm_resource -A -r %s {fmt}" % rsc,
                                [Test, ValidatingTest]),
            ])

        constraint_tests.extend([
            Test("Check locations and constraints for group member (referring to group)",
                 "crm_resource -a -r gr2"),
            Test("Check locations and constraints for group member (without referring to group)",
                 "crm_resource -a -r gr2 --force"),
        ])

        colocation_tests = [
            ValidatingTest("Set a meta-attribute for primitive and resources colocated with it",
                           "crm_resource -r prim5 --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml"),
            Test("Set a meta-attribute for group and resource colocated with it",
                 "crm_resource -r group --meta --set-parameter=target-role -v Stopped --recursive"),
            ValidatingTest("Set a meta-attribute for clone and resource colocated with it",
                           "crm_resource -r clone --meta --set-parameter=target-role -v Stopped --recursive --output-as=xml"),
        ]

        digest_tests = [
            ValidatingTest("Show resource digests",
                           "crm_resource --digests -r rsc1 -N node1 --output-as=xml"),
digests with overrides", "crm_resource --digests -r rsc1 -N node1 --output-as=xml CRM_meta_interval=10000 CRM_meta_timeout=20000"), make_test_group("Show resource operations", "crm_resource --list-operations {fmt}", [Test, ValidatingTest]), ] basic2_tests = [ make_test_group("List a promotable clone resource", "crm_resource --locate -r promotable-clone {fmt}", [Test, ValidatingTest]), make_test_group("List the primitive of a promotable clone resource", "crm_resource --locate -r promotable-rsc {fmt}", [Test, ValidatingTest]), make_test_group("List a single instance of a promotable clone resource", "crm_resource --locate -r promotable-rsc:0 {fmt}", [Test, ValidatingTest]), make_test_group("List another instance of a promotable clone resource", "crm_resource --locate -r promotable-rsc:1 {fmt}", [Test, ValidatingTest]), Test("Try to move an instance of a cloned resource", "crm_resource -r promotable-rsc:0 --move --node node1", expected_rc=ExitStatus.INVALID_PARAM), ] basic_tests_setup = [ "crm_attribute -n no-quorum-policy -v ignore", "crm_simulate --live-check --in-place --node-up=node1" ] return options_tests + [ ShadowTestGroup(basic_tests, setup=basic_tests_setup), TestGroup(constraint_tests, env={"CIB_file": "{cts_cli_data}/constraints.xml"}), TestGroup(colocation_tests, cib_gen=partial(copy_existing_cib, "{cts_cli_data}/constraints.xml")), TestGroup(digest_tests, env={"CIB_file": "{cts_cli_data}/crm_resource_digests.xml"}), TestGroup(basic2_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}), ValidatingTest("Check that CIB_file=\"-\" works - crm_resource", "crm_resource --digests -r rsc1 -N node1 --output-as=xml", env={"CIB_file": "-"}, stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_resource_digests.xml"))), ] class CrmTicketRegressionTest(RegressionTest): """A class for testing crm_ticket.""" @property def name(self): """Return the name of this regression test.""" return "crm_ticket" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ Test("Default ticket granted state", "crm_ticket -t ticketA -G granted -d false"), Test("Set ticket granted state", "crm_ticket -t ticketA -r --force", update_cib=True), make_test_group("List ticket IDs", "crm_ticket -w {fmt}", [Test, ValidatingTest]), make_test_group("Query ticket state", "crm_ticket -t ticketA -q {fmt}", [Test, ValidatingTest]), make_test_group("Query ticket granted state", "crm_ticket -t ticketA -G granted {fmt}", [Test, ValidatingTest]), Test("Delete ticket granted state", "crm_ticket -t ticketA -D granted --force", update_cib=True), Test("Make a ticket standby", "crm_ticket -t ticketA -s", update_cib=True), Test("Query ticket standby state", "crm_ticket -t ticketA -G standby"), Test("Activate a ticket", "crm_ticket -t ticketA -a", update_cib=True), make_test_group("List ticket details", "crm_ticket -L -t ticketA {fmt}", [Test, ValidatingTest]), Test("Add a second ticket", "crm_ticket -t ticketB -G granted -d false", update_cib=True), Test("Set second ticket granted state", "crm_ticket -t ticketB -r --force", update_cib=True), make_test_group("List tickets", "crm_ticket -l {fmt}", [Test, ValidatingTest]), Test("Delete second ticket", """cibadmin --delete --xml-text ''""", update_cib=True), Test("Delete ticket standby state", "crm_ticket -t ticketA -D standby", update_cib=True), Test("Add a constraint to a ticket", """cibadmin -C -o constraints --xml-text ''""", update_cib=True), make_test_group("Query ticket constraints", "crm_ticket -t ticketA -c {fmt}", 
[Test, ValidatingTest]), Test("Delete ticket constraint", """cibadmin --delete --xml-text ''""", update_cib=True), ] basic_tests_setup = [ """cibadmin -C -o crm_config --xml-text ''""", """cibadmin -C -o resources --xml-text ''""" ] return [ ShadowTestGroup(basic_tests, setup=basic_tests_setup), ] class CrmadminRegressionTest(RegressionTest): """A class for testing crmadmin.""" @property def name(self): """Return the name of this regression test.""" return "crmadmin" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ make_test_group("List all nodes", "crmadmin -N {fmt}", [Test, ValidatingTest]), make_test_group("Minimally list all nodes", "crmadmin -N -q {fmt}", [Test, ValidatingTest]), Test("List all nodes as bash exports", "crmadmin -N -B"), make_test_group("List cluster nodes", "crmadmin -N cluster {fmt}", [Test, ValidatingTest]), make_test_group("List guest nodes", "crmadmin -N guest {fmt}", [Test, ValidatingTest]), make_test_group("List remote nodes", "crmadmin -N remote {fmt}", [Test, ValidatingTest]), make_test_group("List cluster,remote nodes", "crmadmin -N cluster,remote {fmt}", [Test, ValidatingTest]), make_test_group("List guest,remote nodes", "crmadmin -N guest,remote {fmt}", [Test, ValidatingTest]), ] return [ TestGroup(basic_tests, env={"CIB_file": "{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"}), Test("Check that CIB_file=\"-\" works", "crmadmin -N", env={"CIB_file": "-"}, stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crmadmin-cluster-remote-guest-nodes.xml"))), ] class CrmShadowRegressionTest(RegressionTest): """A class for testing crm_shadow.""" @property def name(self): """Return the name of this regression test.""" return "crm_shadow" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" no_instance_tests = [ make_test_group("Get active shadow instance (no active instance)", "crm_shadow --which {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's file name (no active instance)", "crm_shadow --file {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's contents (no active instance)", "crm_shadow --display {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's diff (no active instance)", "crm_shadow --diff {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), ] # Create new shadow instance based on active CIB # Don't use create_shadow_cib() here; test explicitly new_instance_tests = [ make_test_group("Create copied shadow instance", "crm_shadow --create {shadow} --batch {fmt}", [Test, ValidatingTest], setup="crm_shadow --delete {shadow} --force"), # Query shadow instance based on active CIB make_test_group("Get active shadow instance (copied)", "crm_shadow --which {fmt}", [Test, ValidatingTest]), make_test_group("Get active shadow instance's file name (copied)", "crm_shadow --file {fmt}", [Test, ValidatingTest]), make_test_group("Get active shadow instance's contents (copied)", "crm_shadow --display {fmt}", [Test, ValidatingTest]), make_test_group("Get active shadow instance's diff (copied)", "crm_shadow --diff {fmt}", [Test, ValidatingTest]), ] # Make some changes to the shadow file modify_cib = """export CIB_file=$(crm_shadow --file) && """ \ """cibadmin --modify --xml-text '' && """ \ """cibadmin --delete --xml-text '' && """ \ """cibadmin --create -o resources 
--xml-text '' && """ \ """cibadmin --create -o status --xml-text ''""" more_tests = [ # We can't use make_test_group() here because we only want to run # the modify_cib setup code once, and make_test_group will pass all # kwargs to every instance it creates. Test("Get active shadow instance's diff (after changes)", "crm_shadow --diff", setup=modify_cib, expected_rc=ExitStatus.ERROR), ValidatingTest("Get active shadow instance's diff (after changes)", "crm_shadow --diff --output-as=xml", expected_rc=ExitStatus.ERROR), TestGroup([ # Commit the modified shadow CIB to a temp active CIB file Test("Commit shadow instance", "crm_shadow --commit {shadow}", expected_rc=ExitStatus.USAGE), Test("Commit shadow instance (force)", "crm_shadow --commit {shadow} --force"), Test("Get active shadow instance's diff (after commit)", "crm_shadow --diff", expected_rc=ExitStatus.ERROR), Test("Commit shadow instance (force) (all)", "crm_shadow --commit {shadow} --force --all"), Test("Get active shadow instance's diff (after commit all)", "crm_shadow --diff", expected_rc=ExitStatus.ERROR), ], cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")), TestGroup([ # Repeat sequence with XML output ValidatingTest("Commit shadow instance", "crm_shadow --commit {shadow} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Commit shadow instance (force)", "crm_shadow --commit {shadow} --force --output-as=xml"), ValidatingTest("Get active shadow instance's diff (after commit)", "crm_shadow --diff --output-as=xml", expected_rc=ExitStatus.ERROR), ValidatingTest("Commit shadow instance (force) (all)", "crm_shadow --commit {shadow} --force --all --output-as=xml"), ValidatingTest("Get active shadow instance's diff (after commit all)", "crm_shadow --diff --output-as=xml", expected_rc=ExitStatus.ERROR), # Commit an inactive shadow instance with no active instance make_test_group("Commit shadow instance (no active instance)", "crm_shadow --commit {shadow} {fmt}", [Test, ValidatingTest], env={"CIB_shadow": None}, expected_rc=ExitStatus.USAGE), make_test_group("Commit shadow instance (no active instance) (force)", "crm_shadow --commit {shadow} --force {fmt}", [Test, ValidatingTest], env={"CIB_shadow": None}), # Commit an inactive shadow instance with an active instance make_test_group("Commit shadow instance (mismatch)", "crm_shadow --commit {shadow} {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.USAGE), make_test_group("Commit shadow instance (mismatch) (force)", "crm_shadow --commit {shadow} --force {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}), # Commit an active shadow instance whose shadow file is missing make_test_group("Commit shadow instance (nonexistent shadow file)", "crm_shadow --commit nonexistent_shadow {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.USAGE), make_test_group("Commit shadow instance (nonexistent shadow file) (force)", "crm_shadow --commit nonexistent_shadow --force {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's diff (nonexistent shadow file)", "crm_shadow --diff {fmt}", [Test, ValidatingTest], env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.NOSUCH), # Commit an active shadow instance when the CIB file is missing make_test_group("Commit shadow instance (nonexistent CIB file)", "crm_shadow --commit {shadow} {fmt}", [Test, ValidatingTest], 
env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}, expected_rc=ExitStatus.USAGE), make_test_group("Commit shadow instance (nonexistent CIB file) (force)", "crm_shadow --commit {shadow} --force {fmt}", [Test, ValidatingTest], env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}, expected_rc=ExitStatus.NOSUCH), make_test_group("Get active shadow instance's diff (nonexistent CIB file)", "crm_shadow --diff {fmt}", [Test, ValidatingTest], env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}, expected_rc=ExitStatus.NOSUCH), ], cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")), ] delete_1_tests = [ # Delete an active shadow instance Test("Delete shadow instance", "crm_shadow --delete {shadow}", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (force)", "crm_shadow --delete {shadow} --force"), ShadowTestGroup([ ValidatingTest("Delete shadow instance", "crm_shadow --delete {shadow} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (force)", "crm_shadow --delete {shadow} --force --output-as=xml"), ]) ] delete_2_tests = [ # Delete an inactive shadow instance with no active instance Test("Delete shadow instance (no active instance)", "crm_shadow --delete {shadow}", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (no active instance) (force)", "crm_shadow --delete {shadow} --force"), ] delete_3_tests = [ ValidatingTest("Delete shadow instance (no active instance)", "crm_shadow --delete {shadow} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (no active instance) (force)", "crm_shadow --delete {shadow} --force --output-as=xml"), ] delete_4_tests = [ # Delete an inactive shadow instance with an active instance Test("Delete shadow instance (mismatch)", "crm_shadow --delete {shadow}", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (mismatch) (force)", "crm_shadow --delete {shadow} --force"), ] delete_5_tests = [ ValidatingTest("Delete shadow instance (mismatch)", "crm_shadow --delete {shadow} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (mismatch) (force)", "crm_shadow --delete {shadow} --force --output-as=xml"), # Delete an active shadow instance whose shadow file is missing Test("Delete shadow instance (nonexistent shadow file)", "crm_shadow --delete nonexistent_shadow", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (nonexistent shadow file) (force)", "crm_shadow --delete nonexistent_shadow --force"), ValidatingTest("Delete shadow instance (nonexistent shadow file)", "crm_shadow --delete nonexistent_shadow --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (nonexistent shadow file) (force)", "crm_shadow --delete nonexistent_shadow --force --output-as=xml"), ] delete_6_tests = [ # Delete an active shadow instance when the CIB file is missing Test("Delete shadow instance (nonexistent CIB file)", "crm_shadow --delete {shadow}", expected_rc=ExitStatus.USAGE), Test("Delete shadow instance (nonexistent CIB file) (force)", "crm_shadow --delete {shadow} --force"), ] delete_7_tests = [ ValidatingTest("Delete shadow instance (nonexistent CIB file)", "crm_shadow --delete {shadow} --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Delete shadow instance (nonexistent CIB file) (force)", "crm_shadow --delete {shadow} --force --output-as=xml"), ] create_1_tests = [ # Create new shadow instance based on active CIB with no instance active make_test_group("Create copied shadow 
instance (no active instance)", "crm_shadow --create {shadow} --batch {fmt}", [Test, ValidatingTest], setup="crm_shadow --delete {shadow} --force", env={"CIB_shadow": None}), # Create new shadow instance based on active CIB with other instance active make_test_group("Create copied shadow instance (mismatch)", "crm_shadow --create {shadow} --batch {fmt}", [Test, ValidatingTest], setup="crm_shadow --delete {shadow} --force", env={"CIB_shadow": "nonexistent_shadow"}), # Create new shadow instance based on CIB (shadow file already exists) make_test_group("Create copied shadow instance (file already exists)", "crm_shadow --create {shadow} --batch {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CANTCREAT), make_test_group("Create copied shadow instance (file already exists) (force)", "crm_shadow --create {shadow} --batch --force {fmt}", [Test, ValidatingTest]), # Create new shadow instance based on active CIB when the CIB file is missing make_test_group("Create copied shadow instance (nonexistent CIB file) (force)", "crm_shadow --create {shadow} --batch --force {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH, setup="crm_shadow --delete {shadow} --force", env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}), ] create_2_tests = [ # Create new empty shadow instance make_test_group("Create empty shadow instance", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], setup="crm_shadow --delete {shadow} --force"), # Create empty shadow instance with no active instance make_test_group("Create empty shadow instance (no active instance)", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], setup="crm_shadow --delete {shadow} --force", env={"CIB_shadow": None}), # Create empty shadow instance with other instance active make_test_group("Create empty shadow instance (mismatch)", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], setup="crm_shadow --delete {shadow} --force", env={"CIB_shadow": "nonexistent_shadow"}), # Create empty shadow instance when the CIB file is missing make_test_group("Create empty shadow instance (nonexistent CIB file)", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], setup="crm_shadow --delete {shadow} --force", env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}), # Create empty shadow instance (shadow file already exists) make_test_group("Create empty shadow instance (file already exists)", "crm_shadow --create-empty {shadow} --batch {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CANTCREAT), make_test_group("Create empty shadow instance (file already exists) (force)", "crm_shadow --create-empty {shadow} --batch --force {fmt}", [Test, ValidatingTest]), # Query shadow instance with an empty CIB. # --which and --file queries were done earlier. 
TestGroup([ make_test_group("Get active shadow instance's contents (empty CIB)", "crm_shadow --display {fmt}", [Test, ValidatingTest]), make_test_group("Get active shadow instance's diff (empty CIB)", "crm_shadow --diff {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.ERROR), ], setup=delete_shadow_resource_defaults), # Reset shadow instance (overwrite existing shadow file based on active CIB) Test("Reset shadow instance", "crm_shadow --reset {shadow} --batch"), Test("Get active shadow instance's diff (after reset)", "crm_shadow --diff"), ] reset_1_tests = [ ValidatingTest("Reset shadow instance", "crm_shadow --reset {shadow} --batch --output-as=xml"), ValidatingTest("Get active shadow instance's diff (after reset)", "crm_shadow --diff --output-as=xml"), # Reset an inactive shadow instance with no active instance Test("Reset shadow instance (no active instance)", "crm_shadow --reset {shadow} --batch", env={"CIB_shadow": None}), ] reset_2_tests = [ ValidatingTest("Reset shadow instance (no active instance)", "crm_shadow --reset {shadow} --batch --output-as=xml"), # Reset an inactive shadow instance with an active instance Test("Reset shadow instance (mismatch)", "crm_shadow --reset {shadow} --batch", env={"CIB_shadow": "nonexistent_shadow"}, expected_rc=ExitStatus.USAGE), Test("Reset shadow instance (mismatch) (force)", "crm_shadow --reset {shadow} --batch --force", env={"CIB_shadow": "nonexistent_shadow"}), ] reset_3_tests = [ ValidatingTest("Reset shadow instance (mismatch)", "crm_shadow --reset {shadow} --batch --output-as=xml", expected_rc=ExitStatus.USAGE), ValidatingTest("Reset shadow instance (mismatch) (force)", "crm_shadow --reset {shadow} --batch --force --output-as=xml"), ] reset_4_tests = [ # Reset an active shadow instance when the CIB file is missing make_test_group("Reset shadow instance (nonexistent CIB file)", "crm_shadow --reset {shadow} --batch {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Reset shadow instance (nonexistent CIB file) (force)", "crm_shadow --reset {shadow} --batch --force {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), # Reset an active shadow instance whose shadow file is missing TestGroup([ make_test_group("Reset shadow instance (nonexistent shadow file)", "crm_shadow --reset {shadow} --batch {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), ], env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}, setup="crm_shadow --delete {shadow} --force"), TestGroup([ make_test_group("Reset shadow instance (nonexistent shadow file) (force)", "crm_shadow --reset {shadow} --batch --force {fmt}", [Test, ValidatingTest]), ], env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}, setup="crm_shadow --delete {shadow} --force"), ] # Switch shadow instances switch_tests = [ make_test_group("Switch to new shadow instance", "crm_shadow --switch {shadow} --batch {fmt}", [Test, ValidatingTest]), TestGroup([ make_test_group("Switch to nonexistent shadow instance", "crm_shadow --switch {shadow} --batch {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Switch to nonexistent shadow instance (force)", "crm_shadow --switch {shadow} --batch --force {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), ], setup="crm_shadow --delete {shadow} --force"), ] return no_instance_tests + [ ShadowTestGroup(new_instance_tests + more_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}, create=False), ShadowTestGroup(delete_1_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}), 
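# Each of the delete_*_tests groups that follow re-runs the delete sequence # under a different environment: CIB_shadow unset (no active instance), # CIB_shadow naming a nonexistent instance (mismatch), or CIB_file pointing # at a missing CIB file.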
ShadowTestGroup(delete_2_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml", "CIB_shadow": None}), ShadowTestGroup(delete_3_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml", "CIB_shadow": None}), ShadowTestGroup(delete_4_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml", "CIB_shadow": "nonexistent_shadow"}), ShadowTestGroup(delete_5_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml", "CIB_shadow": "nonexistent_shadow"}), ShadowTestGroup(delete_6_tests, env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}), ShadowTestGroup(delete_7_tests, env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}), ShadowTestGroup(create_1_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}, create=False), ShadowTestGroup(create_2_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}, create=False), ShadowTestGroup(reset_1_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}, create_empty=True), ShadowTestGroup(reset_2_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml", "CIB_shadow": None}, create_empty=True), ShadowTestGroup(reset_3_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml", "CIB_shadow": "nonexistent_shadow"}, create_empty=True), ShadowTestGroup(reset_4_tests, env={"CIB_file": "{cts_cli_data}/nonexistent_cib.xml"}, create_empty=True), ShadowTestGroup(switch_tests, env={"CIB_shadow": "nonexistent_shadow"}, create_empty=True), ] class CrmVerifyRegressionTest(RegressionTest): """A class for testing crm_verify.""" @property def name(self): """Return the name of this regression test.""" return "crm_verify" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" invalid_tests = [ Test("Verbosely verify a file-specified configuration with an unallowed fencing level ID", "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_fencing_topology.xml --verbose", expected_rc=ExitStatus.CONFIG), make_test_group("Verify a file-specified invalid configuration", "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CONFIG), make_test_group("Verify a file-specified invalid configuration (verbose)", "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --verbose {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CONFIG), make_test_group("Verify a file-specified invalid configuration (quiet)", "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_bz.xml --quiet {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.CONFIG), ValidatingTest("Verify another file-specified invalid configuration", "crm_verify --xml-file {cts_cli_data}/crm_verify_invalid_no_stonith.xml --output-as=xml", expected_rc=ExitStatus.CONFIG), ] with open("%s/cli/crm_mon.xml" % test_home, encoding="utf-8") as f: cib_contents = f.read() valid_tests = [ ValidatingTest("Verify a file-specified valid configuration", "crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml"), ValidatingTest("Verify a piped-in valid configuration", "crm_verify -p --output-as=xml", stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))), ValidatingTest("Verbosely verify a file-specified valid configuration", "crm_verify --xml-file {cts_cli_data}/crm_mon.xml --output-as=xml --verbose"), ValidatingTest("Verbosely verify a piped-in valid configuration", "crm_verify -p --output-as=xml --verbose", stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))), ValidatingTest("Verify a string-supplied valid configuration", "crm_verify -X '%s' --output-as=xml" % cib_contents), ValidatingTest("Verbosely 
verify a string-supplied valid configuration", "crm_verify -X '%s' --output-as=xml --verbose" % cib_contents), ] return invalid_tests + valid_tests class CrmMonRegressionTest(RegressionTest): """A class for testing crm_mon.""" @property def name(self): """Return the name of this regression test.""" return "crm_mon" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ make_test_group("Basic output", "crm_mon -1 {fmt}", [Test, ValidatingTest]), make_test_group("Output without node section", "crm_mon -1 --exclude=nodes {fmt}", [Test, ValidatingTest]), # The next test doesn't need to be performed for other output formats. It's # really just a test to make sure that blank lines are correct. Test("Output with only the node section", "crm_mon -1 --exclude=all --include=nodes"), # XML includes everything already so there's no need for a complete test Test("Complete text output", "crm_mon -1 --include=all"), # XML includes detailed output already Test("Complete text output with detail", "crm_mon -1R --include=all"), Test("Complete brief text output", "crm_mon -1 --include=all --brief"), Test("Complete text output grouped by node", "crm_mon -1 --include=all --group-by-node"), # XML does not have a brief output option Test("Complete brief text output grouped by node", "crm_mon -1 --include=all --group-by-node --brief"), ValidatingTest("Output grouped by node", "crm_mon --output-as=xml --group-by-node"), make_test_group("Complete output filtered by node", "crm_mon -1 --include=all --node=cluster01 {fmt}", [Test, ValidatingTest]), make_test_group("Complete output filtered by tag", "crm_mon -1 --include=all --node=even-nodes {fmt}", [Test, ValidatingTest]), make_test_group("Complete output filtered by resource tag", "crm_mon -1 --include=all --resource=fencing-rscs {fmt}", [Test, ValidatingTest]), make_test_group("Output filtered by node that doesn't exist", "crm_mon -1 --node=blah {fmt}", [Test, ValidatingTest]), Test("Basic text output with inactive resources", "crm_mon -1 -r"), # XML already includes inactive resources Test("Basic text output with inactive resources, filtered by node", "crm_mon -1 -r --node=cluster02"), make_test_group("Complete output filtered by primitive resource", "crm_mon -1 --include=all --resource=Fencing {fmt}", [Test, ValidatingTest]), make_test_group("Complete output filtered by group resource", "crm_mon -1 --include=all --resource=exim-group {fmt}", [Test, ValidatingTest]), Test("Complete text output filtered by group resource member", "crm_mon -1 --include=all --resource=Public-IP"), ValidatingTest("Output filtered by group resource member", "crm_mon --output-as=xml --resource=Email"), make_test_group("Complete output filtered by clone resource", "crm_mon -1 --include=all --resource=ping-clone {fmt}", [Test, ValidatingTest]), make_test_group("Complete output filtered by clone resource instance", "crm_mon -1 --include=all --resource=ping {fmt}", [Test, ValidatingTest]), Test("Complete text output filtered by exact clone resource instance", "crm_mon -1 --include=all --show-detail --resource=ping:0"), ValidatingTest("Output filtered by exact clone resource instance", "crm_mon --output-as=xml --resource=ping:1"), make_test_group("Output filtered by resource that doesn't exist", "crm_mon -1 --resource=blah {fmt}", [Test, ValidatingTest]), Test("Basic text output with inactive resources, filtered by tag", "crm_mon -1 -r --resource=inactive-rscs"), Test("Basic text output with inactive resources, filtered by bundle 
resource", "crm_mon -1 -r --resource=httpd-bundle"), ValidatingTest("Output filtered by inactive bundle resource", "crm_mon --output-as=xml --resource=httpd-bundle"), Test("Basic text output with inactive resources, filtered by bundled IP address resource", "crm_mon -1 -r --resource=httpd-bundle-ip-192.168.122.131"), ValidatingTest("Output filtered by bundled IP address resource", "crm_mon --output-as=xml --resource=httpd-bundle-ip-192.168.122.132"), Test("Basic text output with inactive resources, filtered by bundled container", "crm_mon -1 -r --resource=httpd-bundle-docker-1"), ValidatingTest("Output filtered by bundled container", "crm_mon --output-as=xml --resource=httpd-bundle-docker-2"), Test("Basic text output with inactive resources, filtered by bundle connection", "crm_mon -1 -r --resource=httpd-bundle-0"), ValidatingTest("Output filtered by bundle connection", "crm_mon --output-as=xml --resource=httpd-bundle-0"), Test("Basic text output with inactive resources, filtered by bundled primitive resource", "crm_mon -1 -r --resource=httpd"), ValidatingTest("Output filtered by bundled primitive resource", "crm_mon --output-as=xml --resource=httpd"), Test("Complete text output, filtered by clone name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-clone-group"), ValidatingTest("Output, filtered by clone name in cloned group", "crm_mon --output-as=xml --resource=mysql-clone-group"), Test("Complete text output, filtered by group name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-group"), ValidatingTest("Output, filtered by group name in cloned group", "crm_mon --output-as=xml --resource=mysql-group"), Test("Complete text output, filtered by exact group instance name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-group:1"), ValidatingTest("Output, filtered by exact group instance name in cloned group", "crm_mon --output-as=xml --resource=mysql-group:1"), Test("Complete text output, filtered by primitive name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-proxy"), ValidatingTest("Output, filtered by primitive name in cloned group", "crm_mon --output-as=xml --resource=mysql-proxy"), Test("Complete text output, filtered by exact primitive instance name in cloned group", "crm_mon -1 --include=all --show-detail --resource=mysql-proxy:1"), ValidatingTest("Output, filtered by exact primitive instance name in cloned group", "crm_mon --output-as=xml --resource=mysql-proxy:1"), ] partial_tests = [ Test("Output of partially active resources", "crm_mon -1 --show-detail"), ValidatingTest("Output of partially active resources", "crm_mon --output-as=xml"), Test("Output of partially active resources, with inactive resources", "crm_mon -1 -r --show-detail"), # XML already includes inactive resources Test("Complete brief text output, with inactive resources", "crm_mon -1 -r --include=all --brief --show-detail"), # XML does not have a brief output option Test("Text output of partially active group", "crm_mon -1 --resource=partially-active-group"), Test("Text output of partially active group, with inactive resources", "crm_mon -1 --resource=partially-active-group -r"), Test("Text output of active member of partially active group", "crm_mon -1 --resource=dummy-1"), Test("Text output of inactive member of partially active group", "crm_mon -1 --resource=dummy-2 --show-detail"), Test("Complete brief text output grouped by node, with inactive resources", "crm_mon -1 -r --include=all --group-by-node 
--brief --show-detail"), Test("Text output of partially active resources, with inactive resources, filtered by node", "crm_mon -1 -r --node=cluster01"), ValidatingTest("Output of partially active resources, filtered by node", "crm_mon --output-as=xml --node=cluster01"), ] unmanaged_tests = [ make_test_group("Output of active unmanaged resource on offline node", "crm_mon -1 {fmt}", [Test, ValidatingTest]), Test("Brief text output of active unmanaged resource on offline node", "crm_mon -1 --brief"), Test("Brief text output of active unmanaged resource on offline node, grouped by node", "crm_mon -1 --brief --group-by-node"), ] maint1_tests = [ make_test_group("Output of all resources with maintenance-mode enabled", "crm_mon -1 -r {fmt}", [Test, ValidatingTest], setup="crm_attribute -n maintenance-mode -v true", teardown="crm_attribute -n maintenance-mode -v false"), make_test_group("Output of all resources with maintenance enabled for a node", "crm_mon -1 -r {fmt}", [Test, ValidatingTest], setup="crm_attribute -n maintenance -N cluster02 -v true", teardown="crm_attribute -n maintenance -N cluster02 -v false"), ] maint2_tests = [ # The fence resource is excluded, for comparison make_test_group("Output of all resources with maintenance meta attribute true", "crm_mon -1 -r {fmt}", [Test, ValidatingTest]), ] t180_tests = [ Test("Text output of guest node's container on different node from its remote resource", "crm_mon -1"), Test("Complete text output of guest node's container on different node from its remote resource", "crm_mon -1 --show-detail"), ] return [ TestGroup(basic_tests, env={"CIB_file": "{cts_cli_data}/crm_mon.xml"}), Test("Check that CIB_file=\"-\" works", "crm_mon -1", env={"CIB_file": "-"}, stdin=pathlib.Path(apply_substitutions("{cts_cli_data}/crm_mon.xml"))), TestGroup(partial_tests, env={"CIB_file": "{cts_cli_data}/crm_mon-partial.xml"}), TestGroup(unmanaged_tests, env={"CIB_file": "{cts_cli_data}/crm_mon-unmanaged.xml"}), TestGroup(maint1_tests, cib_gen=partial(copy_existing_cib, "{cts_cli_data}/crm_mon.xml")), TestGroup(maint2_tests, env={"CIB_file": "{cts_cli_data}/crm_mon-rsc-maint.xml"}), TestGroup(t180_tests, env={"CIB_file": "{cts_cli_data}/crm_mon-T180.xml"}), ] class AclsRegressionTest(RegressionTest): """A class for testing access control lists.""" @property def name(self): """Return the name of this regression test.""" return "acls" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" acl_cib = """ """ basic_tests = [ Test("Configure some ACLs", "cibadmin -M -o acls -p", update_cib=True, stdin=acl_cib), Test("Enable ACLs", "crm_attribute -n enable-acl -v true", update_cib=True), Test("Set cluster option", "crm_attribute -n no-quorum-policy -v ignore", update_cib=True), Test("New ACL role", """cibadmin --create -o acls --xml-text ''""", update_cib=True), Test("New ACL target", """cibadmin --create -o acls --xml-text ''""", update_cib=True), Test("Another ACL role", """cibadmin --create -o acls --xml-text ''""", update_cib=True), Test("Another ACL target", """cibadmin --create -o acls --xml-text ''""", update_cib=True), Test("Updated ACL", """cibadmin --replace -o acls --xml-text ''""", update_cib=True), ] no_acl_tests = [ Test("unknownguy: Query configuration", "cibadmin -Q", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("unknownguy: Set enable-acl", "crm_attribute -n enable-acl -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("unknownguy: Set stonith-enabled", "crm_attribute -n stonith-enabled -v false", 
expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("unknownguy: Create a resource", """cibadmin -C -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ] deny_cib_tests = [ Test("l33t-haxor: Query configuration", "cibadmin -Q", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Set enable-acl", "crm_attribute -n enable-acl -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Set stonith-enabled", "crm_attribute -n stonith-enabled -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Create a resource", """cibadmin -C -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ] observer_tests = [ Test("niceguy: Query configuration", "cibadmin -Q"), Test("niceguy: Set enable-acl", "crm_attribute -n enable-acl -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("niceguy: Set stonith-enabled", "crm_attribute -n stonith-enabled -v false", update_cib=True), Test("niceguy: Create a resource", """cibadmin -C -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("root: Query configuration", "cibadmin -Q", env={"CIB_user": "root"}), Test("root: Set stonith-enabled", "crm_attribute -n stonith-enabled -v true", update_cib=True, env={"CIB_user": "root"}), Test("root: Create a resource", """cibadmin -C -o resources --xml-text ''""", update_cib=True, env={"CIB_user": "root"}), ] deny_cib_2_tests = [ Test("l33t-haxor: Create a resource meta attribute", "crm_resource -r dummy --meta -p target-role -v Stopped", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Query a resource meta attribute", "crm_resource -r dummy --meta -g target-role", expected_rc=ExitStatus.INSUFFICIENT_PRIV), Test("l33t-haxor: Remove a resource meta attribute", "crm_resource -r dummy --meta -d target-role", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ] observer_2_tests = [ Test("niceguy: Create a resource meta attribute", "crm_resource -r dummy --meta -p target-role -v Stopped", update_cib=True), Test("niceguy: Query a resource meta attribute", "crm_resource -r dummy --meta -g target-role", update_cib=True), Test("niceguy: Remove a resource meta attribute", "crm_resource -r dummy --meta -d target-role", update_cib=True), Test("niceguy: Create a resource meta attribute", "crm_resource -r dummy --meta -p target-role -v Started", update_cib=True), ] read_meta_tests = [ Test("badidea: Query configuration - implied deny", "cibadmin -Q"), ] deny_cib_3_tests = [ Test("betteridea: Query configuration - explicit deny", "cibadmin -Q"), ] replace_tests = [ TestGroup([ AclTest("niceguy: Replace - remove acls", "cibadmin --replace -p", setup="cibadmin --delete --xml-text ''", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("niceguy: Replace - create resource", "cibadmin --replace -p", setup="""cibadmin -C -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("niceguy: Replace - modify attribute (deny)", "cibadmin --replace -p", setup="crm_attribute -n enable-acl -v false", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("niceguy: Replace - delete attribute (deny)", "cibadmin --replace -p", setup="""cibadmin --replace --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("niceguy: Replace - create attribute (deny)", "cibadmin --replace -p", setup="""cibadmin --modify --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ], env={"CIB_user": "niceguy"}), # admin role TestGroup([ AclTest("bob: Replace - create attribute (direct allow)", "cibadmin --replace -o resources -p", 
setup="""cibadmin --modify --xml-text ''"""), AclTest("bob: Replace - modify attribute (direct allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("bob: Replace - delete attribute (direct allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --replace -o resources --xml-text ''"""), ], env={"CIB_user": "bob"}), # super_user role TestGroup([ AclTest("joe: Replace - create attribute (inherited allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("joe: Replace - modify attribute (inherited allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("joe: Replace - delete attribute (inherited allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --replace -o resources --xml-text ''"""), ], env={"CIB_user": "joe"}), # rsc_writer role TestGroup([ AclTest("mike: Replace - create attribute (allow overrides deny)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("mike: Replace - modify attribute (allow overrides deny)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''"""), AclTest("mike: Replace - delete attribute (allow overrides deny)", "cibadmin --replace -o resources -p", setup="""cibadmin --replace -o resources --xml-text ''"""), # Create an additional resource for deny-overrides-allow testing AclTest("mike: Create another resource", """cibadmin -C -o resources --xml-text ''""", update_cib=True), ], env={"CIB_user": "mike"}), # rsc_denied role TestGroup([ AclTest("chris: Replace - create attribute (deny overrides allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("chris: Replace - modify attribute (deny overrides allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --modify --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), AclTest("chris: Replace - delete attribute (deny overrides allow)", "cibadmin --replace -o resources -p", setup="""cibadmin --replace -o resources --xml-text ''""", expected_rc=ExitStatus.INSUFFICIENT_PRIV), ], env={"CIB_user": "chris"}), ] loop_tests = [ # no ACL TestGroup(no_acl_tests, env={"CIB_user": "unknownguy"}), # deny /cib permission TestGroup(deny_cib_tests, env={"CIB_user": "l33t-haxor"}), # observer role TestGroup(observer_tests, env={"CIB_user": "niceguy"}), # deny /cib permission TestGroup(deny_cib_2_tests, env={"CIB_user": "l33t-haxor"}), # observer role TestGroup(observer_2_tests, env={"CIB_user": "niceguy"}), # read //meta_attributes TestGroup(read_meta_tests, env={"CIB_user": "badidea"}), # deny /cib, read //meta_attributes TestGroup(deny_cib_3_tests, env={"CIB_user": "betteridea"}), ] + replace_tests return [ ShadowTestGroup(basic_tests + [ TestGroup(loop_tests, env={"PCMK_trace_functions": "pcmk__check_acl,pcmk__apply_creation_acl"})]), ] class ValidityRegressionTest(RegressionTest): """A class for testing CIB validity.""" @property def name(self): """Return the name of this regression test.""" return "validity" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ Test("Try to make resulting CIB invalid (enum violation)", """cibadmin -M -o constraints --xml-text ''""", expected_rc=ExitStatus.CONFIG, update_cib=True), Test("Run crm_simulate with invalid CIB (enum violation)", "crm_simulate -p -S", stdin=StdinCmd("""cibadmin -Q | sed 
's#"start"#"break"#'"""), expected_rc=ExitStatus.CONFIG), Test("Try to make resulting CIB invalid (unrecognized validate-with)", "cibadmin -M --xml-text ''", expected_rc=ExitStatus.CONFIG, update_cib=True), Test("Run crm_simulate with invalid CIB (unrecognized validate-with)", "crm_simulate -p -S", stdin=StdinCmd("""cibadmin -Q | sed 's#"pacemaker-1.2"#"pacemaker-9999.0"#'"""), expected_rc=ExitStatus.CONFIG), Test("Try to make resulting CIB invalid, but possibly recoverable (valid with X.Y+1)", "cibadmin -C -o configuration --xml-text ''", expected_rc=ExitStatus.CONFIG, update_cib=True), Test("Run crm_simulate with invalid, but possibly recoverable CIB (valid with X.Y+1)", "crm_simulate -p -S", stdin=StdinCmd("cibadmin -Q | sed 's###'")), Test("Make resulting CIB valid, although without validate-with attribute", "cibadmin -p -R", stdin=StdinCmd("""cibadmin -Q | sed 's#[ ][ ]*validate-with="[^"]*"##'"""), update_cib=True), Test("Run crm_simulate with valid CIB, but without validate-with attribute", "crm_simulate -p -S", stdin=StdinCmd("cibadmin -Q")), # this will just disable validation and accept the config, outputting # validation errors Test("Make resulting CIB invalid, and without validate-with attribute", "cibadmin -p -R", stdin=StdinCmd("""cibadmin -Q | """ """sed -e 's#[ ][ ]*validate-with="[^"]*"##' """ """ -e 's#\\([ ][ ]*epoch="[^"]*\\)"#\\10"#' """ """ -e 's#"start"#"break"#'"""), update_cib=True), Test("Run crm_simulate with invalid CIB, also without validate-with attribute", "crm_simulate -p -S", stdin=StdinCmd("""cibadmin -Q | """ """sed -e 's#[ ][ ]*validate-with="[^"]*"##' """ """ -e 's#\\([ ][ ]*epoch="[^"]*\\)"#\\10"#' """ """ -e 's#"start"#"break"#'""")), ] basic_tests_setup = [ """cibadmin -C -o resources --xml-text ''""", """cibadmin -C -o resources --xml-text ''""", """cibadmin -C -o constraints --xml-text ''""", ] return [ ShadowTestGroup(basic_tests, validate_with="pacemaker-1.2", setup=basic_tests_setup, env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}), ] class UpgradeRegressionTest(RegressionTest): """A class for testing upgrading the CIB.""" @property def name(self): """Return the name of this regression test.""" return "upgrade" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" resource_cib = """ """ basic_tests = [ Test("Set stonith-enabled=false", "crm_attribute -n stonith-enabled -v false", update_cib=True), Test("Configure the initial resource", "cibadmin -M -o resources -p", update_cib=True, stdin=resource_cib), Test("Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)", "cibadmin --upgrade --force -V -V", update_cib=True), Test("Query a resource instance attribute (shall survive)", "crm_resource -r mySmartFuse -g requires", update_cib=True), ] return [ ShadowTestGroup(basic_tests, validate_with="pacemaker-2.10", env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}) ] class RulesRegressionTest(RegressionTest): """A class for testing support for CIB rules.""" @property def name(self): """Return the name of this regression test.""" return "rules" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" tomorrow = datetime.now() + timedelta(days=1) rule_cib = """ """ % tomorrow.strftime("%F %T %z") usage_tests = [ make_test_group("crm_rule given no arguments", "crm_rule {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.USAGE), make_test_group("crm_rule given no rule to check", "crm_rule -c {fmt}", [Test, ValidatingTest], 
expected_rc=ExitStatus.USAGE), make_test_group("crm_rule given invalid input XML", "crm_rule -c -r blahblah -X invalidxml {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.DATAERR), make_test_group("crm_rule given invalid input XML on stdin", "crm_rule -c -r blahblah -X - {fmt}", [Test, ValidatingTest], stdin=StdinCmd("echo invalidxml"), expected_rc=ExitStatus.DATAERR), ] basic_tests = [ make_test_group("Try to check a rule that doesn't exist", "crm_rule -c -r blahblah {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Try to check a rule that has too many date_expressions", "crm_rule -c -r cli-rule-too-many-date-expressions {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), make_test_group("Verify basic rule is expired", "crm_rule -c -r cli-prefer-rule-dummy-expired {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.EXPIRED), make_test_group("Verify basic rule worked in the past", "crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 {fmt}", [Test, ValidatingTest]), make_test_group("Verify basic rule is not yet in effect", "crm_rule -c -r cli-prefer-rule-dummy-not-yet {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOT_YET_IN_EFFECT), make_test_group("Verify date_spec rule with years has expired", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.EXPIRED), make_test_group("Verify multiple rules at once", "crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.EXPIRED), make_test_group("Verify date_spec rule with years is in effect", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 {fmt}", [Test, ValidatingTest]), make_test_group("Try to check a rule whose date_spec does not contain years=", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), make_test_group("Try to check a rule whose date_spec contains years= and moon=", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), make_test_group("Try to check a rule with no date_expression", "crm_rule -c -r cli-no-date_expression-rule {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), ] return usage_tests + [ TestGroup(basic_tests, cib_gen=partial(write_cib, rule_cib)) ] class FeatureSetRegressionTest(RegressionTest): """A class for testing support for version-specific features.""" @property def name(self): """Return the name of this regression test.""" return "feature_set" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ # Import the test CIB Test("Import the test CIB", "cibadmin --replace --xml-file {cts_cli_data}/crm_mon-feature_set.xml", update_cib=True), Test("Complete text output, no mixed status", "crm_mon -1 --show-detail"), ValidatingTest("Output, no mixed status", "crm_mon --output-as=xml"), # Modify the CIB to fake that the cluster has mixed versions Test("Fake inconsistent feature set", "crm_attribute --node=cluster02 --name=#feature-set --update=3.15.0 --lifetime=reboot", update_cib=True), Test("Complete text output, mixed status", "crm_mon -1 --show-detail"), ValidatingTest("Output, mixed status", "crm_mon --output-as=xml"), ] return [ ShadowTestGroup(basic_tests), ] # Tests that depend on resource agents and must be run in 
an installed # environment class AgentRegressionTest(RegressionTest): """A class for testing resource agents.""" @property def name(self): """Return the name of this regression test.""" return "agents" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" return [ make_test_group("Validate a valid resource configuration", "crm_resource --validate --class ocf --provider pacemaker --agent Dummy {fmt}", [Test, ValidatingTest]), # Make the Dummy configuration invalid (op_sleep can't be a generic string) make_test_group("Validate an invalid resource configuration", "crm_resource --validate --class ocf --provider pacemaker --agent Dummy {fmt}", [Test, ValidatingTest], env={"OCF_RESKEY_op_sleep": "asdf"}), ] def build_options(): """Handle command line arguments.""" parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="Command line tool regression tests", epilog="Default tests: %s\n" "Other tests: agents (must be run in an installed environment)" % " ".join(default_tests)) parser.add_argument("-j", "--jobs", metavar="JOBS", default=cpu_count() - 1, type=int, help="The number of tests to run simultaneously") parser.add_argument("-p", "--path", metavar="DIR", action="append", help="Look for executables in DIR (may be specified multiple times)") parser.add_argument("-r", "--run-only", metavar="TEST", choices=default_tests + ["tools"] + other_tests, action="append", help="Run only specified tests (may be specified multiple times)") parser.add_argument("-s", "--save", action="store_true", help="Save actual output as expected output") parser.add_argument("-v", "--valgrind", action="store_true", help="Run all commands under valgrind") parser.add_argument("-V", "--verbose", action="store_true", help="Display any differences from expected output") args = parser.parse_args() if args.path is None: args.path = [] return args def setup_environment(valgrind): """Set various environment variables needed for operation.""" if valgrind: os.environ["G_SLICE"] = "always-malloc" # Ensure all command output is in portable locale for comparison os.environ["LC_ALL"] = "C" # Log test errors to stderr os.environ["PCMK_stderr"] = "1" # Because we will change the value of PCMK_trace_functions and then reset it # back to some initial value at various points, it's easiest to assume it is # defined but empty by default if "PCMK_trace_functions" not in os.environ: os.environ["PCMK_trace_functions"] = "" def path_prepend(p): """Add another directory to the front of $PATH.""" old = os.environ["PATH"] os.environ["PATH"] = "%s:%s" % (p, old) def setup_path(opts_path): """Set the PATH environment variable appropriately for the tests.""" srcdir = os.path.dirname(test_home) # Add any search paths given on the command line for p in opts_path: path_prepend(p) if os.path.exists("%s/tools/crm_simulate" % srcdir): print("Using local binaries from: %s" % srcdir) path_prepend("%s/tools" % srcdir) for daemon in ["based", "controld", "fenced", "schedulerd"]: path_prepend("%s/daemons/%s" % (srcdir, daemon)) print("Using local schemas from: %s/xml" % srcdir) os.environ["PCMK_schema_directory"] = "%s/xml" % srcdir else: path_prepend(BuildOptions.DAEMON_DIR) os.environ["PCMK_schema_directory"] = BuildOptions.SCHEMA_DIR def _run_one(valgrind, r): """Run and return a TestGroup object.""" # See comments in run_regression_tests. 
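# A rough sketch of the calling pattern, for context: run_regression_tests() # below does pool.map(partial(_run_one, valgrind), regs). Each worker receives # a pickled copy of r, so returning the object after run() is what lets the # parent process see the modified results.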
r.run(valgrind=valgrind) return r def run_regression_tests(regs, jobs, valgrind=False): """Run the given tests and return the modified objects.""" executed = [] with Pool(processes=jobs) as pool: # What we really want to do here is: # pool.map(lambda r: r.run(), regs) # # However, multiprocessing uses pickle to hand each task to a worker process, # and Python can't pickle a lambda (nor a nested function within this one). # Thus, we need to use the _run_one wrapper at the file level just to call # run(). Further, because each worker operates on a pickled copy, we must # return the modified object from _run_one and then return the list of # modified objects here; otherwise the rest of the program would keep using # the originals, unmodified by this run. executed = pool.map(partial(_run_one, valgrind), regs) return executed def results(regs, save, verbose): """Print the output from each regression test, returning the number whose output differs.""" output_differs = 0 if verbose: print("\n\nResults") for r in regs: r.write() - r.diff() - if not r.identical: - output_differs += 1 - if save: dest = "%s/cli/regression.%s.exp" % (test_home, r.name) copyfile(r.results_file, dest) + r.diff() + if not r.identical: + output_differs += 1 + return output_differs def summary(regs, output_differs, verbose): """Print the summary output for the entire test run.""" test_failures = 0 test_successes = 0 for r in regs: test_failures += r.failures test_successes += r.successes print("\n\nSummary") # First, print all the Passed/Failed lines from each Test run. for r in regs: print("\n".join(r.summary)) fmt = PluralFormatter() # Then, print information specific to each result possibility. Basically, # if there were failures then we print the output differences, leave the # failed output files in place, and exit with an error. Otherwise, clean up # anything that passed. if test_failures > 0 and output_differs > 0: print(fmt.format("{0} {0:plural,test} failed; see output in:", test_failures)) for r in regs: r.process_results(verbose) return ExitStatus.ERROR if test_failures > 0: print(fmt.format("{0} {0:plural,test} failed", test_failures)) for r in regs: r.process_results(verbose) return ExitStatus.ERROR if output_differs: print(fmt.format("{0} {0:plural,test} passed but output was " "unexpected; see output in:", test_successes)) for r in regs: r.process_results(verbose) return ExitStatus.DIGEST print(fmt.format("{0} {0:plural,test} passed", test_successes)) for r in regs: r.cleanup() return ExitStatus.OK regression_classes = [ AccessRenderRegressionTest, DaemonsRegressionTest, DatesRegressionTest, ErrorCodeRegressionTest, CibadminRegressionTest, CrmAttributeRegressionTest, CrmStandbyRegressionTest, CrmResourceRegressionTest, CrmTicketRegressionTest, CrmadminRegressionTest, CrmShadowRegressionTest, CrmVerifyRegressionTest, CrmMonRegressionTest, AclsRegressionTest, ValidityRegressionTest, UpgradeRegressionTest, RulesRegressionTest, FeatureSetRegressionTest, AgentRegressionTest, ] def main(): """Run command line regression tests as specified by arguments.""" opts = build_options() setup_environment(opts.valgrind) setup_path(opts.path) # Filter the list of all regression test classes to include only those that # were requested on the command line. If empty, this defaults to default_tests.
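# For example, a hypothetical invocation like `cts-cli -r tools` runs only the # individual tool tests, since the "tools" shorthand is expanded to the # tools_tests list just below.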
if not opts.run_only: opts.run_only = default_tests if opts.run_only == ["tools"]: opts.run_only = tools_tests regs = [] for cls in regression_classes: obj = cls() if obj.name in opts.run_only: regs.append(obj) regs = run_regression_tests(regs, max(1, opts.jobs), valgrind=opts.valgrind) output_differs = results(regs, opts.save, opts.verbose) rc = summary(regs, output_differs, opts.verbose) sys.exit(rc) if __name__ == "__main__": main() diff --git a/python/pacemaker/_cts/test.py b/python/pacemaker/_cts/test.py index b6d3f03e0d..4342d72338 100644 --- a/python/pacemaker/_cts/test.py +++ b/python/pacemaker/_cts/test.py @@ -1,529 +1,529 @@ """ A module providing base classes. These classes are used for defining regression tests and groups of regression tests. Everything exported here should be considered an abstract class that needs to be subclassed in order to do anything useful. Various functions will raise NotImplementedError if not overridden by a subclass. """ __copyright__ = "Copyright 2009-2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+)" __all__ = ["Test", "Tests"] import io import os import re import shlex import signal import subprocess import sys import time from pacemaker._cts.errors import ExitCodeError, OutputFoundError, OutputNotFoundError, XmlValidationError from pacemaker._cts.process import pipe_communicate from pacemaker._cts.validate import validate from pacemaker.exitstatus import ExitStatus class Pattern: """A class for checking log files for a given pattern.""" def __init__(self, pat, negative=False, regex=False): """ Create a new Pattern instance. Arguments: pat -- The string to search for negative -- If True, pat must not be found in any input regex -- If True, pat is a regex and not a substring """ self._pat = pat self.negative = negative self.regex = regex def __str__(self): return self._pat def match(self, line): """Return True if this pattern is found in the given line.""" if self.regex: return re.search(self._pat, line) is not None return self._pat in line class Test: """ The base class for a single regression test. A single regression test may still run multiple commands as part of its execution. """ def __init__(self, name, description, **kwargs): """ Create a new Test instance. This method must be provided by all subclasses, which must call Test.__init__ first. Arguments: description -- A user-readable description of the test, helpful in identifying what test is running or has failed. name -- The name of the test. Command line tools use this attribute to allow running only tests with the exact name, or tests whose name matches a given pattern. This should be unique among all tests. Keyword arguments: force_wait -- If True, wait out the full timeout when starting daemons instead of returning as soon as the log reports a successful start. logdir -- The base directory under which to create a directory to store output and temporary data. timeout -- How long to wait for the test to complete. verbose -- Whether to print additional information, including verbose command output and daemon log files. """ self.description = description self.executed = False self.name = name self.force_wait = kwargs.get("force_wait", False) self.logdir = kwargs.get("logdir", "/tmp") self.timeout = kwargs.get("timeout", 2) self.verbose = kwargs.get("verbose", False) self._cmds = [] self._patterns = [] self._daemon_location = None self._daemon_output = "" self._daemon_process = None self._result_exitcode = ExitStatus.OK self._result_txt = "" # # PROPERTIES # @property def exitcode(self): """ Return the final exitcode of the Test.
If all commands pass, this property will be ExitStatus.OK. Otherwise, this property will be the exitcode of the first command to fail. """ return self._result_exitcode @exitcode.setter def exitcode(self, value): self._result_exitcode = value @property def logpath(self): """ Return the path to the log for whatever daemon is being tested. Note that this requires all subclasses to set self._daemon_location before accessing this property or an exception will be raised. """ return os.path.join(self.logdir, "%s.log" % self._daemon_location) # # PRIVATE METHODS # def _kill_daemons(self): """Kill any running daemons in preparation for executing the test.""" raise NotImplementedError("_kill_daemons not provided by subclass") def _match_log_patterns(self): """ Check test output for expected patterns. Set self.exitcode and self._result_txt as appropriate. Not all subclasses will need to do this. """ if len(self._patterns) == 0: return n_failed_matches = 0 n_negative_matches = 0 output = self._daemon_output.split("\n") for pat in self._patterns: positive_match = False for line in output: if pat.match(line): if pat.negative: n_negative_matches += 1 if self.verbose: print("This pattern should not have matched = '%s'" % pat) break positive_match = True break if not pat.negative and not positive_match: n_failed_matches += 1 print("Pattern Not Matched = '%s'" % pat) if n_failed_matches > 0 or n_negative_matches > 0: msg = "FAILURE - '%s' failed. %d patterns out of %d not matched. %d negative matches." self._result_txt = msg % (self.name, n_failed_matches, len(self._patterns), n_negative_matches) self.exitcode = ExitStatus.ERROR def _start_daemons(self): """Start any necessary daemons in preparation for executing the test.""" raise NotImplementedError("_start_daemons not provided by subclass") # # PUBLIC METHODS # def add_cmd(self, cmd=None, **kwargs): """ Add a command to be executed as part of this test. Arguments: cmd -- The program to run. Keyword arguments: args -- Command line arguments to pass to cmd, as a string. check_rng -- If True and validate is True, command output will additionally be checked against the api-result.rng file. check_stderr -- If True, the stderr of cmd will be included in output. env -- If not None, variables to set in the environment. expected_exitcode -- The expected exit code of cmd. This can be used to run a command that is expected to fail. kill -- A command to be run after cmd, typically in order to kill a failed process. This should be the entire command line including arguments as a single string. no_wait -- Do not wait for cmd to complete. stdout_match -- If not None, a string that is expected to be present in the stdout of cmd. This can be a regular expression. stdout_no_match -- If not None, a string that is expected to be missing in the stdout of cmd. This can be a regular expression. validate -- If True, the output of cmd will be passed to xmllint for validation. If validation fails, XmlValidationError will be raised.
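Use like so (an illustrative sketch only; assumes "test" is an instance of some concrete Test subclass): test.add_cmd("crm_mon", args="-1", expected_exitcode=ExitStatus.OK, stdout_match="Cluster Summary")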
""" if cmd is None: raise ValueError("cmd cannot be None") self._cmds.append( { "args": kwargs.get("args", ""), "check_rng": kwargs.get("check_rng", True), "check_stderr": kwargs.get("check_stderr", True), "cmd": cmd, "expected_exitcode": kwargs.get("expected_exitcode", ExitStatus.OK), "kill": kwargs.get("kill"), "no_wait": kwargs.get("no_wait", False), "stdout_match": kwargs.get("stdout_match"), "stdout_no_match": kwargs.get("stdout_no_match"), "validate": kwargs.get("validate", True), "env": kwargs.get("env"), } ) def add_log_pattern(self, pattern, negative=False, regex=False): """Add a pattern that should appear in the test's logs.""" self._patterns.append(Pattern(pattern, negative=negative, regex=regex)) def _signal_dict(self): """Return a dictionary mapping signal numbers to their names.""" # FIXME: When we support python >= 3.5, this function can be replaced with: # signal.Signals(self.daemon_process.returncode).name return { getattr(signal, _signame): _signame for _signame in dir(signal) if _signame.startswith("SIG") and not _signame.startswith("SIG_") } def clean_environment(self): """Clean up the host after executing a test.""" if self._daemon_process: if self._daemon_process.poll() is None: self._daemon_process.terminate() self._daemon_process.wait() else: rc = self._daemon_process.returncode signame = self._signal_dict().get(-rc, "RET=%s" % rc) msg = "FAILURE - '%s' failed. %s abnormally exited during test (%s)." self._result_txt = msg % (self.name, self._daemon_location, signame) self.exitcode = ExitStatus.ERROR self._daemon_process = None self._daemon_output = "" # the default for utf-8 encoding would error out if e.g. memory corruption # makes fenced output any kind of 8 bit value - while still interesting # for debugging and we'd still like the regression-test to go over the # full set of test-cases with open(self.logpath, 'rt', encoding="ISO-8859-1") as logfile: for line in logfile.readlines(): self._daemon_output += line if self.verbose: print("Daemon Output Start") print(self._daemon_output) print("Daemon Output End") def print_result(self, filler): """Print the result of the last test execution.""" print("%s%s" % (filler, self._result_txt)) def run(self): """Execute this test.""" i = 1 self.start_environment() if self.verbose: print("\n--- START TEST - %s" % self.name) self._result_txt = "SUCCESS - '%s'" % (self.name) self.exitcode = ExitStatus.OK for cmd in self._cmds: try: self.run_cmd(cmd) except ExitCodeError as e: print("Step %d FAILED - command returned %s, expected %d" % (i, e, cmd['expected_exitcode'])) self.set_error(i, cmd) break except OutputNotFoundError as e: print("Step %d FAILED - '%s' was not found in command output: %s" % (i, cmd['stdout_match'], e)) self.set_error(i, cmd) break except OutputFoundError as e: print("Step %d FAILED - '%s' was found in command output: %s" % (i, cmd['stdout_no_match'], e)) self.set_error(i, cmd) break except XmlValidationError as e: print("Step %d FAILED - xmllint failed: %s" % (i, e)) self.set_error(i, cmd) break if self.verbose: print("Step %d SUCCESS" % (i)) i += 1 self.clean_environment() if self.exitcode == ExitStatus.OK: self._match_log_patterns() print(self._result_txt) if self.verbose: print("--- END TEST - %s\n" % self.name) self.executed = True def run_cmd(self, args): """Execute a command as part of this test.""" cmd = shlex.split(args['args']) cmd.insert(0, args['cmd']) if self.verbose: print("\n\nRunning: %s" % " ".join(cmd)) # FIXME: Using "with" here breaks fencing merge tests. 
# pylint: disable=consider-using-with if args['env']: new_env = os.environ.copy() new_env.update(args['env']) test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=new_env) else: test = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if args['kill']: if self.verbose: print("Also running: %s" % args['kill']) # Typically, the kill argument is used to detect some sort of # failure. Without yielding for a few seconds here, the process # launched earlier that is listening for the failure may not have # time to connect to pacemaker-execd. time.sleep(2) subprocess.Popen(shlex.split(args['kill'])) if not args['no_wait']: test.wait() else: return ExitStatus.OK output = pipe_communicate(test, check_stderr=args['check_stderr']) if self.verbose: print(output) if test.returncode != args['expected_exitcode']: raise ExitCodeError(test.returncode) if args['stdout_match'] is not None and \ re.search(args['stdout_match'], output) is None: raise OutputNotFoundError(output) if args['stdout_no_match'] is not None and \ re.search(args['stdout_no_match'], output) is not None: raise OutputFoundError(output) if args['validate']: - output = validate(output, args['check_rng'], args['verbose']) + output = validate(output, args['check_rng'], self.verbose) if self.verbose: print(output) return ExitStatus.OK def set_error(self, step, cmd): """Record failure of this test.""" msg = "FAILURE - '%s' failed at step %d. Command: %s %s" self._result_txt = msg % (self.name, step, cmd['cmd'], cmd['args']) self.exitcode = ExitStatus.ERROR def start_environment(self): """Prepare the host for executing a test.""" if os.path.exists(self.logpath): os.remove(self.logpath) self._kill_daemons() self._start_daemons() logfile = None init_time = time.time() update_time = init_time while True: # FIXME: Eventually use 'with' here, which seems complicated given # everything happens in a loop. # pylint: disable=consider-using-with time.sleep(0.1) if not self.force_wait and logfile is None \ and os.path.exists(self.logpath): logfile = io.open(self.logpath, 'rt', encoding="ISO-8859-1") if not self.force_wait and logfile is not None: for line in logfile.readlines(): if "successfully started" in line: return now = time.time() if self.timeout > 0 and (now - init_time) >= self.timeout: if not self.force_wait: print("\tDaemon %s doesn't seem to have been initialized within %fs." "\n\tConsider specifying a longer '--timeout' value." % (self._daemon_location, self.timeout)) return if self.verbose and (now - update_time) >= 5: print("Waiting for %s to be initialized: %fs ..." % (self._daemon_location, now - init_time)) update_time = now class Tests: """The base class for a collection of regression tests.""" def __init__(self, **kwargs): """ Create a new Tests instance. This method must be provided by all subclasses, which must call Tests.__init__ first. Keyword arguments: force_wait -- If True, tests should wait the entire timeout period instead of returning as soon as their daemon reports that it started. logdir -- The base directory under which to create a directory to store output and temporary data. timeout -- How long, in seconds, to wait for the test to complete. verbose -- Whether to print additional information, including verbose command output and daemon log files.
""" self.force_wait = kwargs.get("force_wait", False) self.logdir = kwargs.get("logdir", "/tmp") self.timeout = kwargs.get("timeout", 2) self.verbose = kwargs.get("verbose", False) self._tests = [] def exit(self): """Exit (with error status code if any test failed).""" for test in self._tests: if not test.executed: continue if test.exitcode != ExitStatus.OK: sys.exit(ExitStatus.ERROR) sys.exit(ExitStatus.OK) def print_list(self): """List all registered tests.""" print("\n==== %d TESTS FOUND ====" % len(self._tests)) print("%35s - %s" % ("TEST NAME", "TEST DESCRIPTION")) print("%35s - %s" % ("--------------------", "--------------------")) for test in self._tests: print("%35s - %s" % (test.name, test.description)) print("==== END OF LIST ====\n") def print_results(self): """Print summary of results of executed tests.""" failures = 0 success = 0 print("\n\n======= FINAL RESULTS ==========") print("\n--- FAILURE RESULTS:") for test in self._tests: if not test.executed: continue if test.exitcode != ExitStatus.OK: failures += 1 test.print_result(" ") else: success += 1 if failures == 0: print(" None") print("\n--- TOTALS\n Pass:%d\n Fail:%d\n" % (success, failures)) def run_single(self, name): """Run a single named test.""" for test in self._tests: if test.name == name: test.run() break def run_tests(self): """Run all tests.""" for test in self._tests: test.run() def run_tests_matching(self, pattern): """Run all tests whose name matches a pattern.""" for test in self._tests: if test.name.count(pattern) != 0: test.run()