diff --git a/cts/cli/regression.error_codes.exp b/cts/cli/regression.error_codes.exp index 7d705e2652..f620845b9e 100644 --- a/cts/cli/regression.error_codes.exp +++ b/cts/cli/regression.error_codes.exp @@ -1,556 +1,556 @@ =#=#=#= Begin test: Get legacy return code =#=#=#= Error =#=#=#= End test: Get legacy return code - OK (0) =#=#=#= -* Passed: crm_error - Get legacy return code +* Passed: crm_error - Get legacy return code =#=#=#= Begin test: Get legacy return code (XML) =#=#=#= - + =#=#=#= End test: Get legacy return code (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get legacy return code (XML) +* Passed: crm_error - Get legacy return code (XML) =#=#=#= Begin test: Get legacy return code (with name) =#=#=#= pcmk_err_generic - Error =#=#=#= End test: Get legacy return code (with name) - OK (0) =#=#=#= -* Passed: crm_error - Get legacy return code (with name) +* Passed: crm_error - Get legacy return code (with name) =#=#=#= Begin test: Get legacy return code (with name) (XML) =#=#=#= - + =#=#=#= End test: Get legacy return code (with name) (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get legacy return code (with name) (XML) +* Passed: crm_error - Get legacy return code (with name) (XML) =#=#=#= Begin test: Get multiple legacy return codes =#=#=#= Error Operation requires quorum =#=#=#= End test: Get multiple legacy return codes - OK (0) =#=#=#= -* Passed: crm_error - Get multiple legacy return codes +* Passed: crm_error - Get multiple legacy return codes =#=#=#= Begin test: Get multiple legacy return codes (XML) =#=#=#= - + =#=#=#= End test: Get multiple legacy return codes (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get multiple legacy return codes (XML) +* Passed: crm_error - Get multiple legacy return codes (XML) =#=#=#= Begin test: Get multiple legacy return codes (with names) =#=#=#= pcmk_err_generic - Error pcmk_err_no_quorum - Operation requires quorum =#=#=#= End test: Get multiple legacy return codes (with names) - OK (0) =#=#=#= -* Passed: crm_error - Get multiple legacy return codes (with names) +* Passed: crm_error - Get multiple legacy return codes (with names) =#=#=#= Begin test: Get multiple legacy return codes (with names) (XML) =#=#=#= - + =#=#=#= End test: Get multiple legacy return codes (with names) (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get multiple legacy return codes (with names) (XML) +* Passed: crm_error - Get multiple legacy return codes (with names) (XML) =#=#=#= Begin test: List legacy return codes (spot check) =#=#=#= 201: Error 202: Operation requires quorum 203: Update does not conform to the configured schema 204: Schema transform failed 205: Update was older than existing configuration 206: Application of update diff failed 207: Application of update diff failed, requesting full refresh 208: On-disk configuration was manually modified 209: Could not archive previous configuration =#=#=#= End test: List legacy return codes (spot check) - OK (0) =#=#=#= -* Passed: crm_error - List legacy return codes (spot check) +* Passed: crm_error - List legacy return codes (spot check) =#=#=#= Begin test: List legacy return codes (spot check) (XML) =#=#=#= =#=#=#= End test: List legacy return codes (spot check) (XML) - OK (0) =#=#=#= -* Passed: crm_error - List legacy return codes (spot check) (XML) +* Passed: crm_error - List legacy return codes (spot check) (XML) =#=#=#= Begin test: List legacy return codes (spot check) (with names) =#=#=#= 201: pcmk_err_generic Error 202: pcmk_err_no_quorum Operation requires quorum 203: pcmk_err_schema_validation Update 
does not conform to the configured schema 204: pcmk_err_transform_failed Schema transform failed 205: pcmk_err_old_data Update was older than existing configuration 206: pcmk_err_diff_failed Application of update diff failed 207: pcmk_err_diff_resync Application of update diff failed, requesting full refresh 208: pcmk_err_cib_modified On-disk configuration was manually modified 209: pcmk_err_cib_backup Could not archive previous configuration =#=#=#= End test: List legacy return codes (spot check) (with names) - OK (0) =#=#=#= -* Passed: crm_error - List legacy return codes (spot check) (with names) +* Passed: crm_error - List legacy return codes (spot check) (with names) =#=#=#= Begin test: List legacy return codes (spot check) (with names) (XML) =#=#=#= =#=#=#= End test: List legacy return codes (spot check) (with names) (XML) - OK (0) =#=#=#= -* Passed: crm_error - List legacy return codes (spot check) (with names) (XML) +* Passed: crm_error - List legacy return codes (spot check) (with names) (XML) =#=#=#= Begin test: Get unknown Pacemaker return code =#=#=#= Error =#=#=#= End test: Get unknown Pacemaker return code - OK (0) =#=#=#= -* Passed: crm_error - Get unknown Pacemaker return code +* Passed: crm_error - Get unknown Pacemaker return code =#=#=#= Begin test: Get unknown Pacemaker return code (XML) =#=#=#= =#=#=#= End test: Get unknown Pacemaker return code (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get unknown Pacemaker return code (XML) +* Passed: crm_error - Get unknown Pacemaker return code (XML) =#=#=#= Begin test: Get unknown Pacemaker return code (with name) =#=#=#= Unknown - Error =#=#=#= End test: Get unknown Pacemaker return code (with name) - OK (0) =#=#=#= -* Passed: crm_error - Get unknown Pacemaker return code (with name) +* Passed: crm_error - Get unknown Pacemaker return code (with name) =#=#=#= Begin test: Get unknown Pacemaker return code (with name) (XML) =#=#=#= =#=#=#= End test: Get unknown Pacemaker return code (with name) (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get unknown Pacemaker return code (with name) (XML) +* Passed: crm_error - Get unknown Pacemaker return code (with name) (XML) =#=#=#= Begin test: Get negative Pacemaker return code =#=#=#= Node not found =#=#=#= End test: Get negative Pacemaker return code - OK (0) =#=#=#= -* Passed: crm_error - Get negative Pacemaker return code +* Passed: crm_error - Get negative Pacemaker return code =#=#=#= Begin test: Get negative Pacemaker return code (XML) =#=#=#= =#=#=#= End test: Get negative Pacemaker return code (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get negative Pacemaker return code (XML) +* Passed: crm_error - Get negative Pacemaker return code (XML) =#=#=#= Begin test: Get negative Pacemaker return code (with name) =#=#=#= pcmk_rc_node_unknown - Node not found =#=#=#= End test: Get negative Pacemaker return code (with name) - OK (0) =#=#=#= -* Passed: crm_error - Get negative Pacemaker return code (with name) +* Passed: crm_error - Get negative Pacemaker return code (with name) =#=#=#= Begin test: Get negative Pacemaker return code (with name) (XML) =#=#=#= =#=#=#= End test: Get negative Pacemaker return code (with name) (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get negative Pacemaker return code (with name) (XML) +* Passed: crm_error - Get negative Pacemaker return code (with name) (XML) =#=#=#= Begin test: List Pacemaker return codes (non-positive) =#=#=#= -1039: Compression/decompression error -1038: Nameserver resolution error -1037: No active transaction found -1036: Bad XML 
patch format -1035: Bad input value provided -1034: Disabled -1033: Two or more XML elements have the same ID -1032: Unable to parse CIB XML -1031: Cluster simulation produced invalid transition -1030: Error writing graph file -1029: Error writing dot(1) file -1028: Value too small to be stored in data type -1027: Input file not available -1026: Output message produced no output -1025: Result occurs after given range -1024: Result occurs within given range -1023: Result occurs before given range -1022: Result undetermined -1021: Not applicable under current conditions -1020: IPC server process is active but not accepting connections -1019: IPC server is unresponsive -1018: IPC server is blocked by unauthorized process -1017: Operation requires quorum -1016: Update does not conform to the configured schema -1015: Schema is already the latest available -1014: Schema transform failed -1013: Update was older than existing configuration -1012: Application of update diff failed -1011: Application of update diff failed, requesting full refresh -1010: On-disk configuration was manually modified -1009: Could not archive previous configuration -1008: Could not save new configuration to disk -1007: Could not parse on-disk configuration -1006: Resource active on multiple nodes -1005: Node not found -1004: Already in requested state -1003: Bad name/value pair given -1002: Unknown output format -1001: Error 0: OK =#=#=#= End test: List Pacemaker return codes (non-positive) - OK (0) =#=#=#= -* Passed: crm_error - List Pacemaker return codes (non-positive) +* Passed: crm_error - List Pacemaker return codes (non-positive) =#=#=#= Begin test: List Pacemaker return codes (non-positive) (XML) =#=#=#= =#=#=#= End test: List Pacemaker return codes (non-positive) (XML) - OK (0) =#=#=#= -* Passed: crm_error - List Pacemaker return codes (non-positive) (XML) +* Passed: crm_error - List Pacemaker return codes (non-positive) (XML) =#=#=#= Begin test: List Pacemaker return codes (non-positive) (with names) =#=#=#= -1039: pcmk_rc_compression Compression/decompression error -1038: pcmk_rc_ns_resolution Nameserver resolution error -1037: pcmk_rc_no_transaction No active transaction found -1036: pcmk_rc_bad_xml_patch Bad XML patch format -1035: pcmk_rc_bad_input Bad input value provided -1034: pcmk_rc_disabled Disabled -1033: pcmk_rc_duplicate_id Two or more XML elements have the same ID -1032: pcmk_rc_unpack_error Unable to parse CIB XML -1031: pcmk_rc_invalid_transition Cluster simulation produced invalid transition -1030: pcmk_rc_graph_error Error writing graph file -1029: pcmk_rc_dot_error Error writing dot(1) file -1028: pcmk_rc_underflow Value too small to be stored in data type -1027: pcmk_rc_no_input Input file not available -1026: pcmk_rc_no_output Output message produced no output -1025: pcmk_rc_after_range Result occurs after given range -1024: pcmk_rc_within_range Result occurs within given range -1023: pcmk_rc_before_range Result occurs before given range -1022: pcmk_rc_undetermined Result undetermined -1021: pcmk_rc_op_unsatisfied Not applicable under current conditions -1020: pcmk_rc_ipc_pid_only IPC server process is active but not accepting connections -1019: pcmk_rc_ipc_unresponsive IPC server is unresponsive -1018: pcmk_rc_ipc_unauthorized IPC server is blocked by unauthorized process -1017: pcmk_rc_no_quorum Operation requires quorum -1016: pcmk_rc_schema_validation Update does not conform to the configured schema -1015: pcmk_rc_schema_unchanged Schema is already the latest available -1014: 
pcmk_rc_transform_failed Schema transform failed -1013: pcmk_rc_old_data Update was older than existing configuration -1012: pcmk_rc_diff_failed Application of update diff failed -1011: pcmk_rc_diff_resync Application of update diff failed, requesting full refresh -1010: pcmk_rc_cib_modified On-disk configuration was manually modified -1009: pcmk_rc_cib_backup Could not archive previous configuration -1008: pcmk_rc_cib_save Could not save new configuration to disk -1007: pcmk_rc_cib_corrupt Could not parse on-disk configuration -1006: pcmk_rc_multiple Resource active on multiple nodes -1005: pcmk_rc_node_unknown Node not found -1004: pcmk_rc_already Already in requested state -1003: pcmk_rc_bad_nvpair Bad name/value pair given -1002: pcmk_rc_unknown_format Unknown output format -1001: pcmk_rc_error Error 0: pcmk_rc_ok OK =#=#=#= End test: List Pacemaker return codes (non-positive) (with names) - OK (0) =#=#=#= -* Passed: crm_error - List Pacemaker return codes (non-positive) (with names) +* Passed: crm_error - List Pacemaker return codes (non-positive) (with names) =#=#=#= Begin test: List Pacemaker return codes (non-positive) (with names) (XML) =#=#=#= =#=#=#= End test: List Pacemaker return codes (non-positive) (with names) (XML) - OK (0) =#=#=#= -* Passed: crm_error - List Pacemaker return codes (non-positive) (with names) (XML) +* Passed: crm_error - List Pacemaker return codes (non-positive) (with names) (XML) =#=#=#= Begin test: Get unknown crm_exit_t exit code =#=#=#= Unknown exit status =#=#=#= End test: Get unknown crm_exit_t exit code - OK (0) =#=#=#= -* Passed: crm_error - Get unknown crm_exit_t exit code +* Passed: crm_error - Get unknown crm_exit_t exit code =#=#=#= Begin test: Get unknown crm_exit_t exit code (XML) =#=#=#= =#=#=#= End test: Get unknown crm_exit_t exit code (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get unknown crm_exit_t exit code (XML) +* Passed: crm_error - Get unknown crm_exit_t exit code (XML) =#=#=#= Begin test: Get unknown crm_exit_t exit code (with name) =#=#=#= CRM_EX_UNKNOWN - Unknown exit status =#=#=#= End test: Get unknown crm_exit_t exit code (with name) - OK (0) =#=#=#= -* Passed: crm_error - Get unknown crm_exit_t exit code (with name) +* Passed: crm_error - Get unknown crm_exit_t exit code (with name) =#=#=#= Begin test: Get unknown crm_exit_t exit code (with name) (XML) =#=#=#= =#=#=#= End test: Get unknown crm_exit_t exit code (with name) (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get unknown crm_exit_t exit code (with name) (XML) +* Passed: crm_error - Get unknown crm_exit_t exit code (with name) (XML) =#=#=#= Begin test: Get crm_exit_t exit code =#=#=#= Error occurred =#=#=#= End test: Get crm_exit_t exit code - OK (0) =#=#=#= -* Passed: crm_error - Get crm_exit_t exit code +* Passed: crm_error - Get crm_exit_t exit code =#=#=#= Begin test: Get crm_exit_t exit code (XML) =#=#=#= - + =#=#=#= End test: Get crm_exit_t exit code (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get crm_exit_t exit code (XML) +* Passed: crm_error - Get crm_exit_t exit code (XML) =#=#=#= Begin test: Get crm_exit_t exit code (with name) =#=#=#= CRM_EX_ERROR - Error occurred =#=#=#= End test: Get crm_exit_t exit code (with name) - OK (0) =#=#=#= -* Passed: crm_error - Get crm_exit_t exit code (with name) +* Passed: crm_error - Get crm_exit_t exit code (with name) =#=#=#= Begin test: Get crm_exit_t exit code (with name) (XML) =#=#=#= - + =#=#=#= End test: Get crm_exit_t exit code (with name) (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get crm_exit_t exit code 
(with name) (XML) +* Passed: crm_error - Get crm_exit_t exit code (with name) (XML) =#=#=#= Begin test: Get all crm_exit_t exit codes =#=#=#= 0: OK 1: Error occurred 2: Invalid parameter 3: Unimplemented 4: Insufficient privileges 5: Not installed 6: Not configured 7: Not running 8: Promoted 9: Failed in promoted role 64: Incorrect usage 65: Invalid data given 66: Input file not available 67: User does not exist 68: Host does not exist 69: Necessary service unavailable 70: Internal software bug 71: Operating system error occurred 72: System file not available 73: Cannot create output file 74: I/O error occurred 75: Temporary failure, try again 76: Protocol violated 77: Insufficient privileges 78: Invalid configuration 100: Fatal error occurred, will not respawn 101: System panic required 102: Not connected 103: Update was older than existing configuration 104: Digest mismatch 105: No such object 106: Quorum required 107: Operation not safe 108: Requested item already exists 109: Multiple items match request 110: Requested item has expired 111: Requested item is not yet in effect 112: Could not determine status 113: Not applicable under current conditions 124: Timeout occurred 190: Service is active but might fail soon 191: Service is promoted but might fail soon 193: No exit status available =#=#=#= End test: Get all crm_exit_t exit codes - OK (0) =#=#=#= -* Passed: crm_error - Get all crm_exit_t exit codes +* Passed: crm_error - Get all crm_exit_t exit codes =#=#=#= Begin test: Get all crm_exit_t exit codes (XML) =#=#=#= =#=#=#= End test: Get all crm_exit_t exit codes (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get all crm_exit_t exit codes (XML) +* Passed: crm_error - Get all crm_exit_t exit codes (XML) =#=#=#= Begin test: Get all crm_exit_t exit codes (with name) =#=#=#= 0: CRM_EX_OK OK 1: CRM_EX_ERROR Error occurred 2: CRM_EX_INVALID_PARAM Invalid parameter 3: CRM_EX_UNIMPLEMENT_FEATURE Unimplemented 4: CRM_EX_INSUFFICIENT_PRIV Insufficient privileges 5: CRM_EX_NOT_INSTALLED Not installed 6: CRM_EX_NOT_CONFIGURED Not configured 7: CRM_EX_NOT_RUNNING Not running 8: CRM_EX_PROMOTED Promoted 9: CRM_EX_FAILED_PROMOTED Failed in promoted role 64: CRM_EX_USAGE Incorrect usage 65: CRM_EX_DATAERR Invalid data given 66: CRM_EX_NOINPUT Input file not available 67: CRM_EX_NOUSER User does not exist 68: CRM_EX_NOHOST Host does not exist 69: CRM_EX_UNAVAILABLE Necessary service unavailable 70: CRM_EX_SOFTWARE Internal software bug 71: CRM_EX_OSERR Operating system error occurred 72: CRM_EX_OSFILE System file not available 73: CRM_EX_CANTCREAT Cannot create output file 74: CRM_EX_IOERR I/O error occurred 75: CRM_EX_TEMPFAIL Temporary failure, try again 76: CRM_EX_PROTOCOL Protocol violated 77: CRM_EX_NOPERM Insufficient privileges 78: CRM_EX_CONFIG Invalid configuration 100: CRM_EX_FATAL Fatal error occurred, will not respawn 101: CRM_EX_PANIC System panic required 102: CRM_EX_DISCONNECT Not connected 103: CRM_EX_OLD Update was older than existing configuration 104: CRM_EX_DIGEST Digest mismatch 105: CRM_EX_NOSUCH No such object 106: CRM_EX_QUORUM Quorum required 107: CRM_EX_UNSAFE Operation not safe 108: CRM_EX_EXISTS Requested item already exists 109: CRM_EX_MULTIPLE Multiple items match request 110: CRM_EX_EXPIRED Requested item has expired 111: CRM_EX_NOT_YET_IN_EFFECT Requested item is not yet in effect 112: CRM_EX_INDETERMINATE Could not determine status 113: CRM_EX_UNSATISFIED Not applicable under current conditions 124: CRM_EX_TIMEOUT Timeout occurred 190: CRM_EX_DEGRADED Service is active 
but might fail soon 191: CRM_EX_DEGRADED_PROMOTED Service is promoted but might fail soon 193: CRM_EX_NONE No exit status available =#=#=#= End test: Get all crm_exit_t exit codes (with name) - OK (0) =#=#=#= -* Passed: crm_error - Get all crm_exit_t exit codes (with name) +* Passed: crm_error - Get all crm_exit_t exit codes (with name) =#=#=#= Begin test: Get all crm_exit_t exit codes (with name) (XML) =#=#=#= =#=#=#= End test: Get all crm_exit_t exit codes (with name) (XML) - OK (0) =#=#=#= -* Passed: crm_error - Get all crm_exit_t exit codes (with name) (XML) +* Passed: crm_error - Get all crm_exit_t exit codes (with name) (XML) diff --git a/cts/cts-cli.in b/cts/cts-cli.in index ba71f616f5..a3e31a4380 100644 --- a/cts/cts-cli.in +++ b/cts/cts-cli.in @@ -1,1651 +1,1738 @@ #!@PYTHON@ """Regression tests for Pacemaker's command line tools.""" # pylint doesn't like the module name "cts-cli" which is an invalid complaint for this file # but probably something we want to continue warning about elsewhere # pylint: disable=invalid-name # pacemaker imports need to come after we modify sys.path, which pylint will complain about. # pylint: disable=wrong-import-position __copyright__ = "Copyright 2024 the Pacemaker project contributors" __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY" import argparse from contextlib import contextmanager from datetime import datetime, timedelta import fileinput from functools import partial from multiprocessing import Pool, cpu_count import os import pathlib import re from shutil import copyfile import signal import subprocess import sys from tempfile import NamedTemporaryFile, TemporaryDirectory, mkstemp import types # These imports allow running from a source checkout after running `make`. if os.path.exists("@abs_top_srcdir@/python"): sys.path.insert(0, "@abs_top_srcdir@/python") # pylint: disable=comparison-of-constants,comparison-with-itself,condition-evals-to-constant if os.path.exists("@abs_top_builddir@/python") and "@abs_top_builddir@" != "@abs_top_srcdir@": sys.path.insert(0, "@abs_top_builddir@/python") from pacemaker._cts.errors import XmlValidationError from pacemaker._cts.validate import validate from pacemaker.buildoptions import BuildOptions from pacemaker.exitstatus import ExitStatus # The default list of tests to run, in the order they should be run default_tests = ["access_render", "daemons", "dates", "error_codes", "tools", "crm_mon", "acls", "validity", "upgrade", "rules", "feature_set"] other_tests = ["agents"] # The directory containing this program test_home = os.path.dirname(os.path.realpath(__file__)) # The name of the shadow CIB SHADOW_NAME = "cts-cli" # Arguments to pass to valgrind VALGRIND_ARGS = ["-q", "--gen-suppressions=all", "--show-reachable=no", "--leak-check=full", "--trace-children=no", "--time-stamp=yes", "--num-callers=20", "--suppressions=%s/valgrind-pcmk.suppressions" % test_home] def apply_substitutions(s, extra=None): """Apply text substitutions to an input string and return it.""" substitutions = { "cts_cli_data": "%s/cli" % test_home, "shadow": SHADOW_NAME, "test_home": test_home, } if extra is not None: substitutions.update(extra) return s.format(**substitutions) def cleanup_shadow_dir(): """Remove any previously created shadow CIB directory.""" subprocess.run(["crm_shadow", "--force", "--delete", SHADOW_NAME], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) def copy_existing_cib(existing): """ Generate a CIB by copying an existing one to a temporary location. 
This is suitable for use with the cib_gen= parameter to the TestGroup class. """ (fp, new) = mkstemp(prefix="cts-cli.cib.xml.") os.close(fp) copyfile(apply_substitutions(existing), new) return new def current_cib(): """Return the complete current CIB.""" with environ({"CIB_user": "root"}): return subprocess.check_output(["cibadmin", "-Q"], encoding="utf-8") def make_test_group(desc, cmd, classes, **kwargs): """ Create a TestGroup that replicates the same test for multiple classes. The given description, cmd, and kwargs will be passed as arguments to each Test subclass in the classes parameter. The resulting objects will then be added to a TestGroup and returned. The main purpose of this function is to be able to run the same test for both text and XML formats without having to duplicate everything. Thus, the cmd string may contain "{fmt}", which will have any --output-as= class variable substituted in. """ tests = [] for c in classes: obj = c(desc, apply_substitutions(cmd, extra={"fmt": c.format_args}), **kwargs) tests.append(obj) return TestGroup(tests) def create_shadow_cib(shadow_dir, create_empty=True, validate_with=None, valgrind=False): """ Create a shadow CIB file. Keyword arguments: create_empty -- If True, the shadow CIB will be empty. Otherwise, the shadow CIB will be a copy of the currently active cluster configuration. validate_with -- If not None, the schema version to validate the CIB against valgrind -- If True, run the create operation under valgrind """ args = ["crm_shadow", "--batch", "--force"] if create_empty: args += ["--create-empty", SHADOW_NAME] else: args += ["--create", SHADOW_NAME] if validate_with is not None: args += ["--validate-with", validate_with] if valgrind: args = ["valgrind"] + VALGRIND_ARGS + args os.environ["CIB_shadow_dir"] = shadow_dir os.environ["CIB_shadow"] = SHADOW_NAME subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) delete_shadow_resource_defaults() def delete_shadow_resource_defaults(): """Clear out the rsc_defaults section from a shadow CIB file.""" # A newly created empty CIB might or might not have a rsc_defaults section # depending on whether the --with-resource-stickiness-default configure # option was used. To ensure regression tests behave the same either way, # delete any rsc_defaults after creating or erasing a CIB. subprocess.run(["cibadmin", "--delete", "--xml-text", ""], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) # The above command might or might not bump the CIB version, so reset it # to ensure future changes result in the same version for comparison. reset_shadow_cib_version() def reset_shadow_cib_version(): """Set various version numbers in a shadow CIB file back to 0.""" with fileinput.input(files=[shadow_path()], inplace=True) as f: for line in f: line = re.sub('epoch="[0-9]*"', 'epoch="1"', line) line = re.sub('num_updates="[0-9]*"', 'num_updates="0"', line) line = re.sub('admin_epoch="[0-9]*"', 'admin_epoch="0"', line) print(line, end='') def run_cmd_list(cmds): """ Run one or more shell commands. cmds can be: * A string * A Python function * A list of the above Raises subprocess.CalledProcessError on error. 
""" if cmds is None: return if isinstance(cmds, (str, types.FunctionType)): cmds = [cmds] for c in cmds: if isinstance(c, types.FunctionType): c() else: subprocess.run(apply_substitutions(c), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, universal_newlines=True, check=True) def sanitize_output(s): """ Replace content in the output expected to change between test runs. This is stuff like version numbers, timestamps, source line numbers, build options, system names and messages, etc. """ # A list of tuples of regular expressions and their replacements. replacements = [ (r'Last change: .*', r'Last change:'), (r'Last updated: .*', r'Last updated:'), (r' api-version="[^"]*"', r' api-version="X"'), (r'.*\((apply_upgrade)@.*\.c:[0-9][0-9]*\)', r'\1'), (r'crm_feature_set="[^"]*" ', r''), (r'.*\((crm_time_parse_duration)@.*\.c:[0-9][0-9]*\)', r'\1'), (r'.*\((crm_time_parse_period)@.*\.c:[0-9][0-9]*\)', r'\1'), (r'.*\((crm_time_parse_sec)@.*\.c:[0-9][0-9]*\)', r'\1'), (r' default="[^"]*"', r' default=""'), (r' end="[0-9][-+: 0-9]*Z*"', r' end=""'), (r'last_change time=".*"', r'last_change time=""'), (r'last_update time=".*"', r'last_update time=""'), (r' last-rc-change=[\'"][-+A-Za-z0-9: ]*[\'"],?', r''), (r'.*\((parse_date)@.*\.c:[0-9][0-9]*\)', r'\1'), (r'.*\((pcmk__.*)@.*\.c:[0-9][0-9]*\)', r'\1'), (r'request=".*(crm_[a-zA-Z0-9]*)', r'request="\1'), (r'request=".*iso8601', r'request="iso8601'), (r' start="[0-9][-+: 0-9]*Z*"', r' start=""'), (r'.*\((unpack_.*)@.*\.c:[0-9][0-9]*\)', r'\1'), (r'validate-with="[^"]*" ', r''), (r' version="[^"]*"', r' version=""'), (r'\(version .*\)', r'(version)') ] new_output = [] for line in s: # @TODO Add a way to suppress this message within cibadmin, and then drop # the handling here. if line.startswith("The supplied command can provide skewed result"): continue for (pattern, repl) in replacements: line = re.sub(pattern, repl, line) new_output.append(line) return new_output def shadow_path(): """Return the current shadow CIB path.""" p = subprocess.check_output(["crm_shadow", "--file"], encoding="utf-8") return p.strip() def write_cib(s): """ Generate a CIB by writing a string to a temporary location. This is suitable for use with the cib_gen= parameter to the TestGroup class. """ (fp, new) = mkstemp(prefix="cts-cli.cib.xml.") os.write(fp, s.encode()) os.close(fp) return new @contextmanager def environ(env): """ Run code in an environment modified with the provided dict. This context manager augments the current process environment with the provided dict, allowing code to be constructed like so: e = {"CIB_user": "xyx"} with environ(e): ... When the context manager exits, the previous environment will be restored. It is possible to remove an environment key (whether it was in the environment by default, or given with a nested call to this context) by passing None for the value. Additionally, this context manager accepts None for the env parameter, in which case nothing will be done. Finally, note that values in env will be passed to apply_substitutions before being set in the environment. """ if env is None: env = {} original_env = {} else: original_env = os.environ.copy() for k, v in env.items(): if v is None: os.environ.pop(k) else: os.environ[k] = apply_substitutions(v) try: yield finally: for k, v in original_env.items(): if v is None: os.environ.pop(k) else: os.environ[k] = v class StdinCmd: """ A class for defining a command that should be run later. 
subprocess.Popen (and its various helper functions) start running the command immediately, which doesn't work if we want to provide the command when a Test is created, but delay its execution until the environment is defined when the Test is run. This class allows us to do that. """ def __init__(self, cmd): """Create a new StdinCmd instance. Arguments: cmd -- The command string to run later. This string will be passed to apply_substitutions before being executed. """ self._cmd = cmd def run(self): """Run this command, returning a subprocess.Popen object.""" return subprocess.Popen(apply_substitutions(self._cmd), shell=True, encoding="utf-8", stdout=subprocess.PIPE) class Test: """A base class for defining a single command line regression test.""" format_args = "" def __init__(self, desc, cmd, expected_rc=ExitStatus.OK, update_cib=False, setup=None, teardown=None, stdin=None, env=None): """ Create a new Test instance. Arguments: desc -- A short human-readable description of this test cmd -- The command to run for this test, as a string. This string will be passed to apply_substitutions before being executed. Keyword arguments: expected_rc -- The expected return value of cmd update_cib -- If True, the resulting CIB will be printed after performing the test setup -- A shell command to be run in the same environment as cmd, immediately before the test. Valid types are: a string, a Python function, or a list of the above teardown -- Like setup, but runs immediately after the test stdin -- If not None, the text to feed to cmd as its stdin env -- If not None, a dict of values to be added to the test environment. This will be added when the test is run and will override anything given to the TestGroup. """ self.desc = desc self.cmd = cmd self.expected_rc = expected_rc self.update_cib = update_cib self._setup = setup self._teardown = teardown self._stdin = stdin if env is None: self._env = {} else: self._env = env self._output = None @property def output(self): """Return the test's detailed output.""" return self._output def _log_end_test(self, rc): """Log a message when a test ends.""" if isinstance(rc, ExitStatus): rc_str = str(rc) else: if rc < 0: rc = abs(rc) rc_str = signal.strsignal(rc) else: rc = ExitStatus(rc) rc_str = str(rc) self._output.append("=#=#=#= End test: %s - %s (%d) =#=#=#=" % (self.desc, rc_str, rc)) def _log_start_test(self): """Log a message when a test starts.""" self._output.append("=#=#=#= Begin test: %s =#=#=#=" % self.desc) def _log_test_failed(self, app, rc): """Log a message when a test fails.""" self._output.append("* Failed (rc=%.3d): %-23s - %s" % (rc, app, self.desc)) def _log_test_passed(self, app): """Log a message when a test passes.""" self._output.append("* Passed: %-21s - %s" % (app, self.desc)) # pylint: disable=unused-argument def _validate_hook(self, rc, _stdout, _stderr, valgrind=False): """Validate test output.""" self._log_end_test(rc) return rc def _run_setup_teardown(self, cmd, app): """ Run any setup or teardown command required by this test. On success (or if no command is present), return True. On failure, return False and log the stdout/stderr of the command for debugging. 
         Arguments:

         cmd -- The setup/teardown command(s) to run
         app -- The base name of the test command, for logging purposes
         """
         try:
             run_cmd_list(cmd)
             return True
         except subprocess.CalledProcessError as exn:
             rc = exn.returncode

             self._output.extend(exn.stderr.splitlines())
             self._output.extend(exn.stdout.splitlines())
             self._log_test_failed(app, rc)
             return False

     def run(self, group, env=None, valgrind=False):
         """
         Run this test.

         Basic output is printed to stdout, while detailed output is available
         in the self.output property after this function has been run.  Return
         True if the return code matches self.expected_rc, and False otherwise.

         Arguments:

         group -- The name of the group this test is a part of, for logging
                  purposes

         Keyword arguments:

         env -- If not None, a dict of values to be added to the test
                environment
         """
         self._output = []

         cmd = apply_substitutions(self.cmd)
         app = cmd.split(" ")[0]

         test_id = "%s(%s)" % (app, group)
         print("* Running: %-31s - %s" % (test_id, self.desc))

         self._log_start_test()

         # Add any environment variables specified in Test.__init__
         if env is None:
             env = self._env
         else:
             # Note: dict.update returns None, so do not assign its result
             # back to env or the merged environment would be lost.
             env.update(self._env)

         with environ(env):
             # Run the setup hook, if any
             if not self._run_setup_teardown(self._setup, app):
                 return False

             # Define basic arguments for all forms of running this test.
             kwargs = {"stdout": subprocess.PIPE,
                       "stderr": subprocess.PIPE,
                       "shell": True,
                       "universal_newlines": True,
                       "check": False}

             stdin_p = None

             # Handle the stdin= parameter.
             if isinstance(self._stdin, StdinCmd):
                 stdin_p = self._stdin.run()
                 kwargs["stdin"] = stdin_p.stdout
             elif isinstance(self._stdin, pathlib.Path):
                 kwargs["input"] = self._stdin.read_text()
             else:
                 kwargs["input"] = self._stdin

             if valgrind:
                 cmd = "valgrind %s %s" % (" ".join(VALGRIND_ARGS), cmd)

             # Run the test command
             # We handle the "check" argument above in the kwargs dict.
             # pylint: disable-msg=subprocess-run-check
             cmd_p = subprocess.run(cmd, **kwargs)
             rc = cmd_p.returncode

             if stdin_p is not None:
                 stdin_p.stdout.close()

             self._output.extend(cmd_p.stderr.splitlines())
             self._output.extend(cmd_p.stdout.splitlines())

             # Run the teardown hook, if any
             if not self._run_setup_teardown(self._teardown, app):
                 return False

             if self.update_cib:
                 self._output.append("=#=#=#= Current cib after: %s =#=#=#=" % self.desc)
                 self._output.extend(current_cib().splitlines())

             self._validate_hook(rc, cmd_p.stdout, cmd_p.stderr, valgrind=valgrind)

             if rc == self.expected_rc:
                 self._log_test_passed(app)
                 return True

             self._log_test_failed(app, rc)
             return False


 class ValidatingTest(Test):
     """A Test subclass that additionally runs test results through xmllint."""

     format_args = "--output-as=xml"

     def __init__(self, desc, cmd, **kwargs):
         """Create a new ValidatingTest instance."""
         Test.__init__(self, desc + " (XML)", cmd, **kwargs)

     def _validate_hook(self, rc, stdout, stderr, valgrind=False):
         """Validate test output with xmllint."""
         # Do not validate if running under valgrind, even if told to do so.
         # Valgrind will output a lot more stuff that is not XML, so it
         # wouldn't validate anyway.
         if valgrind:
             return Test._validate_hook(self, rc, stdout, stderr, valgrind=valgrind)

         try:
             validate(stdout)
             # We only care about the return code from validation if there was
             # an error, which will be dealt with below.  Here, we want to log
             # the original return code from the test itself.
self._log_end_test(rc) return 0 except XmlValidationError as e: self._output.append("=#=#=#= End test: %s - Failed to validate (%d) =#=#=#=" % (self.desc, e.exit_code)) self._output.extend(e.output.splitlines()) return e.exit_code class TestGroup: """A base class for a group of related tests.""" def __init__(self, tests, cib_gen=None, env=None, setup=None, teardown=None): """ Create a new TestGroup instance. Arguments: tests -- A list of Test instances Keyword arguments: cib_gen -- If not None, a function that generates a CIB file and returns the name of that CIB. This will be added to the test environment as CIB_file and used for all tests in this group. The file will then be deleted after all tests have been run. env -- If not None, a dict of values to be added to the test environment setup -- A command string, python function, or list of the previous types to run immediately before the test. This will be run in the same environment as cmd. teardown -- Like setup, but runs immediately after the tests """ self.tests = tests self._cib_gen = cib_gen self._env = env self._setup = setup self._teardown = teardown self._successes = None self._failures = None self._output = None @property def failures(self): """Return the number of member tests that failed.""" return self._failures @property def output(self): """Return the test's detailed output.""" return self._output @property def successes(self): """Return the number of member tests that succeeded.""" return self._successes def _run_setup_teardown(self, cmd): """ Run any setup or teardown command required by this test group. On success (or if no command is present), return True. On failure, return False and log the stdout/stderr of the command for debugging. Arguments: cmd -- The setup/teardown command(s) to run """ try: run_cmd_list(cmd) return True except subprocess.CalledProcessError as exn: self._output.extend(exn.stderr.splitlines()) self._output.extend(exn.stdout.splitlines()) return False def run(self, group, valgrind=False): """ Run all Test instances that are a part of this regression test. Additionally, record their stdout and stderr in the self.output property and the total number of tests that passed and failed. Arguments: group -- The name of the group this test is a part of, for logging purposes """ self._failures = 0 self._successes = 0 self._output = [] cib_file = None with environ(self._env): # If we were given a way to generate a CIB, do that now and add it to the # environment. if self._cib_gen is not None: cib_file = self._cib_gen() os.environ.update({"CIB_file": cib_file}) # Run the setup hook, if any if not self._run_setup_teardown(self._setup): return False # Run the tests for t in self.tests: rc = t.run(group, valgrind=valgrind) if isinstance(t, TestGroup): self._successes += t.successes self._failures += t.failures else: if rc: self._successes += 1 else: self._failures += 1 self._output.extend(t.output) if cib_file is not None: os.environ.pop("CIB_file") os.unlink(cib_file) # Run the teardown hook, if any if self._run_setup_teardown(self._teardown): return False return True class ShadowTestGroup(TestGroup): """A group of related tests that require a shadow CIB.""" def __init__(self, tests, **kwargs): """ Create a new ShadowTestGroup instance. Arguments: tests -- A list of Test instances Keyword arguments: create -- If True, create a shadow CIB file (see create_empty). Otherwise, just create a temp directory and set environment variables. create_empty -- If True, the shadow CIB will be empty. 
Otherwise, the shadow CIB will be a copy of the currently active cluster configuration. validate_with -- If not None, the schema version to validate the CIB against """ self._create = kwargs.pop("create", True) self._create_empty = kwargs.pop("create_empty", True) self._validate_with = kwargs.pop("validate_with", None) TestGroup.__init__(self, tests, **kwargs) def run(self, group, valgrind=False): """ Run all Test instances that are a part of this regression test. Additionally, record their stdout and stderr in the self.output property and the total number of tests that passed and failed. Arguments: group -- The name of the group this test is a part of, for logging purposes """ with TemporaryDirectory(prefix="cts-cli.shadow.") as shadow_dir: if self._create: create_shadow_cib(shadow_dir, create_empty=self._create_empty, validate_with=self._validate_with, valgrind=valgrind) else: os.environ["CIB_shadow_dir"] = shadow_dir os.environ["CIB_shadow"] = SHADOW_NAME rc = TestGroup.run(self, group, valgrind=valgrind) if self._create: cleanup_shadow_dir() os.environ.pop("CIB_shadow_dir") os.environ.pop("CIB_shadow") return rc class RegressionTest: """A base class for testing a single command line tool.""" def __init__(self): """Create a new RegressionTest instance.""" self._identical = None self._successes = None self._failures = None self._tempfile = None self._output = None @property def failures(self): """Return the number of member tests that failed.""" return self._failures @property def identical(self): """Return whether the expected output matches the actual output.""" return self._identical @property def name(self): """ Return the name of this regression test. This should be a unique, very short, single word name without any special characters. It must match the name of some word in the default_tests list because it may be given with the -r option on the command line to select only certain tests to run. All subclasses must define this property. """ raise NotImplementedError @property def results_file(self): """Return the location where the regression test results are stored.""" return self._tempfile @property def successes(self): """Return the number of member tests that succeeded.""" return self._successes @property def summary(self): """Return a list of all Passed/Failed lines for tests in this regression test.""" retval = [] for line in self._output: if line.startswith("* Failed") or line.startswith("* Passed"): retval.append(line) return retval @property def tests(self): """A list of Test and TestGroup instances to be run as part of this regression test.""" return [] def cleanup(self): """Remove the temp file where test output is stored.""" os.remove(self._tempfile) self._tempfile = None def diff(self, verbose=False): """ Compare the results of this regression test to the expected results. 
Arguments: verbose -- If True, the diff will be written to stdout """ args = ["diff", "-wu", "%s/cli/regression.%s.exp" % (test_home, self.name), self.results_file] try: if verbose: subprocess.run(args, check=True) else: subprocess.run(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True) self._identical = True except subprocess.CalledProcessError: self._identical = False def process_results(self, verbose): """If actual output differs from expected output, print the actual output.""" if self.identical: self.cleanup() return print(" %s" % self.results_file) if verbose: print("======================================================") with open(self.results_file, encoding="utf-8") as f: print(f.read()) print("======================================================") def run(self, valgrind=False): """ Run all Test and TestGroup instances that are a part of this regression test. Additionally, record their stdout and stderr in the self.output property and the total number of tests that passed and failed. """ self._failures = 0 self._successes = 0 self._output = [] for t in self.tests: rc = t.run(self.name, valgrind=valgrind) if isinstance(t, TestGroup): self._successes += t.successes self._failures += t.failures else: if rc: self._successes += 1 else: self._failures += 1 self._output.extend(t.output) self._output = sanitize_output(self._output) def write(self): """ Write test results to a temporary file and set self.results to its location. If self.run() has not yet been called, or there is otherwise no output, self.results will be None """ if not self._output: self._tempfile = None return s = "\n".join(self._output).encode() s += b"\n" (fp, self._tempfile) = mkstemp(prefix="cts-cli.%s." % self.name) os.write(fp, s) os.close(fp) class AccessRenderRegressionTest(RegressionTest): """A class for testing rendering of ACLs.""" @property def name(self): """Return the name of this regression test.""" return "access_render" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" acl_cib = """ """ # Create a test CIB that has ACL roles basic_tests = [ Test("Configure some ACLs", "cibadmin -M -o acls -p", update_cib=True, stdin=acl_cib), Test("Enable ACLs", "crm_attribute -n enable-acl -v true", update_cib=True), # Run cibadmin --show-access on the test CIB as an ACL-restricted user Test("An instance of ACLs render (into color)", "cibadmin --force --show-access=color -Q --user tony"), Test("An instance of ACLs render (into namespacing)", "cibadmin --force --show-access=namespace -Q --user tony"), Test("An instance of ACLs render (into text)", "cibadmin --force --show-access=text -Q --user tony"), ] return [ ShadowTestGroup(basic_tests), ] class DaemonsRegressionTest(RegressionTest): """A class for testing command line options of pacemaker daemons.""" @property def name(self): """Return the name of this regression test.""" return "daemons" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" return [ Test("Get CIB manager metadata", "pacemaker-based metadata"), Test("Get controller metadata", "pacemaker-controld metadata"), Test("Get fencer metadata", "pacemaker-fenced metadata"), Test("Get scheduler metadata", "pacemaker-schedulerd metadata"), ] class DatesRegressionTest(RegressionTest): """A class for testing handling of ISO8601 dates.""" @property def name(self): """Return the name of this regression test.""" return "dates" @property def tests(self): """A list of Test instances to be run as part 
of this regression test.""" invalid_periods = [ "", "2019-01-01 00:00:00Z", # Start with no end "2019-01-01 00:00:00Z/", # Start with only a trailing slash "PT2S/P1M", # Two durations "2019-13-01 00:00:00Z/P1M", # Out-of-range month "20191077T15/P1M", # Out-of-range day "2019-10-01T25:00:00Z/P1M", # Out-of-range hour "2019-10-01T24:00:01Z/P1M", # Hour 24 with anything but :00:00 "PT5H/20191001T007000Z", # Out-of-range minute "2019-10-01 00:00:80Z/P1M", # Out-of-range second "2019-10-01 00:00:10 +25:00/P1M", # Out-of-range offset hour "20191001T000010 -00:61/P1M", # Out-of-range offset minute "P1Y/2019-02-29 00:00:00Z", # Feb. 29 in non-leap-year "2019-01-01 00:00:00Z/P", # Duration with no values "P1Z/2019-02-20 00:00:00Z", # Invalid duration unit "P1YM/2019-02-20 00:00:00Z", # No number for duration unit ] # Ensure invalid period specifications are rejected invalid_period_tests = [] for p in invalid_periods: invalid_period_tests.append(Test("Invalid period - [%s]" % p, "iso8601 -p '%s'" % p, expected_rc=ExitStatus.INVALID_PARAM)) year_tests = [] for y in ["06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "40"]: year_tests.extend([ Test("20%s-W01-7" % y, "iso8601 -d '20%s-W01-7 00Z'" % y), Test("20%s-W01-7 - round-trip" % y, "iso8601 -d '20%s-W01-7 00Z' -W -E '20%s-W01-7 00:00:00Z'" % (y, y)), Test("20%s-W01-1" % y, "iso8601 -d '20%s-W01-1 00Z'" % y), Test("20%s-W01-1 - round-trip" % y, "iso8601 -d '20%s-W01-1 00Z' -W -E '20%s-W01-1 00:00:00Z'" % (y, y)) ]) return invalid_period_tests + [ make_test_group("'2005-040/2005-043' period", "iso8601 {fmt} -p '2005-040/2005-043'", [Test, ValidatingTest]), Test("2014-01-01 00:30:00 - 1 Hour", "iso8601 -d '2014-01-01 00:30:00Z' -D P-1H -E '2013-12-31 23:30:00Z'"), Test("Valid date - Feb 29 in leap year", "iso8601 -d '2020-02-29 00:00:00Z' -E '2020-02-29 00:00:00Z'"), Test("Valid date - using 'T' and offset", "iso8601 -d '20191201T131211 -05:00' -E '2019-12-01 18:12:11Z'"), Test("24:00:00 equivalent to 00:00:00 of next day", "iso8601 -d '2019-12-31 24:00:00Z' -E '2020-01-01 00:00:00Z'"), ] + year_tests + [ make_test_group("2009-W53-07", "iso8601 {fmt} -d '2009-W53-7 00:00:00Z' -W -E '2009-W53-7 00:00:00Z'", [Test, ValidatingTest]), Test("epoch + 2 Years 5 Months 6 Minutes", "iso8601 -d 'epoch' -D P2Y5MT6M -E '1972-06-01 00:06:00Z'"), Test("2009-01-31 + 1 Month", "iso8601 -d '20090131T000000Z' -D P1M -E '2009-02-28 00:00:00Z'"), Test("2009-01-31 + 2 Months", "iso8601 -d '2009-01-31 00:00:00Z' -D P2M -E '2009-03-31 00:00:00Z'"), Test("2009-01-31 + 3 Months", "iso8601 -d '2009-01-31 00:00:00Z' -D P3M -E '2009-04-30 00:00:00Z'"), make_test_group("2009-03-31 - 1 Month", "iso8601 {fmt} -d '2009-03-31 01:00:00 +01:00' -D P-1M -E '2009-02-28 00:00:00Z'", [Test, ValidatingTest]), make_test_group("2038-01-01 + 3 Months", "iso8601 {fmt} -d '2038-01-01 00:00:00Z' -D P3M -E '2038-04-01 00:00:00Z'", [Test, ValidatingTest]), ] +class ErrorCodeRegressionTest(RegressionTest): + """A class for testing error code reporting.""" + + @property + def name(self): + """Return the name of this regression test.""" + return "error_codes" + + @property + def tests(self): + """A list of Test instances to be run as part of this regression test.""" + # Legacy return codes + # + # Don't test unknown legacy code. FreeBSD includes a colon in strerror(), + # while other distros do not. 
+ legacy_tests = [ + make_test_group("Get legacy return code", "crm_error {fmt} 201", + [Test, ValidatingTest]), + make_test_group("Get legacy return code (with name)", "crm_error -n {fmt} 201", + [Test, ValidatingTest]), + make_test_group("Get multiple legacy return codes", "crm_error {fmt} 201 202", + [Test, ValidatingTest]), + make_test_group("Get multiple legacy return codes (with names)", + "crm_error -n {fmt} 201 202", + [Test, ValidatingTest]), + # We can only rely on our custom codes, so we'll spot-check codes 201-209 + Test("List legacy return codes (spot check)", + "crm_error -l | grep 20[1-9]"), + ValidatingTest("List legacy return codes (spot check)", + "crm_error -l --output-as=xml | grep -Ev ' """ basic_tests = [ Test("Set stonith-enabled=false", "crm_attribute -n stonith-enabled -v false", update_cib=True), Test("Configure the initial resource", "cibadmin -M -o resources -p", update_cib=True, stdin=resource_cib), Test("Upgrade to latest CIB schema (trigger 2.10.xsl + the wrapping)", "cibadmin --upgrade --force -V -V", update_cib=True), Test("Query a resource instance attribute (shall survive)", "crm_resource -r mySmartFuse -g requires", update_cib=True), ] return [ ShadowTestGroup(basic_tests, validate_with="pacemaker-2.10", env={"PCMK_trace_functions": "apply_upgrade,pcmk__update_schema"}) ] class RulesRegressionTest(RegressionTest): """A class for testing support for CIB rules.""" @property def name(self): """Return the name of this regression test.""" return "rules" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" tomorrow = datetime.now() + timedelta(days=1) rule_cib = """ """ % tomorrow.strftime("%F %T %z") usage_tests = [ make_test_group("crm_rule given no arguments", "crm_rule {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.USAGE), make_test_group("crm_rule given no rule to check", "crm_rule -c {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.USAGE), make_test_group("crm_rule given invalid input XML", "crm_rule -c -r blahblah -X invalidxml {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.DATAERR), make_test_group("crm_rule given invalid input XML on stdin", "crm_rule -c -r blahblah -X - {fmt}", [Test, ValidatingTest], stdin=StdinCmd("echo invalidxml"), expected_rc=ExitStatus.DATAERR), ] basic_tests = [ make_test_group("Try to check a rule that doesn't exist", "crm_rule -c -r blahblah {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOSUCH), make_test_group("Try to check a rule that has too many date_expressions", "crm_rule -c -r cli-rule-too-many-date-expressions {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), make_test_group("Verify basic rule is expired", "crm_rule -c -r cli-prefer-rule-dummy-expired {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.EXPIRED), make_test_group("Verify basic rule worked in the past", "crm_rule -c -r cli-prefer-rule-dummy-expired -d 20180101 {fmt}", [Test, ValidatingTest]), make_test_group("Verify basic rule is not yet in effect", "crm_rule -c -r cli-prefer-rule-dummy-not-yet {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.NOT_YET_IN_EFFECT), make_test_group("Verify date_spec rule with years has expired", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.EXPIRED), make_test_group("Verify multiple rules at once", "crm_rule -c -r cli-prefer-rule-dummy-not-yet -r cli-prefer-rule-dummy-date_spec-only-years {fmt}", [Test, ValidatingTest], 
expected_rc=ExitStatus.EXPIRED), make_test_group("Verify date_spec rule with years is in effect", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-only-years -d 20190201 {fmt}", [Test, ValidatingTest]), make_test_group("Try to check a rule whose date_spec does not contain years=", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-without-years {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), make_test_group("Try to check a rule whose date_spec contains years= and moon=", "crm_rule -c -r cli-prefer-rule-dummy-date_spec-years-moon {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), make_test_group("Try to check a rule with no date_expression", "crm_rule -c -r cli-no-date_expression-rule {fmt}", [Test, ValidatingTest], expected_rc=ExitStatus.UNIMPLEMENT_FEATURE), ] return usage_tests + [ TestGroup(basic_tests, cib_gen=partial(write_cib, rule_cib)) ] class FeatureSetRegressionTest(RegressionTest): """A class for testing support for version-specific features.""" @property def name(self): """Return the name of this regression test.""" return "feature_set" @property def tests(self): """A list of Test instances to be run as part of this regression test.""" basic_tests = [ # Import the test CIB Test("Import the test CIB", "cibadmin --replace --xml-file {cts_cli_data}/crm_mon-feature_set.xml", update_cib=True), Test("Complete text output, no mixed status", "crm_mon -1 --show-detail"), ValidatingTest("Output, no mixed status", "crm_mon --output-as=xml"), # Modify the CIB to fake that the cluster has mixed versions Test("Fake inconsistent feature set", "crm_attribute --node=cluster02 --name=#feature-set --update=3.15.0 --lifetime=reboot", update_cib=True), Test("Complete text output, mixed status", "crm_mon -1 --show-detail"), ValidatingTest("Output, mixed status", "crm_mon --output-as=xml"), ] return [ ShadowTestGroup(basic_tests), ] def build_options(): """Handle command line arguments.""" parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="Command line tool regression tests", epilog="Default tests: %s\n" "Other tests: agents (must be run in an installed environment)" % " ".join(default_tests)) parser.add_argument("-j", "--jobs", metavar="JOBS", default=cpu_count() - 1, type=int, help="The number of tests to run simultaneously") parser.add_argument("-p", "--path", metavar="DIR", action="append", help="Look for executables in DIR (may be specified multiple times)") parser.add_argument("-r", "--run-only", metavar="TEST", choices=default_tests + other_tests, action="append", help="Run only specified tests (may be specified multiple times)") parser.add_argument("-s", "--save", action="store_true", help="Save actual output as expected output") parser.add_argument("-v", "--valgrind", action="store_true", help="Run all commands under valgrind") parser.add_argument("-V", "--verbose", action="store_true", help="Display any differences from expected output") args = parser.parse_args() if args.path is None: args.path = [] return args def setup_environment(valgrind): """Set various environment variables needed for operation.""" if valgrind: os.environ["G_SLICE"] = "always-malloc" # Ensure all command output is in portable locale for comparison os.environ["LC_ALL"] = "C" # Log test errors to stderr os.environ["PCMK_stderr"] = "1" # Because we will change the value of PCMK_trace_functions and then reset it # back to some initial value at various points, it's easiest to assume it is # defined but empty by default if 
"PCMK_trace_functions" not in os.environ: os.environ["PCMK_trace_functions"] = "" def path_prepend(p): """Add another directory to the front of $PATH.""" old = os.environ["PATH"] os.environ["PATH"] = "%s:%s" % (p, old) def setup_path(opts_path): """Set the PATH environment variable appropriately for the tests.""" srcdir = os.path.dirname(test_home) # Add any search paths given on the command line for p in opts_path: path_prepend(p) if os.path.exists("%s/tools/crm_simulate" % srcdir): print("Using local binaries from: %s" % srcdir) path_prepend("%s/tools" % srcdir) for daemon in ["based", "controld", "fenced", "schedulerd"]: path_prepend("%s/daemons/%s" % (srcdir, daemon)) print("Using local schemas from: %s/xml" % srcdir) os.environ["PCMK_schema_directory"] = "%s/xml" % srcdir else: path_prepend(BuildOptions.DAEMON_DIR) os.environ["PCMK_schema_directory"] = BuildOptions.SCHEMA_DIR def _run_one(valgrind, r): """Run and return a TestGroup object.""" # See comments in run_regression_tests. r.run(valgrind=valgrind) return r def run_regression_tests(regs, jobs, valgrind=False): """Run the given tests and return the modified objects.""" executed = [] with Pool(processes=jobs) as pool: # What we really want to do here is: # pool.map(lambda r: r.run(),regs) # # However, multiprocessing uses pickle somehow in its operation, and python # doesn't want to pickle a lambda (nor a nested function within this one). # Thus, we need to use the _run_one wrapper at the file level just to call # run(). Further, if we don't return the modified object from that and then # return the list of modified objects here, it looks like the rest of the # program will use the originals, before this was ever run. executed = pool.map(partial(_run_one, valgrind), regs) return executed def results(regs, save, verbose): """Print the output from each regression test, returning the number whose output differs.""" output_differs = 0 if verbose: print("\n\nResults") for r in regs: r.write() r.diff() if not r.identical: output_differs += 1 if save: dest = "%s/cli/regression.%s.exp" % (test_home, r.name) copyfile(r.results_file, dest) return output_differs def summary(regs, output_differs, verbose): """Print the summary output for the entire test run.""" test_failures = 0 test_successes = 0 for r in regs: test_failures += r.failures test_successes += r.successes print("\n\nSummary") # First, print all the Passed/Failed lines from each Test run. for r in regs: print("\n".join(r.summary)) # Then, print information specific to each result possibility. Basically, # if there were failures then we print the output differences, leave the # failed output files in place, and exit with an error. Otherwise, clean up # anything that passed. 
     if test_failures > 0 and output_differs > 0:
         print("%d tests failed; see output in:" % test_failures)

         for r in regs:
             r.process_results(verbose)

         return ExitStatus.ERROR

     if test_failures > 0:
         print("%d tests failed" % test_failures)

         for r in regs:
             r.process_results(verbose)

         return ExitStatus.ERROR

     if output_differs:
         print("%d tests passed but output was unexpected; see output in:" % test_successes)

         for r in regs:
             r.process_results(verbose)

         return ExitStatus.DIGEST

     print("%d tests passed" % test_successes)

     for r in regs:
         r.cleanup()

     return ExitStatus.OK


 regression_classes = [
     AccessRenderRegressionTest,
     DaemonsRegressionTest,
     DatesRegressionTest,
+    ErrorCodeRegressionTest,
     CrmMonRegressionTest,
     UpgradeRegressionTest,
     RulesRegressionTest,
     FeatureSetRegressionTest,
 ]


 def main():
     """Run command line regression tests as specified by arguments."""
     opts = build_options()

     setup_environment(opts.valgrind)
     setup_path(opts.path)

     # Filter the list of all regression test classes to include only those
     # that were requested on the command line.  If empty, this defaults to
     # default_tests.
     if not opts.run_only:
         opts.run_only = default_tests

     regs = []

     for cls in regression_classes:
         obj = cls()

         if obj.name in opts.run_only:
             regs.append(obj)

     regs = run_regression_tests(regs, max(1, opts.jobs), valgrind=opts.valgrind)
     output_differs = results(regs, opts.save, opts.verbose)
     rc = summary(regs, output_differs, opts.verbose)

     sys.exit(rc)


 if __name__ == "__main__":
     main()
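
The environ() context manager in cts-cli.in augments the process environment for the duration of a test and restores it afterward, treating a value of None as a key removal. For readers unfamiliar with that save-and-restore pattern, here is a minimal self-contained sketch of the same idea; the name scoped_env is illustrative and not part of the patch, and this version restores by clearing and reloading a snapshot rather than iterating saved items:

```python
import os
from contextlib import contextmanager


@contextmanager
def scoped_env(env):
    """Temporarily apply env to os.environ; a value of None removes the key."""
    saved = os.environ.copy()
    try:
        for k, v in (env or {}).items():
            if v is None:
                os.environ.pop(k, None)
            else:
                os.environ[k] = v
        yield
    finally:
        # Restore exactly the previous environment, dropping any added keys.
        os.environ.clear()
        os.environ.update(saved)


# Usage mirroring the patch: run a command with CIB_user overridden.
with scoped_env({"CIB_user": "root"}):
    pass  # e.g. subprocess.check_output(["cibadmin", "-Q"])
```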
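The make_test_group() helper used throughout ErrorCodeRegressionTest relies on each Test subclass carrying a class-level format_args string that is substituted for "{fmt}" in the command template, so one description and command yield both a text and an XML variant. A toy sketch of that expansion, using simplified stand-ins for the real Test and ValidatingTest classes:

```python
class Test:
    format_args = ""  # plain-text output needs no extra flag

    def __init__(self, desc, cmd):
        self.desc = desc
        self.cmd = cmd


class ValidatingTest(Test):
    format_args = "--output-as=xml"  # XML output, validated with xmllint later

    def __init__(self, desc, cmd):
        super().__init__(desc + " (XML)", cmd)


def make_test_group(desc, cmd, classes):
    # One command template yields one test per output format.
    return [c(desc, cmd.format(fmt=c.format_args)) for c in classes]


for t in make_test_group("Get legacy return code", "crm_error {fmt} 201",
                         [Test, ValidatingTest]):
    print(t.desc, "->", t.cmd)
# Get legacy return code -> crm_error  201   (empty {fmt} leaves two spaces)
# Get legacy return code (XML) -> crm_error --output-as=xml 201
```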
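The comment in run_regression_tests() explains why it cannot simply pass a lambda to Pool.map: multiprocessing pickles the callable and its arguments to ship them to worker processes, and lambdas and nested functions are not picklable, so a module-level _run_one wrapper plus functools.partial is used instead, and the mutated objects are returned so the parent process sees the results. A standalone sketch of that workaround, with toy data in place of the regression test objects:

```python
from functools import partial
from multiprocessing import Pool


def _run_one(valgrind, reg):
    # Module-level function: picklable, unlike a lambda or closure.
    reg.append("valgrind" if valgrind else "plain")
    # Return the mutated object; the copy in the worker is otherwise lost.
    return reg


if __name__ == "__main__":
    regs = [["a"], ["b"]]
    with Pool(processes=2) as pool:
        regs = pool.map(partial(_run_one, False), regs)
    print(regs)  # [['a', 'plain'], ['b', 'plain']]
```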
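StdinCmd exists because subprocess.Popen starts its command immediately, while a Test wants to defer execution until the test environment is in place. The plumbing in Test.run then connects the deferred producer's stdout to the test command's stdin. A sketch of that pipe setup under simplified assumptions (the echo/cat commands here are placeholders, not commands from the patch):

```python
import subprocess

# Deferred producer: in the harness this Popen happens inside Test.run, so it
# inherits the environment established for the test.
producer = subprocess.Popen("echo invalidxml", shell=True,
                            encoding="utf-8", stdout=subprocess.PIPE)

# Consumer reads the producer's stdout as its stdin, the way the rules tests
# feed "crm_rule -c -r blahblah -X -" from StdinCmd("echo invalidxml").
result = subprocess.run("cat", shell=True, stdin=producer.stdout,
                        stdout=subprocess.PIPE, universal_newlines=True,
                        check=False)
producer.stdout.close()
print(result.stdout.strip())  # invalidxml
```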