Implement a basic KVM selftest runner in Python to run selftests. Add command line options to select individual testcase file or a directory containing multiple testcase files. After selecting the tests to run, start their execution and print their final execution status (passed, failed, skipped, no run), stdout and stderr on terminal. Print execution status in colors on the terminals where it is supported to easily distinguish different statuses of the tests execution. If a test fails or times out, then return with a non-zero exit code after all of the tests execution have completed. If none of the tests fails or times out then exit with status 0 Provide some sample test configuration files to demonstrate the execution of the runner. Runner can be started from tools/testing/selftests/kvm directory as: python3 runner --dirs tests OR python3 runner --testcases \ tests/dirty_log_perf_test/no_dirty_log_protect.test This is a very basic implementation of the runner. Next patches will enhance the runner by adding more features like parallelization, dumping output to file system, time limit, out-of-tree builds run, etc. 
Signed-off-by: Vipin Sharma --- tools/testing/selftests/kvm/.gitignore | 4 +- .../testing/selftests/kvm/runner/__main__.py | 94 +++++++++++++++++++ .../testing/selftests/kvm/runner/selftest.py | 64 +++++++++++++ .../selftests/kvm/runner/test_runner.py | 37 ++++++++ .../2slot_5vcpu_10iter.test | 1 + .../no_dirty_log_protect.test | 1 + 6 files changed, 200 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/kvm/runner/__main__.py create mode 100644 tools/testing/selftests/kvm/runner/selftest.py create mode 100644 tools/testing/selftests/kvm/runner/test_runner.py create mode 100644 tools/testing/selftests/kvm/tests/dirty_log_perf_test/2slot_5vcpu_10iter.test create mode 100644 tools/testing/selftests/kvm/tests/dirty_log_perf_test/no_dirty_log_protect.test diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 1d41a046a7bf..95af97b1ff9e 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -3,10 +3,12 @@ !/**/ !*.c !*.h +!*.py !*.S !*.sh +!*.test !.gitignore !config !settings !Makefile -!Makefile.kvm \ No newline at end of file +!Makefile.kvm diff --git a/tools/testing/selftests/kvm/runner/__main__.py b/tools/testing/selftests/kvm/runner/__main__.py new file mode 100644 index 000000000000..8d1a78450e41 --- /dev/null +++ b/tools/testing/selftests/kvm/runner/__main__.py @@ -0,0 +1,94 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2025 Google LLC +# +# Author: vipinsh@google.com (Vipin Sharma) + +import argparse +import logging +import os +import sys + +from test_runner import TestRunner +from selftest import SelftestStatus + + +def cli(): + parser = argparse.ArgumentParser( + prog="KVM Selftests Runner", + formatter_class=argparse.RawTextHelpFormatter, + allow_abbrev=False + ) + + parser.add_argument("-t", + "--testcases", + nargs="*", + default=[], + help="Testcases to run. 
Provide the space separated testcases paths") + + parser.add_argument("-d", + "--dirs", + nargs="*", + default=[], + help="Run the testcases present in the given directory and all of its sub directories. Provide the space separated paths to add multiple directories.") + + return parser.parse_args() + + +def setup_logging(): + class TerminalColorFormatter(logging.Formatter): + reset = "\033[0m" + red_bold = "\033[31;1m" + green = "\033[32m" + yellow = "\033[33m" + blue = "\033[34m" + + COLORS = { + SelftestStatus.PASSED: green, + SelftestStatus.NO_RUN: blue, + SelftestStatus.SKIPPED: yellow, + SelftestStatus.FAILED: red_bold + } + + def __init__(self, fmt=None, datefmt=None): + super().__init__(fmt, datefmt) + + def format(self, record): + return (self.COLORS.get(record.levelno, "") + + super().format(record) + self.reset) + + logger = logging.getLogger("runner") + logger.setLevel(logging.INFO) + + ch = logging.StreamHandler() + ch_formatter = TerminalColorFormatter(fmt="%(asctime)s | %(message)s", + datefmt="%H:%M:%S") + ch.setFormatter(ch_formatter) + logger.addHandler(ch) + + +def fetch_testcases_in_dirs(dirs): + testcases = [] + for dir in dirs: + for root, child_dirs, files in os.walk(dir): + for file in files: + testcases.append(os.path.join(root, file)) + return testcases + + +def fetch_testcases(args): + testcases = args.testcases + testcases.extend(fetch_testcases_in_dirs(args.dirs)) + # Remove duplicates + testcases = list(dict.fromkeys(testcases)) + return testcases + + +def main(): + args = cli() + setup_logging() + testcases = fetch_testcases(args) + return TestRunner(testcases).start() + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/tools/testing/selftests/kvm/runner/selftest.py b/tools/testing/selftests/kvm/runner/selftest.py new file mode 100644 index 000000000000..34005f83f0c3 --- /dev/null +++ b/tools/testing/selftests/kvm/runner/selftest.py @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2025 Google LLC +# +# 
Author: vipinsh@google.com (Vipin Sharma) + +import pathlib +import enum +import os +import subprocess + +class SelftestStatus(enum.IntEnum): + """ + Selftest Status. Integer values are just +1 to the logging.INFO level. + """ + + PASSED = 21 + NO_RUN = 22 + SKIPPED = 23 + FAILED = 24 + + def __str__(self): + return str.__str__(self.name) + +class Selftest: + """ + Represents a single selftest. + + Extract the test execution command from test file and executes it. + """ + + def __init__(self, test_path): + test_command = pathlib.Path(test_path).read_text().strip() + if not test_command: + raise ValueError("Empty test command in " + test_path) + + test_command = os.path.join(".", test_command) + self.exists = os.path.isfile(test_command.split(maxsplit=1)[0]) + self.test_path = test_path + self.command = test_command + self.status = SelftestStatus.NO_RUN + self.stdout = "" + self.stderr = "" + + def run(self): + if not self.exists: + self.stderr = "File doesn't exists." + return + + run_args = { + "universal_newlines": True, + "shell": True, + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE + } + proc = subprocess.run(self.command, **run_args) + self.stdout = proc.stdout + self.stderr = proc.stderr + + if proc.returncode == 0: + self.status = SelftestStatus.PASSED + elif proc.returncode == 4: + self.status = SelftestStatus.SKIPPED + else: + self.status = SelftestStatus.FAILED diff --git a/tools/testing/selftests/kvm/runner/test_runner.py b/tools/testing/selftests/kvm/runner/test_runner.py new file mode 100644 index 000000000000..4418777d75e3 --- /dev/null +++ b/tools/testing/selftests/kvm/runner/test_runner.py @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright 2025 Google LLC +# +# Author: vipinsh@google.com (Vipin Sharma) + +import logging +from selftest import Selftest +from selftest import SelftestStatus + +logger = logging.getLogger("runner") + + +class TestRunner: + def __init__(self, testcases): + self.tests = [] + + for testcase in 
testcases: + self.tests.append(Selftest(testcase)) + + def _log_result(self, test_result): + logger.info("*** stdout ***\n" + test_result.stdout) + logger.info("*** stderr ***\n" + test_result.stderr) + logger.log(test_result.status, + f"[{test_result.status.name}] {test_result.test_path}") + + def start(self): + ret = 0 + + for test in self.tests: + test.run() + self._log_result(test) + + if (test.status not in [SelftestStatus.PASSED, + SelftestStatus.NO_RUN, + SelftestStatus.SKIPPED]): + ret = 1 + return ret diff --git a/tools/testing/selftests/kvm/tests/dirty_log_perf_test/2slot_5vcpu_10iter.test b/tools/testing/selftests/kvm/tests/dirty_log_perf_test/2slot_5vcpu_10iter.test new file mode 100644 index 000000000000..5b8d56b44a75 --- /dev/null +++ b/tools/testing/selftests/kvm/tests/dirty_log_perf_test/2slot_5vcpu_10iter.test @@ -0,0 +1 @@ +dirty_log_perf_test -x 2 -v 5 -i 10 diff --git a/tools/testing/selftests/kvm/tests/dirty_log_perf_test/no_dirty_log_protect.test b/tools/testing/selftests/kvm/tests/dirty_log_perf_test/no_dirty_log_protect.test new file mode 100644 index 000000000000..ed3490b1d1a1 --- /dev/null +++ b/tools/testing/selftests/kvm/tests/dirty_log_perf_test/no_dirty_log_protect.test @@ -0,0 +1 @@ +dirty_log_perf_test -g -- 2.51.0.618.g983fd99d29-goog Add command line option, -p/--path, to specify the directory where test binaries exist. If this option is not provided then default to the current directory. Example: python3 runner --dirs test -p ~/build/selftests This option enables executing tests from out-of-tree builds. 
Signed-off-by: Vipin Sharma --- tools/testing/selftests/kvm/runner/__main__.py | 8 +++++++- tools/testing/selftests/kvm/runner/selftest.py | 4 ++-- tools/testing/selftests/kvm/runner/test_runner.py | 4 ++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/kvm/runner/__main__.py b/tools/testing/selftests/kvm/runner/__main__.py index 8d1a78450e41..943c3bfe2eb6 100644 --- a/tools/testing/selftests/kvm/runner/__main__.py +++ b/tools/testing/selftests/kvm/runner/__main__.py @@ -31,6 +31,12 @@ def cli(): default=[], help="Run the testcases present in the given directory and all of its sub directories. Provide the space separated paths to add multiple directories.") + parser.add_argument("-p", + "--path", + nargs='?', + default=".", + help="Finds the test executables in the given path. Default is the current directory.") + return parser.parse_args() @@ -87,7 +93,7 @@ def main(): args = cli() setup_logging() testcases = fetch_testcases(args) - return TestRunner(testcases).start() + return TestRunner(testcases, args).start() if __name__ == "__main__": diff --git a/tools/testing/selftests/kvm/runner/selftest.py b/tools/testing/selftests/kvm/runner/selftest.py index 34005f83f0c3..a94b6d4cda05 100644 --- a/tools/testing/selftests/kvm/runner/selftest.py +++ b/tools/testing/selftests/kvm/runner/selftest.py @@ -28,12 +28,12 @@ class Selftest: Extract the test execution command from test file and executes it. 
""" - def __init__(self, test_path): + def __init__(self, test_path, path): test_command = pathlib.Path(test_path).read_text().strip() if not test_command: raise ValueError("Empty test command in " + test_path) - test_command = os.path.join(".", test_command) + test_command = os.path.join(path, test_command) self.exists = os.path.isfile(test_command.split(maxsplit=1)[0]) self.test_path = test_path self.command = test_command diff --git a/tools/testing/selftests/kvm/runner/test_runner.py b/tools/testing/selftests/kvm/runner/test_runner.py index 4418777d75e3..acc9fb3dabde 100644 --- a/tools/testing/selftests/kvm/runner/test_runner.py +++ b/tools/testing/selftests/kvm/runner/test_runner.py @@ -11,11 +11,11 @@ logger = logging.getLogger("runner") class TestRunner: - def __init__(self, testcases): + def __init__(self, testcases, args): self.tests = [] for testcase in testcases: - self.tests.append(Selftest(testcase)) + self.tests.append(Selftest(testcase, args.path)) def _log_result(self, test_result): logger.info("*** stdout ***\n" + test_result.stdout) -- 2.51.0.618.g983fd99d29-goog Add a command line argument in KVM selftest runner to limit amount of time (seconds) given to a test for execution. Kill the test if it exceeds the given timeout. Define a new SelftestStatus.TIMED_OUT to denote a selftest final result. Add terminal color for status messages of timed out tests. Set the default value of 120 seconds for all tests. 
Signed-off-by: Vipin Sharma --- .../testing/selftests/kvm/runner/__main__.py | 9 ++++- .../testing/selftests/kvm/runner/selftest.py | 33 ++++++++++++------- .../selftests/kvm/runner/test_runner.py | 2 +- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/kvm/runner/__main__.py b/tools/testing/selftests/kvm/runner/__main__.py index 943c3bfe2eb6..5cedc5098a54 100644 --- a/tools/testing/selftests/kvm/runner/__main__.py +++ b/tools/testing/selftests/kvm/runner/__main__.py @@ -37,6 +37,11 @@ def cli(): default=".", help="Finds the test executables in the given path. Default is the current directory.") + parser.add_argument("--timeout", + default=120, + type=int, + help="Timeout, in seconds, before runner kills the running test. (Default: 120 seconds)") + return parser.parse_args() @@ -44,6 +49,7 @@ def setup_logging(): class TerminalColorFormatter(logging.Formatter): reset = "\033[0m" red_bold = "\033[31;1m" + red = "\033[31;1m" green = "\033[32m" yellow = "\033[33m" blue = "\033[34m" @@ -52,7 +58,8 @@ def setup_logging(): SelftestStatus.PASSED: green, SelftestStatus.NO_RUN: blue, SelftestStatus.SKIPPED: yellow, - SelftestStatus.FAILED: red_bold + SelftestStatus.FAILED: red_bold, + SelftestStatus.TIMED_OUT: red } def __init__(self, fmt=None, datefmt=None): diff --git a/tools/testing/selftests/kvm/runner/selftest.py b/tools/testing/selftests/kvm/runner/selftest.py index a94b6d4cda05..4783785ca230 100644 --- a/tools/testing/selftests/kvm/runner/selftest.py +++ b/tools/testing/selftests/kvm/runner/selftest.py @@ -17,6 +17,7 @@ class SelftestStatus(enum.IntEnum): NO_RUN = 22 SKIPPED = 23 FAILED = 24 + TIMED_OUT = 25 def __str__(self): return str.__str__(self.name) @@ -28,7 +29,7 @@ class Selftest: Extract the test execution command from test file and executes it. 
""" - def __init__(self, test_path, path): + def __init__(self, test_path, path, timeout): test_command = pathlib.Path(test_path).read_text().strip() if not test_command: raise ValueError("Empty test command in " + test_path) @@ -37,6 +38,7 @@ class Selftest: self.exists = os.path.isfile(test_command.split(maxsplit=1)[0]) self.test_path = test_path self.command = test_command + self.timeout = timeout self.status = SelftestStatus.NO_RUN self.stdout = "" self.stderr = "" @@ -50,15 +52,24 @@ class Selftest: "universal_newlines": True, "shell": True, "stdout": subprocess.PIPE, - "stderr": subprocess.PIPE + "stderr": subprocess.PIPE, + "timeout": self.timeout, } - proc = subprocess.run(self.command, **run_args) - self.stdout = proc.stdout - self.stderr = proc.stderr - if proc.returncode == 0: - self.status = SelftestStatus.PASSED - elif proc.returncode == 4: - self.status = SelftestStatus.SKIPPED - else: - self.status = SelftestStatus.FAILED + try: + proc = subprocess.run(self.command, **run_args) + self.stdout = proc.stdout + self.stderr = proc.stderr + + if proc.returncode == 0: + self.status = SelftestStatus.PASSED + elif proc.returncode == 4: + self.status = SelftestStatus.SKIPPED + else: + self.status = SelftestStatus.FAILED + except subprocess.TimeoutExpired as e: + self.status = SelftestStatus.TIMED_OUT + if e.stdout is not None: + self.stdout = e.stdout + if e.stderr is not None: + self.stderr = e.stderr diff --git a/tools/testing/selftests/kvm/runner/test_runner.py b/tools/testing/selftests/kvm/runner/test_runner.py index acc9fb3dabde..bea82c6239cd 100644 --- a/tools/testing/selftests/kvm/runner/test_runner.py +++ b/tools/testing/selftests/kvm/runner/test_runner.py @@ -15,7 +15,7 @@ class TestRunner: self.tests = [] for testcase in testcases: - self.tests.append(Selftest(testcase, args.path)) + self.tests.append(Selftest(testcase, args.path, args.timeout)) def _log_result(self, test_result): logger.info("*** stdout ***\n" + test_result.stdout) -- 
2.51.0.618.g983fd99d29-goog Add a command line flag, -o/--output, to selftest runner which enables it to save individual tests output (stdout & stderr) stream to a directory in a hierarchical way. Create folder hierarchy same as tests hieararcy given by --testcases and --dirs. Also, add a command line flag, --append-output-time, which will append timestamp (format YYYY.M.DD.HH.MM.SS) to the directory name given in --output flag. Example: python3 runner --dirs test -o test_result --append_output_time This will create test_result.2025.06.06.08.45.57 directory. Signed-off-by: Vipin Sharma --- .../testing/selftests/kvm/runner/__main__.py | 34 +++++++++++++-- .../testing/selftests/kvm/runner/selftest.py | 42 ++++++++++++++++--- .../selftests/kvm/runner/test_runner.py | 4 +- 3 files changed, 69 insertions(+), 11 deletions(-) diff --git a/tools/testing/selftests/kvm/runner/__main__.py b/tools/testing/selftests/kvm/runner/__main__.py index 5cedc5098a54..b27a41e86271 100644 --- a/tools/testing/selftests/kvm/runner/__main__.py +++ b/tools/testing/selftests/kvm/runner/__main__.py @@ -7,6 +7,8 @@ import argparse import logging import os import sys +import datetime +import pathlib from test_runner import TestRunner from selftest import SelftestStatus @@ -42,10 +44,20 @@ def cli(): type=int, help="Timeout, in seconds, before runner kills the running test. 
(Default: 120 seconds)") + parser.add_argument("-o", + "--output", + nargs='?', + help="Dumps test runner output which includes each test execution result, their stdouts and stderrs hierarchically in the given directory.") + + parser.add_argument("--append-output-time", + action="store_true", + default=False, + help="Appends timestamp to the output directory.") + return parser.parse_args() -def setup_logging(): +def setup_logging(args): class TerminalColorFormatter(logging.Formatter): reset = "\033[0m" red_bold = "\033[31;1m" @@ -72,12 +84,26 @@ def setup_logging(): logger = logging.getLogger("runner") logger.setLevel(logging.INFO) + formatter_args = { + "fmt": "%(asctime)s | %(message)s", + "datefmt": "%H:%M:%S" + } + ch = logging.StreamHandler() - ch_formatter = TerminalColorFormatter(fmt="%(asctime)s | %(message)s", - datefmt="%H:%M:%S") + ch_formatter = TerminalColorFormatter(**formatter_args) ch.setFormatter(ch_formatter) logger.addHandler(ch) + if args.output != None: + if (args.append_output_time): + args.output += datetime.datetime.now().strftime(".%Y.%m.%d.%H.%M.%S") + pathlib.Path(args.output).mkdir(parents=True, exist_ok=True) + logging_file = os.path.join(args.output, "log") + fh = logging.FileHandler(logging_file) + fh_formatter = logging.Formatter(**formatter_args) + fh.setFormatter(fh_formatter) + logger.addHandler(fh) + def fetch_testcases_in_dirs(dirs): testcases = [] @@ -98,7 +124,7 @@ def fetch_testcases(args): def main(): args = cli() - setup_logging() + setup_logging(args) testcases = fetch_testcases(args) return TestRunner(testcases, args).start() diff --git a/tools/testing/selftests/kvm/runner/selftest.py b/tools/testing/selftests/kvm/runner/selftest.py index 4783785ca230..1aedeaeb5e74 100644 --- a/tools/testing/selftests/kvm/runner/selftest.py +++ b/tools/testing/selftests/kvm/runner/selftest.py @@ -7,6 +7,7 @@ import pathlib import enum import os import subprocess +import contextlib class SelftestStatus(enum.IntEnum): """ @@ -29,7 +30,7 @@ 
class Selftest: Extract the test execution command from test file and executes it. """ - def __init__(self, test_path, path, timeout): + def __init__(self, test_path, path, timeout, output_dir): test_command = pathlib.Path(test_path).read_text().strip() if not test_command: raise ValueError("Empty test command in " + test_path) @@ -39,15 +40,14 @@ class Selftest: self.test_path = test_path self.command = test_command self.timeout = timeout + if output_dir is not None: + output_dir = os.path.join(output_dir, test_path.lstrip("./")) + self.output_dir = output_dir self.status = SelftestStatus.NO_RUN self.stdout = "" self.stderr = "" - def run(self): - if not self.exists: - self.stderr = "File doesn't exists." - return - + def _run(self, output=None, error=None): run_args = { "universal_newlines": True, "shell": True, @@ -59,7 +59,12 @@ class Selftest: try: proc = subprocess.run(self.command, **run_args) self.stdout = proc.stdout + if output is not None: + output.write(proc.stdout) + self.stderr = proc.stderr + if error is not None: + error.write(proc.stderr) if proc.returncode == 0: self.status = SelftestStatus.PASSED @@ -71,5 +76,30 @@ class Selftest: self.status = SelftestStatus.TIMED_OUT if e.stdout is not None: self.stdout = e.stdout + if output is not None: + output.write(e.stdout) if e.stderr is not None: self.stderr = e.stderr + if error is not None: + error.write(e.stderr) + + def run(self): + if not self.exists: + self.stderr = "File doesn't exists." 
+ return + + if self.output_dir is not None: + pathlib.Path(self.output_dir).mkdir(parents=True, exist_ok=True) + + output = None + error = None + with contextlib.ExitStack() as stack: + if self.output_dir is not None: + output_path = os.path.join(self.output_dir, "stdout") + output = stack.enter_context( + open(output_path, encoding="utf-8", mode="w")) + + error_path = os.path.join(self.output_dir, "stderr") + error = stack.enter_context( + open(error_path, encoding="utf-8", mode="w")) + return self._run(output, error) diff --git a/tools/testing/selftests/kvm/runner/test_runner.py b/tools/testing/selftests/kvm/runner/test_runner.py index bea82c6239cd..b9101f0e0432 100644 --- a/tools/testing/selftests/kvm/runner/test_runner.py +++ b/tools/testing/selftests/kvm/runner/test_runner.py @@ -13,9 +13,11 @@ logger = logging.getLogger("runner") class TestRunner: def __init__(self, testcases, args): self.tests = [] + self.output_dir = args.output for testcase in testcases: - self.tests.append(Selftest(testcase, args.path, args.timeout)) + self.tests.append(Selftest(testcase, args.path, args.timeout, + args.output)) def _log_result(self, test_result): logger.info("*** stdout ***\n" + test_result.stdout) -- 2.51.0.618.g983fd99d29-goog Add a command line argument, --jobs, to specify how many tests can execute concurrently. Set default to 1. 
Example: python3 runner --dirs tests -j 10
for test in self.tests: + future = executor.submit(self._run_test, test) + all_futures.append(future) + + for future in concurrent.futures.as_completed(all_futures): + test_result = future.result() + self._log_result(test_result) + if (test_result.status not in [SelftestStatus.PASSED, + SelftestStatus.NO_RUN, + SelftestStatus.SKIPPED]): + ret = 1 return ret -- 2.51.0.618.g983fd99d29-goog Add various print flags to selectively print outputs on terminal based on test execution status (passed, failed, timed out, skipped, no run). For each status provide further options (off, full, stderr, stdout, status) to choose verbosity of their prints. Make "full" the default choice. Example: To print stderr for the failed tests and only status for the passed test: python3 runner --test-dirs tests --print-failed stderr \ --print-passed status Above command with disable print off skipped, timed out, and no run tests. Signed-off-by: Vipin Sharma --- .../testing/selftests/kvm/runner/__main__.py | 45 +++++++++++++++++++ .../selftests/kvm/runner/test_runner.py | 19 ++++++-- 2 files changed, 60 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/runner/__main__.py b/tools/testing/selftests/kvm/runner/__main__.py index b98f72c9f7ee..4867e89c30f2 100644 --- a/tools/testing/selftests/kvm/runner/__main__.py +++ b/tools/testing/selftests/kvm/runner/__main__.py @@ -9,6 +9,7 @@ import os import sys import datetime import pathlib +import textwrap from test_runner import TestRunner from selftest import SelftestStatus @@ -60,6 +61,50 @@ def cli(): type=int, help="Maximum number of tests that can be run concurrently. (Default: 1)") + status_choices = ["off", "full", "stdout", "stderr", "status"] + status_help_text = textwrap.dedent('''\ + Control output of the {} test. + off : dont print anything. + full : print stdout, stderr, and status of the test. + stdout: print stdout and status of the test. + stderr: print stderr and status of the test. 
+ status: only print the status of test execution and no other output.'''); + + parser.add_argument("--print-passed", + default="full", + const="full", + nargs='?', + choices=status_choices, + help = status_help_text.format("passed")) + + parser.add_argument("--print-failed", + default="full", + const="full", + nargs='?', + choices=status_choices, + help = status_help_text.format("failed")) + + parser.add_argument("--print-skipped", + default="full", + const="full", + nargs='?', + choices=status_choices, + help = status_help_text.format("skipped")) + + parser.add_argument("--print-timed-out", + default="full", + const="full", + nargs='?', + choices=status_choices, + help = status_help_text.format("timed-out")) + + parser.add_argument("--print-no-run", + default="full", + const="full", + nargs='?', + choices=status_choices, + help = status_help_text.format("no-run")) + return parser.parse_args() diff --git a/tools/testing/selftests/kvm/runner/test_runner.py b/tools/testing/selftests/kvm/runner/test_runner.py index 92eec18fe5c6..e8e8fd91c1ad 100644 --- a/tools/testing/selftests/kvm/runner/test_runner.py +++ b/tools/testing/selftests/kvm/runner/test_runner.py @@ -17,6 +17,13 @@ class TestRunner: self.tests = [] self.output_dir = args.output self.jobs = args.jobs + self.print_stds = { + SelftestStatus.PASSED: args.print_passed, + SelftestStatus.FAILED: args.print_failed, + SelftestStatus.SKIPPED: args.print_skipped, + SelftestStatus.TIMED_OUT: args.print_timed_out, + SelftestStatus.NO_RUN: args.print_no_run + } for testcase in testcases: self.tests.append(Selftest(testcase, args.path, args.timeout, @@ -27,10 +34,14 @@ class TestRunner: return test def _log_result(self, test_result): - logger.info("*** stdout ***\n" + test_result.stdout) - logger.info("*** stderr ***\n" + test_result.stderr) - logger.log(test_result.status, - f"[{test_result.status.name}] {test_result.test_path}") + print_level = self.print_stds.get(test_result.status, "full") + + if (print_level == 
"full" or print_level == "stdout"): + logger.info("*** stdout ***\n" + test_result.stdout) + if (print_level == "full" or print_level == "stderr"): + logger.info("*** stderr ***\n" + test_result.stderr) + if (print_level != "off"): + logger.log(test_result.status, f"[{test_result.status.name}] {test_result.test_path}") def start(self): ret = 0 -- 2.51.0.618.g983fd99d29-goog Print current state of the KVM selftest runner during its execution. Show it as the bottom most line, make it sticky and colored. Provide the following information: - Total number of tests selected for run. - How many have executed. - Total for each end state. Example: Total: 3/3 Passed: 1 Failed: 1 Skipped: 0 Timed Out: 0 No Run: 1 Signed-off-by: Vipin Sharma --- .../testing/selftests/kvm/runner/test_runner.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/testing/selftests/kvm/runner/test_runner.py b/tools/testing/selftests/kvm/runner/test_runner.py index e8e8fd91c1ad..42274e695b76 100644 --- a/tools/testing/selftests/kvm/runner/test_runner.py +++ b/tools/testing/selftests/kvm/runner/test_runner.py @@ -15,6 +15,7 @@ logger = logging.getLogger("runner") class TestRunner: def __init__(self, testcases, args): self.tests = [] + self.status = {x: 0 for x in SelftestStatus} self.output_dir = args.output self.jobs = args.jobs self.print_stds = { @@ -33,9 +34,18 @@ class TestRunner: test.run() return test + def _sticky_update(self): + print(f"\r\033[1mTotal: {self.tests_ran}/{len(self.tests)}" \ + f"\033[32;1m Passed: {self.status[SelftestStatus.PASSED]}" \ + f"\033[31;1m Failed: {self.status[SelftestStatus.FAILED]}" \ + f"\033[33;1m Skipped: {self.status[SelftestStatus.SKIPPED]}"\ + f"\033[91;1m Timed Out: {self.status[SelftestStatus.TIMED_OUT]}"\ + f"\033[34;1m No Run: {self.status[SelftestStatus.NO_RUN]}\033[0m", end="\r") + def _log_result(self, test_result): print_level = self.print_stds.get(test_result.status, "full") + print("\033[2K", end="\r") if (print_level == 
"full" or print_level == "stdout"): logger.info("*** stdout ***\n" + test_result.stdout) if (print_level == "full" or print_level == "stderr"): @@ -43,8 +53,13 @@ class TestRunner: if (print_level != "off"): logger.log(test_result.status, f"[{test_result.status.name}] {test_result.test_path}") + self.status[test_result.status] += 1 + # Sticky bottom line + self._sticky_update() + def start(self): ret = 0 + self.tests_ran = 0 with concurrent.futures.ProcessPoolExecutor(max_workers=self.jobs) as executor: all_futures = [] @@ -54,9 +69,11 @@ class TestRunner: for future in concurrent.futures.as_completed(all_futures): test_result = future.result() + self.tests_ran += 1 self._log_result(test_result) if (test_result.status not in [SelftestStatus.PASSED, SelftestStatus.NO_RUN, SelftestStatus.SKIPPED]): ret = 1 + print("\n") return ret -- 2.51.0.618.g983fd99d29-goog Add 'tests_install' rule in the Makefile.kvm to auto generate default testcases for KVM selftests runner. Preserve the hierarchy of test executables for autogenerated files. Remove these testcases on invocation of 'make clean'. Autogeneration of default test files allows runner to execute default testcases easily. These default testcases don't need to be checked in as they are just executing the test without any command line options. 
Signed-off-by: Vipin Sharma --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/Makefile.kvm | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 95af97b1ff9e..548d435bde2f 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -7,6 +7,7 @@ !*.S !*.sh !*.test +default.test !.gitignore !config !settings diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm index 41b40c676d7f..6bb63f88c0e6 100644 --- a/tools/testing/selftests/kvm/Makefile.kvm +++ b/tools/testing/selftests/kvm/Makefile.kvm @@ -306,11 +306,15 @@ $(SPLIT_TEST_GEN_PROGS): $(OUTPUT)/%: $(OUTPUT)/%.o $(OUTPUT)/$(ARCH)/%.o $(SPLIT_TEST_GEN_OBJ): $(OUTPUT)/$(ARCH)/%.o: $(ARCH)/%.c $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ +# Default testcases for KVM selftests runner will be generated in this directory. +DEFAULT_TESTCASES = testcases_default_gen + EXTRA_CLEAN += $(GEN_HDRS) \ $(LIBKVM_OBJS) \ $(SPLIT_TEST_GEN_OBJ) \ $(TEST_DEP_FILES) \ $(TEST_GEN_OBJ) \ + $(OUTPUT)/$(DEFAULT_TESTCASES) \ cscope.* $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c $(GEN_HDRS) @@ -339,3 +343,19 @@ cscope: find . -name '*.c' \ -exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files cscope -b + +# Generate testcases in DEFAULT_TESTCASES directory. +# $(OUTPUT) is either CWD or specified in the make command. 
+tests_install: list_progs = $(patsubst $(OUTPUT)/%,%,$(TEST_GEN_PROGS)) +tests_install: + $(foreach tc, $(TEST_PROGS), \ + $(shell mkdir -p $(OUTPUT)/$(DEFAULT_TESTCASES)/$(patsubst %.sh,%,$(tc)))) + $(foreach tc, $(TEST_PROGS), \ + $(shell echo $(tc) > $(patsubst %.sh,$(OUTPUT)/$(DEFAULT_TESTCASES)/%/default.test,$(tc)))) + + $(foreach tc, $(list_progs), \ + $(shell mkdir -p $(OUTPUT)/$(DEFAULT_TESTCASES)/$(tc))) + $(foreach tc, $(list_progs), \ + $(shell echo $(tc) > $(patsubst %,$(OUTPUT)/$(DEFAULT_TESTCASES)/%/default.test,$(tc)))) + + @: -- 2.51.0.618.g983fd99d29-goog Add README.rst for KVM selftest runner and explain how to use the runner. Signed-off-by: Vipin Sharma --- tools/testing/selftests/kvm/.gitignore | 1 + tools/testing/selftests/kvm/runner/README.rst | 54 +++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 tools/testing/selftests/kvm/runner/README.rst diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 548d435bde2f..83aa2fe01bac 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -4,6 +4,7 @@ !*.c !*.h !*.py +!*.rst !*.S !*.sh !*.test diff --git a/tools/testing/selftests/kvm/runner/README.rst b/tools/testing/selftests/kvm/runner/README.rst new file mode 100644 index 000000000000..83b071c0a0e6 --- /dev/null +++ b/tools/testing/selftests/kvm/runner/README.rst @@ -0,0 +1,54 @@ +KVM Selftest Runner +=================== + +KVM selftest runner is highly configurable test executor that allows to run +tests with different configurations (not just the default), parallely, save +output to disk hierarchically, control what gets printed on console, provide +execution status. + +To generate default tests use:: + + # make tests_install + +This will create ``testcases_default_gen`` directory which will have testcases +in `default.test` files. 
Each KVM selftest will have a directory in which +`default.test` file will be created with executable path relative to KVM +selftest root directory i.e. `/tools/testing/selftests/kvm`. For example, the +`dirty_log_perf_test` will have:: + + # cat testcase_default_gen/dirty_log_perf_test/default.test + dirty_log_perf_test + +Runner will execute `dirty_log_perf_test`. Testcases files can also provide +extra arguments to the test:: + + # cat tests/dirty_log_perf_test/2slot_5vcpu_10iter.test + dirty_log_perf_test -x 2 -v 5 -i 10 + +In this case runner will execute the `dirty_log_perf_test` with the options. + +Example +======= + +To see all of the options:: + + # python3 runner -h + +To run all of the default tests:: + + # python3 runner -d testcases_default_gen + +To run tests parallely:: + + # python3 runner -d testcases_default_gen -j 40 + +To print only passed test status and failed test stderr:: + + # python3 runner -d testcases_default_gen --print-passed status \ + --print-failed stderr + +To run tests binary which are in some other directory (out of tree builds):: + + # python3 runner -d testcases_default_gen -p /path/to/binaries + + -- 2.51.0.618.g983fd99d29-goog