patman: test_util: Use unittest text runner to print test results

The python tools' test utilities handle printing test results, but the
output is quite bare compared to an ordinary unittest run. Delegate
printing the results to a unittest text runner, which gives us niceties
like clear separation between each test's result and how long it took to
run the test suite.

Unfortunately it does not print info for skipped tests by default, but
this can be handled later by a custom test result subclass. It also does
not print the tool name; manually print a heading that includes the
toolname so that the outputs of each tool's tests are distinguishable in
the CI output.

Signed-off-by: Alper Nebi Yasak <alpernebiyasak@gmail.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Author: Alper Nebi Yasak <alpernebiyasak@gmail.com>
Date:   2022-04-02 20:06:06 +03:00
Committed by: Simon Glass <sjg@chromium.org>
parent ce12c47b92
commit d8318feba1
6 changed files with 38 additions and 61 deletions
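As an illustration of the approach the commit message describes, here is a
minimal, self-contained sketch (not code from this commit; the test case,
function and tool name below are made up): print a per-tool heading, hand the
suite to a unittest text runner, and let the caller turn the returned result
into an exit code.

    import sys
    import unittest


    class SampleTest(unittest.TestCase):
        """Stand-in test case; the real suites come from each tool."""
        def test_ok(self):
            self.assertTrue(True)


    def run_suite(toolname):
        # Heading makes each tool's output identifiable in combined CI logs
        print(f" Running {toolname} tests ".center(70, "="))

        loader = unittest.TestLoader()
        suite = unittest.TestSuite()
        suite.addTests(loader.loadTestsFromTestCase(SampleTest))

        # The text runner prints each test's result and how long the run took
        runner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
        result = runner.run(suite)

        # Callers derive the exit code from the returned result
        return 0 if result.wasSuccessful() else 1


    if __name__ == '__main__':
        sys.exit(run_suite('sampletool'))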


@@ -13,7 +13,6 @@ import os
 import site
 import sys
 import traceback
-import unittest
 
 # Get the absolute path to this file at run-time
 our_path = os.path.dirname(os.path.realpath(__file__))
@@ -73,19 +72,18 @@ def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
     from binman import image_test
     import doctest
 
-    result = unittest.TestResult()
     test_name = args and args[0] or None
 
     # Run the entry tests first ,since these need to be the first to import the
     # 'entry' module.
-    test_util.run_test_suites(
-        result, debug, verbosity, test_preserve_dirs, processes, test_name,
+    result = test_util.run_test_suites(
+        'binman', debug, verbosity, test_preserve_dirs, processes, test_name,
         toolpath,
         [bintool_test.TestBintool, entry_test.TestEntry, ftest.TestFunctional,
         fdt_test.TestFdt, elf_test.TestElf, image_test.TestImage,
         cbfs_util_test.TestCbfs, fip_util_test.TestFip])
 
-    return test_util.report_result('binman', test_name, result)
+    return (0 if result.wasSuccessful() else 1)
 
 def RunTestCoverage(toolpath):
     """Run the tests and check that we get 100% coverage"""


@@ -11,7 +11,6 @@ import multiprocessing
 import os
 import re
 import sys
-import unittest
 
 # Bring in the patman libraries
 our_path = os.path.dirname(os.path.realpath(__file__))
@@ -34,19 +33,18 @@ def RunTests(skip_net_tests, verboose, args):
     from buildman import test
     import doctest
 
-    result = unittest.TestResult()
     test_name = args and args[0] or None
     if skip_net_tests:
         test.use_network = False
 
     # Run the entry tests first ,since these need to be the first to import the
     # 'entry' module.
-    test_util.run_test_suites(
-        result, False, verboose, False, None, test_name, [],
+    result = test_util.run_test_suites(
+        'buildman', False, verboose, False, None, test_name, [],
         [test.TestBuild, func_test.TestFunctional,
          'buildman.toolchain', 'patman.gitutil'])
 
-    return test_util.report_result('buildman', test_name, result)
+    return (0 if result.wasSuccessful() else 1)
 
 
 options, args = cmdline.ParseArgs()


@@ -24,7 +24,6 @@ see doc/driver-model/of-plat.rst
 from argparse import ArgumentParser
 import os
 import sys
-import unittest
 
 # Bring in the patman libraries
 our_path = os.path.dirname(os.path.realpath(__file__))
@@ -49,18 +48,18 @@ def run_tests(processes, args):
     from dtoc import test_src_scan
     from dtoc import test_dtoc
 
-    result = unittest.TestResult()
     sys.argv = [sys.argv[0]]
     test_name = args.files and args.files[0] or None
 
     test_dtoc.setup()
 
-    test_util.run_test_suites(
-        result, debug=True, verbosity=1, test_preserve_dirs=False,
+    result = test_util.run_test_suites(
+        toolname='dtoc', debug=True, verbosity=1, test_preserve_dirs=False,
         processes=processes, test_name=test_name, toolpath=[],
         class_and_module_list=[test_dtoc.TestDtoc,test_src_scan.TestSrcScan])
 
-    return test_util.report_result('binman', test_name, result)
+    return (0 if result.wasSuccessful() else 1)
 
 def RunTestCoverage():
     """Run the tests and check that we get 100% coverage"""


@@ -784,13 +784,13 @@ def RunTests(args):
     Returns:
         Return code, 0 on success
     """
-    result = unittest.TestResult()
     test_name = args and args[0] or None
 
-    test_util.run_test_suites(
-        result, False, False, False, None, test_name, None,
+    result = test_util.run_test_suites(
+        'test_fdt', False, False, False, None, test_name, None,
         [TestFdt, TestNode, TestProp, TestFdtUtil])
 
-    return test_util.report_result('fdt', test_name, result)
+    return (0 if result.wasSuccessful() else 1)
 
 
 if __name__ != '__main__':
     sys.exit(1)


@@ -12,7 +12,6 @@ import re
 import shutil
 import sys
 import traceback
-import unittest
 
 if __name__ == "__main__":
     # Allow 'from patman import xxx to work'
@@ -134,13 +133,12 @@ if args.cmd == 'test':
     import doctest
     from patman import func_test
 
-    result = unittest.TestResult()
-    test_util.run_test_suites(
-        result, False, False, False, None, None, None,
+    result = test_util.run_test_suites(
+        'patman', False, False, False, None, None, None,
         [test_checkpatch.TestPatch, func_test.TestFunctional,
          'gitutil', 'settings', 'terminal'])
 
-    sys.exit(test_util.report_result('patman', args.testname, result))
+    sys.exit(0 if result.wasSuccessful() else 1)
 
 # Process commits, produce patches files, check them, email them
 elif args.cmd == 'send':


@@ -102,36 +102,12 @@ def capture_sys_output():
         sys.stdout, sys.stderr = old_out, old_err
 
 
-def report_result(toolname:str, test_name: str, result: unittest.TestResult):
-    """Report the results from a suite of tests
-
-    Args:
-        toolname: Name of the tool that ran the tests
-        test_name: Name of test that was run, or None for all
-        result: A unittest.TestResult object containing the results
-    """
-    print(result)
-    for test, err in result.errors:
-        print(test.id(), err)
-    for test, err in result.failures:
-        print(test.id(), err)
-
-    if result.skipped:
-        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
-              's' if len(result.skipped) > 1 else ''))
-        for skip_info in result.skipped:
-            print('%s: %s' % (skip_info[0], skip_info[1]))
-    if result.errors or result.failures:
-        print('%s tests FAILED' % toolname)
-        return 1
-    return 0
-
-def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
+def run_test_suites(toolname, debug, verbosity, test_preserve_dirs, processes,
                     test_name, toolpath, class_and_module_list):
     """Run a series of test suites and collect the results
 
     Args:
-        result: A unittest.TestResult object to add the results to
+        toolname: Name of the tool that ran the tests
         debug: True to enable debugging, which shows a full stack trace on error
         verbosity: Verbosity level to use (0-4)
         test_preserve_dirs: True to preserve the input directory used by tests
@@ -145,11 +121,6 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
         class_and_module_list: List of test classes (type class) and module
             names (type str) to run
     """
-    for module in class_and_module_list:
-        if isinstance(module, str) and (not test_name or test_name == module):
-            suite = doctest.DocTestSuite(module)
-            suite.run(result)
-
     sys.argv = [sys.argv[0]]
     if debug:
         sys.argv.append('-D')
@@ -161,6 +132,19 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
     suite = unittest.TestSuite()
     loader = unittest.TestLoader()
+    runner = unittest.TextTestRunner(
+        stream=sys.stdout,
+        verbosity=(1 if verbosity is None else verbosity),
+    )
+
+    if use_concurrent and processes != 1:
+        suite = ConcurrentTestSuite(suite,
+                fork_for_tests(processes or multiprocessing.cpu_count()))
+
+    for module in class_and_module_list:
+        if isinstance(module, str) and (not test_name or test_name == module):
+            suite.addTests(doctest.DocTestSuite(module))
+
     for module in class_and_module_list:
         if isinstance(module, str):
             continue
@@ -179,9 +163,9 @@ def run_test_suites(result, debug, verbosity, test_preserve_dirs, processes,
             suite.addTests(loader.loadTestsFromName(test_name, module))
         else:
             suite.addTests(loader.loadTestsFromTestCase(module))
-    if use_concurrent and processes != 1:
-        concurrent_suite = ConcurrentTestSuite(suite,
-                fork_for_tests(processes or multiprocessing.cpu_count()))
-        concurrent_suite.run(result)
-    else:
-        suite.run(result)
+
+    print(f" Running {toolname} tests ".center(70, "="))
+    result = runner.run(suite)
+    print()
+
+    return result
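
The commit message leaves skipped-test reporting to a later custom test result
subclass. One plausible shape for that follow-up (a hypothetical sketch, not
part of this commit; the class and helper names are made up) is a
unittest.TextTestResult subclass handed to the text runner through its
resultclass argument:

    import sys
    import unittest


    class ResultWithSkips(unittest.TextTestResult):
        """Hypothetical result class that also reports skipped tests."""
        def addSkip(self, test, reason):
            super().addSkip(test, reason)
            # At the default verbosity TextTestResult only emits 's' for a
            # skip; also write out which test was skipped and why
            self.stream.writeln(f"SKIPPED: {test.id()}: {reason}")


    def make_runner(verbosity):
        # The runner instantiates resultclass for each run it performs
        return unittest.TextTestRunner(stream=sys.stdout, verbosity=verbosity,
                                       resultclass=ResultWithSkips)

Passing such a runner in place of the plain unittest.TextTestRunner above would
keep the per-tool heading and timing summary while making skips visible in the
CI output.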