kunit: tool: misc cleanups
This primarily comes from running pylint over the kunit tool code and ignoring some warnings we don't care about. If we ever got a fully clean setup, we could add this to run_checks.py, but we're not there yet.

Fix things like:
* Drop unused imports
* check `is None`, not `== None` (see PEP 8)
* remove redundant parens around returns
* remove redundant `else` / convert `elif` to `if` where appropriate
* rename make_arch_qemuconfig() param to base_kunitconfig (this is the name used in the subclass, and it's a better one)
* kunit_tool_test: check the exit code for SystemExit (could be 0)

Signed-off-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: David Gow <davidgow@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
commit 0453f984a7 (parent 94507ee3e9)
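For readers less familiar with these pylint-driven cleanups, here is a short standalone Python sketch (not part of the patch; the names Counts and check_file are invented for illustration) showing three of the patterns the diff applies: comparing against None with `is`, dropping redundant parentheses around return values, and removing `else`/`elif` branches that follow a `return`:

# Standalone illustration of the cleanup patterns above; Counts and
# check_file are hypothetical names, not part of the kunit tool.
from typing import Optional

class Counts:
    def __init__(self, passed: int = 0, failed: int = 0) -> None:
        self.passed = passed
        self.failed = failed

    def status(self) -> str:
        # No `elif`/`else` needed: every branch returns, so plain `if`s suffice.
        if self.failed:
            return 'FAILURE'
        if self.passed:
            return 'SUCCESS'
        return 'SKIPPED'

def check_file(path: Optional[str]) -> bool:
    # PEP 8: compare against None with `is`, never `==`.
    if path is None:
        return False
    # No redundant parentheses around the returned expression.
    return path.endswith('.log')

print(Counts(passed=3).status())  # -> SUCCESS
print(check_file(None))           # -> False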
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -124,7 +124,7 @@ def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest)
     lines.pop()
 
     # Filter out any extraneous non-test output that might have gotten mixed in.
-    return [l for l in lines if re.match('^[^\s.]+\.[^\s.]+$', l)]
+    return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
 
 def _suites_from_test_list(tests: List[str]) -> List[str]:
     """Extracts all the suites from an ordered list of tests."""
@@ -188,8 +188,7 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -
 def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
     if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED):
         return KunitStatus.SUCCESS
-    else:
-        return KunitStatus.TEST_FAILURE
+    return KunitStatus.TEST_FAILURE
 
 def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
     parse_start = time.time()
@@ -353,7 +352,7 @@ def add_exec_opts(parser) -> None:
                 'a non-hermetic test, one that might pass/fail based on '
                 'what ran before it.',
                 type=str,
-                choices=['suite', 'test']),
+                choices=['suite', 'test'])
 
 def add_parse_opts(parser) -> None:
     parser.add_argument('--raw_output', help='If set don\'t format output from kernel. '
@@ -497,7 +496,7 @@ def main(argv, linux=None):
         if result.status != KunitStatus.SUCCESS:
             sys.exit(1)
     elif cli_args.subcommand == 'parse':
-        if cli_args.file == None:
+        if cli_args.file is None:
             sys.stdin.reconfigure(errors='backslashreplace')  # pytype: disable=attribute-error
             kunit_output = sys.stdin
         else:
--- a/tools/testing/kunit/kunit_config.py
+++ b/tools/testing/kunit/kunit_config.py
@@ -20,16 +20,15 @@ class KconfigEntry:
 
     def __str__(self) -> str:
         if self.value == 'n':
-            return r'# CONFIG_%s is not set' % (self.name)
-        else:
-            return r'CONFIG_%s=%s' % (self.name, self.value)
+            return f'# CONFIG_{self.name} is not set'
+        return f'CONFIG_{self.name}={self.value}'
 
 
 class KconfigParseError(Exception):
     """Error parsing Kconfig defconfig or .config."""
 
 
-class Kconfig(object):
+class Kconfig:
     """Represents defconfig or .config specified using the Kconfig language."""
 
     def __init__(self) -> None:
@@ -49,7 +48,7 @@ class Kconfig(object):
                 if a.value == 'n':
                     continue
                 return False
-            elif a.value != b:
+            if a.value != b:
                 return False
         return True
 
@@ -91,6 +90,5 @@ def parse_from_string(blob: str) -> Kconfig:
 
         if line[0] == '#':
             continue
-        else:
-            raise KconfigParseError('Failed to parse: ' + line)
+        raise KconfigParseError('Failed to parse: ' + line)
     return kconfig
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -8,12 +8,9 @@
 
 from dataclasses import dataclass
 import json
 import os
 
-import kunit_parser
-
 from kunit_parser import Test, TestStatus
-
 from typing import Any, Dict
 
 @dataclass
 class Metadata:
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -38,7 +38,7 @@ class BuildError(Exception):
     """Represents an error trying to build the Linux kernel."""
 
 
-class LinuxSourceTreeOperations(object):
+class LinuxSourceTreeOperations:
     """An abstraction over command line operations performed on a source tree."""
 
     def __init__(self, linux_arch: str, cross_compile: Optional[str]):
@@ -53,7 +53,7 @@ class LinuxSourceTreeOperations(object):
         except subprocess.CalledProcessError as e:
             raise ConfigError(e.output.decode())
 
-    def make_arch_qemuconfig(self, kconfig: kunit_config.Kconfig) -> None:
+    def make_arch_qemuconfig(self, base_kunitconfig: kunit_config.Kconfig) -> None:
         pass
 
     def make_allyesconfig(self, build_dir: str, make_options) -> None:
@@ -182,7 +182,7 @@ def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceT
     config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py')
     if arch == 'um':
         return LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
-    elif os.path.isfile(config_path):
+    if os.path.isfile(config_path):
         return get_source_tree_ops_from_qemu_config(config_path, cross_compile)[1]
 
     options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')]
@@ -213,7 +213,7 @@ def get_source_tree_ops_from_qemu_config(config_path: str,
     return params.linux_arch, LinuxSourceTreeOperationsQemu(
             params, cross_compile=cross_compile)
 
-class LinuxSourceTree(object):
+class LinuxSourceTree:
     """Represents a Linux kernel source tree with KUnit tests."""
 
     def __init__(
@@ -368,6 +368,6 @@ class LinuxSourceTree(object):
             waiter.join()
             subprocess.call(['stty', 'sane'])
 
-    def signal_handler(self, sig, frame) -> None:
+    def signal_handler(self, unused_sig, unused_frame) -> None:
         logging.error('Build interruption occurred. Cleaning console.')
         subprocess.call(['stty', 'sane'])
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -15,10 +15,9 @@ import sys
 
-import datetime
 from enum import Enum, auto
 from functools import reduce
 from typing import Iterable, Iterator, List, Optional, Tuple
 
-class Test(object):
+class Test:
     """
     A class to represent a test parsed from KTAP results. All KTAP
     results within a test log are stored in a main Test object as
@@ -126,17 +125,16 @@ class TestCounts:
         """
         if self.total() == 0:
             return TestStatus.NO_TESTS
-        elif self.crashed:
+        if self.crashed:
             # Crashes should take priority.
             return TestStatus.TEST_CRASHED
-        elif self.failed:
+        if self.failed:
             return TestStatus.FAILURE
-        elif self.passed:
+        if self.passed:
             # No failures or crashes, looks good!
             return TestStatus.SUCCESS
-        else:
-            # We have only skipped tests.
-            return TestStatus.SKIPPED
+        # We have only skipped tests.
+        return TestStatus.SKIPPED
 
     def add_status(self, status: TestStatus) -> None:
         """Increments the count for `status`."""
@@ -381,7 +379,7 @@ def peek_test_name_match(lines: LineStream, test: Test) -> bool:
     if not match:
         return False
     name = match.group(4)
-    return (name == test.name)
+    return name == test.name
 
 def parse_test_result(lines: LineStream, test: Test,
                       expected_num: int) -> bool:
@@ -553,17 +551,16 @@ def format_test_result(test: Test) -> str:
         String containing formatted test result
     """
     if test.status == TestStatus.SUCCESS:
-        return (green('[PASSED] ') + test.name)
-    elif test.status == TestStatus.SKIPPED:
-        return (yellow('[SKIPPED] ') + test.name)
-    elif test.status == TestStatus.NO_TESTS:
-        return (yellow('[NO TESTS RUN] ') + test.name)
-    elif test.status == TestStatus.TEST_CRASHED:
+        return green('[PASSED] ') + test.name
+    if test.status == TestStatus.SKIPPED:
+        return yellow('[SKIPPED] ') + test.name
+    if test.status == TestStatus.NO_TESTS:
+        return yellow('[NO TESTS RUN] ') + test.name
+    if test.status == TestStatus.TEST_CRASHED:
         print_log(test.log)
-        return (red('[CRASHED] ') + test.name)
-    else:
-        print_log(test.log)
-        return (red('[FAILED] ') + test.name)
+        return red('[CRASHED] ') + test.name
+    print_log(test.log)
+    return red('[FAILED] ') + test.name
 
 def print_test_result(test: Test) -> None:
     """
@@ -607,7 +604,7 @@ def print_summary_line(test: Test) -> None:
     """
     if test.status == TestStatus.SUCCESS:
         color = green
-    elif test.status == TestStatus.SKIPPED or test.status == TestStatus.NO_TESTS:
+    elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
        color = yellow
     else:
         color = red
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -251,8 +251,8 @@ class KUnitParserTest(unittest.TestCase):
 
     def test_ignores_hyphen(self):
         hyphen_log = test_data_path('test_strip_hyphen.log')
-        file = open(hyphen_log)
-        result = kunit_parser.parse_run_tests(file.readlines())
+        with open(hyphen_log) as file:
+            result = kunit_parser.parse_run_tests(file.readlines())
 
         # A skipped test does not fail the whole suite.
         self.assertEqual(
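The test_ignores_hyphen change above is the standard context-manager idiom: with open(...) closes the file even if parsing raises. A minimal standalone sketch of the idiom ('example.log' is a throwaway file created only so the snippet runs on its own):

# Hypothetical file, written here only to make the sketch self-contained.
with open('example.log', 'w') as f:
    f.write('TAP version 14\n1..0\n')

# The file handle is closed automatically when the block exits,
# even if the body raises an exception.
with open('example.log') as f:
    lines = f.readlines()
print(len(lines))  # -> 2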
@@ -347,7 +347,7 @@ class LineStreamTest(unittest.TestCase):
         called_times = 0
         def generator():
             nonlocal called_times
-            for i in range(1,5):
+            for _ in range(1,5):
                 called_times += 1
                 yield called_times, str(called_times)
 
@@ -553,7 +553,8 @@ class KUnitMainTest(unittest.TestCase):
     def test_exec_no_tests(self):
         self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0'])
         with self.assertRaises(SystemExit) as e:
             kunit.main(['run'], self.linux_source_mock)
+        self.assertEqual(e.exception.code, 1)
         self.linux_source_mock.run_kernel.assert_called_once_with(
             args=None, build_dir='.kunit', filter_glob='', timeout=300)
         self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
@@ -588,6 +589,7 @@ class KUnitMainTest(unittest.TestCase):
         self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
         with self.assertRaises(SystemExit) as e:
             kunit.main(['run', '--raw_output=invalid'], self.linux_source_mock)
+        self.assertNotEqual(e.exception.code, 0)
 
     def test_run_raw_output_does_not_take_positional_args(self):
         # --raw_output is a string flag, but we don't want it to consume
--- a/tools/testing/kunit/run_checks.py
+++ b/tools/testing/kunit/run_checks.py
@@ -14,7 +14,7 @@ import shutil
 import subprocess
 import sys
 import textwrap
-from typing import Dict, List, Sequence, Tuple
+from typing import Dict, List, Sequence
 
 ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__))
 TIMEOUT = datetime.timedelta(minutes=5).total_seconds()