1
0
mirror of https://xff.cz/git/u-boot/ synced 2025-10-31 18:35:42 +01:00

binman: Run tests concurrently

At present the tests run one after the other using a single CPU. This is
not very efficient. Bring in the concurrencytest module and run the tests
concurrently, using one process for each CPU by default. A -P option
allows this to be overridden, which is necessary for code-coverage to
function correctly.

This requires fixing a few tests which are currently not fully
independent.

At some point we might consider doing this across all pytests in U-Boot.
There is a pytest version that supports specifying the number of processes
to use, but it did not work for me.

Signed-off-by: Simon Glass <sjg@chromium.org>
This commit is contained in:
Simon Glass
2018-10-01 21:12:47 -06:00
parent 2673afe221
commit 11ae93eef4
12 changed files with 274 additions and 22 deletions

View File

@@ -10,6 +10,7 @@
"""See README for more information"""
import glob
import multiprocessing
import os
import sys
import traceback
@@ -17,7 +18,7 @@ import unittest
# Bring in the patman and dtoc libraries
our_path = os.path.dirname(os.path.realpath(__file__))
for dirname in ['../patman', '../dtoc', '..']:
for dirname in ['../patman', '../dtoc', '..', '../concurrencytest']:
sys.path.insert(0, os.path.join(our_path, dirname))
# Bring in the libfdt module
@@ -27,16 +28,22 @@ sys.path.insert(0, os.path.join(our_path,
import cmdline
import command
use_concurrent = True
try:
from concurrencytest import ConcurrentTestSuite, fork_for_tests
except:
use_concurrent = False
import control
import test_util
def RunTests(debug, args):
def RunTests(debug, processes, args):
"""Run the functional tests and any embedded doctests
Args:
debug: True to enable debugging, which shows a full stack trace on error
processes: Number of processes to use to run tests (None=same as #CPUs)
args: List of positional args provided to binman. This can hold a test
name to execute (as in 'binman -t testSections', for example)
"""
import elf_test
import entry_test
@@ -54,19 +61,28 @@ def RunTests(debug, args):
sys.argv = [sys.argv[0]]
if debug:
sys.argv.append('-D')
if debug:
sys.argv.append('-D')
# Run the entry tests first, since these need to be the first to import the
# 'entry' module.
test_name = args and args[0] or None
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
elf_test.TestElf, image_test.TestImage):
if test_name:
try:
suite = unittest.TestLoader().loadTestsFromName(test_name, module)
suite.addTests(loader.loadTestsFromName(test_name, module))
except AttributeError:
continue
else:
suite = unittest.TestLoader().loadTestsFromTestCase(module)
suite.addTests(loader.loadTestsFromTestCase(module))
if use_concurrent and processes != 1:
concurrent_suite = ConcurrentTestSuite(suite,
fork_for_tests(processes or multiprocessing.cpu_count()))
concurrent_suite.run(result)
else:
suite.run(result)
print result
@@ -115,7 +131,7 @@ def RunBinman(options, args):
sys.tracebacklimit = 0
if options.test:
ret_code = RunTests(options.debug, args[1:])
ret_code = RunTests(options.debug, options.processes, args[1:])
elif options.test_coverage:
RunTestCoverage()