
Commit 8dc5e44

Merge pull request #1087 from PrzemekWirkus/bugfix_test_build_results
Tools: bugfix - singletest.py was not reporting test case build failure
2 parents f54dc44 + 9a720be commit 8dc5e44

3 files changed (+84, -26 lines)

workspace_tools/build.py

Lines changed: 6 additions & 3 deletions
@@ -254,9 +254,12 @@
     print "Completed in: (%.2f)s" % (time() - start)
     print
 
-    print print_build_results(successes, "Build successes:"),
-    print print_build_results(skipped, "Build skipped:"),
-    print print_build_results(failures, "Build failures:"),
+    for report, report_name in [(successes, "Build successes:"),
+                                (skipped, "Build skipped:"),
+                                (failures, "Build failures:"),
+                                ]:
+        if report:
+            print print_build_results(report, report_name),
 
     if failures:
         sys.exit(1)

workspace_tools/build_api.py

Lines changed: 1 addition & 1 deletion
@@ -526,7 +526,7 @@ def static_analysis_scan_library(src_paths, build_path, target, toolchain_name,
 def print_build_results(result_list, build_name):
     """ Generate result string for build results """
     result = ""
-    if result_list:
+    if len(result_list) > 0:
         result += build_name + "\n"
         result += "\n".join([" * %s" % f for f in result_list])
         result += "\n"
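
For context, a minimal standalone sketch of the helper above and of why the callers now skip empty report lists before printing (Python 2, matching the tools). The trailing return statement and the sample identifiers are assumptions for illustration, not part of this commit:

# Sketch only: print_build_results reconstructed from the hunk above, plus a
# hypothetical usage example; the 'return result' line is assumed.
def print_build_results(result_list, build_name):
    """ Generate result string for build results """
    result = ""
    if len(result_list) > 0:
        result += build_name + "\n"
        result += "\n".join([" * %s" % f for f in result_list])
        result += "\n"
    return result

print print_build_results(["ARM::LPC1768", "GCC_ARM::LPC1768"], "Build failures:")
# Build failures:
#  * ARM::LPC1768
#  * GCC_ARM::LPC1768

print print_build_results([], "Build skipped:")  # empty list -> empty string,
                                                 # so callers now skip empty reports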

workspace_tools/test_api.py

Lines changed: 77 additions & 22 deletions
@@ -49,6 +49,7 @@
 from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
 from workspace_tools.build_api import get_target_supported_toolchains
 from workspace_tools.build_api import write_build_report
+from workspace_tools.build_api import print_build_results
 from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
 from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
 from workspace_tools.test_exporters import ReportExporter, ResultExporterType
@@ -124,6 +125,7 @@ class SingleTestRunner(object):
     TEST_RESULT_TIMEOUT = "TIMEOUT"
     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
+    TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
 
     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
     TEST_LOOPS_LIST = []    # We redefine no.of loops per test_id
@@ -142,7 +144,8 @@ class SingleTestRunner(object):
                            "timeout" : TEST_RESULT_TIMEOUT,
                            "no_image" : TEST_RESULT_NO_IMAGE,
                            "end" : TEST_RESULT_UNDEF,
-                           "mbed_assert" : TEST_RESULT_MBED_ASSERT
+                           "mbed_assert" : TEST_RESULT_MBED_ASSERT,
+                           "build_failed" : TEST_RESULT_BUILD_FAILED
     }
 
     def __init__(self,
@@ -182,6 +185,11 @@ def __init__(self,
             from colorama import init
             init()
 
+        # Build results
+        build_failures = []
+        build_successes = []
+        build_skipped = []
+
         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
         self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
         # Settings related to test loops counters
@@ -299,6 +307,8 @@ def is_shuffle_seed_float(self):
 
     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
         for toolchain in toolchains:
+            tt_id = "%s::%s" % (toolchain, target)
+
             # Toolchain specific build successes and failures
             build_report[toolchain] = {
                 "mbed_failure": False,
@@ -310,13 +320,14 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
             }
             # print target, toolchain
             # Test suite properties returned to external tools like CI
-            test_suite_properties = {}
-            test_suite_properties['jobs'] = self.opts_jobs
-            test_suite_properties['clean'] = clean
-            test_suite_properties['target'] = target
-            test_suite_properties['test_ids'] = ', '.join(test_ids)
-            test_suite_properties['toolchain'] = toolchain
-            test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed
+            test_suite_properties = {
+                'jobs': self.opts_jobs,
+                'clean': clean,
+                'target': target,
+                'test_ids': ', '.join(test_ids),
+                'toolchain': toolchain,
+                'shuffle_random_seed': self.shuffle_random_seed
+            }
 
 
             # print '=== %s::%s ===' % (target, toolchain)
@@ -329,6 +340,7 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
             build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
             clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None
 
+
             try:
                 build_mbed_libs_result = build_mbed_libs(T,
                                                          toolchain,
@@ -337,12 +349,15 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
                                                          jobs=self.opts_jobs)
 
                 if not build_mbed_libs_result:
+                    self.build_skipped.append(tt_id)
                     print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
                     continue
+                else:
+                    self.build_successes.append(tt_id)
             except ToolException:
-                print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
+                self.build_failures.append(tt_id)
                 build_report[toolchain]["mbed_failure"] = True
-                #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
+                print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
                 continue
 
             build_dir = join(BUILD_DIR, "test", target, toolchain)
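
The bookkeeping introduced here is simple: each toolchain/target pair gets a "toolchain::target" identifier (tt_id) that is routed into build_skipped, build_successes or build_failures depending on how the SDK build went. A condensed, self-contained sketch of that routing follows; the stub build_mbed_libs and the target/toolchain names are stand-ins, not the real API:

# Illustration of the tt_id routing above (Python 2); everything here is a stub.
class ToolException(Exception):
    pass

def build_mbed_libs(target, toolchain):
    # Stub: pretend one toolchain is not yet supported for this target
    return toolchain != "IAR"

build_successes, build_skipped, build_failures = [], [], []

for target in ["LPC1768"]:
    for toolchain in ["ARM", "GCC_ARM", "IAR"]:
        tt_id = "%s::%s" % (toolchain, target)
        try:
            if not build_mbed_libs(target, toolchain):
                build_skipped.append(tt_id)      # toolchain not supported -> skipped
                continue
            build_successes.append(tt_id)        # SDK built fine
        except ToolException:
            build_failures.append(tt_id)         # SDK build error -> failure

print build_successes    # ['ARM::LPC1768', 'GCC_ARM::LPC1768']
print build_skipped      # ['IAR::LPC1768']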
@@ -411,12 +426,9 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
                 print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
                 build_report[toolchain]["library_failure"] = True
                 build_report[toolchain]["library_build_failing"].append(lib_id)
-                #return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
                 continue
 
 
-
-
             for test_id in valid_test_map_keys:
                 test = TEST_MAP[test_id]
 
@@ -437,6 +449,14 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
                 test_uuid = uuid.uuid4()
                 MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
 
+                # Prepare extended test results data structure (it can be used to generate detailed test report)
+                if toolchain not in self.test_summary_ext:
+                    self.test_summary_ext[toolchain] = {}  # test_summary_ext : toolchain
+                if target not in self.test_summary_ext[toolchain]:
+                    self.test_summary_ext[toolchain][target] = {}  # test_summary_ext : toolchain : target
+
+                tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)  # For logging only
+
                 project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                 try:
                     path = build_project(test.source_dir,
@@ -457,7 +477,26 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
                     project_name_str = project_name if project_name is not None else test_id
                     print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
                     build_report[toolchain]["test_build_failing"].append(test_id)
-                    # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
+                    self.build_failures.append(tt_test_id)
+
+                    # Append test results to global test summary
+                    self.test_summary.append(
+                        (self.TEST_RESULT_BUILD_FAILED, target, toolchain, test_id, 'Toolchain build failed', 0, 0, '-')
+                    )
+
+                    # Add detailed test result to test summary structure
+                    if target not in self.test_summary_ext[toolchain][target]:
+                        self.test_summary_ext[toolchain][target][test_id] = { 0: {
+                            'single_test_result' : self.TEST_RESULT_BUILD_FAILED,
+                            'single_test_output' : '',
+                            'target_name' : target,
+                            'toolchain_name' : toolchain,
+                            'test_id' : test_id,
+                            'test_description' : 'Toolchain build failed',
+                            'elapsed_time' : 0,
+                            'duration' : 0,
+                            'copy_method' : None
+                        }}
                     continue
 
                 if self.opts_only_build_tests:
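
The key effect of this hunk: a test whose build fails is now recorded twice, as a flat tuple in test_summary (which feeds the PrettyTable report) and as a nested entry in test_summary_ext keyed toolchain -> target -> test_id -> loop index. Purely as an illustration, with made-up target/toolchain/test names, the recorded data has this shape:

# Shape of the records added above for one failed test build (illustration only).
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
target, toolchain, test_id = "LPC1768", "GCC_ARM", "MBED_A1"   # hypothetical names

test_summary_entry = (TEST_RESULT_BUILD_FAILED, target, toolchain, test_id,
                      'Toolchain build failed', 0, 0, '-')

test_summary_ext = {
    toolchain: {                       # test_summary_ext : toolchain
        target: {                      # test_summary_ext : toolchain : target
            test_id: {                 # test_summary_ext : toolchain : target : test_id
                0: {'single_test_result': TEST_RESULT_BUILD_FAILED,
                    'single_test_output': '',
                    'target_name': target,
                    'toolchain_name': toolchain,
                    'test_id': test_id,
                    'test_description': 'Toolchain build failed',
                    'elapsed_time': 0,
                    'duration': 0,
                    'copy_method': None}
            }
        }
    }
}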
@@ -479,17 +518,17 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
                 test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
 
                 # read MUTs, test specification and perform tests
-                single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
+                handle_result = self.handle(test_spec, target, toolchain, test_loops=test_loops)
+                if handle_result:
+                    single_test_result, detailed_test_results = handle_result
+                else:
+                    continue
 
                 # Append test results to global test summary
                 if single_test_result is not None:
                     self.test_summary.append(single_test_result)
 
-                # Prepare extended test results data structure (it can be used to generate detailed test report)
-                if toolchain not in self.test_summary_ext:
-                    self.test_summary_ext[toolchain] = {}  # test_summary_ext : toolchain
-                if target not in self.test_summary_ext[toolchain]:
-                    self.test_summary_ext[toolchain][target] = {}  # test_summary_ext : toolchain : target
+                # Add detailed test result to test summary structure
                 if target not in self.test_summary_ext[toolchain][target]:
                     self.test_summary_ext[toolchain][target][test_id] = detailed_test_results  # test_summary_ext : toolchain : target : test_it
 
@@ -511,6 +550,9 @@ def execute(self):
             self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
 
         build_reports = []
+        self.build_failures = []
+        self.build_successes = []
+        self.build_skipped = []
 
         if self.opts_parallel_test_exec:
             ###################################################################
@@ -554,7 +596,6 @@ def execute(self):
            }
 
            for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
-               print "%s - %s" % (target_build_report["target"], toolchain)
                report = target_build_report["report"][toolchain]
 
                if report["mbed_failure"]:
@@ -703,6 +744,7 @@ def generate_test_summary(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
            table shows target x test results matrix across
        """
+       success_code = 0    # Success code that can be later returned to
        result = "Test summary:\n"
        # Pretty table package is used to print results
        pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
@@ -723,7 +765,8 @@ def generate_test_summary(self, test_summary, shuffle_seed=None):
                            self.TEST_RESULT_IOERR_SERIAL : 0,
                            self.TEST_RESULT_NO_IMAGE : 0,
                            self.TEST_RESULT_TIMEOUT : 0,
-                           self.TEST_RESULT_MBED_ASSERT : 0
+                           self.TEST_RESULT_MBED_ASSERT : 0,
+                           self.TEST_RESULT_BUILD_FAILED : 0
        }
 
        for test in test_summary:
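
Taken together with the earlier hunks, the new status is wired in at three points: the TEST_RESULT_BUILD_FAILED class constant, the "build_failed" token in TEST_RESULT_MAPPING, and the per-status counter above. A compressed sketch of that pattern; the count_results method and its counting logic are simplified stand-ins for generate_test_summary, not the real implementation:

# Compressed illustration (Python 2): the three places a result status plugs in.
class SingleTestRunner(object):
    TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
    TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"                            # 1. class constant

    TEST_RESULT_MAPPING = {"mbed_assert" : TEST_RESULT_MBED_ASSERT,
                           "build_failed" : TEST_RESULT_BUILD_FAILED}    # 2. token -> constant

    def count_results(self, test_summary):
        # 3. summary counters, as in generate_test_summary()
        result_dict = {self.TEST_RESULT_MBED_ASSERT : 0,
                       self.TEST_RESULT_BUILD_FAILED : 0}
        for test in test_summary:
            if test[0] in result_dict:
                result_dict[test[0]] += 1
        return result_dict

runner = SingleTestRunner()
print runner.count_results([("BUILD_FAILED", "LPC1768", "ARM", "MBED_A1",
                             'Toolchain build failed', 0, 0, '-')])
# e.g. {'BUILD_FAILED': 1, 'MBED_ASSERT': 0} (dict ordering may vary)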
@@ -1413,6 +1456,8 @@ def progress_bar(percent_progress, saturation=0):
 
 def singletest_in_cli_mode(single_test):
     """ Runs SingleTestRunner object in CLI (Command line interface) mode
+
+        @return returns success code (0 == success) for building and running tests
     """
     start = time()
     # Execute tests depending on options and filter applied
@@ -1427,7 +1472,17 @@ def singletest_in_cli_mode(single_test):
     # prints well-formed summary with results (SQL table like)
     # table shows text x toolchain test result matrix
     print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
+
     print "Completed in %.2f sec"% (elapsed_time)
+    print
+    # Write summary of the builds
+
+    for report, report_name in [(single_test.build_successes, "Build successes:"),
+                                (single_test.build_skipped, "Build skipped:"),
+                                (single_test.build_failures, "Build failures:"),
+                                ]:
+        if report:
+            print print_build_results(report, report_name)
 
     # Store extra reports in files
     if single_test.opts_report_html_file_name:
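
With this change a CLI run no longer ends silently after the test table: non-empty build categories are summarized via print_build_results, while empty ones print nothing at all. Successes and skips are recorded per toolchain::target (SDK builds); failures may additionally carry toolchain::target::test_id entries for failed test builds. Purely as a hypothetical illustration of the layout (identifiers and timing invented, and "Build skipped:" absent because its list is empty), the tail of a run might look like:

Completed in 412.07 sec

Build successes:
 * ARM::LPC1768
 * GCC_ARM::LPC1768

Build failures:
 * GCC_ARM::LPC1768::MBED_A2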
