
Commit 373e8c4

Fixed handling of test build failures: BUILD_FAILURE is now one of the possible test results.
Added a build report printed after build completion. Fixed capturing the result from execute() when the result is a NoneType value.
1 parent 1de9187 commit 373e8c4
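
The build report mentioned in the message is assembled from three lists of "toolchain::target" identifiers (build_successes, build_skipped, build_failures) that the runner fills in while building and then prints through print_build_results once every build has completed. Below is a minimal, self-contained sketch of that bookkeeping pattern; the helper names (format_build_results, run_builds, build_one) are invented for illustration and are not part of workspace_tools.

def format_build_results(entries, header):
    # Plays the role of print_build_results from workspace_tools.build_api:
    # returns a short indented block, or an empty string when there is
    # nothing to report.
    if not entries:
        return ""
    lines = [header] + ["  * %s" % entry for entry in entries]
    return "\n".join(lines) + "\n"

def run_builds(targets, toolchains, build_one):
    # build_one(target, toolchain) returns True on a successful build,
    # False when the toolchain is not supported for that target, and
    # raises an exception (ToolException in the real code) on a build error.
    successes, skipped, failures = [], [], []
    for target in targets:
        for toolchain in toolchains:
            tt_id = "%s::%s" % (toolchain, target)
            try:
                if build_one(target, toolchain):
                    successes.append(tt_id)
                else:
                    skipped.append(tt_id)
            except Exception:
                failures.append(tt_id)
    return (format_build_results(successes, "Build successes:")
            + format_build_results(skipped, "Build skipped:")
            + format_build_results(failures, "Build failures:"))

In the commit itself the three lists live on the SingleTestRunner instance (self.build_successes and friends), are appended to inside execute_thread_slice(), and are printed at the end of singletest_in_cli_mode(), as the diff below shows.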

File tree

1 file changed (+69 / -21)

workspace_tools/test_api.py

Lines changed: 69 additions & 21 deletions
@@ -49,6 +49,7 @@
 from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
 from workspace_tools.build_api import get_target_supported_toolchains
 from workspace_tools.build_api import write_build_report
+from workspace_tools.build_api import print_build_results
 from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
 from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
 from workspace_tools.test_exporters import ReportExporter, ResultExporterType
@@ -124,6 +125,7 @@ class SingleTestRunner(object):
     TEST_RESULT_TIMEOUT = "TIMEOUT"
     TEST_RESULT_NO_IMAGE = "NO_IMAGE"
     TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
+    TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
 
     GLOBAL_LOOPS_COUNT = 1  # How many times each test should be repeated
     TEST_LOOPS_LIST = []    # We redefine no.of loops per test_id
@@ -142,7 +144,8 @@ class SingleTestRunner(object):
                            "timeout" : TEST_RESULT_TIMEOUT,
                            "no_image" : TEST_RESULT_NO_IMAGE,
                            "end" : TEST_RESULT_UNDEF,
-                           "mbed_assert" : TEST_RESULT_MBED_ASSERT
+                           "mbed_assert" : TEST_RESULT_MBED_ASSERT,
+                           "build_failed" : TEST_RESULT_BUILD_FAILED
                            }
 
     def __init__(self,
@@ -182,6 +185,11 @@ def __init__(self,
             from colorama import init
             init()
 
+        # Build results
+        build_failures = []
+        build_successes = []
+        build_skipped = []
+
         PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
         self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
         # Settings related to test loops counters
@@ -299,6 +307,8 @@ def is_shuffle_seed_float(self):
 
     def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
         for toolchain in toolchains:
+            tt_id = "%s::%s" % (toolchain, target)
+
             # Toolchain specific build successes and failures
             build_report[toolchain] = {
                 "mbed_failure": False,
@@ -310,13 +320,14 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
             }
             # print target, toolchain
             # Test suite properties returned to external tools like CI
-            test_suite_properties = {}
-            test_suite_properties['jobs'] = self.opts_jobs
-            test_suite_properties['clean'] = clean
-            test_suite_properties['target'] = target
-            test_suite_properties['test_ids'] = ', '.join(test_ids)
-            test_suite_properties['toolchain'] = toolchain
-            test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed
+            test_suite_properties = {
+                'jobs': self.opts_jobs,
+                'clean': clean,
+                'target': target,
+                'test_ids': ', '.join(test_ids),
+                'toolchain': toolchain,
+                'shuffle_random_seed': self.shuffle_random_seed
+            }
 
 
             # print '=== %s::%s ===' % (target, toolchain)
@@ -329,6 +340,7 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
             build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
             clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None
 
+
             try:
                 build_mbed_libs_result = build_mbed_libs(T,
                                                          toolchain,
337349
jobs=self.opts_jobs)
338350

339351
if not build_mbed_libs_result:
352+
self.build_skipped.append(tt_id)
340353
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
341354
continue
355+
else:
356+
self.build_successes.append(tt_id)
342357
except ToolException:
343-
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
358+
self.build_failures.append(tt_id)
344359
build_report[toolchain]["mbed_failure"] = True
345-
#return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
360+
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
346361
continue
347362

348363
build_dir = join(BUILD_DIR, "test", target, toolchain)
@@ -415,8 +430,6 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
                     continue
 
 
-
-
                 for test_id in valid_test_map_keys:
                     test = TEST_MAP[test_id]
 
@@ -437,6 +450,12 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
                     test_uuid = uuid.uuid4()
                     MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))
 
+                    # Prepare extended test results data structure (it can be used to generate detailed test report)
+                    if toolchain not in self.test_summary_ext:
+                        self.test_summary_ext[toolchain] = {}    # test_summary_ext : toolchain
+                    if target not in self.test_summary_ext[toolchain]:
+                        self.test_summary_ext[toolchain][target] = {}    # test_summary_ext : toolchain : target
+
                     project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                     try:
                         path = build_project(test.source_dir,
@@ -457,7 +476,25 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
                         project_name_str = project_name if project_name is not None else test_id
                         print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
                         build_report[toolchain]["test_build_failing"].append(test_id)
-                        # return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
+
+                        # Append test results to global test summary
+                        self.test_summary.append(
+                            (self.TEST_RESULT_BUILD_FAILED, target, toolchain, test_id, 'Toolchain build failed', 0, 0, '-')
+                        )
+
+                        # Add detailed test result to test summary structure
+                        if target not in self.test_summary_ext[toolchain][target]:
+                            self.test_summary_ext[toolchain][target][test_id] = { 0: {
+                                'single_test_result' : self.TEST_RESULT_BUILD_FAILED,
+                                'single_test_output' : '',
+                                'target_name' : target,
+                                'toolchain_name' : toolchain,
+                                'test_id' : test_id,
+                                'test_description' : 'Toolchain build failed',
+                                'elapsed_time' : 0,
+                                'duration' : 0,
+                                'copy_method' : None
+                            }}
                         continue
 
                     if self.opts_only_build_tests:
@@ -479,17 +516,17 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
                     test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path
 
                     # read MUTs, test specification and perform tests
-                    single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
+                    handle_result = self.handle(test_spec, target, toolchain, test_loops=test_loops)
+                    if handle_result:
+                        single_test_result, detailed_test_results = handle_result
+                    else:
+                        continue
 
                     # Append test results to global test summary
                     if single_test_result is not None:
                         self.test_summary.append(single_test_result)
 
-                    # Prepare extended test results data structure (it can be used to generate detailed test report)
-                    if toolchain not in self.test_summary_ext:
-                        self.test_summary_ext[toolchain] = {}    # test_summary_ext : toolchain
-                    if target not in self.test_summary_ext[toolchain]:
-                        self.test_summary_ext[toolchain][target] = {}    # test_summary_ext : toolchain : target
+                    # Add detailed test result to test summary structure
                     if target not in self.test_summary_ext[toolchain][target]:
                         self.test_summary_ext[toolchain][target][test_id] = detailed_test_results    # test_summary_ext : toolchain : target : test_it
 
@@ -511,6 +548,9 @@ def execute(self):
             self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)
 
         build_reports = []
+        self.build_failures = []
+        self.build_successes = []
+        self.build_skipped = []
 
         if self.opts_parallel_test_exec:
             ###################################################################
@@ -554,7 +594,6 @@ def execute(self):
             }
 
             for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
-                print "%s - %s" % (target_build_report["target"], toolchain)
                 report = target_build_report["report"][toolchain]
 
                 if report["mbed_failure"]:
@@ -703,6 +742,7 @@ def generate_test_summary(self, test_summary, shuffle_seed=None):
         """ Prints well-formed summary with results (SQL table like)
             table shows target x test results matrix across
         """
+        success_code = 0    # Success code that can be later returned to
         result = "Test summary:\n"
         # Pretty table package is used to print results
         pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
@@ -723,7 +763,8 @@ def generate_test_summary(self, test_summary, shuffle_seed=None):
                        self.TEST_RESULT_IOERR_SERIAL : 0,
                        self.TEST_RESULT_NO_IMAGE : 0,
                        self.TEST_RESULT_TIMEOUT : 0,
-                       self.TEST_RESULT_MBED_ASSERT : 0
+                       self.TEST_RESULT_MBED_ASSERT : 0,
+                       self.TEST_RESULT_BUILD_FAILED : 0
                        }
 
         for test in test_summary:
@@ -1413,6 +1454,8 @@ def progress_bar(percent_progress, saturation=0):
 
 def singletest_in_cli_mode(single_test):
     """ Runs SingleTestRunner object in CLI (Command line interface) mode
+
+        @return returns success code (0 == success) for building and running tests
     """
     start = time()
     # Execute tests depending on options and filter applied
@@ -1427,7 +1470,12 @@ def singletest_in_cli_mode(single_test):
     # prints well-formed summary with results (SQL table like)
     # table shows text x toolchain test result matrix
     print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)
+    print
     print "Completed in %.2f sec"% (elapsed_time)
+    print
+    print print_build_results(single_test.build_successes, "Build successes:"),
+    print print_build_results(single_test.build_skipped, "Build skipped:"),
+    print print_build_results(single_test.build_failures, "Build failures:"),
 
     # Store extra reports in files
     if single_test.opts_report_html_file_name:
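
The @@ -479,17 +516,17 @@ hunk above is the NoneType fix named in the commit message: the value returned by self.handle() is now checked before being unpacked, so a run that produced no result is skipped instead of raising a TypeError. A minimal stand-alone sketch of that guard follows; handle() here is a hypothetical stand-in, not the real SingleTestRunner.handle().

def handle(test_spec):
    # Stand-in for SingleTestRunner.handle(); assume it can return None
    # when there is nothing to report for a given test run.
    if not test_spec:
        return None
    return ("OK", {"single_test_result": "OK"})

def capture(test_spec):
    handle_result = handle(test_spec)
    if handle_result:
        single_test_result, detailed_test_results = handle_result
        return single_test_result, detailed_test_results
    return None    # the caller simply skips this test

assert capture(None) is None                  # direct unpacking here would raise TypeError
assert capture({"id": "MBED_A1"})[0] == "OK"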
