Tools: bugfix: singletest.py was not reporting test case build failure #1087

Merged: 3 commits, May 4, 2015
9 changes: 6 additions & 3 deletions workspace_tools/build.py
@@ -254,9 +254,12 @@
print "Completed in: (%.2f)s" % (time() - start)
print

print print_build_results(successes, "Build successes:"),
print print_build_results(skipped, "Build skipped:"),
print print_build_results(failures, "Build failures:"),
for report, report_name in [(successes, "Build successes:"),
(skipped, "Build skipped:"),
(failures, "Build failures:"),
]:
if report:
print print_build_results(report, report_name),

if failures:
sys.exit(1)
2 changes: 1 addition & 1 deletion workspace_tools/build_api.py
@@ -526,7 +526,7 @@ def static_analysis_scan_library(src_paths, build_path, target, toolchain_name,
def print_build_results(result_list, build_name):
""" Generate result string for build results """
result = ""
if result_list:
if len(result_list) > 0:
result += build_name + "\n"
result += "\n".join([" * %s" % f for f in result_list])
result += "\n"
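For context, a minimal usage sketch of the helper above (illustration only, not part of this diff): only non-empty result lists produce output, which is why the callers in build.py and test_api.py now guard each category with "if report:". The "toolchain::target" ids below are hypothetical examples of what execute_thread_slice records.

# Illustration only, not part of this diff. Assumes workspace_tools is importable.
from workspace_tools.build_api import print_build_results

build_failures = ["ARM::NRF51822", "GCC_ARM::LPC1768"]   # hypothetical "toolchain::target" ids
build_skipped = []                                        # empty category

for report, report_name in [(build_failures, "Build failures:"),
                            (build_skipped, "Build skipped:")]:
    if report:                       # empty lists print nothing at all
        print print_build_results(report, report_name)

# Prints:
# Build failures:
#  * ARM::NRF51822
#  * GCC_ARM::LPC1768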
99 changes: 77 additions & 22 deletions workspace_tools/test_api.py
@@ -49,6 +49,7 @@
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.build_api import write_build_report
from workspace_tools.build_api import print_build_results
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
from workspace_tools.toolchains import TOOLCHAIN_BIN_PATH
from workspace_tools.test_exporters import ReportExporter, ResultExporterType
@@ -124,6 +125,7 @@ class SingleTestRunner(object):
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"

GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id
@@ -142,7 +144,8 @@ class SingleTestRunner(object):
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF,
"mbed_assert" : TEST_RESULT_MBED_ASSERT
"mbed_assert" : TEST_RESULT_MBED_ASSERT,
"build_failed" : TEST_RESULT_BUILD_FAILED
}

def __init__(self,
@@ -182,6 +185,11 @@ def __init__(self,
from colorama import init
init()

# Build results
build_failures = []
build_successes = []
build_skipped = []

PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
# Settings related to test loops counters
@@ -299,6 +307,8 @@ def is_shuffle_seed_float(self):

def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report):
for toolchain in toolchains:
tt_id = "%s::%s" % (toolchain, target)

# Toolchain specific build successes and failures
build_report[toolchain] = {
"mbed_failure": False,
@@ -310,13 +320,14 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
}
# print target, toolchain
# Test suite properties returned to external tools like CI
test_suite_properties = {}
test_suite_properties['jobs'] = self.opts_jobs
test_suite_properties['clean'] = clean
test_suite_properties['target'] = target
test_suite_properties['test_ids'] = ', '.join(test_ids)
test_suite_properties['toolchain'] = toolchain
test_suite_properties['shuffle_random_seed'] = self.shuffle_random_seed
test_suite_properties = {
'jobs': self.opts_jobs,
'clean': clean,
'target': target,
'test_ids': ', '.join(test_ids),
'toolchain': toolchain,
'shuffle_random_seed': self.shuffle_random_seed
}


# print '=== %s::%s ===' % (target, toolchain)
@@ -329,6 +340,7 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
build_mbed_libs_options = ["analyze"] if self.opts_goanna_for_mbed_sdk else None
clean_mbed_libs_options = True if self.opts_goanna_for_mbed_sdk or clean or self.opts_clean else None


try:
build_mbed_libs_result = build_mbed_libs(T,
toolchain,
@@ -337,12 +349,15 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
jobs=self.opts_jobs)

if not build_mbed_libs_result:
self.build_skipped.append(tt_id)
print self.logger.log_line(self.logger.LogType.NOTIF, 'Skipped tests for %s target. Toolchain %s is not yet supported for this target'% (T.name, toolchain))
continue
else:
self.build_successes.append(tt_id)
except ToolException:
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
self.build_failures.append(tt_id)
build_report[toolchain]["mbed_failure"] = True
#return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building MBED libs for %s using %s'% (target, toolchain))
continue

build_dir = join(BUILD_DIR, "test", target, toolchain)
@@ -411,12 +426,9 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building library %s'% (lib_id))
build_report[toolchain]["library_failure"] = True
build_report[toolchain]["library_build_failing"].append(lib_id)
#return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
continue




for test_id in valid_test_map_keys:
test = TEST_MAP[test_id]

@@ -437,6 +449,14 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
test_uuid = uuid.uuid4()
MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))

# Prepare extended test results data structure (it can be used to generate detailed test report)
if toolchain not in self.test_summary_ext:
self.test_summary_ext[toolchain] = {} # test_summary_ext : toolchain
if target not in self.test_summary_ext[toolchain]:
self.test_summary_ext[toolchain][target] = {} # test_summary_ext : toolchain : target

tt_test_id = "%s::%s::%s" % (toolchain, target, test_id) # For logging only

project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
try:
path = build_project(test.source_dir,
@@ -457,7 +477,26 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
project_name_str = project_name if project_name is not None else test_id
print self.logger.log_line(self.logger.LogType.ERROR, 'There were errors while building project %s'% (project_name_str))
build_report[toolchain]["test_build_failing"].append(test_id)
# return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext
self.build_failures.append(tt_test_id)

# Append test results to global test summary
self.test_summary.append(
(self.TEST_RESULT_BUILD_FAILED, target, toolchain, test_id, 'Toolchain build failed', 0, 0, '-')
)

# Add detailed test result to test summary structure
if target not in self.test_summary_ext[toolchain][target]:
self.test_summary_ext[toolchain][target][test_id] = { 0: {
'single_test_result' : self.TEST_RESULT_BUILD_FAILED,
'single_test_output' : '',
'target_name' : target,
'toolchain_name' : toolchain,
'test_id' : test_id,
'test_description' : 'Toolchain build failed',
'elapsed_time' : 0,
'duration' : 0,
'copy_method' : None
}}
continue

if self.opts_only_build_tests:
@@ -479,17 +518,17 @@ def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_rep
test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path

# read MUTs, test specification and perform tests
single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
handle_result = self.handle(test_spec, target, toolchain, test_loops=test_loops)
Review comment from the PR author:

This will fix:

[DEBUG] Command: C:/mbed_tools/ARMCompiler_5.03_117_Windows\bin\fromelf --bin -o C:\Jenkins\jobs\mbed_2.0_build_release_test_publish\workspace\build\test\NRF51822\ARM\DTCT_1\detect.hex C:\Jenkins\jobs\mbed_2.0_build_release_test_publish\workspace\build\test\NRF51822\ARM\DTCT_1\detect.elf
[DEBUG] Return: 0
[DEBUG] Merge SoftDevice file s110_nrf51822_8.0.0_softdevice.hex
Traceback (most recent call last):
  File "./workspace_tools/singletest.py", line 237, in <module>
    singletest_in_cli_mode(single_test)
  File "C:\Jenkins\jobs\mbed_2.0_build_release_test_publish\workspace\workspace_tools\test_api.py", line 1419, in singletest_in_cli_mode
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report = single_test.execute()
  File "C:\Jenkins\jobs\mbed_2.0_build_release_test_publish\workspace\workspace_tools\test_api.py", line 543, in execute
    self.execute_thread_slice(q, target, toolchains, clean, test_ids, cur_build_report)
  File "C:\Jenkins\jobs\mbed_2.0_build_release_test_publish\workspace\workspace_tools\test_api.py", line 482, in execute_thread_slice
    single_test_result, detailed_test_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)
TypeError: 'NoneType' object is not iterable

This happens when a None object is passed as the port or mount point by, e.g., the mbed-ls tools.
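A two-line illustration of the failure mode in the traceback (generic Python, unrelated to the mbed sources): tuple-unpacking a None return value raises exactly this TypeError.

handle_result = None                                   # e.g. what self.handle() returns when no port/mount point is known
try:
    single_test_result, detailed_test_results = handle_result
except TypeError as e:
    print e                                            # 'NoneType' object is not iterable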

if handle_result:
single_test_result, detailed_test_results = handle_result
else:
continue

# Append test results to global test summary
if single_test_result is not None:
self.test_summary.append(single_test_result)

# Prepare extended test results data structure (it can be used to generate detailed test report)
if toolchain not in self.test_summary_ext:
self.test_summary_ext[toolchain] = {} # test_summary_ext : toolchain
if target not in self.test_summary_ext[toolchain]:
self.test_summary_ext[toolchain][target] = {} # test_summary_ext : toolchain : target
# Add detailed test result to test summary structure
if target not in self.test_summary_ext[toolchain][target]:
self.test_summary_ext[toolchain][target][test_id] = detailed_test_results # test_summary_ext : toolchain : target : test_id

@@ -511,6 +550,9 @@ def execute(self):
self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

build_reports = []
self.build_failures = []
self.build_successes = []
self.build_skipped = []

if self.opts_parallel_test_exec:
###################################################################
@@ -554,7 +596,6 @@ def execute(self):
}

for toolchain in sorted(target_build_report["report"], key=target_build_report["report"].get):
print "%s - %s" % (target_build_report["target"], toolchain)
report = target_build_report["report"][toolchain]

if report["mbed_failure"]:
@@ -703,6 +744,7 @@ def generate_test_summary(self, test_summary, shuffle_seed=None):
""" Prints well-formed summary with results (SQL table like)
table shows target x test results matrix across
"""
success_code = 0 # Success code that can be later returned to
result = "Test summary:\n"
# Pretty table package is used to print results
pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
@@ -723,7 +765,8 @@
self.TEST_RESULT_IOERR_SERIAL : 0,
self.TEST_RESULT_NO_IMAGE : 0,
self.TEST_RESULT_TIMEOUT : 0,
self.TEST_RESULT_MBED_ASSERT : 0
self.TEST_RESULT_MBED_ASSERT : 0,
self.TEST_RESULT_BUILD_FAILED : 0
}

for test in test_summary:
@@ -1413,6 +1456,8 @@ def progress_bar(percent_progress, saturation=0):

def singletest_in_cli_mode(single_test):
""" Runs SingleTestRunner object in CLI (Command line interface) mode

@return returns success code (0 == success) for building and running tests
"""
start = time()
# Execute tests depending on options and filter applied
@@ -1427,7 +1472,17 @@ def singletest_in_cli_mode(single_test):
# prints well-formed summary with results (SQL table like)
# table shows text x toolchain test result matrix
print single_test.generate_test_summary_by_target(test_summary, shuffle_seed)

print "Completed in %.2f sec"% (elapsed_time)
print
# Write summary of the builds

for report, report_name in [(single_test.build_successes, "Build successes:"),
(single_test.build_skipped, "Build skipped:"),
(single_test.build_failures, "Build failures:"),
]:
if report:
print print_build_results(report, report_name)

# Store extra reports in files
if single_test.opts_report_html_file_name:
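One way a CLI entry point could consume the success code documented in the new docstring; a hypothetical sketch (the actual singletest.py wiring is outside this diff), with the SingleTestRunner construction deliberately elided.

# Hypothetical sketch, not part of this diff.
import sys
from workspace_tools.test_api import singletest_in_cli_mode

def run_and_exit(single_test):
    """ single_test: an already-configured SingleTestRunner (construction elided) """
    status = singletest_in_cli_mode(single_test)   # 0 == success, per the docstring above
    sys.exit(status)                               # non-zero exit fails the CI job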