|
18 | 18 | """
|
19 | 19 |
|
20 | 20 | from tools.utils import construct_enum, mkdir
|
| 21 | +from prettytable import PrettyTable |
21 | 22 | import os
|
22 | 23 |
|
# Enumeration of supported result-exporter backends; construct_enum (from
# tools.utils) builds a simple enum-like object mapping each name to the
# exporter's identifier string, dispatched on in ReportExporter.report().
ResultExporterType = construct_enum(HTML='Html_Exporter',
                                    JUNIT='JUnit_Exporter',
                                    JUNIT_OPER='JUnit_Exporter_Interoperability',
                                    BUILD='Build_Exporter',
                                    TEXT='Text_Exporter',
                                    PRINT='Print_Exporter')
|
28 | 30 |
|
29 | 31 |
|
@@ -88,6 +90,8 @@ def report(self, test_summary_ext, test_suite_properties=None,
|
88 | 90 | elif self.result_exporter_type == ResultExporterType.PRINT:
|
89 | 91 | # JUNIT exporter for interoperability test
|
90 | 92 | return self.exporter_print(test_summary_ext, print_log_for_failures=print_log_for_failures)
|
| 93 | + elif self.result_exporter_type == ResultExporterType.TEXT: |
| 94 | + return self.exporter_text(test_summary_ext) |
91 | 95 | return None
|
92 | 96 |
|
93 | 97 | def report_to_file(self, test_summary_ext, file_name, test_suite_properties=None):
|
@@ -351,3 +355,59 @@ def exporter_print(self, test_result_ext, print_log_for_failures=False):
|
351 | 355 | return False
|
352 | 356 | else:
|
353 | 357 | return True
|
| 358 | + |
| 359 | + def exporter_text(self, test_result_ext): |
| 360 | + """ Prints well-formed summary with results (SQL table like) |
| 361 | + table shows target x test results matrix across |
| 362 | + """ |
| 363 | + success_code = 0 # Success code that can be leter returned to |
| 364 | + # Pretty table package is used to print results |
| 365 | + pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description", |
| 366 | + "Elapsed Time (sec)", "Timeout (sec)"]) |
| 367 | + pt.align["Result"] = "l" # Left align |
| 368 | + pt.align["Target"] = "l" # Left align |
| 369 | + pt.align["Toolchain"] = "l" # Left align |
| 370 | + pt.align["Test ID"] = "l" # Left align |
| 371 | + pt.align["Test Description"] = "l" # Left align |
| 372 | + pt.padding_width = 1 # One space between column edges and contents (default) |
| 373 | + |
| 374 | + result_dict = {"OK" : 0, |
| 375 | + "FAIL" : 0, |
| 376 | + "ERROR" : 0, |
| 377 | + "UNDEF" : 0, |
| 378 | + "IOERR_COPY" : 0, |
| 379 | + "IOERR_DISK" : 0, |
| 380 | + "IOERR_SERIAL" : 0, |
| 381 | + "TIMEOUT" : 0, |
| 382 | + "NO_IMAGE" : 0, |
| 383 | + "MBED_ASSERT" : 0, |
| 384 | + "BUILD_FAILED" : 0, |
| 385 | + "NOT_SUPPORTED" : 0 |
| 386 | + } |
| 387 | + unique_test_ids = self.get_all_unique_test_ids(test_result_ext) |
| 388 | + targets = sorted(test_result_ext.keys()) |
| 389 | + for target in targets: |
| 390 | + toolchains = sorted(test_result_ext[target].keys()) |
| 391 | + for toolchain in toolchains: |
| 392 | + test_cases = [] |
| 393 | + tests = sorted(test_result_ext[target][toolchain].keys()) |
| 394 | + for test in tests: |
| 395 | + test_results = test_result_ext[target][toolchain][test] |
| 396 | + for test_res in test_results: |
| 397 | + test_ids = sorted(test_res.keys()) |
| 398 | + for test_no in test_ids: |
| 399 | + test_result = test_res[test_no] |
| 400 | + result_dict[test_result['result']] += 1 |
| 401 | + pt.add_row([test_result['result'], |
| 402 | + test_result['target_name'], |
| 403 | + test_result['toolchain_name'], |
| 404 | + test_result['id'], |
| 405 | + test_result['description'], |
| 406 | + test_result['elapsed_time'], |
| 407 | + test_result['duration']]) |
| 408 | + result = pt.get_string() |
| 409 | + result += "\n" |
| 410 | + |
| 411 | + # Print result count |
| 412 | + result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()]) |
| 413 | + return result |
0 commit comments