Skip to content

Commit 92cf40d

Browse files
committed
[benchmark] MarkdownReportHandler
`logging.Handler` that creates a nicely formatted report from `BenchmarkDoctor`’s `check` as a Markdown table for display on GitHub.
1 parent 9a04207 commit 92cf40d

File tree

2 files changed

+102
-2
lines changed

2 files changed

+102
-2
lines changed

benchmark/scripts/Benchmark_Driver

Lines changed: 48 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -261,6 +261,51 @@ class LoggingReportFormatter(logging.Formatter):
261261
'{0} {1}{2}'.format(record.levelname, category, msg))
262262

263263

264+
class MarkdownReportHandler(logging.StreamHandler):
    r"""Write custom formatted messages from BenchmarkDoctor to the stream.

    It works around StreamHandler's hardcoded '\n' and handles the custom
    level and category formatting for BenchmarkDoctor's check report.
    """

    def __init__(self, stream):
        """Initialize the handler and write a Markdown table header."""
        super(MarkdownReportHandler, self).__init__(stream)
        self.setLevel(logging.INFO)
        self.stream.write('\n✅ | Benchmark Check Report\n---|---')
        self.stream.flush()

    # Errors and warnings start a new table row; infos continue the
    # previous cell on a subscripted line.
    levels = {logging.WARNING: '\n⚠️', logging.ERROR: '\n⛔️',
              logging.INFO: ' <br><sub> '}
    # Category icons keyed by the last component of the logger name.
    categories = {'naming': '🔤', 'runtime': '⏱', 'memory': 'Ⓜ️'}
    quotes_re = re.compile("'")

    def format(self, record):
        """Format the record as a fragment of a Markdown table row."""
        message = super(MarkdownReportHandler, self).format(record)
        prefix = self.levels.get(record.levelno, '')
        if record.levelno != logging.INFO:
            # Infos carry no category icon and stay in the current cell.
            prefix += self.categories.get(record.name.split('.')[-1], '')
            prefix += ' | '
        # Render 'quoted' names as `code` spans.
        return prefix + self.quotes_re.sub('`', message)

    def emit(self, record):
        """Write the formatted record without a trailing newline."""
        formatted = self.format(record)
        stream = self.stream
        try:
            if (isinstance(formatted, unicode) and
                    getattr(stream, 'encoding', None)):
                stream.write(formatted.encode(stream.encoding))
            else:
                stream.write(formatted)
        except UnicodeError:
            # Fall back to UTF-8 when the stream's encoding can't cope.
            stream.write(formatted.encode("UTF-8"))
        self.flush()

    def close(self):
        """Terminate the Markdown table and close the handler."""
        self.stream.write('\n\n')
        self.stream.flush()
        super(MarkdownReportHandler, self).close()
307+
308+
264309
class BenchmarkDoctor(object):
265310
"""Checks that the benchmark conforms to the standard set of requirements.
266311
@@ -302,8 +347,9 @@ class BenchmarkDoctor(object):
302347
]
303348

304349
def __del__(self):
    """Close log handlers on exit."""
    # Iterate over a snapshot so closing a handler can't disturb the
    # logger's handler list mid-loop.
    handlers = list(self.log.handlers)
    for handler in handlers:
        handler.close()
307353

308354
capital_words_re = re.compile('[A-Z][a-zA-Z0-9]+')
309355

benchmark/scripts/test_Benchmark_Driver.py

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@
1818
import time
1919
import unittest
2020

21+
from StringIO import StringIO
2122
from imp import load_source
2223

2324
from compare_perf_tests import PerformanceTestResult
@@ -33,6 +34,7 @@
3334
BenchmarkDriver = Benchmark_Driver.BenchmarkDriver
3435
BenchmarkDoctor = Benchmark_Driver.BenchmarkDoctor
3536
LoggingReportFormatter = Benchmark_Driver.LoggingReportFormatter
37+
MarkdownReportHandler = Benchmark_Driver.MarkdownReportHandler
3638

3739

3840
class Test_parse_args(unittest.TestCase):
@@ -421,6 +423,58 @@ def test_no_prefix_for_base_logging(self):
421423
self.assertEquals(f.format(lr), 'INFO Hi!')
422424

423425

426+
class TestMarkdownReportHandler(unittest.TestCase):
    """Unit tests for MarkdownReportHandler's Markdown table output."""

    def setUp(self):
        super(TestMarkdownReportHandler, self).setUp()
        self.stream = StringIO()
        self.handler = MarkdownReportHandler(self.stream)

    def assert_contains(self, texts):
        """Assert that each text appears in the captured stream output."""
        assert not isinstance(texts, str)
        output = self.stream.getvalue()
        for text in texts:
            self.assertIn(text, output)

    def record(self, level, category, msg):
        """Build a LogRecord shaped like those from BenchmarkDoctor."""
        attributes = {'name': 'BenchmarkDoctor.' + category,
                      'levelno': level, 'msg': msg}
        return logging.makeLogRecord(attributes)

    def test_init_writes_table_header(self):
        self.assertEquals(self.handler.level, logging.INFO)
        self.assert_contains(['Benchmark Check Report\n', '---|---'])

    def test_close_writes_final_newlines(self):
        self.handler.close()
        self.assert_contains(['---|---\n\n'])

    def test_errors_and_warnings_start_new_rows_with_icons(self):
        self.handler.emit(self.record(logging.ERROR, '', 'Blunder'))
        self.handler.emit(self.record(logging.WARNING, '', 'Boo-boo'))
        self.assert_contains(['\n⛔️ | Blunder',
                             '\n⚠️ | Boo-boo'])

    def test_category_icons(self):
        for category in ('naming', 'runtime', 'memory'):
            self.handler.emit(
                self.record(logging.WARNING, category, category))
        self.assert_contains(['🔤 | naming',
                              '⏱ | runtime',
                              'Ⓜ️ | memory'])

    def test_info_stays_in_table_cell_breaking_line_row_to_subscript(self):
        """Assuming Infos only follow after Errors and Warnings.

        Infos don't emit category icons.
        """
        self.handler.emit(self.record(logging.ERROR, 'naming', 'Blunder'))
        self.handler.emit(self.record(logging.INFO, 'naming', 'Fixit'))
        self.assert_contains(['Blunder <br><sub> Fixit'])

    def test_names_in_code_format(self):
        self.handler.emit(self.record(logging.WARNING, '', "'QuotedName'"))
        self.assert_contains(['| `QuotedName`'])
476+
477+
424478
def _PTR(min=700, mem_pages=1000, setup=None):
    """Create PerformanceTestResult Stub."""
    samples = Stub(min=min)
    return Stub(samples=samples, mem_pages=mem_pages, setup=setup)

0 commit comments

Comments
 (0)