[tools] Parallel building of tests #2990

Merged: 5 commits, Oct 19, 2016
19 changes: 18 additions & 1 deletion tools/build_api.py
@@ -455,12 +455,29 @@ def build_project(src_paths, build_path, target, toolchain_name,
# Link Program
res, _ = toolchain.link_program(resources, build_path, name)

memap_instance = getattr(toolchain, 'memap_instance', None)
memap_table = ''
if memap_instance:
# Write output to stdout in text (pretty table) format
memap_table = memap_instance.generate_output('table')

if not silent:
print memap_table

# Write output to file in JSON format
map_out = join(build_path, name + "_map.json")
memap_instance.generate_output('json', map_out)

# Write output to file in CSV format for the CI
map_csv = join(build_path, name + "_map.csv")
memap_instance.generate_output('csv-ci', map_csv)

resources.detect_duplicates(toolchain)

if report != None:
end = time()
cur_result["elapsed_time"] = end - start
cur_result["output"] = toolchain.get_output()
cur_result["output"] = toolchain.get_output() + memap_table
cur_result["result"] = "OK"
cur_result["memory_usage"] = toolchain.map_outputs

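The change above has build_project() capture the memap table as a string instead of letting memap print it directly: the table is echoed to stdout when the build is not silent, written out as JSON and CSV next to the binary, and appended to the toolchain output stored in the report. A minimal standalone sketch of that capture-then-report pattern (FakeMemap and link_and_report are illustrative stand-ins, not the mbed API):

    class FakeMemap(object):
        """Stand-in for toolchain.memap_instance (illustrative only)."""
        def generate_output(self, export_format, file_output=None):
            if export_format == 'table':
                # The real memap renders a PrettyTable; a canned string stands in here.
                return "| Module | .text | .data | .bss |\n| main.o |  1024 |    16 |   64 |\n"
            # 'json' and 'csv-ci' would write to file_output and return None.
            return None

    def link_and_report(toolchain_output, memap_instance=None, silent=False):
        """Return the text that would end up in cur_result['output']."""
        memap_table = ''
        if memap_instance:
            memap_table = memap_instance.generate_output('table')
            if not silent:
                print(memap_table)
        # The report keeps the linker output plus the rendered memory table.
        return toolchain_output + memap_table

    if __name__ == '__main__':
        print(link_and_report("Link: blinky\n", FakeMemap(), silent=True))

Returning the text rather than printing it is what later lets the parallel test build echo each project's output from the parent process.
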
51 changes: 33 additions & 18 deletions tools/memap.py
@@ -393,6 +393,8 @@ def generate_output(self, export_format, file_output=None):

Keyword arguments:
file_desc - descriptor (either stdout or file)

Returns: generated string for the 'table' format, otherwise None
"""

try:
@@ -407,11 +409,13 @@ def generate_output(self, export_format, file_output=None):
to_call = {'json': self.generate_json,
'csv-ci': self.generate_csv,
'table': self.generate_table}[export_format]
to_call(file_desc)
output = to_call(file_desc)

if file_desc is not sys.stdout:
file_desc.close()

return output

def generate_json(self, file_desc):
"""Generate a json file from a memory map

@@ -421,6 +425,8 @@ def generate_json(self, file_desc):
file_desc.write(json.dumps(self.mem_report, indent=4))
file_desc.write('\n')

return None

def generate_csv(self, file_desc):
"""Generate a CSV file from a memoy map

@@ -461,11 +467,15 @@ def generate_csv(self, file_desc):
csv_writer.writerow(csv_module_section)
csv_writer.writerow(csv_sizes)

return None

def generate_table(self, file_desc):
"""Generate a table from a memoy map

Positional arguments:
file_desc - the file to write out the final report to

Returns: string of the generated table
"""
# Create table
columns = ['Module']
@@ -501,28 +511,29 @@ def generate_table(self, file_desc):

table.add_row(subtotal_row)

file_desc.write(table.get_string())
file_desc.write('\n')
output = table.get_string()
output += '\n'

if self.mem_summary['heap'] == 0:
file_desc.write("Allocated Heap: unknown\n")
output += "Allocated Heap: unknown\n"
else:
file_desc.write("Allocated Heap: %s bytes\n" %
str(self.mem_summary['heap']))
output += "Allocated Heap: %s bytes\n" % \
str(self.mem_summary['heap'])

if self.mem_summary['stack'] == 0:
file_desc.write("Allocated Stack: unknown\n")
output += "Allocated Stack: unknown\n"
else:
file_desc.write("Allocated Stack: %s bytes\n" %
str(self.mem_summary['stack']))
output += "Allocated Stack: %s bytes\n" % \
str(self.mem_summary['stack'])

file_desc.write("Total Static RAM memory (data + bss): %s bytes\n" %
(str(self.mem_summary['static_ram'])))
file_desc.write(
"Total RAM memory (data + bss + heap + stack): %s bytes\n"
% (str(self.mem_summary['total_ram'])))
file_desc.write("Total Flash memory (text + data + misc): %s bytes\n" %
(str(self.mem_summary['total_flash'])))
output += "Total Static RAM memory (data + bss): %s bytes\n" % \
str(self.mem_summary['static_ram'])
output += "Total RAM memory (data + bss + heap + stack): %s bytes\n" % \
str(self.mem_summary['total_ram'])
output += "Total Flash memory (text + data + misc): %s bytes\n" % \
str(self.mem_summary['total_flash'])

return output

toolchains = ["ARM", "ARM_STD", "ARM_MICRO", "GCC_ARM", "IAR"]

@@ -646,11 +657,15 @@ def main():
if memap.parse(args.file, args.toolchain) is False:
sys.exit(0)

returned_string = None
# Write output in file
if args.output != None:
memap.generate_output(args.export, args.output)
returned_string = memap.generate_output(args.export, args.output)
else: # Write output in screen
memap.generate_output(args.export)
returned_string = memap.generate_output(args.export)

if args.export == 'table' and returned_string:
print returned_string

sys.exit(0)

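With these changes, generate_output('table') hands the rendered table back to the caller instead of writing it to the output stream, while the 'json' and 'csv-ci' formats still write to their file and return None; main() then prints the returned string itself. A usage sketch, assuming the parser class is MemapParser as in tools/memap.py and with 'app.map', 'GCC_ARM' and the output paths as placeholder inputs:

    from tools.memap import MemapParser

    memap = MemapParser()
    if memap.parse('app.map', 'GCC_ARM') is not False:
        # 'table' now returns the rendered string for the caller to print...
        table_str = memap.generate_output('table')
        print(table_str)

        # ...while the file-based formats still write to disk and return None.
        memap.generate_output('json', 'app_map.json')
        memap.generate_output('csv-ci', 'app_map.csv')
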
166 changes: 126 additions & 40 deletions tools/test_api.py
@@ -37,6 +37,7 @@
from Queue import Queue, Empty
from os.path import join, exists, basename, relpath
from threading import Thread, Lock
from multiprocessing import Pool, cpu_count
from subprocess import Popen, PIPE

# Imports related to mbed build api
@@ -2068,6 +2069,48 @@ def norm_relative_path(path, start):
path = path.replace("\\", "/")
return path


def build_test_worker(*args, **kwargs):

"""This is a worker function for the parallel building of tests. The `args`
and `kwargs` are passed directly to `build_project`. It returns a dictionary
with the following structure:

{
'result': `True` if no exceptions were thrown, `False` otherwise
'reason': Instance of exception that was thrown on failure
'bin_file': Path to the created binary if `build_project` was
successful. Not present otherwise
'kwargs': The keyword arguments that were passed to `build_project`.
This includes arguments that were modified (ex. report)
}
"""
bin_file = None
ret = {
'result': False,
'args': args,
'kwargs': kwargs
}

try:
bin_file = build_project(*args, **kwargs)
ret['result'] = True
ret['bin_file'] = bin_file
ret['kwargs'] = kwargs

except NotSupportedException, e:
ret['reason'] = e
except ToolException, e:
ret['reason'] = e
except KeyboardInterrupt, e:
ret['reason'] = e
except:
# Print unhandled exceptions here
import traceback
traceback.print_exc(file=sys.stdout)

return ret


def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
clean=False, notify=None, verbose=False, jobs=1, macros=None,
silent=False, report=None, properties=None,
@@ -2095,58 +2138,101 @@ def build_tests(tests, base_source_paths, build_path, target, toolchain_name,

result = True

map_outputs_total = list()
jobs_count = int(jobs if jobs else cpu_count())
p = Pool(processes=jobs_count)
results = []
for test_name, test_path in tests.iteritems():
test_build_path = os.path.join(build_path, test_path)
src_path = base_source_paths + [test_path]
bin_file = None
test_case_folder_name = os.path.basename(test_path)

args = (src_path, test_build_path, target, toolchain_name)
kwargs = {
'jobs': jobs,
'clean': clean,
'macros': macros,
'name': test_case_folder_name,
'project_id': test_name,
'report': report,
'properties': properties,
'verbose': verbose,
'app_config': app_config,
'build_profile': build_profile,
'silent': True
}

try:
bin_file = build_project(src_path, test_build_path, target, toolchain_name,
jobs=jobs,
clean=clean,
macros=macros,
name=test_case_folder_name,
project_id=test_name,
report=report,
properties=properties,
verbose=verbose,
app_config=app_config,
build_profile=build_profile)

except NotSupportedException:
pass
except ToolException:
result = False
if continue_on_build_fail:
continue
else:
break
results.append(p.apply_async(build_test_worker, args, kwargs))

# If a clean build was carried out last time, disable it for the next build.
# Otherwise the previously built test will be deleted.
if clean:
clean = False

# Normalize the path
if bin_file:
bin_file = norm_relative_path(bin_file, execution_directory)

test_build['tests'][test_name] = {
"binaries": [
{
"path": bin_file
}
]
}
p.close()
result = True
itr = 0
while len(results):
itr += 1
if itr > 360000:
p.terminate()
p.join()
raise ToolException("Compile did not finish in 10 minutes")
else:
sleep(0.01)
pending = 0
for r in results:
if r.ready() is True:
try:
worker_result = r.get()
results.remove(r)

# Take report from the kwargs and merge it into existing report
report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
for test_key in report_entry.keys():
report[target_name][toolchain_name][test_key] = report_entry[test_key]

# Set the overall result to a failure if a build failure occurred
if not worker_result['result'] and not isinstance(worker_result['reason'], NotSupportedException):
result = False
break

# Adding binary path to test build result
if worker_result['result'] and 'bin_file' in worker_result:
bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)

test_build['tests'][worker_result['kwargs']['project_id']] = {
"binaries": [
{
"path": bin_file
}
]
}

test_key = worker_result['kwargs']['project_id'].upper()
print report[target_name][toolchain_name][test_key][0][0]['output'].rstrip()
print 'Image: %s\n' % bin_file

print 'Image: %s'% bin_file
except:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
p.terminate()
p.join()
raise
else:
pending += 1
if pending >= jobs_count:
break

# Break as soon as possible if there is a failure and we are not
# continuing on build failures
if not result and not continue_on_build_fail:
if p._taskqueue.queue:
p._taskqueue.queue.clear()
sleep(0.5)
p.terminate()
break

p.join()

test_builds = {}
test_builds["%s-%s" % (target_name, toolchain_name)] = test_build


return result, test_builds
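
The test_api.py rewrite replaces the sequential build loop with a multiprocessing pool: build_test_worker wraps build_project so that exceptions are caught and reported in the returned dictionary rather than raised, and build_tests submits one job per test with apply_async, then polls the ready() results, marking the overall result as failed on the first real build failure and, unless continue_on_build_fail is set, clearing the queue and terminating the pool. Because each worker runs in a child process, its updates to the report dictionary are not visible to the parent, which is why the worker hands its kwargs back and the parent merges the report entries itself. A standalone sketch of the submit-and-poll pattern (the fake build step, project names, and 60-second deadline are illustrative, not mbed values):

    import time
    from multiprocessing import Pool, cpu_count

    def build_worker(name, fail=False):
        # Never raise: report success or failure in the returned dict.
        ret = {'result': False, 'name': name}
        try:
            if fail:
                raise RuntimeError("simulated build failure")
            time.sleep(0.1)                     # pretend to compile and link
            ret['result'] = True
            ret['bin_file'] = name + '.bin'
        except Exception as exc:                # report the error, don't propagate
            ret['reason'] = exc
        return ret

    if __name__ == '__main__':
        pool = Pool(processes=cpu_count())
        results = [pool.apply_async(build_worker, (name,))
                   for name in ('blinky', 'ticker', 'rtos_basic')]
        pool.close()

        deadline = time.time() + 60             # overall build timeout
        while results:
            if time.time() > deadline:
                pool.terminate()
                pool.join()
                raise RuntimeError("builds did not finish before the deadline")
            for res in [r for r in results if r.ready()]:
                outcome = res.get()
                results.remove(res)
                print("%-10s ok=%s bin=%s" % (outcome['name'],
                                              outcome['result'],
                                              outcome.get('bin_file')))
            time.sleep(0.01)
        pool.join()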
