@@ -18,7 +18,7 @@
 import sys
 import traceback
 from functools import reduce
-from multiprocessing import freeze_support
+from multiprocessing import Lock, Pool, cpu_count, freeze_support
 
 from swift_build_support.swift_build_support import shell
 from swift_build_support.swift_build_support.SwiftBuildSupport import \
@@ -29,6 +29,53 @@
 SCRIPT_DIR = os.path.dirname(SCRIPT_FILE)
 
 
+def run_parallel(fn, pool_args, n_processes=0):
+    """Function used to run a given closure in parallel.
+
+    NOTE: This function was originally located in the shell module of
+    swift_build_support and should eventually be replaced with a better
+    parallel implementation.
+    """
+
+    def init(l):
+        global lock
+        lock = l
+
+    if n_processes == 0:
+        n_processes = cpu_count() * 2
+
+    lk = Lock()
+    print("Running ``%s`` with up to %d processes." %
+          (fn.__name__, n_processes))
+    pool = Pool(processes=n_processes, initializer=init, initargs=(lk,))
+    results = pool.map_async(func=fn, iterable=pool_args).get(999999)
+    pool.close()
+    pool.join()
+    return results
+
+
+def check_parallel_results(results, op):
+    """Function used to check the results of run_parallel.
+
+    NOTE: This function was originally located in the shell module of
+    swift_build_support and should eventually be replaced with a better
+    parallel implementation.
+    """
+
+    fail_count = 0
+    if results is None:
+        return 0
+    for r in results:
+        if r is not None:
+            if fail_count == 0:
+                print("======%s FAILURES======" % op)
+            print("%s failed (ret=%d): %s" % (r.repo_path, r.ret, r))
+            fail_count += 1
+            if r.stderr:
+                print(r.stderr)
+    return fail_count
+
+
 def confirm_tag_in_repo(tag, repo_name):
     tag_exists = shell.capture(['git', 'ls-remote', '--tags',
                                 'origin', tag], echo=False)
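
For reference, a minimal sketch of how the two copied helpers compose. The worker function, the Failure result type, and the repository names below are hypothetical, not part of this change. run_parallel needs a module-level worker so multiprocessing can pickle it, and its pool initializer installs the shared `lock` as a global inside each worker process:

    from collections import namedtuple

    # Hypothetical result type: check_parallel_results only assumes that
    # non-None results expose repo_path, ret and stderr attributes.
    Failure = namedtuple('Failure', ['repo_path', 'ret', 'stderr'])


    def fetch_repo(repo_path):
        # Module-level so the Pool can pickle it; `lock` is the global
        # installed into each worker by run_parallel's initializer.
        with lock:
            print("fetching %s" % repo_path)
        # ... the real work would go here; return None on success ...
        return Failure(repo_path, 1, "simulated fetch failure")


    results = run_parallel(fetch_repo, ['swift', 'llvm-project', 'cmark'])
    fail_count = check_parallel_results(results, "FETCH")

A worker signals success by returning None; any other return value is treated as a failure and reported by check_parallel_results, which returns the number of failures.
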
@@ -200,8 +247,7 @@ def update_all_repositories(args, config, scheme_name, cross_repos_pr):
                    cross_repos_pr]
         pool_args.append(my_args)
 
-    return shell.run_parallel(update_single_repository, pool_args,
-                              args.n_processes)
+    return run_parallel(update_single_repository, pool_args, args.n_processes)
 
 
 def obtain_additional_swift_sources(pool_args):
@@ -295,8 +341,8 @@ def obtain_all_additional_swift_sources(args, config, with_ssh, scheme_name,
         print("Not cloning any repositories.")
         return
 
-    return shell.run_parallel(obtain_additional_swift_sources, pool_args,
-                              args.n_processes)
+    return run_parallel(
+        obtain_additional_swift_sources, pool_args, args.n_processes)
 
 
 def dump_repo_hashes(args, config, branch_scheme_name='repro'):
@@ -546,8 +592,8 @@ def main():
     update_results = update_all_repositories(args, config, scheme,
                                              cross_repos_pr)
     fail_count = 0
-    fail_count += shell.check_parallel_results(clone_results, "CLONE")
-    fail_count += shell.check_parallel_results(update_results, "UPDATE")
+    fail_count += check_parallel_results(clone_results, "CLONE")
+    fail_count += check_parallel_results(update_results, "UPDATE")
     if fail_count > 0:
         print("update-checkout failed, fix errors and try again")
     else:
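
One detail of the copied run_parallel worth noting: it calls pool.map_async(...).get(999999) rather than a bare pool.map(...). This is presumably the usual Python 2 workaround that keeps the parent process responsive to KeyboardInterrupt while waiting, since a bare pool.map() can block in a way that swallows Ctrl-C; the oversized timeout effectively means "wait forever". The pattern in isolation:

    from multiprocessing import Pool


    def square(x):
        return x * x


    if __name__ == '__main__':
        pool = Pool(processes=4)
        try:
            # .get(timeout) waits for all results but, unlike a bare
            # pool.map() on Python 2, stays interruptible with Ctrl-C.
            results = pool.map_async(square, range(10)).get(999999)
        finally:
            pool.close()
            pool.join()
        print(results)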