@@ -144,7 +144,7 @@ def summarize_xml_files(path, workflow_name):
     res = {}
     res_item_list = ["PASSED", "SKIPPED", "XFAILED", "FAILED", "ERROR"]
     test_file_items = set()
-    for (k, v) in list(test_cases.items()):
+    for (k, v) in list(test_suites.items()):
         file_name = k[0]
         if not file_name in test_file_items:
             test_file_items.add(file_name)
@@ -154,13 +154,14 @@ def summarize_xml_files(path, workflow_name):
             res[temp_item] = {}
             temp_item_statistics = test_file_and_status(file_name, "STATISTICS")
             res[temp_item_statistics] = {'TOTAL': 0, 'PASSED': 0, 'SKIPPED': 0, 'XFAILED': 0, 'FAILED': 0, 'ERROR': 0, 'EXECUTION_TIME': 0}
-
-    for (k, v) in list(test_suites.items()):
-        file_name = k[0]
-        test_tuple_key_statistics = test_file_and_status(file_name, "STATISTICS")
-        test_running_time = get_test_file_running_time(v)
-        res[test_tuple_key_statistics]["EXECUTION_TIME"] += test_running_time
-        TOTAL_EXECUTION_TIME += test_running_time
+            test_running_time = get_test_file_running_time(v)
+            res[temp_item_statistics]["EXECUTION_TIME"] += test_running_time
+            TOTAL_EXECUTION_TIME += test_running_time
+        else:
+            test_tuple_key_statistics = test_file_and_status(file_name, "STATISTICS")
+            test_running_time = get_test_file_running_time(v)
+            res[test_tuple_key_statistics]["EXECUTION_TIME"] += test_running_time
+            TOTAL_EXECUTION_TIME += test_running_time

     for (k, v) in list(test_cases.items()):
         file_name = k[0]
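Judging by the repr shown in the --example_output help text further down, test_file_and_status appears to be a namedtuple keyed by (file_name, status), which is what lets the loops above use it as a dictionary key in res. A minimal sketch of that assumption (the real definition lives elsewhere in this file):

    from collections import namedtuple

    # hypothetical reconstruction, for illustration only
    test_file_and_status = namedtuple('test_file_and_status', ['file_name', 'status'])

    res = {}
    key = test_file_and_status(file_name='test_ops', status='STATISTICS')
    res[key] = {'TOTAL': 0, 'EXECUTION_TIME': 0}  # namedtuples are hashable, so this works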
@@ -326,13 +327,17 @@ def run_selected_tests(workflow_name, test_run_test_path, overall_logs_path_curr

     return selected_results_dict

-def run_test_and_summarize_results(
-        pytorch_root_dir: str,
-        priority_tests: bool,
-        test_config: List[str],
-        default_list: List[str],
-        distributed_list: List[str],
-        inductor_list: List[str]) -> Dict[str, Any]:
+def run_test_and_summarize_results() -> Dict[str, Any]:
+    # parse args
+    args = parse_args()
+    pytorch_root_dir = str(args.pytorch_root)
+    priority_tests = bool(args.priority_tests)
+    test_config = list[str](args.test_config)
+    default_list = list[str](args.default_list)
+    distributed_list = list[str](args.distributed_list)
+    inductor_list = list[str](args.inductor_list)
+    skip_rerun = bool(args.skip_rerun)
+
     # copy current environment variables
     _environ = dict(os.environ)

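One detail worth noting in the new argument handling: list[str](args.test_config) calls a subscripted generic alias, which Python 3.9+ permits but does not type-check at runtime; it behaves exactly like list(args.test_config). A quick illustration:

    # the [str] parameter is discarded at runtime; no element validation happens
    items = list[str]([1, 2, 3])
    assert items == [1, 2, 3]
    assert type(items) is list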
@@ -341,13 +346,18 @@ def run_test_and_summarize_results(
     test_run_test_path = pytorch_root_dir + "/test/run_test.py"
     repo_test_log_folder_path = pytorch_root_dir + "/.automation_logs/"
     test_reports_src = pytorch_root_dir + "/test/test-reports/"
+    run_test_python_file = pytorch_root_dir + "/test/run_test.py"

     # change directory to pytorch root
     os.chdir(pytorch_root_dir)

     # all test results dict
     res_all_tests_dict = {}

+    # patterns
+    search_text = "--reruns=2"
+    replace_text = "--reruns=0"
+
     # create logs folder
     if not os.path.exists(repo_test_log_folder_path):
         os.mkdir(repo_test_log_folder_path)
@@ -358,6 +368,13 @@ def run_test_and_summarize_results(
     os.environ['HSA_FORCE_FINE_GRAIN_PCIE'] = '1'
     os.environ['PYTORCH_TESTING_DEVICE_ONLY_FOR'] = 'cuda'
     os.environ['CONTINUE_THROUGH_ERROR'] = 'True'
+    if skip_rerun:
+        # modify run_test.py in-place
+        with open(run_test_python_file, 'r') as file:
+            data = file.read()
+            data = data.replace(search_text, replace_text)
+        with open(run_test_python_file, 'w') as file:
+            file.write(data)

     # Time stamp
     current_datetime = datetime.now().strftime("%Y%m%d_%H-%M-%S")
@@ -455,6 +472,15 @@ def run_test_and_summarize_results(
     os.environ.clear()
     os.environ.update(_environ)

+    # restore files
+    if skip_rerun:
+        # modify run_test.py in-place
+        with open(run_test_python_file, 'r') as file:
+            data = file.read()
+            data = data.replace(replace_text, search_text)
+        with open(run_test_python_file, 'w') as file:
+            file.write(data)
+
     return res_all_tests_dict

 def parse_args():
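The restore above only runs when run_test_and_summarize_results() returns normally; if a test phase raises, run_test.py would be left with --reruns=0. One defensive variant, not what this patch does, is a try/finally around the run. A minimal sketch under that assumption:

    def _rewrite(path, old, new):
        # hypothetical helper: swap a flag string inside a file in-place
        with open(path, 'r') as f:
            data = f.read()
        with open(path, 'w') as f:
            f.write(data.replace(old, new))

    if skip_rerun:
        _rewrite(run_test_python_file, search_text, replace_text)
    try:
        pass  # run the selected test suites here
    finally:
        if skip_rerun:
            _rewrite(run_test_python_file, replace_text, search_text)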
@@ -465,6 +491,7 @@ def parse_args():
     parser.add_argument('--distributed_list', nargs='+', default=[], help="space-separated list of 'distributed' config test suites/files to be executed eg. 'distributed/test_c10d_common distributed/test_c10d_nccl'")
     parser.add_argument('--inductor_list', nargs='+', default=[], help="space-separated list of 'inductor' config test suites/files to be executed eg. 'inductor/test_torchinductor test_ops'")
     parser.add_argument('--pytorch_root', default='.', type=str, help="PyTorch root directory")
+    parser.add_argument('--skip_rerun', action='store_true', help="skip rerun process")
     parser.add_argument('--example_output', type=str, help="{'workflow_name': {\n"
                         "  test_file_and_status(file_name='workflow_aggregate', status='STATISTICS'): {}, \n"
                         "  test_file_and_status(file_name='test_file_name_1', status='ERROR'): {}, \n"
@@ -484,9 +511,7 @@ def check_num_gpus_for_distributed():
     assert num_gpus_visible > 1, "Number of visible GPUs should be >1 to run distributed unit tests"

 def main():
-    global args
-    args = parse_args()
-    all_tests_results = run_test_and_summarize_results(args.pytorch_root, args.priority_tests, args.test_config, args.default_list, args.distributed_list, args.inductor_list)
+    all_tests_results = run_test_and_summarize_results()
     pprint(dict(all_tests_results))

 if __name__ == "__main__":
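With parsing moved inside run_test_and_summarize_results(), main() no longer forwards arguments, and the new flag is picked up directly from the command line. Assuming the script is invoked as a file named run_pytorch_unit_tests.py (the file name is not visible in this diff), a typical call would look like:

    python run_pytorch_unit_tests.py --pytorch_root /path/to/pytorch --priority_tests --skip_rerun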