@@ -116,6 +116,11 @@ def get_test_message(test_case, status=None):
116
116
else :
117
117
return ""
118
118
119
def get_test_file_running_time(test_suite):
    """Return the recorded running time of a test-suite entry.

    Args:
        test_suite: dict parsed from a JUnit XML ``<testsuite>`` element.

    Returns:
        The value of the ``"time"`` key when present, otherwise ``0``.
    """
    # dict.get replaces the original check-then-index pair
    # (test_suite.__contains__('time') followed by test_suite["time"]).
    return test_suite.get("time", 0)
123
+
119
124
def get_test_running_time (test_case ):
120
125
if test_case .__contains__ ('time' ):
121
126
return test_case ["time" ]
@@ -129,9 +134,11 @@ def summarize_xml_files(path, workflow_name):
129
134
TOTAL_XFAIL_NUM = 0
130
135
TOTAL_FAILED_NUM = 0
131
136
TOTAL_ERROR_NUM = 0
137
+ TOTAL_EXECUTION_TIME = 0
132
138
133
139
#parse the xml files
134
140
test_cases = parse_xml_reports_as_dict (- 1 , - 1 , 'testcase' , workflow_name , path )
141
+ test_suites = parse_xml_reports_as_dict (- 1 , - 1 , 'testsuite' , workflow_name , path )
135
142
test_file_and_status = namedtuple ("test_file_and_status" , ["file_name" , "status" ])
136
143
# results dict
137
144
res = {}
@@ -146,7 +153,14 @@ def summarize_xml_files(path, workflow_name):
146
153
temp_item = test_file_and_status (file_name , item )
147
154
res [temp_item ] = {}
148
155
temp_item_statistics = test_file_and_status (file_name , "STATISTICS" )
149
- res [temp_item_statistics ] = {'TOTAL' : 0 , 'PASSED' : 0 , 'SKIPPED' : 0 , 'XFAILED' : 0 , 'FAILED' : 0 , 'ERROR' : 0 }
156
+ res [temp_item_statistics ] = {'TOTAL' : 0 , 'PASSED' : 0 , 'SKIPPED' : 0 , 'XFAILED' : 0 , 'FAILED' : 0 , 'ERROR' : 0 , 'EXECUTION_TIME' : 0 }
157
+
158
+ for (k ,v ) in list (test_suites .items ()):
159
+ file_name = k [0 ]
160
+ test_tuple_key_statistics = test_file_and_status (file_name , "STATISTICS" )
161
+ test_running_time = get_test_file_running_time (v )
162
+ res [test_tuple_key_statistics ]["EXECUTION_TIME" ] += test_running_time
163
+ TOTAL_EXECUTION_TIME += test_running_time
150
164
151
165
for (k ,v ) in list (test_cases .items ()):
152
166
file_name = k [0 ]
@@ -195,13 +209,18 @@ def summarize_xml_files(path, workflow_name):
195
209
statistics_dict ["XFAILED" ] = TOTAL_XFAIL_NUM
196
210
statistics_dict ["FAILED" ] = TOTAL_FAILED_NUM
197
211
statistics_dict ["ERROR" ] = TOTAL_ERROR_NUM
212
+ statistics_dict ["EXECUTION_TIME" ] = TOTAL_EXECUTION_TIME
198
213
aggregate_item = workflow_name + "_aggregate"
199
214
total_item = test_file_and_status (aggregate_item , "STATISTICS" )
200
215
res [total_item ] = statistics_dict
201
216
202
217
return res
203
218
204
219
def run_command_and_capture_output (cmd ):
220
+ if os .environ ['TEST_CONFIG' ] == 'distributed' :
221
+ p = subprocess .run ("rocminfo | grep -cE 'Name:\s+gfx'" , shell = True , capture_output = True , text = True )
222
+ num_gpus_visible = int (p .stdout )
223
+ assert num_gpus_visible > 1 , "Number of visible GPUs should be >1 to run TEST_CONFIG=distributed"
205
224
try :
206
225
print (f"Running command '{ cmd } '" )
207
226
with open (CONSOLIDATED_LOG_FILE_PATH , "a+" ) as output_file :
0 commit comments