@@ -39,9 +39,12 @@ def get_pytest_info(path_to_logs, repo_name, branch_name):
         }
         report_file_path = os.path.join(path_to_logs, pytest_hash, "report.json")
         if not os.path.exists(report_file_path):
-            reason_for_failure = open(
-                os.path.join(path_to_logs, pytest_hash, "test_output.txt")
-            ).read()
+            if os.path.exists(os.path.join(path_to_logs, pytest_hash, "test_output.txt")):
+                reason_for_failure = open(
+                    os.path.join(path_to_logs, pytest_hash, "test_output.txt")
+                ).read()
+            else:
+                reason_for_failure = "Unknown failure."
             pytest_info[testname]["failed_to_run"] = reason_for_failure
             return pytest_info
         pytest_report = json.load(open(report_file_path))
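
Note: the hunk above makes the error path defensive — if pytest crashed before writing `test_output.txt`, the script now records "Unknown failure." instead of raising `FileNotFoundError`. A minimal sketch of the same pattern in isolation (the helper name is illustrative, not part of the patch):

```python
import os


def read_failure_reason(path_to_logs: str, pytest_hash: str) -> str:
    """Return the captured pytest output if it exists, otherwise a placeholder."""
    output_path = os.path.join(path_to_logs, pytest_hash, "test_output.txt")
    if os.path.exists(output_path):
        with open(output_path) as f:
            return f.read()
    return "Unknown failure."
```
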
@@ -56,7 +59,7 @@ def get_pytest_info(path_to_logs, repo_name, branch_name):
         if "passed" not in pytest_summary:
             pytest_summary["passed"] = 0
         for test in pytest_report["tests"]:
-            if test["outcome"] == "passed":
+            if test["outcome"] in {"passed", "skipped"}:
                 continue
             if "longrepr" in test:
                 failure_string = test["longrepr"]
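
Note: with this hunk, skipped tests are no longer collected as failures. A small sketch of the filter, assuming the `pytest-json-report`-style structure the surrounding code reads:

```python
# Outcomes that should not be reported as failures.
NON_FAILURE_OUTCOMES = {"passed", "skipped"}


def collect_failures(pytest_report: dict) -> list:
    """Return the test entries whose outcome indicates a real failure."""
    return [
        test
        for test in pytest_report.get("tests", [])
        if test.get("outcome") not in NON_FAILURE_OUTCOMES
    ]
```
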
@@ -85,17 +88,6 @@ def get_pytest_info(path_to_logs, repo_name, branch_name):
 
 
 def get_coverage_info(path_to_logs, repo_name, branch_name):
-    # coverage_fp = open(os.path.join(path_to_logs, pytest_hash, "coverage.json"))
-    # for filename, file_coverage in json.load(coverage_fp)["files"].items():
-    #     if not any(relevant_function.startswith(filename) for relevant_function in relevant_functions):
-    #         continue
-    #     for funcname, func_coverage in file_coverage["functions"].items():
-    #         if f"{filename}::{funcname}" not in relevant_functions: continue
-    #         pycov_info[testname][f"{filename}::{funcname}"] = {
-    #             "implementation": submission_info["function_impls"][f"{filename}::{funcname}"],
-    #             "executed_lines": func_coverage["executed_lines"],
-    #             "executed_branches": func_coverage["executed_branches"]
-    #         }
     raise NotImplementedError
 
 
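Note: the deleted comment block sketched how `get_coverage_info` might consume a `coverage.json` report; the function still ends in `raise NotImplementedError`. For reference, a hedged reconstruction of that deleted sketch (names such as `relevant_functions` and `submission_info["function_impls"]` are carried over from the comments, and the per-function `functions`/`executed_branches` fields assume a coverage.py JSON report with function regions and branch coverage enabled):

```python
import json
import os


def sketch_coverage_info(path_to_logs, pytest_hash, relevant_functions, submission_info):
    """Illustrative only: mirrors the deleted comments, not the shipped code."""
    pycov_info = {}
    with open(os.path.join(path_to_logs, pytest_hash, "coverage.json")) as coverage_fp:
        files = json.load(coverage_fp)["files"]
    for filename, file_coverage in files.items():
        if not any(fn.startswith(filename) for fn in relevant_functions):
            continue
        for funcname, func_coverage in file_coverage.get("functions", {}).items():
            key = f"{filename}::{funcname}"
            if key not in relevant_functions:
                continue
            pycov_info[key] = {
                "implementation": submission_info["function_impls"][key],
                "executed_lines": func_coverage["executed_lines"],
                "executed_branches": func_coverage.get("executed_branches", []),
            }
    return pycov_info
```
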
@@ -182,91 +174,80 @@ def render_mds(subfolder="docs"):
 | Name | Repos Resolved (/{num_repos}) | Test Duration (s) | Date | Analysis | Github |
 |------|:-------------------------:|:--------------------:|:----------:|----|----| """
 
-    for branch_name in tqdm.tqdm(glob.glob(os.path.join(analysis_files_path, "*"))):
-        branch_name = os.path.basename(branch_name)
-        if branch_name in {"blank", "repos", "submission_repos"}:
+    for org_path in tqdm.tqdm(glob.glob(os.path.join(analysis_files_path, "*"))):
+        org_name = os.path.basename(org_path)
+        if org_name in {"blank", "repos", "submission_repos"}:
             continue
         repos_resolved = 0
         # cum_passed = 0
         total_duration = 0.0
-        # TODO better way to have submission info loaded up before get into repos...
-        submission_info = None
-        submission_page = """# Submission Name: **DISPLAYNAME_GOES_HERE** (split: SPLIT_GOES_HERE)
+        for branch_path in glob.glob(os.path.join(org_path, "*.json")):
+            branch_metrics = json.load(open(branch_path))
+            submission_info = branch_metrics["submission_info"]
+            split = submission_info["split"]
+            org_name = submission_info["org_name"]
+            project_page_link = submission_info["project_page"]
+            display_name = submission_info["display_name"]
+            submission_date = submission_info["submission_date"]
+            branch_name = submission_info["branch"]
+            submission_page = f"""# Submission Name: **{display_name}** (split: {split})
 
 | Repository | Resolved | Pass Rate | Test Duration (s) | Analysis | Github Link |
 |------------|---------|:-----:|:-----:|-----|-----|"""
-        for repo_file in glob.glob(
-            os.path.join(analysis_files_path, branch_name, "*.json")
-        ):
-            repo_metrics_output_file = os.path.join(
-                analysis_files_path, branch_name, repo_file
-            )
-            repo_metrics = json.load(open(repo_metrics_output_file))
-            repo_name = os.path.basename(repo_file[: -len(".json")])
-            if submission_info is None:
-                submission_info = repo_metrics["submission_info"]
-                split = submission_info["split"]
-                org_name = submission_info["org_name"]
-                project_page_link = submission_info["project_page"]
-                display_name = submission_info["display_name"]
-                submission_date = submission_info["submission_date"]
-                branch_name = submission_info["branch"]
-                submission_page = submission_page.replace(
-                    "DISPLAYNAME_GOES_HERE", display_name
-                ).replace("SPLIT_GOES_HERE", split)
-            submission_repo_page = (
-                f"# **{display_name}**: {repo_name}"
-            )
-            for pytest_group, pytest_info in repo_metrics.items():
-                if pytest_group == "submission_info":
-                    continue
-                pytest_group = os.path.basename(pytest_group.strip("/"))
-                patch_diff = (
-                    f"""\n\n## Patch diff\n```diff\n{pytest_info['patch_diff']}```"""
+
+            for repo_name, repo_pytest_results in branch_metrics.items():
+                if repo_name == "submission_info": continue
+                submission_repo_page = (
+                    f"# **{display_name}**: {repo_name}"
                 )
-                if "failed_to_run" in pytest_info:
-                    submission_repo_page += f"""\n## Failed to run pytests\n```\n{pytest_info['failed_to_run']}\n```"""
-                    resolved = False
-                    pytest_details = "Pytest failed"
-                    duration = "Failed."
-                else:
-                    submission_repo_page += """\n## Pytest Summary
+                for pytest_group, pytest_info in repo_pytest_results.items():
+                    pytest_group = os.path.basename(pytest_group.strip("/"))
+                    patch_diff = (
+                        f"""\n\n## Patch diff\n```diff\n{pytest_info['patch_diff']}```"""
+                    )
+                    if "failed_to_run" in pytest_info:
+                        submission_repo_page += f"""\n## Failed to run pytests for test `{pytest_group}`\n```\n{pytest_info['failed_to_run']}\n```"""
+                        resolved = False
+                        pytest_details = "Pytest failed"
+                        duration = "Failed."
+                    else:
+                        submission_repo_page += f"""\n## Pytest Summary for test `{pytest_group}`
 | status | count |
 |:---------|:-----:|
 """
-                    total_duration += pytest_info["duration"]
-                    # cum_passed += pytest_info["summary"]["passed"]
-                    for category, count in pytest_info["summary"].items():
-                        if category not in {"duration"}:
-                            submission_repo_page += f"""| {category} | {count} |\n"""
-                        else:
+                        total_duration += pytest_info["duration"]
+                        # cum_passed += pytest_info["summary"]["passed"]
+                        for category, count in pytest_info["summary"].items():
+                            if category not in {"duration"}:
+                                submission_repo_page += f"""| {category} | {count} |\n"""
+                            else:
+                                submission_repo_page += (
+                                    f"""| {category} | {float(count):.2f}s |\n"""
+                                )
+
+                        submission_repo_page += "\n## Failed pytests:\n\n"
+                        for testname, failure in pytest_info["failures"].items():
+                            shortened_testname = os.path.basename(testname)
                             submission_repo_page += (
-                                f"""| {category} | {float(count):.2f}s |\n"""
+                                f"### {shortened_testname}\n\n<details><summary> <pre>{shortened_testname}"
+                                f"</pre></summary><pre>\n{failure['failure_string']}\n</pre>\n</details>\n"
                             )
-
-                    submission_repo_page += "\n## Failed pytest:\n\n"
-                    for testname, failure in pytest_info["failures"].items():
-                        shortened_testname = os.path.basename(testname)
-                        submission_repo_page += (
-                            f"### {shortened_testname}\n\n<details><summary> <pre>{shortened_testname}"
-                            f"</pre></summary><pre>\n{failure['failure_string']}\n</pre>\n</details>\n"
+                        resolved = ("failed" not in pytest_info["summary"]) or (
+                            pytest_info["summary"]["failed"] == 0
                         )
-                        resolved = ("failed" not in pytest_info["summary"]) or (
-                            pytest_info["summary"]["failed"] == 0
-                        )
-                        repos_resolved += 1
-                        pytest_details = f"{pytest_info['summary']['passed']} / {pytest_info['summary']['collected']}"
-                        duration = f"{pytest_info['duration']:.2f}"
-                github_hyperlink = f"{project_page_link}/{repo_name}" if branch_name == "reference" else f"{project_page_link}/{repo_name}/tree/{branch_name}"
-                submission_page += f"""
+                        repos_resolved += int(resolved)
+                        pytest_details = f"{pytest_info['summary']['passed']} / {pytest_info['summary']['collected']}"
+                        duration = f"{pytest_info['duration']:.2f}"
+                    github_hyperlink = f"{project_page_link}/{repo_name}" if branch_name == "reference" else f"{project_page_link}/{repo_name}/tree/{branch_name}"
+                    submission_page += f"""
 | {repo_name} | {'Yes' if resolved else 'No'} | {pytest_details} | {duration} | [Analysis](/{f'analysis_{org_name}_{branch_name}_{repo_name}'}) | [Github]({github_hyperlink}) |"""
-            back_button = (
-                f"[back to {display_name} summary](/{f'analysis_{org_name}_{branch_name}'})\n\n"
-            )
-            with open(
-                os.path.join(subfolder, f"analysis_{org_name}_{branch_name}_{repo_name}.md"), "w"
-            ) as wf:
-                wf.write(back_button + submission_repo_page + patch_diff)
+                back_button = (
+                    f"[back to {display_name} summary](/{f'analysis_{org_name}_{branch_name}'})\n\n"
+                )
+                with open(
+                    os.path.join(subfolder, f"analysis_{org_name}_{branch_name}_{repo_name}.md"), "w"
+                ) as wf:
+                    wf.write(back_button + submission_repo_page + patch_diff)
         analysis_link = f"[Analysis](/{f'analysis_{org_name}_{branch_name}'})"
         github_link = f"[Github]({project_page_link})"
         leaderboard[split] += (
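
Note: the rewritten loop assumes the new on-disk layout introduced later in this diff — one JSON file per submission branch under a folder named after the org, holding a `submission_info` header plus one entry per repo. A sketch of the layout and the read pattern `render_mds` now relies on (the path and field names come from the diff; the concrete values are placeholders):

```python
import glob
import json
import os

analysis_files_path = "analysis_files"  # placeholder path

# Expected layout: <analysis_files_path>/<org_name>/<branch_name>.json
# {
#   "submission_info": {"org_name": ..., "branch": ..., "display_name": ...,
#                       "submission_date": ..., "split": ..., "project_page": ...},
#   "<repo_name>": {<per-test-group pytest results>},
#   ...
# }
for org_path in glob.glob(os.path.join(analysis_files_path, "*")):
    if os.path.basename(org_path) in {"blank", "repos", "submission_repos"}:
        continue  # these folders hold repos, not analysis results
    for branch_path in glob.glob(os.path.join(org_path, "*.json")):
        branch_metrics = json.load(open(branch_path))
        submission_info = branch_metrics["submission_info"]
        repo_results = {k: v for k, v in branch_metrics.items() if k != "submission_info"}
```
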
@@ -355,7 +336,20 @@ def main(args):
             f"--commit0-dot-file-path {analysis_files_path}/repos/.commit0.yaml"
         )
         branch_name = "reference"
-        os.makedirs(os.path.join(analysis_files_path, branch_name), exist_ok=True)
+        org_name = "commit0"
+        submission_metrics_output_file = os.path.join(
+            analysis_files_path, org_name, f"{branch_name}.json"
+        )
+        submission_details = {"submission_info": {
+            "org_name": org_name,
+            "branch": branch_name,
+            "display_name": "Reference (Gold)",
+            "submission_date": "NA",
+            "split": args.split,
+            "project_page": "https://github.com/commit-0",
+        }}
+
+        os.makedirs(os.path.join(analysis_files_path, org_name), exist_ok=True)
         if not args.keep_previous_eval:
             for repo_log_path in glob.glob(f"{os.getcwd()}/logs/pytest/*"):
                 if os.path.exists(os.path.join(repo_log_path, branch_name)):
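
Note: the reference run is now written under an org folder of its own (`commit0/reference.json`) with the same `submission_info` header that real submissions carry. A sketch of the file this block produces before any repo results are added (values follow the diff; `args.split` is replaced by a literal here):

```python
import json
import os

analysis_files_path = "analysis_files"  # placeholder path

submission_details = {"submission_info": {
    "org_name": "commit0",
    "branch": "reference",
    "display_name": "Reference (Gold)",
    "submission_date": "NA",
    "split": "lite",  # the script uses args.split
    "project_page": "https://github.com/commit-0",
}}

os.makedirs(os.path.join(analysis_files_path, "commit0"), exist_ok=True)
with open(os.path.join(analysis_files_path, "commit0", "reference.json"), "w") as wf:
    json.dump(submission_details, wf, indent=4)
```
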
@@ -364,40 +358,26 @@ def main(args):
             "commit0 evaluate --reference "
             f"--commit0-dot-file-path {analysis_files_path}/repos/.commit0.yaml"
         )
-
         # get coverage and pytest info for each repo
         for example in dataset:
             repo_name = example["repo"].split("/")[-1]
             if args.split != "all" and repo_name not in SPLIT[args.split]:
                 continue
 
-            repo_metrics_output_file = os.path.join(
-                analysis_files_path, branch_name, f"{repo_name}.json"
-            )
-
             path_to_logs = f"{os.getcwd()}/logs/pytest/{repo_name}/{branch_name}"
             pytest_results = get_pytest_info(path_to_logs, repo_name, branch_name)
-            pytest_results["submission_info"] = {
-                "org_name": "gold",
-                "branch": "reference",
-                "display_name": "Reference (Gold)",
-                "submission_date": "NA",
-                "split": args.split,
-                "project_page": "https://github.com/commit-0",
-            }
-            json.dump(pytest_results, open(repo_metrics_output_file, "w"), indent=4)
+            submission_details[repo_name] = pytest_results
+            json.dump(submission_details, open(submission_metrics_output_file, "w"), indent=4)
+            print(f"Saved pytest info to {submission_metrics_output_file}")
 
     if args.analyze_submissions:
-        commit0_dot_file_path = os.path.join(
-            analysis_files_path, "submission_repos", ".commit0.yaml"
-        )
         if not args.keep_previous_eval:
             for subfolder in glob.glob(os.path.join(analysis_files_path, "*")):
                 if os.path.basename(subfolder.rstrip("/")) not in {
                     "blank",
-                    "reference",
                     "repos",
                     "submission_repos",
+                    "commit0"
                 }:
                     try:
                         print(f"Clearing {subfolder}")
@@ -406,10 +386,17 @@ def main(args):
                         print(f"{e}: when removing {subfolder}")
 
         for submission in tqdm.tqdm(submission_dataset):
-            # submission_details = {"submission_info": submission}
+            submission_details = {"submission_info": submission}
             branch_name = submission["branch"]
             org_name = submission["org_name"]
-            os.makedirs(os.path.join(analysis_files_path, branch_name), exist_ok=True)
+            submission_metrics_output_file = os.path.join(
+                analysis_files_path, org_name, f"{branch_name}.json"
+            )
+            os.makedirs(os.path.join(analysis_files_path, org_name), exist_ok=True)
+            commit0_dot_file_path = os.path.join(
+                analysis_files_path, "submission_repos", org_name, ".commit0.yaml"
+            )
+            print("commit0_dot_file_path", commit0_dot_file_path)
             if not args.keep_previous_eval:
                 for repo_log_path in glob.glob(f"{os.getcwd()}/logs/pytest/*"):
                     if os.path.exists(os.path.join(repo_log_path, branch_name)):
@@ -443,16 +430,11 @@ def main(args):
                 if args.split != "all" and repo_name not in SPLIT[args.split]:
                     continue
 
-                repo_metrics_output_file = os.path.join(
-                    analysis_files_path, branch_name, f"{repo_name}.json"
-                )
-
                 path_to_logs = f"{os.getcwd()}/logs/pytest/{repo_name}/{branch_name}"
                 pytest_results = get_pytest_info(path_to_logs, repo_name, branch_name)
-                # submission_details.update(pytest_results)
-                pytest_results["submission_info"] = submission
-                json.dump(pytest_results, open(repo_metrics_output_file, "w"), indent=4)
-                # json.dump(submission_details, open(repo_metrics_output_file, "w"), indent=4)
+                submission_details[repo_name] = pytest_results
+                json.dump(submission_details, open(submission_metrics_output_file, "w"), indent=4)
+                print(f"Saved pytest info to {submission_metrics_output_file}")
 
     if not args.keep_previous_eval:
         for analysis_file in glob.glob("docs/analysis*.md"):