Skip to content

Commit 17eaf38

Browse files
committed
diff splits of referenced saved sep
1 parent 607df87 commit 17eaf38

File tree

1 file changed

+22
-10
lines changed

1 file changed

+22
-10
lines changed

docs/render_submissions.py

Lines changed: 22 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -161,8 +161,8 @@ def get_blank_repo_metrics(
161161
return blank_repo_metrics
162162

163163
leaderboard_header = """\n\n## Leaderboard ({split})
164-
| Name | Repos Resolved (/{num_repos}) | Test Duration (s) | Date | Analysis | Github |
165-
|------|:-------------------------:|:--------------------:|:----------:|----|----| """
164+
| Name | Repos Resolved (/{num_repos}) | Total Tests Passed (/{total_num_tests}) | Test Duration (s) | Date | Analysis | Github |
165+
|------|:-------------------------:|:--------------------:|:--------------------:|:----------:|----|----| """
166166

167167
submission_table_header = """# Submission Name: **{display_name}** (split: {split})
168168
@@ -177,19 +177,25 @@ def get_blank_repo_metrics(
177177
def render_mds(overwrite_previous, subfolder="docs"):
178178
leaderboard = {}
179179

180+
split_to_total_tests = {"lite": 3628, "all": 140926} # hard-coded to skip running it later
180181
for split in tqdm.tqdm(["lite", "all"]):
181182
num_repos = len(SPLIT[split])
183+
# total_num_tests = 0
184+
# for repo_name in SPLIT[split]:
185+
# repo_tests = subprocess.run(['commit0', 'get-tests', repo_name], capture_output=True, text=True).stdout.strip()
186+
# total_num_tests += len(repo_tests.splitlines())
182187
leaderboard[
183188
split
184-
] = leaderboard_header.format(split=split, num_repos=num_repos)
189+
] = leaderboard_header.format(split=split, num_repos=num_repos, total_num_tests=split_to_total_tests[split])
185190

186191
for org_path in tqdm.tqdm(glob.glob(os.path.join(analysis_files_path, "*"))):
187192
org_name = os.path.basename(org_path)
188193
if org_name in {"blank", "repos", "submission_repos"}:
189194
continue
190-
repos_resolved = 0
191-
total_duration = 0.0
192195
for branch_path in glob.glob(os.path.join(org_path, "*.json")):
196+
cum_tests_passed = 0
197+
repos_resolved = 0
198+
total_duration = 0.0
193199
branch_metrics = json.load(open(branch_path))
194200
submission_info = branch_metrics["submission_info"]
195201
split = submission_info["split"]
@@ -262,6 +268,7 @@ def render_mds(overwrite_previous, subfolder="docs"):
262268
f"### {shortened_testname}\n\n<details><summary> <pre>{shortened_testname}"
263269
f"</pre></summary><pre>\n{failure['failure_string']}\n</pre>\n</details>\n"
264270
)
271+
cum_tests_passed += pytest_info["summary"]["passed"]
265272
total_duration += pytest_info["duration"]
266273
repos_resolved += int(resolved)
267274
if write_submission:
@@ -293,6 +300,7 @@ def render_mds(overwrite_previous, subfolder="docs"):
293300
leaderboard[split] += (
294301
f"\n|{display_name}|"
295302
f"{repos_resolved}|"
303+
f"{cum_tests_passed}|"
296304
f"{total_duration:.2f}|"
297305
f"{submission_date}|"
298306
f"{analysis_link}|"
@@ -378,13 +386,17 @@ def main(args):
378386
json.dump(repo_metrics, open(repo_metrics_output_file, "w"), indent=4)
379387

380388
if args.get_reference_details:
389+
branch_name = "reference"
390+
org_name = f"commit0_{args.split}"
391+
commit0_dot_file_path = os.path.join(
392+
analysis_files_path, "repos", org_name, branch_name, ".commit0.yaml"
393+
)
394+
submission_repos_path = os.path.join(analysis_files_path, "repos", org_name, branch_name)
381395
if args.do_setup:
382396
os.system(
383-
f"commit0 setup {args.split} --base-dir {analysis_files_path}/repos "
384-
f"--commit0-dot-file-path {analysis_files_path}/repos/.commit0.yaml"
397+
f"commit0 setup {args.split} --base-dir {submission_repos_path} "
398+
f"--commit0-dot-file-path {commit0_dot_file_path}"
385399
)
386-
branch_name = "reference"
387-
org_name = "commit0"
388400
submission_metrics_output_file = os.path.join(
389401
analysis_files_path, org_name, f"{branch_name}.json"
390402
)
@@ -408,7 +420,7 @@ def main(args):
408420
if args.overwrite_previous_eval or need_re_eval:
409421
os.system(
410422
"commit0 evaluate --reference "
411-
f"--commit0-dot-file-path {analysis_files_path}/repos/.commit0.yaml"
423+
f"--commit0-dot-file-path {commit0_dot_file_path}"
412424
)
413425
# get coverage and pytest info for each repo
414426
for example in dataset:

0 commit comments

Comments (0)