     "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/generation_config.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/pytorch_model-00001-of-00002.bin,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/pytorch_model-00002-of-00002.bin,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/pytorch_model.bin.index.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/special_tokens_map.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/tokenizer.json,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/tokenizer.model,https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/tokenizer_config.json",
     "mistralai/Mistral-7B-Instruct-v0.1": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/config.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/generation_config.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/pytorch_model-00001-of-00002.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/pytorch_model-00002-of-00002.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/pytorch_model.bin.index.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/special_tokens_map.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/tokenizer.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/tokenizer.model,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/tokenizer_config.json",
     "mistralai/Mistral-7B-Instruct-v0.2": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/config.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/generation_config.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/pytorch_model-00001-of-00003.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/pytorch_model-00002-of-00003.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/pytorch_model-00003-of-00003.bin,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/pytorch_model.bin.index.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/special_tokens_map.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/tokenizer.json,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/tokenizer.model,https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/resolve/main/tokenizer_config.json",
+
+    # Models prefixed with "huggingface-cli" will be downloaded with the huggingface-cli tool
+    # TODO: Convert all of the MODEL_REPOS entries to a NamedTuple that includes the install_method
+    "huggingface-cli/meta-llama/Meta-Llama-3-8B": "",
 }
 
 
 JOB_RUNNERS = {
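For context on the TODO in the hunk above, here is a minimal sketch of what that NamedTuple refactor could look like. The `ModelRepo` type, its field names, and the `"<urls>"` placeholder are hypothetical illustrations, not part of this PR:

```python
from typing import NamedTuple


# Hypothetical shape for the refactor the TODO describes; names are illustrative only.
class ModelRepo(NamedTuple):
    resources: str  # comma-separated download URLs; empty for huggingface-cli models
    install_method: str = "wget"  # "wget" or "huggingface-cli"


MODEL_REPOS = {
    "openlm-research/open_llama_7b": ModelRepo(resources="<urls>"),
    "meta-llama/Meta-Llama-3-8B": ModelRepo(resources="", install_method="huggingface-cli"),
}
```

With a shape like this, the `huggingface-cli/` key prefix and the string `replace` further down would no longer be needed.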
@@ -57,7 +61,7 @@ def parse_args() -> Any:
     return parser.parse_args()
 
 
-def model_should_run_on_event(model: str, event: str) -> bool:
+def model_should_run_on_event(model: str, event: str, backend: str) -> bool:
     """
     A helper function to decide whether a model should be tested on an event (pull_request/push).
     We put higher-priority, fast models on pull_request and the rest on push.
@@ -67,7 +71,11 @@ def model_should_run_on_event(model: str, event: str) -> bool:
     elif event == "push":
         return model in []
     elif event == "periodic":
-        return model in ["openlm-research/open_llama_7b"]
+        # Test llama3 on GPU only; see the description in https://github.com/pytorch/torchchat/pull/399 for the reasoning
+        if backend == "gpu":
+            return model in ["openlm-research/open_llama_7b", "huggingface-cli/meta-llama/Meta-Llama-3-8B"]
+        else:
+            return model in ["openlm-research/open_llama_7b"]
     else:
         return False
 
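A few examples of the new gating, assuming the function as patched above (the `"gpu"` backend label comes straight from the diff; `"cpu"` is assumed here as the other `JOB_RUNNERS` key):

```python
# Periodic runs now gate llama3 on the backend:
model_should_run_on_event("huggingface-cli/meta-llama/Meta-Llama-3-8B", "periodic", "gpu")  # True
model_should_run_on_event("huggingface-cli/meta-llama/Meta-Llama-3-8B", "periodic", "cpu")  # False
model_should_run_on_event("openlm-research/open_llama_7b", "periodic", "cpu")  # True
```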
@@ -102,15 +110,25 @@ def export_models_for_ci() -> dict[str, dict]:
         MODEL_REPOS.keys(),
         JOB_RUNNERS[backend].items(),
     ):
-        if not model_should_run_on_event(repo_name, event):
+        if not model_should_run_on_event(repo_name, event, backend):
             continue
 
+        # This is mostly temporary so this can land quickly with minimal
+        # changes; see the TODO at the top of the file for how this should
+        # probably be done instead.
+        install_method = "wget"
+        final_repo_name = repo_name
+        if repo_name.startswith("huggingface-cli"):
+            install_method = "huggingface-cli"
+            final_repo_name = repo_name.replace("huggingface-cli/", "")
+
         record = {
-            "repo_name": repo_name,
-            "model_name": repo_name.split("/")[-1],
+            "repo_name": final_repo_name,
+            "model_name": final_repo_name.split("/")[-1],
             "resources": MODEL_REPOS[repo_name],
             "runner": runner[0],
             "platform": runner[1],
+            "install_method": install_method,
             "timeout": 90,
         }
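Downstream, a CI step could branch on the new `install_method` field. A minimal sketch, assuming the record shape emitted above and that the `huggingface-cli download` subcommand is available; the `download_model` helper itself is hypothetical and not part of this PR:

```python
import subprocess


def download_model(record: dict) -> None:
    # Hypothetical consumer of one matrix record produced by export_models_for_ci().
    if record["install_method"] == "huggingface-cli":
        # huggingface-cli resolves the repo's files itself, which is why
        # "resources" is left empty for these models. Gated repos such as
        # Meta-Llama-3-8B would also need an HF token configured.
        subprocess.run(["huggingface-cli", "download", record["repo_name"]], check=True)
    else:
        # The wget path fetches each URL from the comma-separated resources string.
        for url in record["resources"].split(","):
            subprocess.run(["wget", url], check=True)
```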