29 | 29 | torch._inductor.config.triton.cudagraphs = True
30 | 30 | torch._dynamo.config.cache_size_limit = 100000
31 | 31 |
32 |    | -try:
33 |    | -    import lm_eval
   | 32 | +import lm_eval
34 | 33 |
35 |    | -    lm_eval_available = True
36 |    | -except:
37 |    | -    lm_eval_available = False
38 |    | -
39 |    | -
40 |    | -if lm_eval_available:
41 |    | -    try:  # lm_eval version 0.4
42 |    | -        from lm_eval.evaluator import evaluate
43 |    | -        from lm_eval.models.huggingface import HFLM as eval_wrapper
44 |    | -        from lm_eval.tasks import get_task_dict
45 |    | -    except:  # lm_eval version 0.3
46 |    | -        from lm_eval import base, evaluator, tasks
47 |    | -
48 |    | -        eval_wrapper = base.BaseLM
49 |    | -        get_task_dict = tasks.get_task_dict
50 |    | -        evaluate = evaluator.evaluate
   | 34 | +from lm_eval.evaluator import evaluate
   | 35 | +from lm_eval.models.huggingface import HFLM as eval_wrapper
   | 36 | +from lm_eval.tasks import get_task_dict
51 | 37 |
52 | 38 |
53 | 39 | def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
@@ -193,11 +179,6 @@ def eval(
193 | 179 |         model, tokenizer, max_seq_length, device=device
194 | 180 |     )
195 | 181 |
196 |     | -    try:
197 |     | -        lm_eval.tasks.initialize_tasks()
198 |     | -    except:
199 |     | -        pass
200 |     | -
201 | 182 |     if "hendrycks_test" in tasks:
202 | 183 |         tasks.remove("hendrycks_test")
203 | 184 |         tasks += list(lm_eval.tasks.hendrycks_test.create_all_tasks().keys())
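For context: after this change the file assumes the lm_eval 0.4 API unconditionally (the 0.3 fallback and the initialize_tasks() shim are gone). Below is a minimal sketch of how the three imported symbols fit together; the model name, task name, and limit value are illustrative placeholders, not part of this diff, and task-registration details vary across 0.4.x point releases.

import lm_eval
from lm_eval.evaluator import evaluate
from lm_eval.models.huggingface import HFLM as eval_wrapper
from lm_eval.tasks import get_task_dict

# Wrap a Hugging Face causal LM so the harness can drive it
# ("gpt2" is a placeholder, not the model this repo evaluates).
lm = eval_wrapper(pretrained="gpt2")

# Map task names to task objects; some 0.4.x releases expect a
# lm_eval.tasks.TaskManager here instead of relying on defaults.
task_dict = get_task_dict(["hellaswag"])

# Run evaluation; limit caps examples per task to keep the sketch fast.
results = evaluate(lm=lm, task_dict=task_dict, limit=8)
print(results["results"])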