Skip to content

Commit 06b946e

Browse files
Update batch size default
Differential Revision: D75089945 Pull Request resolved: #11009
1 parent ee64dc6 commit 06b946e

File tree

1 file changed: +1 addition, −1 deletion

examples/models/llama/model_args.py

Lines changed: 1 addition & 1 deletion

@@ -14,7 +14,7 @@ class ModelArgs:
     multiple_of: int = 256  # make SwiGLU hidden layer size multiple of large power of 2
     ffn_dim_multiplier: Optional[float] = None
     norm_eps: float = 1e-5
-    max_batch_size: int = 32
+    max_batch_size: int = 1
     max_seq_len: int = 2048
     max_context_len: int = 2048
     moe: bool = False  # True to enable the MoE (Mixture of Experts)

0 commit comments

Comments (0)