
Commit e630be8 (2 parents: 5f4c78d + eeac438)

Update on "Add new export LLM config"

Differential Revision: [D75263991](https://our.internmc.facebook.com/intern/diff/D75263991) [ghstack-poisoned]

File tree: 1 file changed (+3, −3)

examples/models/llama/config/llm_config.py

Lines changed: 3 additions & 3 deletions
@@ -162,11 +162,11 @@ class QuantizationConfig:
     pt2e_quantize: Optional[Pt2eQuantize] = None
     group_size: Optional[int] = None
     use_spin_quant: Optional[SpinQuant] = None
-    use_qat: Optional[bool] = None
+    use_qat: bool = False
     calibration_tasks: Optional[List[str]] = None
     calibration_limit: Optional[int] = None
     calibration_seq_length: Optional[int] = None
-    calibration_data: Optional[str] = None
+    calibration_data: str = "Once upon a time"

     def __post_init__(self):
         if self.qmode:
@@ -243,7 +243,7 @@ class QNNConfig:

 @dataclass
 class MPSConfig:
-    enabled: Optional[bool] = False
+    enabled: bool = False


 @dataclass
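The change replaces `Optional` fields that defaulted to `None` with plain `bool`/`str` fields that carry concrete defaults, so downstream code can read them without a `None` check. The sketch below is a minimal, trimmed illustration of that behavior, not the full `llm_config.py`: only the fields touched in this diff (plus a couple of unchanged ones for context) are reproduced, and the driver code at the bottom is hypothetical usage.

```python
# Minimal sketch (assumed, trimmed classes) showing the effect of the new defaults.
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class QuantizationConfig:
    group_size: Optional[int] = None               # unchanged, still Optional
    use_qat: bool = False                          # was Optional[bool] = None
    calibration_tasks: Optional[List[str]] = None  # unchanged, still Optional
    calibration_data: str = "Once upon a time"     # was Optional[str] = None


@dataclass
class MPSConfig:
    enabled: bool = False                          # was Optional[bool] = False


cfg = QuantizationConfig()
# With plain bool/str defaults, callers can branch directly instead of
# checking for None first:
if cfg.use_qat:
    print("expecting a QAT checkpoint")
print(cfg.calibration_data)   # -> "Once upon a time"
print(MPSConfig().enabled)    # -> False
```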
