
Commit 6758350 ("up")

Parent: ebea293

1 file changed: extension/llm/export/builder.py (8 additions, 2 deletions)
@@ -35,6 +35,8 @@
 
 from executorch.extension.llm.export.export_passes import RemoveRedundantTransposes
 from pytorch_tokenizers import get_tokenizer
+
+# TODO: remove these once pt2e migration from torch.ao to torchao is complete
 from torch.ao.quantization.quantizer import Quantizer as TorchQuantizer
 from torch.ao.quantization.quantizer.composable_quantizer import (
     ComposableQuantizer as TorchComposableQuantizer,
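
The TODO added here marks these imports as transitional: the legacy torch.ao classes are aliased with a Torch prefix so they can coexist with the torchao equivalents that the file imports elsewhere under the unprefixed names Quantizer and ComposableQuantizer. A minimal sketch of that dual-import pattern, assuming the torchao classes live under torchao.quantization.pt2e (the torchao side is not shown in this diff, so the exact path is an assumption):

# The torchao import path below is an assumption; this diff only shows the
# legacy torch.ao side of the pair.
from torchao.quantization.pt2e.quantizer import (
    ComposableQuantizer,
    Quantizer,
)

# Legacy torch.ao classes, aliased so the two hierarchies stay
# distinguishable until the pt2e migration to torchao is complete.
from torch.ao.quantization.quantizer import Quantizer as TorchQuantizer
from torch.ao.quantization.quantizer.composable_quantizer import (
    ComposableQuantizer as TorchComposableQuantizer,
)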
@@ -374,10 +376,14 @@ def pt2e_quantize(
         if self.verbose:
             logging.info(f"Applied quantizers: {quantizers}")
 
-        if any(isinstance(q, Quantizer) for q in quantizers):
+        if all(isinstance(q, Quantizer) for q in quantizers):
             composed_quantizer = ComposableQuantizer(quantizers)
-        else:
+        elif all(isinstance(q, TorchQuantizer) for q in quantizers):
             composed_quantizer = TorchComposableQuantizer(quantizers)
+        else:
+            raise ValueError(
+                "Quantizers must be either Quantizer or TorchQuantizer"
+            )
 
         assert (
             self.pre_autograd_graph_module is not None
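
The logic change in this hunk is a correctness fix, not a refactor: with the old any(...) test, a list mixing torchao and torch.ao quantizers was handed wholesale to ComposableQuantizer, which expects only torchao quantizers, while a list of pure torch.ao quantizers only reached TorchComposableQuantizer by falling through the else. The new code requires every quantizer to come from a single hierarchy and rejects mixed lists explicitly. A self-contained sketch of the dispatch, using stand-in classes in place of the real ones:

# Stand-ins; the real builder uses torchao's Quantizer and torch.ao's
# Quantizer (aliased as TorchQuantizer).
class Quantizer: ...
class TorchQuantizer: ...

def pick_composer(quantizers):
    # Mirrors the fixed dispatch: all-or-nothing per hierarchy.
    if all(isinstance(q, Quantizer) for q in quantizers):
        return "ComposableQuantizer"       # torchao path
    elif all(isinstance(q, TorchQuantizer) for q in quantizers):
        return "TorchComposableQuantizer"  # legacy torch.ao path
    else:
        raise ValueError("Quantizers must be either Quantizer or TorchQuantizer")

print(pick_composer([Quantizer(), Quantizer()]))            # ComposableQuantizer
print(pick_composer([TorchQuantizer(), TorchQuantizer()]))  # TorchComposableQuantizer
# pick_composer([Quantizer(), TorchQuantizer()]) now raises ValueError;
# under the old any(...) check, the mixed list would have silently taken
# the torchao branch.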
