1 file changed, +8 −2 lines changed.

```diff
@@ -35,6 +35,8 @@
 
 from executorch.extension.llm.export.export_passes import RemoveRedundantTransposes
 from pytorch_tokenizers import get_tokenizer
+
+# TODO: remove these once pt2e migration from torch.ao to torchao is complete
 from torch.ao.quantization.quantizer import Quantizer as TorchQuantizer
 from torch.ao.quantization.quantizer.composable_quantizer import (
     ComposableQuantizer as TorchComposableQuantizer,
@@ -374,10 +376,14 @@ def pt2e_quantize(
         if self.verbose:
             logging.info(f"Applied quantizers: {quantizers}")
 
-        if any(isinstance(q, Quantizer) for q in quantizers):
+        if all(isinstance(q, Quantizer) for q in quantizers):
             composed_quantizer = ComposableQuantizer(quantizers)
-        else:
+        elif all(isinstance(q, TorchQuantizer) for q in quantizers):
             composed_quantizer = TorchComposableQuantizer(quantizers)
+        else:
+            raise ValueError(
+                "Quantizers must be either Quantizer or TorchQuantizer"
+            )
 
         assert (
             self.pre_autograd_graph_module is not None
```
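Why the `any` → `all` change matters: with `any`, a mixed list containing even one torchao `Quantizer` was handed wholesale to `ComposableQuantizer`, and a list of entirely unrecognized types silently fell through to `TorchComposableQuantizer`; both composers assume a homogeneous list. The new code composes only homogeneous lists and fails loudly otherwise. Below is a minimal self-contained sketch of that dispatch; the stub classes and the `compose` helper are illustrative stand-ins, not the real imports (the builder imports `Quantizer` from torchao and `TorchQuantizer` from `torch.ao.quantization.quantizer`).

```python
# Sketch of the quantizer-composition dispatch fixed in this diff.
# The four classes below are stand-ins for the torchao and torch.ao types;
# the real code imports them rather than defining them.

class Quantizer:  # stand-in for the torchao Quantizer base class
    pass

class TorchQuantizer:  # stand-in for torch.ao.quantization.quantizer.Quantizer
    pass

class ComposableQuantizer(Quantizer):
    def __init__(self, quantizers):
        self.quantizers = quantizers

class TorchComposableQuantizer(TorchQuantizer):
    def __init__(self, quantizers):
        self.quantizers = quantizers

def compose(quantizers):  # hypothetical helper, mirrors the diff's branching
    # all() (not any()) guarantees the list is homogeneous before it is
    # handed to a composer that only understands one quantizer family.
    if all(isinstance(q, Quantizer) for q in quantizers):
        return ComposableQuantizer(quantizers)
    elif all(isinstance(q, TorchQuantizer) for q in quantizers):
        return TorchComposableQuantizer(quantizers)
    else:
        # Under the old any() check, a mixed list such as
        # [Quantizer(), TorchQuantizer()] was accepted and passed to
        # ComposableQuantizer; now it is rejected explicitly.
        raise ValueError("Quantizers must be either Quantizer or TorchQuantizer")

assert isinstance(compose([Quantizer(), Quantizer()]), ComposableQuantizer)
assert isinstance(compose([TorchQuantizer()]), TorchComposableQuantizer)
try:
    compose([Quantizer(), TorchQuantizer()])
except ValueError as e:
    print(e)  # Quantizers must be either Quantizer or TorchQuantizer
```

Raising here keeps a silent mis-composition (e.g. a torch.ao quantizer wrapped by the torchao composer) from surfacing later as an opaque export failure.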