
Commit c06a708

huydhn authored and facebook-github-bot committed
Revert "Add quantize option to the coreml script (#5710)" (#5906)
Summary: Debug coreml failure, no need to review

Pull Request resolved: #5906
Reviewed By: cccclai
Differential Revision: D63950443
Pulled By: huydhn
fbshipit-source-id: 5c2c2ad0b140bf9b33d52c9631ff3cc4a576210f
1 parent 8fc3e20 · commit c06a708

File tree: 1 file changed (+3 −35 lines)


examples/apple/coreml/scripts/export.py

Lines changed: 3 additions & 35 deletions
@@ -13,20 +13,14 @@
 import executorch.exir as exir

 import torch
-from coremltools.optimize.torch.quantization.quantization_config import (
-    LinearQuantizerConfig,
-    QuantizationScheme,
-)

 from executorch.backends.apple.coreml.compiler import CoreMLBackend

 from executorch.backends.apple.coreml.partition import CoreMLPartitioner
-from executorch.backends.apple.coreml.quantizer import CoreMLQuantizer
 from executorch.devtools.etrecord import generate_etrecord
 from executorch.exir import to_edge

 from executorch.exir.backend.backend_api import to_backend
-from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

 from torch.export import export

@@ -81,13 +75,6 @@ def parse_args() -> argparse.ArgumentParser:
     parser.add_argument("--generate_etrecord", action=argparse.BooleanOptionalAction)
     parser.add_argument("--save_processed_bytes", action=argparse.BooleanOptionalAction)

-    parser.add_argument(
-        "--quantize",
-        action=argparse.BooleanOptionalAction,
-        required=False,
-        help="Quantize CoreML model",
-    )
-
     args = parser.parse_args()
     return args

@@ -123,10 +110,9 @@ def export_lowered_module_to_executorch_program(lowered_module, example_inputs):
     return exec_prog


-def save_executorch_program(exec_prog, model_name, compute_unit, quantize):
+def save_executorch_program(exec_prog, model_name, compute_unit):
     buffer = exec_prog.buffer
-    data_type = "quantize" if quantize else "fp"
-    filename = f"{model_name}_coreml_{compute_unit}_{data_type}.pte"
+    filename = f"{model_name}_coreml_{compute_unit}.pte"
     print(f"Saving exported program to {filename}")
     with open(filename, "wb") as file:
         file.write(buffer)
@@ -182,22 +168,6 @@ def main():
     if args.use_partitioner:
         model.eval()
         exir_program_aten = torch.export.export(model, example_inputs)
-        if args.quantize:
-            quantization_config = LinearQuantizerConfig.from_dict(
-                {
-                    "global_config": {
-                        "quantization_scheme": QuantizationScheme.affine,
-                        "activation_dtype": torch.quint8,
-                        "weight_dtype": torch.qint8,
-                        "weight_per_channel": True,
-                    }
-                }
-            )
-
-            quantizer = CoreMLQuantizer(quantization_config)
-            model = prepare_pt2e(model, quantizer)  # pyre-fixme[6]
-            model(*example_inputs)
-            exir_program_aten = convert_pt2e(model)

         edge_program_manager = exir.to_edge(exir_program_aten)
         edge_copy = copy.deepcopy(edge_program_manager)
@@ -219,9 +189,7 @@ def main():
         example_inputs,
     )

-    save_executorch_program(
-        exec_program, args.model_name, args.compute_unit, args.quantize
-    )
+    save_executorch_program(exec_program, args.model_name, args.compute_unit)
     generate_etrecord(f"{args.model_name}_coreml_etrecord.bin", edge_copy, exec_program)

     if args.save_processed_bytes and lowered_module is not None:
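Usage after this revert (a sketch, not part of the commit): the script no longer accepts a --quantize flag, so the export always goes through the non-quantized path and, per the updated save_executorch_program, the output filename drops the data-type suffix. Assuming the argparse flags mirror the attribute names visible in the diff (args.model_name, args.compute_unit, args.use_partitioner), and using an illustrative model name and compute unit, an invocation would look like:

    python examples/apple/coreml/scripts/export.py --model_name mv3 --compute_unit ALL --use_partitioner

With those illustrative values this writes mv3_coreml_ALL.pte, whereas before the revert the non-quantized output would have been named mv3_coreml_ALL_fp.pte.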
