
Commit 83c503e

enable aoti for preprocess ci
ghstack-source-id: bd5aaef Pull Request resolved: #6553
1 parent 09cf982 commit 83c503e

4 files changed: +39 -28 lines changed


.ci/docker/ci_commit_pins/pytorch.txt

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-e47e8794499a4a0130ff4efb8713ff93f4b40c36
+c8a648d4dffb9f0133ff4a2ea0e660b42105d3ad

examples/models/llama3_2_vision/preprocess/export_preprocess.py

Lines changed: 9 additions & 16 deletions
@@ -24,29 +24,22 @@ def main():
         strict=False,
     )
 
-    # Executorch
+    # AOTInductor. Note: export AOTI before ExecuTorch, as
+    # ExecuTorch will modify the ExportedProgram.
+    torch._inductor.aot_compile(
+        ep.module(),
+        model.get_example_inputs(),
+        options={"aot_inductor.output_path": "preprocess_aoti.so"},
+    )
+
+    # Executorch.
     edge_program = to_edge(
         ep, compile_config=EdgeCompileConfig(_check_ir_validity=False)
     )
     et_program = edge_program.to_executorch()
     with open("preprocess_et.pte", "wb") as file:
         et_program.write_to_file(file)
 
-    # Export.
-    # ep = torch.export.export(
-    #     model.get_eager_model(),
-    #     model.get_example_inputs(),
-    #     dynamic_shapes=model.get_dynamic_shapes(),
-    #     strict=False,
-    # )
-    #
-    # # AOTInductor
-    # torch._inductor.aot_compile(
-    #     ep.module(),
-    #     model.get_example_inputs(),
-    #     options={"aot_inductor.output_path": "preprocess_aoti.so"},
-    # )
-
 
 if __name__ == "__main__":
     main()
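
For context, the preprocess_aoti.so written by this script can be smoke-tested from Python with torch._export.aot_load, the same loader the previously commented-out test code used. A minimal sketch, not part of this commit; the input shapes below are purely illustrative and may not match model.get_example_inputs():

import torch

# Load the AOTInductor shared library produced by export_preprocess.py.
aoti_model = torch._export.aot_load("preprocess_aoti.so", "cpu")

# Illustrative inputs only; the real values come from model.get_example_inputs().
image = torch.randn(3, 800, 600)
inscribed_size = torch.tensor([448, 336])
best_resolution = torch.tensor([448, 448])

image_out, aspect_ratio = aoti_model(image, inscribed_size, best_resolution)
print(image_out.shape, aspect_ratio)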

examples/models/llama3_2_vision/preprocess/test_preprocess.py

Lines changed: 28 additions & 10 deletions
@@ -26,6 +26,7 @@
 )
 
 from PIL import Image
+from torch._inductor.package import package_aoti
 
 from torchtune.models.clip.inference._transform import CLIPImageTransform
 
@@ -55,31 +56,46 @@ def initialize_models(resize_to_max_canvas: bool) -> Dict[str, Any]:
         possible_resolutions=None,
     )
 
+    # Eager model.
     model = CLIPImageTransformModel(config)
 
+    # Exported model.
     exported_model = torch.export.export(
         model.get_eager_model(),
         model.get_example_inputs(),
         dynamic_shapes=model.get_dynamic_shapes(),
         strict=False,
     )
 
-    # aoti_path = torch._inductor.aot_compile(
-    #     exported_model.module(),
-    #     model.get_example_inputs(),
-    # )
+    # AOTInductor model.
+    so = torch._export.aot_compile(
+        exported_model.module(),
+        args=model.get_example_inputs(),
+        options={"aot_inductor.package": True},
+        dynamic_shapes=model.get_dynamic_shapes(),
+    )
+    aoti_path = "preprocess.pt2"
+    package_aoti(aoti_path, so)
 
     edge_program = to_edge(
         exported_model, compile_config=EdgeCompileConfig(_check_ir_validity=False)
     )
     executorch_model = edge_program.to_executorch()
 
+    # Re-export as ExecuTorch edits the ExportedProgram.
+    exported_model = torch.export.export(
+        model.get_eager_model(),
+        model.get_example_inputs(),
+        dynamic_shapes=model.get_dynamic_shapes(),
+        strict=False,
+    )
+
     return {
         "config": config,
         "reference_model": reference_model,
         "model": model,
         "exported_model": exported_model,
-        # "aoti_path": aoti_path,
+        "aoti_path": aoti_path,
         "executorch_model": executorch_model,
     }
 
@@ -265,11 +281,13 @@ def run_preprocess(
         ), f"Executorch model: expected {reference_ar} but got {et_ar.tolist()}"
 
         # Run aoti model and check it matches reference model.
-        # aoti_path = models["aoti_path"]
-        # aoti_model = torch._export.aot_load(aoti_path, "cpu")
-        # aoti_image, aoti_ar = aoti_model(image_tensor, inscribed_size, best_resolution)
-        # self.assertTrue(torch.allclose(reference_image, aoti_image))
-        # self.assertEqual(reference_ar, aoti_ar.tolist())
+        aoti_path = models["aoti_path"]
+        aoti_model = torch._inductor.aoti_load_package(aoti_path)
+        aoti_image, aoti_ar = aoti_model(image_tensor, inscribed_size, best_resolution)
+        assert_expected(aoti_image, reference_image, rtol=0, atol=1e-4)
+        assert (
+            reference_ar == aoti_ar.tolist()
+        ), f"AOTI model: expected {reference_ar} but got {aoti_ar.tolist()}"
 
         # This test setup mirrors the one in torchtune:
         # https://github.com/pytorch/torchtune/blob/main/tests/torchtune/models/clip/test_clip_image_transform.py
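
Condensed from the test changes above: the new AOTI path compiles with torch._export.aot_compile, bundles the result into a .pt2 with package_aoti, and loads it back with torch._inductor.aoti_load_package. A minimal self-contained sketch with a toy module standing in for the CLIP image transform (ToyPreprocess and its inputs are illustrative, not from this commit):

import torch
from torch._inductor.package import package_aoti


class ToyPreprocess(torch.nn.Module):
    # Stand-in for CLIPImageTransformModel's eager model.
    def forward(self, x):
        return x * 2.0


model = ToyPreprocess()
example_inputs = (torch.randn(3, 224, 224),)

# Export, then AOT-compile with packaging enabled, mirroring initialize_models().
ep = torch.export.export(model, example_inputs, strict=False)
so = torch._export.aot_compile(
    ep.module(),
    args=example_inputs,
    options={"aot_inductor.package": True},
)

# Bundle the compiled artifacts into a .pt2 package and load it for inference.
package_aoti("toy_preprocess.pt2", so)
loaded = torch._inductor.aoti_load_package("toy_preprocess.pt2")
out = loaded(*example_inputs)
print(out.shape)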

install_requirements.py

Lines changed: 1 addition & 1 deletion
@@ -112,7 +112,7 @@ def python_is_compatible():
 # NOTE: If a newly-fetched version of the executorch repo changes the value of
 # NIGHTLY_VERSION, you should re-run this script to install the necessary
 # package versions.
-NIGHTLY_VERSION = "dev20241030"
+NIGHTLY_VERSION = "dev20241101"
 
 # The pip repository that hosts nightly torch packages.
 TORCH_NIGHTLY_URL = "https://download.pytorch.org/whl/nightly/cpu"
