
Commit 09cf982

Update docs in executorch, remove capture_pre_autograd_graph references

Differential Revision: D65352143
Pull Request resolved: #6613
Parent commit: 3b458a7

2 files changed: 7 additions (+), 8 deletions (-)

backends/apple/coreml/README.md (2 additions, 2 deletions)

@@ -65,7 +65,7 @@ To quantize a Program in a Core ML favored way, the client may utilize **CoreMLQ
 import torch
 import executorch.exir
 
-from torch._export import capture_pre_autograd_graph
+from torch.export import export_for_training
 from torch.ao.quantization.quantize_pt2e import (
     convert_pt2e,
     prepare_pt2e,
@@ -93,7 +93,7 @@ class Model(torch.nn.Module):
 source_model = Model()
 example_inputs = (torch.randn((1, 3, 256, 256)), )
 
-pre_autograd_aten_dialect = capture_pre_autograd_graph(model, example_inputs)
+pre_autograd_aten_dialect = export_for_training(model, example_inputs).module()
 
 quantization_config = LinearQuantizerConfig.from_dict(
     {
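For reference, the updated Core ML quantization flow now reads roughly as follows. This is a minimal sketch assembled from the hunks above; the Model body, the LinearQuantizerConfig contents, and the CoreMLQuantizer/LinearQuantizerConfig import paths are illustrative assumptions and may differ across coremltools and executorch versions.

import torch
from torch.export import export_for_training  # replaces torch._export.capture_pre_autograd_graph
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

# Assumed import paths; verify against your installed coremltools/executorch.
from coremltools.optimize.torch.quantization import LinearQuantizerConfig, QuantizationScheme
from executorch.backends.apple.coreml.quantizer import CoreMLQuantizer


class Model(torch.nn.Module):  # placeholder model body for this sketch
    def forward(self, x):
        return torch.nn.functional.relu(x)


source_model = Model()
example_inputs = (torch.randn((1, 3, 256, 256)),)

# export_for_training(...).module() returns the same pre-autograd ATen module
# that capture_pre_autograd_graph(...) used to return.
pre_autograd_aten_dialect = export_for_training(source_model, example_inputs).module()

# Illustrative config; the README's actual dict contains more fields.
quantization_config = LinearQuantizerConfig.from_dict(
    {"global_config": {"quantization_scheme": QuantizationScheme.symmetric}}
)
quantizer = CoreMLQuantizer(quantization_config)

prepared_graph = prepare_pt2e(pre_autograd_aten_dialect, quantizer)
prepared_graph(*example_inputs)  # run representative inputs through for calibration
converted_graph = convert_pt2e(prepared_graph)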

docs/source/llm/getting-started.md (5 additions, 6 deletions)

@@ -144,8 +144,7 @@ import torch
 
 from executorch.exir import EdgeCompileConfig, to_edge
 from torch.nn.attention import sdpa_kernel, SDPBackend
-from torch._export import capture_pre_autograd_graph
-from torch.export import export
+from torch.export import export, export_for_training
 
 from model import GPT
 
@@ -170,7 +169,7 @@ dynamic_shape = (
 # Trace the model, converting it to a portable intermediate representation.
 # The torch.no_grad() call tells PyTorch to exclude training-specific logic.
 with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
-    m = capture_pre_autograd_graph(model, example_inputs, dynamic_shapes=dynamic_shape)
+    m = export_for_training(model, example_inputs, dynamic_shapes=dynamic_shape).module()
     traced_model = export(m, example_inputs, dynamic_shapes=dynamic_shape)
 
 # Convert the model into a runnable ExecuTorch program.
@@ -462,7 +461,7 @@ from executorch.exir import EdgeCompileConfig, to_edge
 import torch
 from torch.export import export
 from torch.nn.attention import sdpa_kernel, SDPBackend
-from torch._export import capture_pre_autograd_graph
+from torch.export import export_for_training
 
 from model import GPT
 
@@ -489,7 +488,7 @@ dynamic_shape = (
 # Trace the model, converting it to a portable intermediate representation.
 # The torch.no_grad() call tells PyTorch to exclude training-specific logic.
 with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
-    m = capture_pre_autograd_graph(model, example_inputs, dynamic_shapes=dynamic_shape)
+    m = export_for_training(model, example_inputs, dynamic_shapes=dynamic_shape).module()
     traced_model = export(m, example_inputs, dynamic_shapes=dynamic_shape)
 
 # Convert the model into a runnable ExecuTorch program.
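The tracing hunks above all follow the same two-stage pattern: export_for_training(...).module() yields the pre-autograd module, and torch.export.export then produces the graph handed to to_edge. Below is a minimal, self-contained sketch of that pattern; the TinyLM module, input shapes, dynamic-shape spec, and EdgeCompileConfig flags are placeholders standing in for the guide's GPT setup.

import torch
from executorch.exir import EdgeCompileConfig, to_edge
from torch.export import Dim, export, export_for_training
from torch.nn.attention import sdpa_kernel, SDPBackend


class TinyLM(torch.nn.Module):  # placeholder standing in for the guide's GPT model
    def __init__(self):
        super().__init__()
        self.emb = torch.nn.Embedding(128, 16)
        self.head = torch.nn.Linear(16, 128)

    def forward(self, idx):
        return self.head(self.emb(idx))


model = TinyLM()
example_inputs = (torch.randint(0, 128, (1, 8), dtype=torch.long),)
# Illustrative dynamic shape: let the sequence dimension vary up to 16 tokens.
dynamic_shape = ({1: Dim("token_dim", max=16)},)

# Trace the model, converting it to a portable intermediate representation.
with sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
    m = export_for_training(model, example_inputs, dynamic_shapes=dynamic_shape).module()
    traced_model = export(m, example_inputs, dynamic_shapes=dynamic_shape)

# Convert the model into a runnable ExecuTorch program.
edge_manager = to_edge(traced_model, compile_config=EdgeCompileConfig(_check_ir_validity=False))
et_program = edge_manager.to_executorch()

The remaining hunk in this file applies the same substitution to the XNNPACK quantization example: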
@@ -635,7 +634,7 @@ xnnpack_quant_config = get_symmetric_quantization_config(
 xnnpack_quantizer = XNNPACKQuantizer()
 xnnpack_quantizer.set_global(xnnpack_quant_config)
 
-m = capture_pre_autograd_graph(model, example_inputs)
+m = export_for_training(model, example_inputs).module()
 
 # Annotate the model for quantization. This prepares the model for calibration.
 m = prepare_pt2e(m, xnnpack_quantizer)
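A minimal sketch of the surrounding XNNPACK flow with the new call is shown here. The quantizer import path and the get_symmetric_quantization_config arguments are assumptions (the quantizer has also shipped under executorch.backends.xnnpack.quantizer.xnnpack_quantizer in other releases), and the model is a placeholder.

import torch
from torch.export import export_for_training
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

# Assumed import path; it may differ depending on your torch/executorch versions.
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())  # placeholder model
example_inputs = (torch.randn(1, 8),)

# Illustrative config arguments; the guide's exact settings may differ.
xnnpack_quant_config = get_symmetric_quantization_config(is_per_channel=True, is_dynamic=True)
xnnpack_quantizer = XNNPACKQuantizer()
xnnpack_quantizer.set_global(xnnpack_quant_config)

# export_for_training(...).module() replaces capture_pre_autograd_graph(...).
m = export_for_training(model, example_inputs).module()

# Annotate the model for quantization, calibrate with representative inputs, then convert.
m = prepare_pt2e(m, xnnpack_quantizer)
m(*example_inputs)
m = convert_pt2e(m)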
