
Commit bdd45d1

guangy10 authored and facebook-github-bot committed
Fix export config in examples (#34)
Summary:
Pull Request resolved: #34

It doesn't look correct to export models with one config but test the export with a different one, and certain flags need more clarification/documentation. This diff ensures the canonical config is used for both export and tests.

TODO: Rerun the existing tests that failed with a message about missing required positional arguments. It's likely related to the params lifting and unlifting. Will figure it out.

Differential Revision: D48018569

fbshipit-source-id: fe45e72c0f406715d4c3a68e4dd1434a4d9caf58
1 parent 4a5e147 commit bdd45d1

File tree

4 files changed: +9, -7 lines

examples/export/test/test_export.py

Lines changed: 3 additions & 4 deletions

@@ -8,7 +8,7 @@
 
 import torch
 
-from executorch.examples.export.utils import _EDGE_COMPILE_CONFIG
+from executorch.examples.export.utils import _CAPTURE_CONFIG, _EDGE_COMPILE_CONFIG
 from executorch.examples.models import MODEL_NAME_TO_MODEL
 
 
@@ -18,16 +18,15 @@ def _assert_eager_lowered_same_result(
     ):
         import executorch.exir as exir
 
-        capture_config = exir.CaptureConfig(enable_dynamic_shape=False)
-        edge_model = exir.capture(eager_model, example_inputs, capture_config).to_edge(
+        edge_model = exir.capture(eager_model, example_inputs, _CAPTURE_CONFIG).to_edge(
             _EDGE_COMPILE_CONFIG
         )
 
         executorch_model = edge_model.to_executorch()
         with torch.no_grad():
             eager_output = eager_model(*example_inputs)
         with torch.no_grad():
-            executorch_output = executorch_model.graph_module(*example_inputs)
+            executorch_output = executorch_model(*example_inputs)
         self.assertTrue(
             torch.allclose(eager_output, executorch_output[0], rtol=1e-5, atol=1e-5)
         )
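
Taken together with the utils.py change below, the test now lowers and runs a model with the same canonical _CAPTURE_CONFIG and _EDGE_COMPILE_CONFIG used for export. A minimal sketch of that flow, using a hypothetical toy module (AddOne) rather than an entry from MODEL_NAME_TO_MODEL:

import torch
import executorch.exir as exir

from executorch.examples.export.utils import _CAPTURE_CONFIG, _EDGE_COMPILE_CONFIG


class AddOne(torch.nn.Module):
    # Hypothetical toy model used only for illustration.
    def forward(self, x):
        return x + 1


eager_model = AddOne().eval()
example_inputs = (torch.randn(2, 3),)

# Capture and lower with the shared canonical configs.
edge_model = exir.capture(eager_model, example_inputs, _CAPTURE_CONFIG).to_edge(
    _EDGE_COMPILE_CONFIG
)
executorch_model = edge_model.to_executorch()

with torch.no_grad():
    eager_output = eager_model(*example_inputs)
    # After this change the lowered program is called directly,
    # not through executorch_model.graph_module.
    executorch_output = executorch_model(*example_inputs)

assert torch.allclose(eager_output, executorch_output[0], rtol=1e-5, atol=1e-5)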

examples/export/utils.py

Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@
 # Reason is that there memory allocation ops with symbolic shape nodes.
 # and when evaulating shape, it doesnt seem that we presenting them with shape env
 # that contain those variables.
-_CAPTURE_CONFIG = exir.CaptureConfig(enable_aot=True, _unlift=False)
+_CAPTURE_CONFIG = exir.CaptureConfig(enable_aot=True)
 _EDGE_COMPILE_CONFIG = exir.EdgeCompileConfig(
     _check_ir_validity=False,
 )
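
Since _unlift already defaults to False on CaptureConfig (see the exir/capture/_config.py hunk below), dropping the explicit argument leaves the capture behavior unchanged. A small sketch of that equivalence, assuming only the defaults shown in this commit:

import executorch.exir as exir

# Explicit form used before this commit and the canonical form used after it.
explicit = exir.CaptureConfig(enable_aot=True, _unlift=False)
canonical = exir.CaptureConfig(enable_aot=True)

# _unlift defaults to False, so the two configs agree on the relevant fields.
assert explicit.enable_aot == canonical.enable_aot
assert explicit._unlift == canonical._unlift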

exir/capture/_config.py

Lines changed: 2 additions & 2 deletions

@@ -19,8 +19,8 @@
 class CaptureConfig:
     pt2_mode: bool = True
     enable_functionalization: bool = True
-    enable_dynamic_shape: bool = False
-    enable_aot: bool = False
+    enable_dynamic_shape: bool = False  # This flag does nothing if enable_aot is True
+    enable_aot: bool = False  # When it's true it implies automatic dynamic shapes via default dynamo config
     _dynamo_config: "ExirDynamoConfig" = field(default_factory=ExirDynamoConfig)
     _unlift: bool = False
     _use_old_decomp_table: bool = False
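
The new comments spell out the flag interaction: with enable_aot=True, automatic dynamic shapes come from the default dynamo config and enable_dynamic_shape has no effect. A brief sketch of the two modes as they read from the documented defaults (the values are illustrative only):

import executorch.exir as exir

# Non-AOT capture: dynamic shapes must be requested explicitly.
non_aot_dynamic = exir.CaptureConfig(enable_dynamic_shape=True)

# AOT capture: automatic dynamic shapes via the default dynamo config;
# enable_dynamic_shape would be ignored here, so it is left at its default.
aot = exir.CaptureConfig(enable_aot=True)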

exir/program/_program.py

Lines changed: 3 additions & 0 deletions

@@ -129,6 +129,9 @@ def __init__(
         self._constant_tensor_alignment: Optional[int] = constant_tensor_alignment
         self._delegate_alignment: Optional[int] = delegate_alignment
 
+    def __call__(self, *args: Any) -> Any:
+        return self.exported_program(*args)
+
     @property
     def buffer(self) -> bytes:
         if self._buffer is None:
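
The added __call__ forwards invocations to the wrapped exported program, which is what lets the test above call executorch_model(*example_inputs) directly instead of reaching into .graph_module. A self-contained toy stand-in (not the real class) showing the same delegation pattern:

from typing import Any, Callable


class CallableWrapper:
    """Toy stand-in for illustration only, not the real ExecuTorch program class."""

    def __init__(self, fn: Callable[..., Any]) -> None:
        self.exported_program = fn

    def __call__(self, *args: Any) -> Any:
        # Mirror the added __call__: delegate to the wrapped program.
        return self.exported_program(*args)


wrapper = CallableWrapper(lambda x: x + 1)
assert wrapper(41) == 42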
