
Commit be30614

guangy10 authored and facebook-github-bot committed

Restructuring demos - aot
Differential Revision: D49799049
fbshipit-source-id: f91cbc43b2e15ec12a6bd0a9b8ef0274b7cf931b
1 parent 4c71a97 commit be30614

File tree: 24 files changed, +116 −79 lines

.ci/scripts/gather_test_models.py

Lines changed: 3 additions & 2 deletions

@@ -10,7 +10,7 @@
 from typing import Any

 from examples.models import MODEL_NAME_TO_MODEL
-from examples.recipes.xnnpack_optimization import MODEL_NAME_TO_OPTIONS
+from examples.recipes.xnnpack import MODEL_NAME_TO_OPTIONS

 BUILD_TOOLS = [
     "buck2",
@@ -51,7 +51,8 @@ def export_models_for_ci() -> None:
     for name in MODEL_NAME_TO_MODEL.keys():
         quantization_configs = {
             False,
-            name in MODEL_NAME_TO_OPTIONS and MODEL_NAME_TO_OPTIONS[name].quantization,
+            name in MODEL_NAME_TO_OPTIONS
+            and MODEL_NAME_TO_OPTIONS[name].xnnpack_quantization,
         }
         delegation_configs = {
             False,
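
Note: the rebuilt set literal above always contains `False` and adds `True` only when a model's recipe opts in, so every model is tested unquantized and, where enabled, quantized. A minimal standalone sketch of that logic (the helper name is ours, not the repo's):

```python
# Hypothetical helper mirroring the CI logic above; xnnpack_quantization is
# the field this commit renames from the more ambiguous `quantization`.
from examples.recipes.xnnpack import MODEL_NAME_TO_OPTIONS


def quantization_configs_for(name: str) -> set:
    # Always test the unquantized path; add the quantized path only when
    # the model's recipe enables XNNPACK quantization.
    return {
        False,
        name in MODEL_NAME_TO_OPTIONS
        and MODEL_NAME_TO_OPTIONS[name].xnnpack_quantization,
    }
```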

.ci/scripts/test.sh

Lines changed: 7 additions & 7 deletions

@@ -53,7 +53,7 @@ build_cmake_executor_runner() {
 }

 test_model() {
-  "${PYTHON_EXECUTABLE}" -m examples.export.export_example --model_name="${MODEL_NAME}"
+  "${PYTHON_EXECUTABLE}" -m examples.export.portable --model_name="${MODEL_NAME}"

   # Run test model
   if [[ "${BUILD_TOOL}" == "buck2" ]]; then
@@ -92,17 +92,17 @@ test_model_with_xnnpack() {

   # Quantization-only
   if [[ ${WITH_QUANTIZATION} == true ]] && [[ ${WITH_DELEGATION} == false ]]; then
-    bash examples/quantization/test_quantize.sh "${BUILD_TOOL}" "${MODEL_NAME}"
+    bash examples/quantization/quant_flow/test_quantize.sh "${BUILD_TOOL}" "${MODEL_NAME}"
     exit 0
   fi

   # Delegation
   if [[ ${WITH_QUANTIZATION} == true ]]; then
     SUFFIX="q8"
-    "${PYTHON_EXECUTABLE}" -m examples.backend.xnnpack_examples --model_name="${MODEL_NAME}" --delegate --quantize
+    "${PYTHON_EXECUTABLE}" -m examples.recipes.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate --quantize
   else
     SUFFIX="fp32"
-    "${PYTHON_EXECUTABLE}" -m examples.backend.xnnpack_examples --model_name="${MODEL_NAME}" --delegate
+    "${PYTHON_EXECUTABLE}" -m examples.recipes.xnnpack.aot_compiler --model_name="${MODEL_NAME}" --delegate
   fi

   OUTPUT_MODEL_PATH="${MODEL_NAME}_xnnpack_${SUFFIX}.pte"
@@ -123,9 +123,9 @@ test_model_with_xnnpack() {

 test_demo_backend_delegation() {
   echo "Testing demo backend delegation on AddMul"
-  "${PYTHON_EXECUTABLE}" -m examples.export.export_and_delegate --option "composite"
-  "${PYTHON_EXECUTABLE}" -m examples.export.export_and_delegate --option "partition"
-  "${PYTHON_EXECUTABLE}" -m examples.export.export_and_delegate --option "whole"
+  "${PYTHON_EXECUTABLE}" -m examples.recipes.export_and_delegate --option "composite"
+  "${PYTHON_EXECUTABLE}" -m examples.recipes.export_and_delegate --option "partition"
+  "${PYTHON_EXECUTABLE}" -m examples.recipes.export_and_delegate --option "whole"

   # Run test model
   if [[ "${BUILD_TOOL}" == "buck2" ]]; then

docs/source/getting-started-setup.md

Lines changed: 3 additions & 3 deletions

@@ -105,17 +105,17 @@ how to do it, we will generate an ExecuTorch program file from an `nn.Module`.
 You can generate an ExecuTorch program by using a sample script or by using
 the Python interpreter.

-We have created the `export_example.py` script that demonstrates a simple model
+We have created the `portable.py` script that demonstrates a simple model
 export to flatbuffer. This script is available
 in the [pytorch/executorch](https://github.com/pytorch/executorch/tree/main/examples/export)
 repository.

 To generate a sample program, complete the following steps:

-1. Run the `export_example.py` script:
+1. Run the `portable.py` script:

    ```bash
-   python3 -m examples.export.export_example --model_name="add"
+   python3 -m examples.export.portable --model_name="add"
    ```

 :::{dropdown} Output

docs/website/docs/tutorials/00_setting_up_executorch.md

Lines changed: 2 additions & 2 deletions

@@ -58,10 +58,10 @@ corresponsing version of the repo.
 Via python script:
 ```bash
 # Creates the file `add.pte`
-python3 -m examples.export.export_example --model_name="add"
+python3 -m examples.export.portable --model_name="add"

 # Creates the delegated program `composite_model.pte`, other options are "whole" and "partition"
-python3 -m examples.export.export_and_delegate --option "composite"
+python3 -m examples.recipes.export_and_delegate --option "composite"
 ```

 Or via python interpreter:

examples/README.md

Lines changed: 4 additions & 4 deletions

@@ -10,7 +10,7 @@ examples
 |── backend # Contains examples for exporting delegate models and running them using custom executor runners
 ├── custom_ops # Contains examples to register custom operators into PyTorch as well as register its kernels into Executorch runtime
 ├── example_quantizer_and_delegate # Contains examples to to fully lowered a MobileNetV2 model to the example backend with an example quantizer
-├── export # Python helper scripts to illustrate export workflow
+├── export # Contains scripts to illustrate export workflow in portable mode
 ├── ios_demo_apps # Contains iOS demo apps
 ├── models # Contains a set of out-of-box PyTorch models
 ├── quantization # Contains examples of quantization workflow
@@ -31,18 +31,18 @@ and executing previously exported binary file(s).
 1. Following the setup guide in [Setting up ExecuTorch from GitHub](/docs/website/docs/tutorials/00_setting_up_executorch.md)
 you should be able to get the basic development environment for Executorch working.

-2. Using the script `export/export_example.py` generate a model binary file by selecting a
+2. Using the example script `portable/aot_compiler.py` generate a model binary file by selecting a
 model name from the list of available models in the `models` dir.


 ```bash
 cd executorch # To the top level dir

 # To get a list of example models
-python3 -m examples.export.export_example -h
+python3 -m examples.export.portable -h

 # To generate a specific pte model
-python3 -m examples.export.export_example --model_name="mv2" # for MobileNetv2
+python3 -m examples.export.portable --model_name="mv2" # for MobileNetv2

 # This should generate ./mv2.pte file, if successful.
 ```

examples/custom_ops/custom_ops_1.py

Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@

 """Example of showcasing registering custom operator through torch library API."""
 import torch
-from examples.export.export_example import export_to_exec_prog, save_pte_program
+from examples.export.utils import export_to_exec_prog, save_pte_program

 from executorch.exir import EdgeCompileConfig
 from torch.library import impl, Library

examples/custom_ops/custom_ops_2.py

Lines changed: 1 addition & 1 deletion

@@ -11,7 +11,7 @@
 import argparse

 import torch
-from examples.export.export_example import export_to_exec_prog, save_pte_program
+from examples.export.utils import export_to_exec_prog, save_pte_program
 from executorch.exir import EdgeCompileConfig

examples/export/export_example.py renamed to examples/export/portable.py

Lines changed: 1 addition & 0 deletions

@@ -11,6 +11,7 @@

 from ..models import MODEL_NAME_TO_MODEL
 from ..models.model_factory import EagerModelFactory
+
 from .utils import export_to_exec_prog, save_pte_program

examples/ios_demo_apps/README.md

Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@ and UI looks like

 ![](./executorch_mobilenet_ui.png)

-Step 1. Export a MobileNetV2 model following example follwing executorch/examples/export/export_example.py. Instead of export mv2 directly, add a softmax at the end
+Step 1. Export a MobileNetV2 model following example follwing executorch/examples/export/portable.py. Instead of export mv2 directly, add a softmax at the end
 ```python
 class MobileNetV2Wrapper(torch.nn.Module):
     def __init__(self):
examples/quantization/quant_flow/TARGETS (new file; path inferred from the quant_flow renames below)

Lines changed: 26 additions & 0 deletions

@@ -0,0 +1,26 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+runtime.python_binary(
+    name = "example",
+    main_src = "example.py",
+    preload_deps = ["//executorch/kernels/quantized:aot_lib"],
+    deps = [
+        ":quant_utils",
+        "//caffe2:torch",
+        "//executorch/examples/export:lib",
+        "//executorch/examples/recipes/xnnpack:models",
+    ],
+)
+
+runtime.python_library(
+    name = "quant_utils",
+    srcs = [
+        "utils.py",
+    ],
+    visibility = [
+        "//executorch/examples/...",
+    ],
+    deps = [
+        "//caffe2:torch",
+    ],
+)

examples/quantization/example.py renamed to examples/quantization/quant_flow/example.py

Lines changed: 6 additions & 5 deletions

@@ -28,10 +28,10 @@
     XNNPACKQuantizer,
 )

-from ..export.utils import export_to_edge, save_pte_program
-from ..models import MODEL_NAME_TO_MODEL
-from ..models.model_factory import EagerModelFactory
-from ..recipes.xnnpack_optimization import MODEL_NAME_TO_OPTIONS
+from ...export.utils import export_to_edge, save_pte_program
+from ...models import MODEL_NAME_TO_MODEL
+from ...models.model_factory import EagerModelFactory
+from ...recipes.xnnpack import MODEL_NAME_TO_OPTIONS

 from .utils import quantize

@@ -176,8 +176,9 @@ def verify_xnnpack_quantizer_matching_fx_quant_model(model_name, model, example_
     model = model.eval()
     # pre-autograd export. eventually this will become torch.export
     model = export.capture_pre_autograd_graph(model, example_inputs)
+    quantizer = XNNPACKQuantizer()
     start = time.perf_counter()
-    quantized_model = quantize(model, example_inputs)
+    quantized_model = quantize(model, example_inputs, quantizer)
     end = time.perf_counter()
     logging.info(f"Quantize time: {end - start}s")

examples/quantization/test_quantize.sh renamed to examples/quantization/quant_flow/test_quantize.sh

Lines changed: 2 additions & 2 deletions

@@ -32,7 +32,7 @@ test_buck2_quantization() {
   SO_LIB=$($BUCK build //kernels/quantized:aot_lib --show-output | grep "buck-out" | cut -d" " -f2)

   echo "Run example.py"
-  ${PYTHON_EXECUTABLE} -m "examples.quantization.example" --so_library="$SO_LIB" --model_name="$1"
+  ${PYTHON_EXECUTABLE} -m "examples.quantization.quant_flow.example" --so_library="$SO_LIB" --model_name="$1"

   echo 'Running executor_runner'
   $BUCK run //examples/runtime/portable:executor_runner -- --model_path="./${1}_quantized.pte"
@@ -62,7 +62,7 @@ test_cmake_quantization() {
   SO_LIB="cmake-out/kernels/quantized/libquantized_ops_aot_lib$EXT"

   echo "Run example.py, shared library $SO_LIB"
-  ${PYTHON_EXECUTABLE} -m "examples.quantization.example" --so_library="$SO_LIB" --model_name="$1"
+  ${PYTHON_EXECUTABLE} -m "examples.quantization.quant_flow.example" --so_library="$SO_LIB" --model_name="$1"

   echo 'Running executor_runner'
   cmake-out/executor_runner --model_path="./${1}_quantized.pte"

examples/quantization/utils.py renamed to examples/quantization/quant_flow/utils.py

Lines changed: 4 additions & 4 deletions

@@ -9,14 +9,14 @@
 from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
 from torch.ao.quantization.quantizer.xnnpack_quantizer import (
     get_symmetric_quantization_config,
-    XNNPACKQuantizer,
 )


-def quantize(model, example_inputs):
-    """This is the official recommended flow for quantization in pytorch 2.0 export"""
+def quantize(model, example_inputs, quantizer):
+    """
+    This is the official recommended flow for quantization in pytorch 2.0 export
+    """
     logging.info(f"Original model: {model}")
-    quantizer = XNNPACKQuantizer()
     # if we set is_per_channel to True, we also need to add out_variant of quantize_per_channel/dequantize_per_channel
     operator_config = get_symmetric_quantization_config(is_per_channel=False)
     quantizer.set_global(operator_config)
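
Note: the net effect of this hunk is dependency inversion — `quantize` no longer constructs `XNNPACKQuantizer` itself, so any PT2E quantizer can be injected. A hedged sketch of the new calling convention, mirroring the `example.py` hunk above (the `Linear` model is ours; `capture_pre_autograd_graph` is assumed to live under `torch._export`, as the `export.` prefix in that hunk suggests):

```python
import torch
from torch._export import capture_pre_autograd_graph  # assumed location
from torch.ao.quantization.quantizer.xnnpack_quantizer import XNNPACKQuantizer

from examples.quantization.quant_flow.utils import quantize

model = torch.nn.Linear(4, 2).eval()   # illustrative model
example_inputs = (torch.randn(1, 4),)

# Pre-autograd capture first, as example.py does before calling quantize().
model = capture_pre_autograd_graph(model, example_inputs)

# The caller now owns quantizer construction and can swap in alternatives.
quantized_model = quantize(model, example_inputs, XNNPACKQuantizer())
```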

examples/recipes/TARGETS

Lines changed: 13 additions & 0 deletions

@@ -0,0 +1,13 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+runtime.python_binary(
+    name = "export_bundled_program",
+    main_src = "export_bundled_program.py",
+    deps = [
+        "//executorch/bundled_program:config",
+        "//executorch/bundled_program:core",
+        "//executorch/bundled_program/serialize:lib",
+        "//executorch/examples/export:utils",
+        "//executorch/examples/models:models",
+    ],
+)

examples/export/export_bundled_program.py renamed to examples/recipes/export_bundled_program.py

Lines changed: 7 additions & 2 deletions

@@ -8,16 +8,21 @@

 import argparse

+import logging
+
 from executorch.bundled_program.config import BundledConfig
 from executorch.bundled_program.core import create_bundled_program
 from executorch.bundled_program.serialize import (
     serialize_from_bundled_program_to_flatbuffer,
 )

+from ..export.utils import export_to_exec_prog, save_pte_program
+
 from ..models import MODEL_NAME_TO_MODEL
 from ..models.model_factory import EagerModelFactory

-from .utils import export_to_exec_prog, save_pte_program
+FORMAT = "[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s"
+logging.basicConfig(level=logging.INFO, format=FORMAT)


 def save_bundled_program(
@@ -62,7 +67,7 @@ def export_to_pte(model_name, model, example_inputs):
     # create a list with the example_inputs tuple used twice. Each instance of example_inputs
     # is a Tuple[Union[torch.tenor, int, bool]] which represents one test set for the model.
     bundled_inputs = [example_inputs, example_inputs]
-    print(f"Saving exported program to {model_name}_bundled.pte")
+    logging.info(f"Saving exported program to {model_name}_bundled.pte")
     save_bundled_program(bundled_inputs, exec_prog, model, f"{model_name}_bundled.pte")


examples/backend/README.md renamed to examples/recipes/xnnpack/README.md

Lines changed: 3 additions & 3 deletions

@@ -10,7 +10,7 @@ The following command will produce an floating-point XNNPACK delegated model `mv

 ```bash
 # For MobileNet V2
-python3 -m examples.backend.xnnpack_examples --model_name="mv2" --delegate
+python3 -m examples.recipes.xnnpack.aot_compiler --model_name="mv2" --delegate
 ```

 Once we have the model binary (pte) file, then let's run it with Executorch runtime using the `xnn_executor_runner`.
@@ -23,7 +23,7 @@ buck2 run examples/runtime/xnnpack:xnn_executor_runner -- --model_path ./mv2_xnn
 The following command will produce an XNNPACK quantized and delegated model `mv2_xnnpack_q8.pte` that can be run using XNNPACK's operators. It will also print out the lowered graph, showing what parts of the models have been lowered to XNNPACK via `executorch_call_delegate`.

 ```bash
-python3 -m examples.backend.xnnpack_examples --model_name="mv2" --quantize --delegate
+python3 -m examples.recipes.xnnpack.aot_compiler --model_name="mv2" --quantize --delegate
 ```

 Once we have the model binary (pte) file, then let's run it with Executorch runtime using the `xnn_executor_runner`.
@@ -44,7 +44,7 @@ We build the benchmarking binary (will be released in the near future, but it is

 ### Methodology

-Models are exported with the steps above for XNNPACK delegation, and with `examples/export:export_example` for portable backend without any optimization. Then use `//examples/runtime/xnnpack:xnn_executor_runner` with profiler (command listed below); or in the future, use the runtime in `//sdk/runners:executor_runner` since it gives more options such as number of iterations after build rules for OSS is added.
+Models are exported with the steps above for XNNPACK delegation, and with `examples/export/portable.py` for portable backend without any optimization. Then use `//examples/runtime/xnnpack:xnn_executor_runner` with profiler (command listed below); or in the future, use the runtime in `//sdk/runners:executor_runner` since it gives more options such as number of iterations after build rules for OSS is added.

 ```
 buck run -c executorch.prof_enabled=true -c executorch.prof_buf_size=8096 -c executorch.num_prof_blocks=61 //examples/runtime/xnnpack:xnn_executor_runner -- --model_path mv3.pte
File renamed without changes.

examples/recipes/xnnpack_optimization/models.py renamed to examples/recipes/xnnpack/__init__.py

Lines changed: 4 additions & 1 deletion

@@ -4,12 +4,13 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.

+
 from dataclasses import dataclass


 @dataclass
 class OptimizationOptions(object):
-    quantization: bool
+    xnnpack_quantization: bool
     xnnpack_delegation: bool

@@ -29,3 +30,5 @@ class OptimizationOptions(object):
     "edsr": OptimizationOptions(True, False),
     "mobilebert": OptimizationOptions(True, False),
 }
+
+__all__ = [MODEL_NAME_TO_OPTIONS]
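
Note: with the rename, both fields now name the backend they apply to, so lookups read unambiguously at call sites. A small illustration (the model key is an arbitrary example):

```python
# Illustrative lookup against the table defined above; "mv2" is an
# arbitrary example key and may or may not be present in a given checkout.
from examples.recipes.xnnpack import MODEL_NAME_TO_OPTIONS

opts = MODEL_NAME_TO_OPTIONS.get("mv2")
if opts and opts.xnnpack_quantization:
    print("mv2: run the XNNPACK quantization recipe")
if opts and opts.xnnpack_delegation:
    print("mv2: delegate to the XNNPACK backend")
```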

examples/backend/xnnpack_examples.py renamed to examples/recipes/xnnpack/aot_compiler.py

Lines changed: 9 additions & 6 deletions

@@ -15,12 +15,14 @@
 from executorch.exir import EdgeCompileConfig
 from executorch.exir.backend.backend_api import to_backend

-from ..export.utils import export_to_edge, save_pte_program
+from torch.ao.quantization.quantizer.xnnpack_quantizer import XNNPACKQuantizer

-from ..models import MODEL_NAME_TO_MODEL
-from ..models.model_factory import EagerModelFactory
-from ..quantization.utils import quantize
-from ..recipes.xnnpack_optimization import MODEL_NAME_TO_OPTIONS
+from ...export.utils import export_to_edge, save_pte_program
+
+from ...models import MODEL_NAME_TO_MODEL
+from ...models.model_factory import EagerModelFactory
+from ...quantization.quant_flow.utils import quantize
+from . import MODEL_NAME_TO_OPTIONS


 FORMAT = "[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s"
@@ -77,7 +79,8 @@

 if args.quantize:
     logging.info("Quantizing Model...")
-    model = quantize(model, example_inputs)
+    quantizer = XNNPACKQuantizer()
+    model = quantize(model, example_inputs, quantizer)

 edge = export_to_edge(
     model,
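
Note: taken together, the renamed `aot_compiler.py` now reads: resolve the model, optionally quantize with an explicitly constructed `XNNPACKQuantizer`, export to edge, lower via `to_backend`, and serialize. A condensed, non-authoritative sketch of that flow using only names visible in this diff; the helper signatures and final serialization call are assumptions, and the `to_backend` lowering step is elided because its partitioner argument is not shown in these hunks:

```python
# Hedged sketch of the AOT flow in the renamed aot_compiler.py; signatures
# are assumed from this diff's call sites, not confirmed against the repo.
from torch._export import capture_pre_autograd_graph  # assumed location
from torch.ao.quantization.quantizer.xnnpack_quantizer import XNNPACKQuantizer

from examples.export.utils import export_to_edge, save_pte_program
from examples.models import MODEL_NAME_TO_MODEL
from examples.models.model_factory import EagerModelFactory
from examples.quantization.quant_flow.utils import quantize
from examples.recipes.xnnpack import MODEL_NAME_TO_OPTIONS

model_name = "mv2"  # illustrative; any key of MODEL_NAME_TO_MODEL
model, example_inputs = EagerModelFactory.create_model(
    *MODEL_NAME_TO_MODEL[model_name]
)

# Optional PT2E quantization, with the quantizer now built by the caller.
if MODEL_NAME_TO_OPTIONS[model_name].xnnpack_quantization:
    model = capture_pre_autograd_graph(model.eval(), example_inputs)
    model = quantize(model, example_inputs, XNNPACKQuantizer())

edge = export_to_edge(model, example_inputs)
# ... in the real script, to_backend(...) lowers supported subgraphs to
# XNNPACK here before serialization ...
save_pte_program(edge.to_executorch().buffer, f"{model_name}_xnnpack")  # assumed API
```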
