Skip to content

Commit 59b5088

Browse files
author
Github Executorch
committed
Update on "Integrate torchgen exception boundary with ExecuTorch"
As of #7746, we build with exceptions by default, so we just need to use them. Differential Revision: [D67904052](https://our.internmc.facebook.com/intern/diff/D67904052/) [ghstack-poisoned]
2 parents aff01f5 + fd02b49 commit 59b5088

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

72 files changed

+2063
-527
lines changed

.ci/docker/ci_commit_pins/pytorch.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
0a94bb432ed75cc2d950d81b2921363218a7e459
1+
27e35de6c288bffad1b4d18b393579c1d1a95547

.ci/docker/conda-env-ci.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
cmake=3.22.1
22
ninja=1.10.2
33
libuv
4+
llvm-openmp
45
pkg-config

.ci/scripts/setup-macos.sh

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,7 @@ setup_macos_env_variables
121121
# NB: we need buck2 in all cases because cmake build also depends on calling
122122
# buck2 atm
123123
install_buck
124+
brew install libomp
124125
install_pip_dependencies
125126

126127
# TODO(huydhn): Unlike our self-hosted runner, GitHub runner doesn't have access

.github/workflows/pull.yml

Lines changed: 2 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -212,17 +212,14 @@ jobs:
212212
docker-image: executorch-ubuntu-22.04-clang12
213213
submodules: 'true'
214214
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
215-
timeout: 90
215+
timeout: 180
216216
script: |
217217
# The generic Linux job chooses to use base env, not the one setup by the image
218218
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
219219
conda activate "${CONDA_ENV}"
220220
221221
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
222222
223-
# install pybind
224-
bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
225-
226223
# install Llava requirements
227224
bash examples/models/llama/install_requirements.sh
228225
bash examples/models/llava/install_requirements.sh
@@ -483,9 +480,6 @@ jobs:
483480
484481
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
485482
486-
# install pybind
487-
bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
488-
489483
# install phi-3-mini requirements
490484
bash examples/models/phi-3-mini/install_requirements.sh
491485
@@ -513,9 +507,6 @@ jobs:
513507
514508
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
515509
516-
# install pybind
517-
bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
518-
519510
# install llama requirements
520511
bash examples/models/llama/install_requirements.sh
521512
@@ -535,17 +526,14 @@ jobs:
535526
docker-image: executorch-ubuntu-22.04-clang12
536527
submodules: 'true'
537528
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
538-
timeout: 90
529+
timeout: 180
539530
script: |
540531
# The generic Linux job chooses to use base env, not the one setup by the image
541532
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
542533
conda activate "${CONDA_ENV}"
543534
544535
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
545536
546-
# install pybind
547-
bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
548-
549537
# install llama requirements
550538
bash examples/models/llama/install_requirements.sh
551539
@@ -573,9 +561,6 @@ jobs:
573561
574562
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "cmake"
575563
576-
# install pybind
577-
bash install_executorch.sh --pybind xnnpack --use-pt-pinned-commit
578-
579564
# install llama requirements
580565
bash examples/models/llama/install_requirements.sh
581566

.mypy.ini

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,9 @@ ignore_missing_imports = True
7777
[mypy-ruamel]
7878
ignore_missing_imports = True
7979

80+
[mypy-serializer.*]
81+
ignore_missing_imports = True
82+
8083
[mypy-setuptools.*]
8184
ignore_missing_imports = True
8285

CMakeLists.txt

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -240,6 +240,13 @@ cmake_dependent_option(
240240
"NOT EXECUTORCH_BUILD_ARM_BAREMETAL" OFF
241241
)
242242

243+
244+
if(EXECUTORCH_BUILD_EXTENSION_TRAINING)
245+
set(EXECUTORCH_BUILD_EXTENSION_TENSOR ON)
246+
set(EXECUTORCH_BUILD_EXTENSION_DATA_LOADER ON)
247+
set(EXECUTORCH_BUILD_EXTENSION_MODULE ON)
248+
endif()
249+
243250
if(EXECUTORCH_BUILD_KERNELS_CUSTOM_AOT)
244251
set(EXECUTORCH_BUILD_EXTENSION_TENSOR ON)
245252
set(EXECUTORCH_BUILD_KERNELS_CUSTOM ON)
@@ -802,6 +809,35 @@ if(EXECUTORCH_BUILD_PYBIND)
802809
install(TARGETS portable_lib
803810
LIBRARY DESTINATION executorch/extension/pybindings
804811
)
812+
813+
if(EXECUTORCH_BUILD_EXTENSION_TRAINING)
814+
815+
set(_pybind_training_dep_libs
816+
${TORCH_PYTHON_LIBRARY}
817+
etdump
818+
executorch
819+
util
820+
torch
821+
extension_training
822+
)
823+
824+
if(EXECUTORCH_BUILD_XNNPACK)
825+
# need to explicitly specify XNNPACK and microkernels-prod
826+
# here otherwise uses XNNPACK and microkernel-prod symbols from libtorch_cpu
827+
list(APPEND _pybind_training_dep_libs xnnpack_backend XNNPACK microkernels-prod)
828+
endif()
829+
830+
# pybind training
831+
pybind11_add_module(_training_lib SHARED extension/training/pybindings/_training_lib.cpp)
832+
833+
target_include_directories(_training_lib PRIVATE ${TORCH_INCLUDE_DIRS})
834+
target_compile_options(_training_lib PUBLIC ${_pybind_compile_options})
835+
target_link_libraries(_training_lib PRIVATE ${_pybind_training_dep_libs})
836+
837+
install(TARGETS _training_lib
838+
LIBRARY DESTINATION executorch/extension/training/pybindings
839+
)
840+
endif()
805841
endif()
806842

807843
if(EXECUTORCH_BUILD_KERNELS_CUSTOM)

README.md

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,9 @@
1212
<hr>
1313
</div>
1414

15-
**ExecuTorch** is an end-to-end solution for on-device inference and training. It powers much of Meta's on-device AI functionality including: Facebook, Oculus, Meta Glasses, Instagram, WhatsApp and more.
15+
**ExecuTorch** is an end-to-end solution for on-device inference and training. It powers much of Meta's on-device AI experiences across Facebook, Instagram, Meta Quest, Ray-Ban Meta Smart Glasses, WhatsApp, and more.
1616

17-
It covers a wide gamut of models including: LLMs (Large Language Models), CV (Computer Vision), ASR (Automatic Speech Recognition), TTS (Text to Speech).
17+
It supports a wide range of models including LLMs (Large Language Models), CV (Computer Vision), ASR (Automatic Speech Recognition), and TTS (Text to Speech).
1818

1919
Platform Support:
2020
- Operating Systems:
@@ -26,9 +26,10 @@ Platform Support:
2626

2727
- Hardware Acceleration:
2828
- Apple
29-
- ARM
29+
- Arm
3030
- Cadence
3131
- MediaTek
32+
- Qualcomm
3233
- Vulkan
3334
- XNNPACK
3435

backends/arm/_passes/insert_table_ops.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ class InsertTableOpsPass(ExportPass):
3131
"""
3232
For ops in self.table_ops they need to be serialized as a TOSA TABLE. This pass replaces these
3333
edge ops with a tosa._table(input: Tensor, target_str: str) where target_str == str(node.target).
34-
When loweringthe _table node target_str will be used to find the corresponding torch operator
34+
When lowering the _table node target_str will be used to find the corresponding torch operator
3535
which will be used to produce the table values in operators/op_table.py.
3636
"""
3737

@@ -43,6 +43,7 @@ class InsertTableOpsPass(ExportPass):
4343
exir_ops.edge.aten.sigmoid.default: torch.sigmoid,
4444
exir_ops.edge.aten.tanh.default: torch.tanh,
4545
exir_ops.edge.aten.hardsigmoid.default: torch.nn.functional.hardsigmoid,
46+
exir_ops.edge.aten.hardswish.default: torch.nn.functional.hardswish,
4647
}
4748

4849
def __init__(self, exported_program: ExportedProgram) -> None:

backends/arm/arm_partitioner.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ def ops_to_not_decompose(
115115
) -> Tuple[List[torch._ops.OpOverload], Optional[Callable[[torch.fx.Node], bool]]]:
116116
ops_to_not_decompose_if_quant_op = [
117117
torch.ops.aten.hardsigmoid.default,
118+
torch.ops.aten.hardswish.default,
118119
]
119120

120121
def filter_fn(node: torch.fx.Node) -> bool:

backends/arm/operator_support/tosa_supported_operators.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,7 @@ def is_node_supported(self, submodules, node: fx.Node) -> bool:
8181
exir_ops.edge.aten.permute_copy.default,
8282
exir_ops.edge.aten.hardsigmoid.default,
8383
exir_ops.edge.aten.hardtanh.default,
84+
exir_ops.edge.aten.hardswish.default,
8485
exir_ops.edge.aten.convolution.default,
8586
exir_ops.edge.aten.div.Tensor,
8687
exir_ops.edge.aten.eq.Tensor,

backends/arm/quantizer/quantization_annotator.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -133,6 +133,7 @@ def _match_pattern(
133133
torch.ops.aten.tanh.default,
134134
torch.ops.aten.sum.dim_IntList,
135135
torch.ops.aten.hardsigmoid.default,
136+
torch.ops.aten.hardswish.default,
136137
]
137138

138139
_one_to_one_shared_input_qspec = [

backends/arm/test/common.py

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,17 @@
99

1010
import tempfile
1111
from datetime import datetime
12+
1213
from pathlib import Path
14+
from typing import Any
1315

16+
import pytest
1417
from executorch.backends.arm.arm_backend import ArmCompileSpecBuilder
18+
from executorch.backends.arm.test.runner_utils import (
19+
arm_executor_runner_exists,
20+
corstone300_installed,
21+
corstone320_installed,
22+
)
1523
from executorch.backends.arm.tosa_specification import TosaSpecification
1624
from executorch.exir.backend.compile_spec_schema import CompileSpec
1725

@@ -145,3 +153,44 @@ def get_u85_compile_spec_unbuilt(
145153
.dump_intermediate_artifacts_to(artifact_path)
146154
)
147155
return compile_spec # type: ignore[return-value]
156+
157+
158+
SkipIfNoCorstone300 = pytest.mark.skipif(
159+
not corstone300_installed() or not arm_executor_runner_exists("corstone-300"),
160+
reason="Did not find Corstone-300 FVP or executor_runner on path",
161+
)
162+
"""Skips a test if Corsone300 FVP is not installed, or if the executor runner is not built"""
163+
164+
SkipIfNoCorstone320 = pytest.mark.skipif(
165+
not corstone320_installed() or not arm_executor_runner_exists("corstone-320"),
166+
reason="Did not find Corstone-320 FVP or executor_runner on path",
167+
)
168+
"""Skips a test if Corsone320 FVP is not installed, or if the executor runner is not built."""
169+
170+
171+
def parametrize(
172+
arg_name: str, test_data: dict[str, Any], xfails: dict[str, str] = None
173+
):
174+
"""
175+
Custom version of pytest.mark.parametrize with some syntactic sugar and added xfail functionality
176+
- test_data is expected as a dict of (id, test_data) pairs
177+
- allows specifying a dict of (id, failure_reason) pairs to mark specific tests as xfail
178+
"""
179+
if xfails is None:
180+
xfails = {}
181+
182+
def decorator_func(func):
183+
"""Test data is transformed from a dict of (id, data) pairs to a list of pytest params to work with the native pytests parametrize function"""
184+
pytest_testsuite = []
185+
for id, test_parameters in test_data.items():
186+
if id in xfails:
187+
pytest_param = pytest.param(
188+
test_parameters, id=id, marks=pytest.mark.xfail(reason=xfails[id])
189+
)
190+
else:
191+
pytest_param = pytest.param(test_parameters, id=id)
192+
pytest_testsuite.append(pytest_param)
193+
194+
return pytest.mark.parametrize(arg_name, pytest_testsuite)(func)
195+
196+
return decorator_func

backends/arm/test/models/test_conformer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ class TestConformer(unittest.TestCase):
3434
"executorch_exir_dialects_edge__ops_aten_where_self": 4,
3535
"executorch_exir_dialects_edge__ops_aten_logical_not_default": 4,
3636
"executorch_exir_dialects_edge__ops_aten_any_dim": 2,
37-
"torch.ops.aten._assert_scalar.default": 12,
37+
"torch.ops.aten._assert_scalar.default": 10,
3838
"torch.ops.aten._local_scalar_dense.default": 1,
3939
"torch.ops.aten.scalar_tensor.default": 2,
4040
"torch.ops.higher_order.executorch_call_delegate": 5,

0 commit comments

Comments
 (0)