Skip to content

export and delegate example fix, typos #478

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .ci/scripts/gather_test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,8 @@ def export_models_for_ci() -> None:
"quantization": quantization,
"xnnpack_delegation": xnnpack_delegation,
"runner": RUNNERS.get(name, DEFAULT_RUNNER),
# demo_backend_delegation test only supports add_mul model
"demo_backend_delegation": name == "add_mul",
}
)
set_output("models", json.dumps(models))
Expand Down
34 changes: 34 additions & 0 deletions .ci/scripts/test.sh
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,11 @@ if [[ -z "${XNNPACK_DELEGATION:-}" ]]; then
XNNPACK_DELEGATION=false
fi

DEMO_BACKEND_DELEGATION=$5
if [[ -z "${DEMO_BACKEND_DELEGATION:-}" ]]; then
DEMO_BACKEND_DELEGATION=false
fi

which "${PYTHON_EXECUTABLE}"
# Just set this variable here, it's cheap even if we use buck2
CMAKE_OUTPUT_DIR=cmake-out
Expand Down Expand Up @@ -106,6 +111,30 @@ test_model_with_xnnpack() {
fi
}

test_demo_backend_delegation() {
  echo "Testing demo backend delegation on AddMul"
  # Export the AddMul example once per demo lowering mode.
  local lowering_options=("composite" "partition" "whole")
  for lowering in "${lowering_options[@]}"; do
    "${PYTHON_EXECUTABLE}" -m examples.export.export_and_delegate --option "${lowering}"
  done

  # Run each exported model with the runner for the selected build tool.
  local model_paths=("./composite_model.pte" "./partition_lowered_model.pte" "./whole.pte")
  if [[ "${BUILD_TOOL}" == "buck2" ]]; then
    for model_path in "${model_paths[@]}"; do
      buck2 run //examples/executor_runner:executor_runner -- --model_path "${model_path}"
    done
  elif [[ "${BUILD_TOOL}" == "cmake" ]]; then
    # Build the cmake runner lazily, only if it is not already present.
    if [[ ! -f ${CMAKE_OUTPUT_DIR}/executor_runner ]]; then
      build_cmake_executor_runner
    fi
    for model_path in "${model_paths[@]}"; do
      ./${CMAKE_OUTPUT_DIR}/executor_runner --model_path "${model_path}"
    done
  else
    echo "Invalid build tool ${BUILD_TOOL}. Only buck2 and cmake are supported atm"
    exit 1
  fi
}

echo "Testing ${MODEL_NAME} (fp32, quantized, xnnpack) with ${BUILD_TOOL}..."
# Test the select model without XNNPACK or quantization
test_model
Expand All @@ -128,3 +157,8 @@ fi
if [[ "${XNNPACK_DELEGATION}" == true ]] && [[ "${QUANTIZATION}" == true ]]; then
test_model_with_xnnpack true
fi

# Test demo backend delegation
if [[ "${DEMO_BACKEND_DELEGATION}" == true ]]; then
test_demo_backend_delegation
fi
6 changes: 4 additions & 2 deletions .github/workflows/pull.yml
Original file line number Diff line number Diff line change
Expand Up @@ -59,10 +59,11 @@ jobs:
BUILD_TOOL=${{ matrix.build-tool }}
QUANTIZATION=${{ matrix.quantization }}
XNNPACK_DELEGATION=${{ matrix.xnnpack_delegation }}
DEMO_BACKEND_DELEGATION=${{ matrix.demo_backend_delegation }}

PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh "${BUILD_TOOL}"
# Build and test Executorch
PYTHON_EXECUTABLE=python bash .ci/scripts/test.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${QUANTIZATION}" "${XNNPACK_DELEGATION}"
PYTHON_EXECUTABLE=python bash .ci/scripts/test.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${QUANTIZATION}" "${XNNPACK_DELEGATION}" "${DEMO_BACKEND_DELEGATION}"

test-models-macos:
name: test-models-macos
Expand All @@ -84,11 +85,12 @@ jobs:
BUILD_TOOL=${{ matrix.build-tool }}
QUANTIZATION=${{ matrix.quantization }}
XNNPACK_DELEGATION=${{ matrix.xnnpack_delegation }}
DEMO_BACKEND_DELEGATION=${{ matrix.demo_backend_delegation }}

# Setup MacOS dependencies as there is no Docker support on MacOS atm
PYTHON_EXECUTABLE=python bash .ci/scripts/setup-macos.sh "${BUILD_TOOL}"
# Build and test Executorch
PYTHON_EXECUTABLE=python bash .ci/scripts/test.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${QUANTIZATION}" "${XNNPACK_DELEGATION}"
PYTHON_EXECUTABLE=python bash .ci/scripts/test.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${QUANTIZATION}" "${XNNPACK_DELEGATION}" "${DEMO_BACKEND_DELEGATION}"
popd

test-custom-ops-linux:
Expand Down
6 changes: 3 additions & 3 deletions docs/website/docs/ir_spec/00_exir.md
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ The `ExportedProgram` has the following attributes:
containing the parameters and buffers.
* `range constraints (Dict[sympy.Symbol, RangeConstraint])`: For programs that
are exported with data dependent behavior, the metadata on each node will
contain symbolic shapes (hich look like `s0`, `i0`). This attribute maps the
contain symbolic shapes (which look like `s0`, `i0`). This attribute maps the
symbolic shapes to their lower/upper ranges.
* `equality_constraints (List[Tuple[InputDim, InputDim]])`: A list of nodes in
the graph and dimensions that have the same shape.
Expand Down Expand Up @@ -196,7 +196,7 @@ A `call_function` node represents a call to an operator.

* **Functional:** We say a callable is “functional” if it satisfies all following requirements:
* Non-aliasing, ie output tensors do not share data storage with each other or with inputs of the operator
* Non-mutating, ie the operator does not mutate value of it’s input (for tensors, this includes both metadata and data)
* Non-mutating, ie the operator does not mutate value of its input (for tensors, this includes both metadata and data)
* No side effects, ie the operator does not mutate states that are visible from outside, like changing values of module parameters.

* **Operator:** is a functional callable with a predefined schema. Examples of
Expand All @@ -208,7 +208,7 @@ A `call_function` node represents a call to an operator.
%name = call_function[target = operator](args = (%x, %y, …), kwargs = {})
```

#### Differences from vanila FX call_function
#### Differences from vanilla FX call_function

1. In FX graph, a call_function can refer to any callable, in EXIR, we restrict
this to only Canonical ATen operators (a select subset of PyTorch ATen operator
Expand Down
2 changes: 1 addition & 1 deletion docs/website/docs/tutorials/exporting_to_executorch.md
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ dynamic and impose ranges on them. To learn more about constraints, you can look
at [these docs](../export/constraint_apis.md)

The output of `torch._export.capture_pre_autograd_graph` is a fully flattened graph (meaning the graph does
not contain any module heirachy, except in the case of control flow operators).
not contain any module hierarchy, except in the case of control flow operators).
Furthermore, the captured graph is in ATen dialect with ATen opset which is autograd safe, i.e. safe for eager mode training.
This is important for quantization as noted in https://github.com/pytorch/executorch/issues/290.

Expand Down
23 changes: 13 additions & 10 deletions examples/export/export_and_delegate.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,9 @@
from executorch.exir.backend.test.op_partitioner_demo import AddMulPartitionerDemo

from ..models import MODEL_NAME_TO_MODEL
from ..models.model_factory import EagerModelFactory

from ..utils import export_to_edge
from .utils import export_to_edge


FORMAT = "[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s"
Expand All @@ -37,7 +38,7 @@
"""


def export_compsite_module_with_lower_graph():
def export_composite_module_with_lower_graph():
"""

AddMulModule:
Expand All @@ -57,9 +58,10 @@ def export_compsite_module_with_lower_graph():
logging.info(
"Running the example to export a composite module with lowered graph..."
)
m, m_inputs = MODEL_NAME_TO_MODEL.get("add_mul")()
m = m.eval()
m_inputs = m.get_example_inputs()

m, m_inputs = EagerModelFactory.create_model(*MODEL_NAME_TO_MODEL["add_mul"])
m_compile_spec = m.get_compile_spec()

# pre-autograd export. eventually this will become torch.export
m = export.capture_pre_autograd_graph(m, m_inputs)
edge = export_to_edge(m, m_inputs)
Expand All @@ -68,7 +70,7 @@ def export_compsite_module_with_lower_graph():
# Lower AddMulModule to the demo backend
logging.info("Lowering to the demo backend...")
lowered_graph = to_backend(
BackendWithCompilerDemo.__name__, edge.exported_program, m.get_compile_spec()
BackendWithCompilerDemo.__name__, edge.exported_program, m_compile_spec
)

# Composite the lower graph with other module
Expand Down Expand Up @@ -166,8 +168,9 @@ def export_and_lower_the_whole_graph():
"""
logging.info("Running the example to export and lower the whole graph...")

m, m_inputs = MODEL_NAME_TO_MODEL.get("add_mul")()
m = m.eval()
m, m_inputs = EagerModelFactory.create_model(*MODEL_NAME_TO_MODEL["add_mul"])
m_compile_spec = m.get_compile_spec()

m_inputs = m.get_example_inputs()
# pre-autograd export. eventually this will become torch.export
m = export.capture_pre_autograd_graph(m, m_inputs)
Expand All @@ -177,7 +180,7 @@ def export_and_lower_the_whole_graph():
# Lower AddMulModule to the demo backend
logging.info("Lowering to the demo backend...")
lowered_module = to_backend(
BackendWithCompilerDemo.__name__, edge, m.get_compile_spec()
BackendWithCompilerDemo.__name__, edge.exported_program, m_compile_spec
)

buffer = lowered_module.buffer()
Expand All @@ -190,7 +193,7 @@ def export_and_lower_the_whole_graph():


OPTIONS_TO_LOWER = {
"composite": export_compsite_module_with_lower_graph,
"composite": export_composite_module_with_lower_graph,
"partition": export_and_lower_partitioned_graph,
"whole": export_and_lower_the_whole_graph,
}
Expand Down