######################################################################
# ExecuTorch is a unified ML stack for lowering PyTorch models to edge devices.
# It introduces improved entry points to perform model, device, and/or use-case
- # specific optizations such as backend delegation, user-defined compiler
+ # specific optimizations such as backend delegation, user-defined compiler
# transformations, default or user-defined memory planning, and more.
#
# At a high level, the workflow looks as follows:
# ``torch.export``.
#
# Both APIs take in a model (any callable or ``torch.nn.Module``), a tuple of
- # positional arguments, optionally a dictionary of keywork arguments (not shown
+ # positional arguments, optionally a dictionary of keyword arguments (not shown
# in the example), and a list of constraints (covered later).
import torch
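######################################################################
# As a minimal illustrative sketch of the two entry points described above
# (not part of the original example): ``Add`` and ``example_args`` are made-up
# names, and ``torch._export.capture_pre_autograd_graph`` is a private API
# whose location and signature may differ across PyTorch releases.

from torch._export import capture_pre_autograd_graph


class Add(torch.nn.Module):
    def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return x + y


example_args = (torch.randn(3), torch.randn(3))

# Captures a flattened graph of ATen operators that is still autograd safe.
pre_autograd_graph = capture_pre_autograd_graph(Add(), example_args)

# Exports an ``ExportedProgram`` containing only Core ATen operators.
exported_program = torch.export.export(Add(), example_args)
print(exported_program)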
@@ -96,7 +96,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
# The output of ``torch._export.capture_pre_autograd_graph`` is a fully
# flattened graph (meaning the graph does not contain any module hierarchy,
# except in the case of control flow operators). Furthermore, the captured graph
- # contains only ATen operators (~3000 ops) which are autograd safe, i.e. safe
+ # contains only ATen operators (~3000 ops) which are Autograd safe, for example, safe
# for eager mode training.
#
# The output of ``torch.export`` further compiles the graph to a lower and
@@ -116,7 +116,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
# Since the result of ``torch.export`` is a graph containing the Core ATen
# operators, we will call this the ``ATen Dialect``, and since
# ``torch._export.capture_pre_autograd_graph`` returns a graph containing the
- # set of ATen operators which are autograd safe, we will call it the
+ # set of ATen operators which are Autograd safe, we will call it the
# ``Pre-Autograd ATen Dialect``.
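######################################################################
# To make the two dialects concrete, a quick (illustrative) check is to list
# the operators each captured graph calls, reusing the made-up
# ``pre_autograd_graph`` and ``exported_program`` objects from the earlier
# sketch; the exact overloads printed depend on the PyTorch version.

# Pre-Autograd ATen Dialect: ATen operators that are safe for eager-mode training.
print({n.target for n in pre_autograd_graph.graph.nodes if n.op == "call_function"})

# ATen Dialect: only Core ATen operators remain after ``torch.export``.
print({n.target for n in exported_program.graph_module.graph.nodes if n.op == "call_function"})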
######################################################################
@@ -231,7 +231,7 @@ def f(x, y):
# `FX Graph Mode Quantization <https://pytorch.org/tutorials/prototype/fx_graph_mode_ptq_static.html>`__,
# we will need to call two new APIs: ``prepare_pt2e`` and ``convert_pt2e``
# instead of ``prepare_fx`` and ``convert_fx``. It differs in that
- # ``prepare_pt2e`` takes a backend-specific ``Quantizer`` as an arugument , which
+ # ``prepare_pt2e`` takes a backend-specific ``Quantizer`` as an argument, which
# will annotate the nodes in the graph with information needed to quantize the
# model properly for a specific backend.
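######################################################################
# A minimal sketch of the PT2E flow described above (illustrative only: it
# reuses the made-up ``Add`` module and ``example_args`` from the earlier
# sketch, and assumes the ``XNNPACKQuantizer`` import path of the PyTorch
# release this tutorial targets, which has moved in later releases).

from torch.ao.quantization.quantize_pt2e import prepare_pt2e, convert_pt2e
from torch.ao.quantization.quantizer.xnnpack_quantizer import (
    XNNPACKQuantizer,
    get_symmetric_quantization_config,
)

# Quantization runs on the Pre-Autograd ATen Dialect graph.
model_to_quantize = capture_pre_autograd_graph(Add(), example_args)

# The backend-specific ``Quantizer`` tells ``prepare_pt2e`` how to annotate nodes.
quantizer = XNNPACKQuantizer().set_global(get_symmetric_quantization_config())

prepared_model = prepare_pt2e(model_to_quantize, quantizer)
prepared_model(*example_args)  # calibrate with representative inputs
quantized_model = convert_pt2e(prepared_model)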