Skip to content

Commit 691d964

Browse files
mcr229 and facebook-github-bot
authored and committed
XNNPack --> XNNPACK (#590)
Summary: We should keep naming consistent. Changing all occurrences of XNNPack to XNNPACK under the executorch folder. Reviewed By: digantdesai Differential Revision: D49851704
1 parent 141fcf6 commit 691d964

File tree

8 files changed

+12
-12
lines changed

8 files changed

+12
-12
lines changed

backends/xnnpack/operators/op_to_copy.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ def define_node(
4747
to_contiguous = bool(memory_format_target == torch.contiguous_format)
4848
check_or_raise(
4949
to_channels_last or to_contiguous,
50-
"Unsupported Memory Format for XNNPack",
50+
"Unsupported Memory Format for XNNPACK",
5151
)
5252

5353
input_node = get_input_node(node, 0)

backends/xnnpack/passes/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ def __init__(
3232
self, exported_program: ExportedProgram, passes: Optional[List[PassType]] = None
3333
) -> None:
3434
"""
35-
A helper class to run multiple XNNPack passes on a program
35+
A helper class to run multiple XNNPACK passes on a program
3636
If passes list is empty, all passes in XNNPACK will be run.
3737
Else only run passes in the list will be run.
3838
"""

backends/xnnpack/passes/channels_last_tagged_reshape_pass.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,13 @@
1515
# TODO(T151254305) use subgraph_rewriter
1616
class ChannelsLastTaggedReshapePass(XNNPACKPass):
1717
"""
18-
This pass is Internal to XNNPack only! It is meant to give a new representation
19-
of the edge graph to be consumed by XNNPack Preprocess. All added operators
18+
This pass is Internal to XNNPACK only! It is meant to give a new representation
19+
of the edge graph to be consumed by XNNPACK Preprocess. All added operators
2020
will be consumed by delegate and turned to delegate blobs.
2121
2222
Edge IR graph pass to add operator stubs that signal a change in
2323
memory format from contiguous to channels last. This is to help with
24-
XNNPack Delegate to add transpose nodes to change input memory format
24+
XNNPACK Delegate to add transpose nodes to change input memory format
2525
at runtime and run operators in Channels Last Format.
2626
2727
During this pass, nhwc nodes are not converted back to nchw immediately.

backends/xnnpack/runtime/XNNCompiler.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ bool isQuantizedDataType(const xnn_datatype data_type) {
8585

8686
/**
8787
Converts dims from uint32 to size_t. Takes in a flatbuffer vector
88-
of uint32_t and returns a std::vector of size_t. XNNPack takes in
88+
of uint32_t and returns a std::vector of size_t. XNNPACK takes in
8989
dims of size_t* but tensor shape is serialized in flatbuffer as
9090
int32_t. As a result, we need to static cast the shapes to size_t
9191
*/
@@ -143,7 +143,7 @@ Error defineTensor(
143143
// to properly convert the uint32_t* to size_t*
144144
std::vector<size_t> dims_data = flatbufferDimsToVector(tensor_value->dims());
145145

146-
// XNNPack Id
146+
// XNNPACK Id
147147
uint32_t id = XNN_INVALID_VALUE_ID;
148148

149149
// Get Pointer to constant data from flatbuffer, if its non-constant

backends/xnnpack/runtime/XNNCompiler.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ namespace delegate {
2121

2222
class XNNCompiler {
2323
public:
24-
// Takes Flatbuffer Serialized XNNPack Model and rebuilds the xnn-subgraph
24+
// Takes Flatbuffer Serialized XNNPACK Model and rebuilds the xnn-subgraph
2525
// returns an executor object that holds the xnn runtime object which we
2626
// can then use to set inputs and run inference using the xnn graph.
2727
__ET_NODISCARD static Error compileModel(

backends/xnnpack/test/test_xnnpack_passes.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@
4242
from torch.testing import FileCheck
4343

4444

45-
class TestXNNPackPasses(unittest.TestCase):
45+
class TestXNNPACKPasses(unittest.TestCase):
4646
class TwoOutputs(OpSequencesAddConv2d):
4747
def __init__(self):
4848
super().__init__(1, 2)

backends/xnnpack/test/test_xnnpack_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ def assert_outputs_equal(self, model_output, ref_output):
126126
"""
127127
Helper testing function that asserts that the model output and the reference output
128128
are equal with some tolerance. Due to numerical differences between eager mode and
129-
the XNNPack's backend, we relax the detal such that absolute tolerance is 1e-3. and
129+
the XNNPACK's backend, we relax the detal such that absolute tolerance is 1e-3. and
130130
relative tolerance is 1e-3.
131131
"""
132132

backends/xnnpack/xnnpack_preprocess.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -225,7 +225,7 @@ def preprocess(
225225
)
226226
else:
227227
raise RuntimeError(
228-
f"For {node}, {node.op}:{node.target.__name__} is not supported in XNNPack Delegate"
228+
f"For {node}, {node.op}:{node.target.__name__} is not supported in XNNPACK Delegate"
229229
)
230230
elif node.op in [
231231
"get_attr",
@@ -234,5 +234,5 @@ def preprocess(
234234
]:
235235
continue
236236
else:
237-
raise RuntimeError(f"{node.op} is not supported in XNNPack")
237+
raise RuntimeError(f"{node.op} is not supported in XNNPACK")
238238
return PreprocessResult(processed_bytes=convert_to_flatbuffer(xnnpack_graph))

0 commit comments

Comments
 (0)