
Commit c7104db

mcremon-meta authored and facebook-github-bot committed
Call the quantizer from the OSS repo (#3597)
Summary:
Pull Request resolved: #3597

As titled. Remove the unneeded meta-repo things as well!

Reviewed By: tarun292, dulinriley, zonglinpengmeta

Differential Revision: D57084231

fbshipit-source-id: 8d23f869080a729d5fab5026194fefe22f7e34c2
1 parent 46ec26b commit c7104db
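
For context, the point of the change is that the Cadence quantizer is built and imported from the OSS ExecuTorch tree rather than from internal-only targets. A minimal usage sketch follows; it assumes quantizer.py exposes a CadenceQuantizer implementing the PT2E Quantizer interface and that the model has already been captured to an FX GraphModule gm — both are assumptions, not something this commit shows.

# Hypothetical sketch, not part of this commit. Assumes `CadenceQuantizer`
# is exported by quantizer.py and `gm` is a PT2E-captured GraphModule.
from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e

from executorch.backends.cadence.aot.quantizer.quantizer import CadenceQuantizer


def quantize_captured_module(gm, example_inputs):
    quantizer = CadenceQuantizer()
    prepared = prepare_pt2e(gm, quantizer)  # insert observers per the quantizer's annotations
    prepared(*example_inputs)               # calibrate on representative inputs
    return convert_pt2e(prepared)           # swap observers for quantize/dequantize ops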

3 files changed: +50 -2 lines changed
Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
+load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+
+oncall("odai_jarvis")
+
+python_library(
+    name = "utils",
+    srcs = [
+        "utils.py",
+    ],
+    deps = [
+        "//caffe2:torch",
+    ],
+)
+
+python_library(
+    name = "patterns",
+    srcs = [
+        "patterns.py",
+    ],
+    deps = [
+        ":utils",
+        "//caffe2:torch",
+    ],
+)
+
+python_library(
+    name = "quantizer",
+    srcs = [
+        "quantizer.py",
+    ],
+    deps = [
+        ":patterns",
+        ":utils",
+        "//caffe2:torch",
+    ],
+)
+
+python_library(
+    name = "fusion_pass",
+    srcs = [
+        "fusion_pass.py",
+    ],
+    deps = [
+        ":patterns",
+        ":utils",
+        "//caffe2:torch",
+        "//executorch/exir:pass_base",
+    ],
+)
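
The targets above mirror the Python modules in the quantizer directory, so a downstream Buck target only needs to list them in deps. A hypothetical consumer target is sketched below; the name, source file, and target paths are illustrative, not part of this commit.

# Hypothetical consumer target; name, srcs, and dep paths are illustrative only.
python_library(
    name = "quantize_model",
    srcs = ["quantize_model.py"],
    deps = [
        "//executorch/backends/cadence/aot/quantizer:quantizer",
        "//executorch/backends/cadence/aot/quantizer:fusion_pass",
    ],
)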

backends/cadence/aot/quantizer/fusion_pass.py

Lines changed: 1 addition & 1 deletion
@@ -167,7 +167,7 @@ def get_args_and_kwargs_matmul(
     inputs_inputs: List[fx.Node],
     dequants_inputs: List[fx.Node],
     quant_node: fx.Node,
-) -> Tuple[Tuple[Any], Dict[str, Any]]:
+) -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
     requantize_scale = (
         # pyre-ignore[58]: Unsupported operand
         dequants_inputs[0].args[1]
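
The annotation fix above matters because Tuple[Any] types a tuple with exactly one element, while Tuple[Any, ...] types a tuple of arbitrary length, which matches the variable-length args pack this helper returns. A standalone illustration (names made up, not from the commit):

# Standalone illustration of the Tuple[Any, ...] annotation; names are hypothetical.
from typing import Any, Dict, Tuple

def make_args_and_kwargs() -> Tuple[Tuple[Any, ...], Dict[str, Any]]:
    args: Tuple[Any, ...] = (1, 2.0, "three")  # any number of positional args
    kwargs: Dict[str, Any] = {"scale": 0.5}
    return args, kwargs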

backends/cadence/aot/quantizer/patterns.py

Lines changed: 0 additions & 1 deletion
@@ -217,7 +217,6 @@ def get_anchors(self, gm, fused_partition) -> PartitionAnchors:
             weights=[],
             biases=[],
             # Ordering: normalized_shape, weights, bias
-            # pyre-fixme[6]: Incompatible parameter type
             others=others,
             output=[(layer_norm_node,)],
         )
