Commit d6c4a1b

Yi Li authored and facebook-github-bot committed
Update the API of registering fake kernels to new standard (#5084)
Summary:
Pull Request resolved: #5084

Update the decorator functions used to register operator abstract implementations (fake tensors) to the newer register_fake API.

Reviewed By: zonglinpeng, hsharma35

Differential Revision: D62206602
1 parent b3b9162 commit d6c4a1b

File tree

1 file changed: +8 -8 lines


backends/cadence/aot/ops_registrations.py

Lines changed: 8 additions & 8 deletions
@@ -11,7 +11,7 @@
 
 import torch
 from executorch.exir.scalar_type import ScalarType
-from torch.library import impl, Library
+from torch.library import Library, register_fake
 
 from .utils import get_conv1d_output_size, get_conv2d_output_size
 
@@ -69,7 +69,7 @@
 m = Library("cadence", "IMPL", "Meta")
 
 
-@impl(m, "quantize_per_tensor")
+@register_fake("cadence::quantize_per_tensor")
 def quantize_per_tensor_meta(
     input: torch.Tensor,
     scale: float,
@@ -81,7 +81,7 @@ def quantize_per_tensor_meta(
     return input.new_empty(input.size(), dtype=dtype)
 
 
-@impl(m, "dequantize_per_tensor")
+@register_fake("cadence::dequantize_per_tensor")
 def dequantize_per_tensor_meta(
     input: torch.Tensor,
     scale: float,
@@ -93,7 +93,7 @@ def dequantize_per_tensor_meta(
     return input.new_empty(input.size(), dtype=torch.float)
 
 
-@impl(m, "quantized_linear")
+@register_fake("cadence::quantized_linear")
 def quantized_linear_meta(
     src: torch.Tensor,
     weight: torch.Tensor,
@@ -115,7 +115,7 @@ def quantized_linear_meta(
     return src.new_empty(out_size, dtype=torch.uint8)
 
 
-@impl(m, "quantized_conv")
+@register_fake("cadence::quantized_conv")
 def quantized_conv_meta(
     input: torch.Tensor,
     weight: torch.Tensor,
@@ -153,7 +153,7 @@ def quantized_conv_meta(
     return input.new_empty(output_size, dtype=input.dtype)
 
 
-@impl(m, "quantized_layer_norm")
+@register_fake("cadence::quantized_layer_norm")
 def quantized_layer_norm_meta(
     input: torch.Tensor,
     X_scale: torch.Tensor,
@@ -168,7 +168,7 @@ def quantized_layer_norm_meta(
     return input.new_empty(input.size(), dtype=torch.uint8)
 
 
-@impl(m, "quantized_relu")
+@register_fake("cadence::quantized_relu")
 def quantized_relu_meta(
     X: torch.Tensor,
     X_zero_point: torch.Tensor,
@@ -179,7 +179,7 @@ def quantized_relu_meta(
     return X.new_empty(X.size(), dtype=torch.uint8)
 
 
-@impl(m, "quantized_matmul")
+@register_fake("cadence::quantized_matmul")
 def quantized_matmul_meta(
     X: torch.Tensor,
     X_zero_point: int,
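
For readers unfamiliar with the new API, below is a minimal, self-contained sketch (not taken from this patch) of the pattern the diff adopts: torch.library.register_fake replaces the older registration of meta kernels through @impl on a Library opened with the "Meta" dispatch key. The fake kernel only produces output metadata (shape/dtype), so shape propagation works under FakeTensorMode and torch.export without running the real computation. The "mylib::scale_add" operator, its schema, and both kernels are illustrative assumptions, not part of the Cadence backend.

# Minimal sketch assuming a hypothetical "mylib::scale_add" operator;
# only the register_fake usage mirrors the pattern adopted in this commit.
# Requires a recent PyTorch that provides torch.library.register_fake.
import torch
from torch.library import Library, impl, register_fake

lib = Library("mylib", "DEF")
lib.define("scale_add(Tensor x, Tensor y, float alpha) -> Tensor")


@impl(lib, "scale_add", "CPU")
def scale_add(x: torch.Tensor, y: torch.Tensor, alpha: float) -> torch.Tensor:
    # Real kernel: runs on actual CPU tensors.
    return x + alpha * y


@register_fake("mylib::scale_add")
def scale_add_fake(x: torch.Tensor, y: torch.Tensor, alpha: float) -> torch.Tensor:
    # Fake kernel: allocate an empty tensor with the right shape/dtype and
    # never touch the data (same role as the *_meta functions in the diff).
    return x.new_empty(x.size())


if __name__ == "__main__":
    a, b = torch.randn(4, 3), torch.randn(4, 3)
    # Eager call dispatches to the real CPU kernel.
    print(torch.ops.mylib.scale_add(a, b, 0.5).sum())

    # Under FakeTensorMode only the fake kernel runs; shapes still propagate.
    from torch._subclasses.fake_tensor import FakeTensorMode
    with FakeTensorMode() as mode:
        fa, fb = mode.from_tensor(a), mode.from_tensor(b)
        print(torch.ops.mylib.scale_add(fa, fb, 0.5).shape)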
