Skip to content

Commit ffcc921

Browse files
committed
Move optimized target definitions to op_registration.bzl
Pull Request resolved: #10877 So we can use them in codegen.bzl later (can't pull in definitions from targets.bzl files). Differential Revision: [D74741846](https://our.internmc.facebook.com/intern/diff/D74741846/) ghstack-source-id: 283972269
1 parent 9ded0a2 commit ffcc921

File tree

2 files changed

+124
-125
lines changed

2 files changed

+124
-125
lines changed

kernels/optimized/cpu/targets.bzl

Lines changed: 3 additions & 125 deletions
Original file line numberDiff line numberDiff line change
@@ -1,127 +1,5 @@
11
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
2-
load("@fbsource//xplat/executorch/kernels/optimized:op_registration_util.bzl", "define_op_target", "op_target")
3-
4-
_OPTIMIZED_ATEN_OPS = (
5-
op_target(
6-
name = "op_add",
7-
deps = [
8-
":binary_ops",
9-
":add_sub_impl",
10-
"//executorch/kernels/portable/cpu:scalar_utils",
11-
"//executorch/kernels/portable/cpu/util:broadcast_util",
12-
],
13-
),
14-
op_target(
15-
name = "op_bmm",
16-
deps = [
17-
"//executorch/kernels/optimized:libblas",
18-
"//executorch/kernels/portable/cpu/util:matmul_ops_util",
19-
],
20-
),
21-
op_target(
22-
name = "op_div",
23-
deps = [
24-
":binary_ops",
25-
"//executorch/kernels/portable/cpu:scalar_utils",
26-
"//executorch/kernels/portable/cpu/util:broadcast_util",
27-
],
28-
),
29-
op_target(
30-
name = "op_elu",
31-
deps = [
32-
"//executorch/extension/threadpool:threadpool",
33-
"//executorch/kernels/portable/cpu:scalar_utils",
34-
"//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
35-
],
36-
),
37-
op_target(name = "op_exp"),
38-
op_target(
39-
name = "op_fft_c2r",
40-
compiler_flags = [] if runtime.is_oss else [
41-
"-Wno-global-constructors",
42-
"-Wno-shadow",
43-
],
44-
deps = [":fft_utils"],
45-
),
46-
op_target(
47-
name = "op_fft_r2c",
48-
compiler_flags = [] if runtime.is_oss else [
49-
"-Wno-global-constructors",
50-
"-Wno-shadow",
51-
],
52-
deps = [":fft_utils"],
53-
),
54-
op_target(name = "op_sigmoid"),
55-
op_target(
56-
name = "op_gelu",
57-
deps = [
58-
"//executorch/kernels/portable/cpu/util:activation_ops_util",
59-
"//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
60-
],
61-
),
62-
op_target(
63-
name = "op_le",
64-
deps = [
65-
"//executorch/kernels/portable/cpu:scalar_utils",
66-
"//executorch/kernels/portable/cpu/util:broadcast_util",
67-
],
68-
),
69-
op_target(
70-
name = "op_linear",
71-
deps = [
72-
"//executorch/kernels/optimized:libblas",
73-
"//executorch/kernels/portable/cpu/util:matmul_ops_util",
74-
],
75-
),
76-
op_target(
77-
name = "op_log_softmax",
78-
deps = [
79-
"//executorch/kernels/portable/cpu/util:activation_ops_util",
80-
"//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
81-
],
82-
),
83-
op_target(
84-
name = "op_mm",
85-
deps = [
86-
"//executorch/kernels/optimized:libblas",
87-
"//executorch/kernels/portable/cpu/util:matmul_ops_util",
88-
],
89-
),
90-
op_target(
91-
name = "op_mul",
92-
deps = [
93-
":binary_ops",
94-
"//executorch/kernels/portable/cpu:scalar_utils",
95-
"//executorch/kernels/portable/cpu/util:broadcast_util",
96-
"//executorch/runtime/core/exec_aten/util:tensor_util",
97-
],
98-
),
99-
op_target(
100-
name = "op_native_layer_norm",
101-
deps = [
102-
":moments_utils",
103-
"//executorch/kernels/portable/cpu/util:normalization_ops_util",
104-
],
105-
),
106-
op_target(name = "op_neg"),
107-
op_target(
108-
name = "op_sub",
109-
deps = [
110-
":binary_ops",
111-
":add_sub_impl",
112-
"//executorch/kernels/portable/cpu:scalar_utils",
113-
"//executorch/kernels/portable/cpu/util:broadcast_util",
114-
],
115-
),
116-
op_target(
117-
name = "op_where",
118-
deps = [
119-
"//executorch/extension/threadpool:threadpool",
120-
"//executorch/kernels/portable/cpu/util:elementwise_util",
121-
],
122-
),
123-
)
124-
2+
load("@fbsource//xplat/executorch/kernels/optimized:op_registration_util.bzl", "OPTIMIZED_ATEN_OPS", "define_op_target", "op_target")
1253

1264
def get_sleef_preprocessor_flags():
1275
if runtime.is_oss:
@@ -137,10 +15,10 @@ def define_common_targets():
13715
"""
13816

13917
# Define build targets for all operators registered in the tables above.
140-
for op in _OPTIMIZED_ATEN_OPS:
18+
for op in OPTIMIZED_ATEN_OPS:
14119
define_op_target(**op)
14220

143-
aten_op_targets = [":{}".format(op["name"]) for op in _OPTIMIZED_ATEN_OPS]
21+
aten_op_targets = [":{}".format(op["name"]) for op in OPTIMIZED_ATEN_OPS]
14422
all_op_targets = aten_op_targets
14523

14624
runtime.cxx_library(

kernels/optimized/op_registration_util.bzl

Lines changed: 121 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -137,3 +137,124 @@ def define_op_target(name, compiler_flags, deps):
137137
compiler_flags = compiler_flags,
138138
deps = deps,
139139
)
140+
141+
# Registry of ATen operators that have optimized CPU kernel implementations.
# Each entry is built by op_target(...) and later expanded with
# define_op_target(**op) to declare one build target per operator; op["name"]
# is also used to form the ":op_*" target labels. This table lives in
# op_registration_util.bzl (rather than in the cpu/targets.bzl that consumes
# it) so it can additionally be loaded from codegen.bzl, which cannot load
# targets.bzl files.
OPTIMIZED_ATEN_OPS = (
    op_target(
        name = "op_add",
        deps = [
            ":binary_ops",
            ":add_sub_impl",  # shared add/sub implementation, also used by op_sub
            "//executorch/kernels/portable/cpu:scalar_utils",
            "//executorch/kernels/portable/cpu/util:broadcast_util",
        ],
    ),
    op_target(
        name = "op_bmm",
        deps = [
            "//executorch/kernels/optimized:libblas",
            "//executorch/kernels/portable/cpu/util:matmul_ops_util",
        ],
    ),
    op_target(
        name = "op_div",
        deps = [
            ":binary_ops",
            "//executorch/kernels/portable/cpu:scalar_utils",
            "//executorch/kernels/portable/cpu/util:broadcast_util",
        ],
    ),
    op_target(
        name = "op_elu",
        deps = [
            "//executorch/extension/threadpool:threadpool",
            "//executorch/kernels/portable/cpu:scalar_utils",
            "//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
        ],
    ),
    # Ops with no extra deps beyond the op_target defaults.
    op_target(name = "op_exp"),
    op_target(
        name = "op_fft_c2r",
        # Warning suppressions are only needed for internal (non-OSS) builds.
        compiler_flags = [] if runtime.is_oss else [
            "-Wno-global-constructors",
            "-Wno-shadow",
        ],
        deps = [":fft_utils"],
    ),
    op_target(
        name = "op_fft_r2c",
        # Same internal-only warning suppressions as op_fft_c2r.
        compiler_flags = [] if runtime.is_oss else [
            "-Wno-global-constructors",
            "-Wno-shadow",
        ],
        deps = [":fft_utils"],
    ),
    op_target(name = "op_sigmoid"),
    op_target(
        name = "op_gelu",
        deps = [
            "//executorch/kernels/portable/cpu/util:activation_ops_util",
            "//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
        ],
    ),
    op_target(
        name = "op_le",
        deps = [
            "//executorch/kernels/portable/cpu:scalar_utils",
            "//executorch/kernels/portable/cpu/util:broadcast_util",
        ],
    ),
    op_target(
        name = "op_linear",
        deps = [
            "//executorch/kernels/optimized:libblas",
            "//executorch/kernels/portable/cpu/util:matmul_ops_util",
        ],
    ),
    op_target(
        name = "op_log_softmax",
        deps = [
            "//executorch/kernels/portable/cpu/util:activation_ops_util",
            "//executorch/runtime/core/portable_type/c10/c10:aten_headers_for_executorch",
        ],
    ),
    op_target(
        name = "op_mm",
        deps = [
            "//executorch/kernels/optimized:libblas",
            "//executorch/kernels/portable/cpu/util:matmul_ops_util",
        ],
    ),
    op_target(
        name = "op_mul",
        deps = [
            ":binary_ops",
            "//executorch/kernels/portable/cpu:scalar_utils",
            "//executorch/kernels/portable/cpu/util:broadcast_util",
            "//executorch/runtime/core/exec_aten/util:tensor_util",
        ],
    ),
    op_target(
        name = "op_native_layer_norm",
        deps = [
            ":moments_utils",
            "//executorch/kernels/portable/cpu/util:normalization_ops_util",
        ],
    ),
    op_target(name = "op_neg"),
    op_target(
        name = "op_sub",
        deps = [
            ":binary_ops",
            ":add_sub_impl",  # shared add/sub implementation, also used by op_add
            "//executorch/kernels/portable/cpu:scalar_utils",
            "//executorch/kernels/portable/cpu/util:broadcast_util",
        ],
    ),
    op_target(
        name = "op_where",
        deps = [
            "//executorch/extension/threadpool:threadpool",
            "//executorch/kernels/portable/cpu/util:elementwise_util",
        ],
    ),
)

0 commit comments

Comments (0)