Commit 8f446e3

lucylq authored and facebook-github-bot committed
Update portable op names to match functions.yaml (#1049)
Summary:
Pull Request resolved: #1049

- selected_op.yaml contains *op_names* collected from the model
- dtype selective build: for each *op_name*, generate kernel code only if the dtype is used in the model
- this diff: make sure the op names in each op_<op>.cpp file match the op name collected by selected_op.yaml, which is the op name from [functions.yaml](https://fburl.com/code/uok7njmb)

Note: the op names are not used for anything besides selective build, so this change should not break anything.

Reviewed By: manuelcandales

Differential Revision: D50425923

fbshipit-source-id: e91932d67be3bad1369f6065a02836abdd7490fd
1 parent 156c7f0 commit 8f446e3

76 files changed: +1374 −1088 lines
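
The pattern is the same across all 76 files: the name string passed to the ET_SWITCH_* dtype-dispatch macros changes from the C++ base name (or __func__) to the full overload name from functions.yaml, e.g. "abs" becomes "abs.out" and "add" becomes "add.out" or "add.Scalar_out". As a toy sketch of why the exact string matters (this is not ExecuTorch's actual codegen; the lookup below is invented for illustration), selective build can only keep dtype-specialized kernel code for op names that appear in selected_op.yaml:

// Toy model of dtype selective build -- illustration only, not ExecuTorch's
// real implementation. The op names stand in for entries collected from a
// model into selected_op.yaml.
#include <cstdio>
#include <cstring>

// Overload names as they appear in functions.yaml / selected_op.yaml.
const char* kSelectedOps[] = {"abs.out", "add.out", "add.Scalar_out"};

bool is_selected(const char* op_name) {
  for (const char* selected : kSelectedOps) {
    if (std::strcmp(op_name, selected) == 0) {
      return true;
    }
  }
  return false;
}

int main() {
  // Before this commit the kernels passed the base name, which never matches
  // the overload name collected from functions.yaml, so their dtype switches
  // could not be associated with the model's selected ops.
  std::printf("\"abs\"     -> %s\n", is_selected("abs") ? "kept" : "no match");
  std::printf("\"abs.out\" -> %s\n", is_selected("abs.out") ? "kept" : "no match");
  return 0;
}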

kernels/portable/README.md

Lines changed: 1 addition & 1 deletion
@@ -53,7 +53,7 @@ in other cases we can consider adding the missing features.
 
 ### Do your initial work in fbcode (skip this if in OSS)
 
-Althouth ExecuTorch is mapped into both `xplat` and `fbcode`, we recommend
+Although ExecuTorch is mapped into both `xplat` and `fbcode`, we recommend
 setting up the initial targets while working from `fbcode`. Once everything's in
 place, you should be able to build from either spot.
 

kernels/portable/cpu/op_abs.cpp

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ Tensor& abs_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
   ET_CHECK_MSG(error == Error::Ok, "Failed to resize output tensor.");
   ET_CHECK_SAME_SHAPE_AND_DTYPE2(in, out);
 
-  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "abs", CTYPE, [&] {
+  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "abs.out", CTYPE, [&] {
     apply_unary_map_fn(
         [](const CTYPE val_in) {
           if (val_in < 0) {

kernels/portable/cpu/op_add.cpp

Lines changed: 45 additions & 41 deletions
@@ -33,26 +33,28 @@ Tensor& add_out(
 
   ET_CHECK(canCast(common_type, out_type));
 
-  ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "add", CTYPE_A, [&]() {
-    ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "add", CTYPE_B, [&]() {
-      ET_SWITCH_REAL_TYPES_AND(Bool, common_type, ctx, "add", CTYPE_IN, [&]() {
-        ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, "add", CTYPE_OUT, [&]() {
-          CTYPE_IN alpha_val;
-          ET_EXTRACT_SCALAR(alpha, alpha_val);
-
-          apply_binary_elementwise_fn<CTYPE_A, CTYPE_B, CTYPE_OUT>(
-              [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) {
-                CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
-                CTYPE_IN b_casted = static_cast<CTYPE_IN>(val_b);
-                CTYPE_IN value = a_casted + alpha_val * b_casted;
-
-                return static_cast<CTYPE_OUT>(value);
-              },
-              a,
-              b,
-              out);
-        });
-      });
+  ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "add.out", CTYPE_A, [&]() {
+    ET_SWITCH_REAL_TYPES_AND(Bool, b_type, ctx, "add.out", CTYPE_B, [&]() {
+      ET_SWITCH_REAL_TYPES_AND(
+          Bool, common_type, ctx, "add.out", CTYPE_IN, [&]() {
+            ET_SWITCH_REAL_TYPES_AND(
+                Bool, out_type, ctx, "add.out", CTYPE_OUT, [&]() {
+                  CTYPE_IN alpha_val;
+                  ET_EXTRACT_SCALAR(alpha, alpha_val);
+
+                  apply_binary_elementwise_fn<CTYPE_A, CTYPE_B, CTYPE_OUT>(
+                      [alpha_val](const CTYPE_A val_a, const CTYPE_B val_b) {
+                        CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
+                        CTYPE_IN b_casted = static_cast<CTYPE_IN>(val_b);
+                        CTYPE_IN value = a_casted + alpha_val * b_casted;
+
+                        return static_cast<CTYPE_OUT>(value);
+                      },
+                      a,
+                      b,
+                      out);
+                });
+          });
     });
   });
 
@@ -78,27 +80,29 @@ Tensor& add_scalar_out(
 
   ET_CHECK(common_type == out_type);
 
-  ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "add", CTYPE_A, [&]() {
-    ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "add", CTYPE_B, [&]() {
-      ET_SWITCH_REAL_TYPES_AND(Bool, common_type, ctx, "add", CTYPE_IN, [&]() {
-        ET_SWITCH_REAL_TYPES_AND(Bool, out_type, ctx, "add", CTYPE_OUT, [&]() {
-          CTYPE_B b_val;
-          ET_EXTRACT_SCALAR(b, b_val);
-          CTYPE_IN b_casted = static_cast<CTYPE_IN>(b_val);
-          CTYPE_IN alpha_val;
-          ET_EXTRACT_SCALAR(alpha, alpha_val);
-
-          apply_unary_map_fn(
-              [b_casted, alpha_val](const CTYPE_A val_a) {
-                CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
-                CTYPE_IN value = a_casted + alpha_val * b_casted;
-                return static_cast<CTYPE_OUT>(value);
-              },
-              a.const_data_ptr<CTYPE_A>(),
-              out.mutable_data_ptr<CTYPE_OUT>(),
-              out.numel());
-        });
-      });
+  ET_SWITCH_REAL_TYPES_AND(Bool, a_type, ctx, "add.Scalar_out", CTYPE_A, [&]() {
+    ET_SWITCH_SCALAR_OBJ_TYPES(b_type, ctx, "add.Scalar_out", CTYPE_B, [&]() {
+      ET_SWITCH_REAL_TYPES_AND(
+          Bool, common_type, ctx, "add.Scalar_out", CTYPE_IN, [&]() {
+            ET_SWITCH_REAL_TYPES_AND(
+                Bool, out_type, ctx, "add.Scalar_out", CTYPE_OUT, [&]() {
+                  CTYPE_B b_val;
+                  ET_EXTRACT_SCALAR(b, b_val);
+                  CTYPE_IN b_casted = static_cast<CTYPE_IN>(b_val);
+                  CTYPE_IN alpha_val;
+                  ET_EXTRACT_SCALAR(alpha, alpha_val);
+
+                  apply_unary_map_fn(
+                      [b_casted, alpha_val](const CTYPE_A val_a) {
+                        CTYPE_IN a_casted = static_cast<CTYPE_IN>(val_a);
+                        CTYPE_IN value = a_casted + alpha_val * b_casted;
+                        return static_cast<CTYPE_OUT>(value);
+                      },
+                      a.const_data_ptr<CTYPE_A>(),
+                      out.mutable_data_ptr<CTYPE_OUT>(),
+                      out.numel());
+                });
+          });
     });
   });
 

kernels/portable/cpu/op_addmm.cpp

Lines changed: 3 additions & 3 deletions
@@ -47,9 +47,9 @@ Tensor& addmm_out(
 
   ScalarType alpha_dtype = utils::get_scalar_dtype(alpha);
   ScalarType beta_dtype = utils::get_scalar_dtype(beta);
-  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "addmm", CTYPE, [&]() {
-    ET_SWITCH_SCALAR_OBJ_TYPES(alpha_dtype, ctx, "addmm", ALPHA_T, [&]() {
-      ET_SWITCH_SCALAR_OBJ_TYPES(beta_dtype, ctx, "addmm", BETA_T, [&]() {
+  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "addmm.out", CTYPE, [&]() {
+    ET_SWITCH_SCALAR_OBJ_TYPES(alpha_dtype, ctx, "addmm.out", ALPHA_T, [&]() {
+      ET_SWITCH_SCALAR_OBJ_TYPES(beta_dtype, ctx, "addmm.out", BETA_T, [&]() {
         size_t m = mat1.size(0);
         size_t n = mat1.size(1);
         size_t p = mat2.size(1);

kernels/portable/cpu/op_amax.cpp

Lines changed: 13 additions & 12 deletions
@@ -55,18 +55,19 @@ Tensor& amax_out(
   Error e = resize_reduction_out(in, dim_list, keepdim, out);
   ET_CHECK_MSG(e == Error::Ok, "Failed to resize out tensor in amax_out");
 
-  ET_SWITCH_REAL_TYPES_AND(Bool, in.scalar_type(), ctx, "amax", CTYPE, [&]() {
-    CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
-    for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
-      out_data[out_ix] = reduce_over_dim_list<CTYPE>(
-          [](CTYPE v, CTYPE max_v) {
-            return std::isnan(v) || v > max_v ? v : max_v;
-          },
-          in,
-          dim_list,
-          out_ix);
-    }
-  });
+  ET_SWITCH_REAL_TYPES_AND(
+      Bool, in.scalar_type(), ctx, "amax.out", CTYPE, [&]() {
+        CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
+        for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+          out_data[out_ix] = reduce_over_dim_list<CTYPE>(
+              [](CTYPE v, CTYPE max_v) {
+                return std::isnan(v) || v > max_v ? v : max_v;
+              },
+              in,
+              dim_list,
+              out_ix);
+        }
+      });
 
   return out;
 }

kernels/portable/cpu/op_amin.cpp

Lines changed: 13 additions & 12 deletions
@@ -55,18 +55,19 @@ Tensor& amin_out(
   Error e = resize_reduction_out(in, dim_list, keepdim, out);
   ET_CHECK_MSG(e == Error::Ok, "Failed to resize out tensor in amin_out");
 
-  ET_SWITCH_REAL_TYPES_AND(Bool, in.scalar_type(), ctx, "amin", CTYPE, [&]() {
-    CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
-    for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
-      out_data[out_ix] = reduce_over_dim_list<CTYPE>(
-          [](CTYPE v, CTYPE min_v) {
-            return std::isnan(v) || v < min_v ? v : min_v;
-          },
-          in,
-          dim_list,
-          out_ix);
-    }
-  });
+  ET_SWITCH_REAL_TYPES_AND(
+      Bool, in.scalar_type(), ctx, "amin.out", CTYPE, [&]() {
+        CTYPE* out_data = out.mutable_data_ptr<CTYPE>();
+        for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {
+          out_data[out_ix] = reduce_over_dim_list<CTYPE>(
+              [](CTYPE v, CTYPE min_v) {
+                return std::isnan(v) || v < min_v ? v : min_v;
+              },
+              in,
+              dim_list,
+              out_ix);
+        }
+      });
 
   return out;
 }

kernels/portable/cpu/op_any.cpp

Lines changed: 13 additions & 12 deletions
@@ -24,18 +24,19 @@ Tensor& any_all_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
   ScalarType in_type = in.scalar_type();
   ScalarType out_type = out.scalar_type();
 
-  ET_SWITCH_REAL_TYPES_AND(Bool, in_type, ctx, __func__, CTYPE_IN, [&] {
-    ET_SWITCH_TWO_TYPES(Bool, Byte, out_type, ctx, __func__, CTYPE_OUT, [&] {
-      const auto data_in = in.const_data_ptr<CTYPE_IN>();
-      auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
-      data_out[0] = static_cast<CTYPE_OUT>(false);
-      for (auto i = 0; i < in.numel(); ++i) {
-        if (static_cast<CTYPE_OUT>(data_in[i])) {
-          data_out[0] = static_cast<CTYPE_OUT>(true);
-          break;
-        }
-      }
-    });
+  ET_SWITCH_REAL_TYPES_AND(Bool, in_type, ctx, "any.all_out", CTYPE_IN, [&] {
+    ET_SWITCH_TWO_TYPES(
+        Bool, Byte, out_type, ctx, "any.all_out", CTYPE_OUT, [&] {
+          const auto data_in = in.const_data_ptr<CTYPE_IN>();
+          auto data_out = out.mutable_data_ptr<CTYPE_OUT>();
+          data_out[0] = static_cast<CTYPE_OUT>(false);
+          for (auto i = 0; i < in.numel(); ++i) {
+            if (static_cast<CTYPE_OUT>(data_in[i])) {
+              data_out[0] = static_cast<CTYPE_OUT>(true);
+              break;
+            }
+          }
+        });
   });
 
   return out;

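Several files in this commit (op_any.cpp above, op_arange.cpp and op_avg_pool2d.cpp below) had been passing __func__ instead of a string literal. A small standalone demonstration of the pitfall (not ExecuTorch code; the function name is borrowed from op_any.cpp): __func__ expands to the enclosing C++ function's name, which is never the dotted overload name recorded in selected_op.yaml.

// Standalone demonstration -- not ExecuTorch code. Shows why __func__ could
// not match the functions.yaml overload name "any.all_out".
#include <cstdio>
#include <cstring>

void any_all_out() {
  // __func__ is the C++ function name: "any_all_out".
  std::printf("__func__ = \"%s\"\n", __func__);
  std::printf("equals \"any.all_out\"? %s\n",
              std::strcmp(__func__, "any.all_out") == 0 ? "yes" : "no");
}

int main() {
  any_all_out();  // prints: __func__ = "any_all_out", then: equals ...? no
  return 0;
}
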
kernels/portable/cpu/op_arange.cpp

Lines changed: 27 additions & 23 deletions
@@ -30,7 +30,7 @@ Tensor& arange_out(RuntimeContext& ctx, const Scalar& end, Tensor& out) {
   ScalarType end_type = utils::get_scalar_dtype(end);
 
   double end_val = 0;
-  ET_SWITCH_SCALAR_OBJ_TYPES(end_type, ctx, __func__, CTYPE_END, [&]() {
+  ET_SWITCH_SCALAR_OBJ_TYPES(end_type, ctx, "arange.out", CTYPE_END, [&]() {
     CTYPE_END end_v;
     ET_EXTRACT_SCALAR(end, end_v);
     ET_KERNEL_CHECK_MSG(
@@ -49,7 +49,7 @@ Tensor& arange_out(RuntimeContext& ctx, const Scalar& end, Tensor& out) {
   ET_KERNEL_CHECK_MSG(
       ctx, status == Error::Ok, InvalidArgument, out, "resize_tensor fails");
 
-  ET_SWITCH_REAL_TYPES(out.scalar_type(), ctx, __func__, CTYPE, [&]() {
+  ET_SWITCH_REAL_TYPES(out.scalar_type(), ctx, "arange.out", CTYPE, [&]() {
     auto out_data = out.mutable_data_ptr<CTYPE>();
     for (size_t i = 0; i < size; i++) {
       out_data[i] = static_cast<CTYPE>(i);
@@ -72,25 +72,28 @@ Tensor& arange_start_out(
   ScalarType step_type = utils::get_scalar_dtype(step);
 
   double d_start = 0;
-  ET_SWITCH_SCALAR_OBJ_TYPES(start_type, ctx, __func__, CTYPE_END, [&]() {
-    CTYPE_END start_v;
-    ET_EXTRACT_SCALAR(start, start_v);
-    d_start = static_cast<double>(start_v);
-  });
+  ET_SWITCH_SCALAR_OBJ_TYPES(
+      start_type, ctx, "arange.start_out", CTYPE_END, [&]() {
+        CTYPE_END start_v;
+        ET_EXTRACT_SCALAR(start, start_v);
+        d_start = static_cast<double>(start_v);
+      });
 
   double d_end = 0;
-  ET_SWITCH_SCALAR_OBJ_TYPES(end_type, ctx, __func__, CTYPE_END, [&]() {
-    CTYPE_END end_v;
-    ET_EXTRACT_SCALAR(end, end_v);
-    d_end = static_cast<double>(end_v);
-  });
+  ET_SWITCH_SCALAR_OBJ_TYPES(
+      end_type, ctx, "arange.start_out", CTYPE_END, [&]() {
+        CTYPE_END end_v;
+        ET_EXTRACT_SCALAR(end, end_v);
+        d_end = static_cast<double>(end_v);
+      });
 
   double d_step = 0;
-  ET_SWITCH_SCALAR_OBJ_TYPES(step_type, ctx, __func__, CTYPE_END, [&]() {
-    CTYPE_END step_v;
-    ET_EXTRACT_SCALAR(step, step_v);
-    d_step = static_cast<double>(step_v);
-  });
+  ET_SWITCH_SCALAR_OBJ_TYPES(
+      step_type, ctx, "arange.start_out", CTYPE_END, [&]() {
+        CTYPE_END step_v;
+        ET_EXTRACT_SCALAR(step, step_v);
+        d_step = static_cast<double>(step_v);
+      });
 
   ET_KERNEL_CHECK_MSG(
       ctx,
@@ -107,12 +110,13 @@ Tensor& arange_start_out(
   ET_KERNEL_CHECK_MSG(
       ctx, status == Error::Ok, InvalidArgument, out, "resize_tensor fails");
 
-  ET_SWITCH_REAL_TYPES(out.scalar_type(), ctx, __func__, CTYPE, [&]() {
-    auto out_data = out.mutable_data_ptr<CTYPE>();
-    for (size_t i = 0; i < size; i++) {
-      out_data[i] = convert<CTYPE, double>(d_start + i * d_step);
-    }
-  });
+  ET_SWITCH_REAL_TYPES(
+      out.scalar_type(), ctx, "arange.start_out", CTYPE, [&]() {
+        auto out_data = out.mutable_data_ptr<CTYPE>();
+        for (size_t i = 0; i < size; i++) {
+          out_data[i] = convert<CTYPE, double>(d_start + i * d_step);
+        }
+      });
 
   return out;
 }

kernels/portable/cpu/op_argmax.cpp

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ Tensor& argmax_out(
   Error error = resize_reduction_out(in, dim, keepdim, out);
   ET_CHECK_MSG(error == Error::Ok, "Failed to resize out tensor in argmax_out");
 
-  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "argmax", CTYPE, [&] {
+  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "argmax.out", CTYPE, [&] {
     long* out_data = out.mutable_data_ptr<long>();
 
     for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {

kernels/portable/cpu/op_argmin.cpp

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ Tensor& argmin_out(
   Error error = resize_reduction_out(in, dim, keepdim, out);
   ET_CHECK_MSG(error == Error::Ok, "Failed to resize out tensor in argmin_out");
 
-  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "argmin", CTYPE, [&] {
+  ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "argmin.out", CTYPE, [&] {
     long* out_data = out.mutable_data_ptr<long>();
 
     for (size_t out_ix = 0; out_ix < out.numel(); ++out_ix) {

kernels/portable/cpu/op_avg_pool2d.cpp

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ Tensor& avg_pool2d_out(
       out);
 
   ScalarType in_type = in.scalar_type();
-  ET_SWITCH_FLOAT_TYPES_AND(Long, in_type, ctx, __func__, CTYPE, [&]() {
+  ET_SWITCH_FLOAT_TYPES_AND(Long, in_type, ctx, "avg_pool2d.out", CTYPE, [&]() {
     if (divisor_override.has_value()) {
       int64_t divisor = divisor_override.value();
       // If divisor_override is specified, then we don't need to use `count` in
