Skip to content

Commit c4054f1

Browse files
[et][dim order] empty dim order
[et][dim order] dim order variant empty operator Pull Request resolved: #7154 ghstack-source-id: 256435474 @exported-using-ghexport This diff introduces the dim order variant of the empty operator, to replace the original empty operator when dim order is used in ExecuTorch. Differential Revision: [D66683250](https://our.internmc.facebook.com/intern/diff/D66683250/) Co-authored-by: gasoonjia <[email protected]>
1 parent afc2472 commit c4054f1

File tree

8 files changed

+373
-0
lines changed

8 files changed

+373
-0
lines changed
Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
#include <executorch/runtime/kernel/kernel_includes.h>

#include <cstdint>
#include <cstring>
#include <vector>
15+
namespace torch {
16+
namespace executor {
17+
namespace native {
18+
19+
using exec_aten::IntArrayRef;
20+
using exec_aten::Tensor;
21+
using OptionalIntArrayRef = exec_aten::OptionalArrayRef<int64_t>;
22+
using DimOrderArrayRef = exec_aten::ArrayRef<executorch::aten::DimOrderType>;
23+
// Out Aten tensor shall have same memory format stride as dim_order
24+
const size_t kMaxNumOfDimensions = 16;
25+
26+
namespace {
27+
28+
inline bool _check__empty_out_dim_order(
29+
OptionalIntArrayRef dim_order,
30+
Tensor& out) {
31+
exec_aten::ArrayRef<int64_t> dim_order_ref;
32+
std::vector<int64_t> dim_order_vec;
33+
34+
if (dim_order.has_value()) {
35+
// out tensor's dim order shall equal to input dim order
36+
dim_order_ref = exec_aten::ArrayRef<int64_t>(
37+
dim_order.value().data(), dim_order.value().size());
38+
} else { // dim_order is not set, out tensor should be contiguous dim order
39+
for (int i = 0; i < out.dim(); i++) {
40+
dim_order_vec.push_back(i);
41+
}
42+
dim_order_ref = exec_aten::ArrayRef<int64_t>(dim_order_vec);
43+
}
44+
45+
// dim order size shall equal to input dim
46+
ET_LOG_AND_RETURN_IF_FALSE(dim_order_ref.size() == out.dim());
47+
48+
ET_LOG_AND_RETURN_IF_FALSE(
49+
is_channels_last_dim_order(dim_order_ref.data(), dim_order_ref.size()) ||
50+
is_contiguous_dim_order(dim_order_ref.data(), dim_order_ref.size()));
51+
52+
ET_LOG_AND_RETURN_IF_FALSE(kMaxNumOfDimensions >= out.dim());
53+
exec_aten::StridesType target_strides[kMaxNumOfDimensions];
54+
dim_order_to_stride_nocheck(
55+
out.sizes().data(),
56+
dim_order_ref.data(),
57+
dim_order_ref.size(),
58+
target_strides);
59+
60+
for (size_t i = 0; i < dim_order_ref.size(); i++) {
61+
ET_LOG_AND_RETURN_IF_FALSE(target_strides[i] == out.strides()[i]);
62+
}
63+
64+
return true;
65+
}
66+
67+
} // namespace
68+
69+
/*
70+
* Empty out tensor with specified dim order
71+
*
72+
* _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out)
73+
* -> Tensor(a!)
74+
*/
75+
Tensor& _empty_dim_order_out(
76+
KernelRuntimeContext& context,
77+
IntArrayRef size,
78+
OptionalIntArrayRef dim_order,
79+
Tensor& out) {
80+
(void)context;
81+
82+
// Check if dim_order is valid
83+
ET_KERNEL_CHECK(
84+
context,
85+
_check__empty_out_dim_order(dim_order, out),
86+
InvalidArgument,
87+
out);
88+
89+
// Resize for dynamic shape
90+
ET_KERNEL_CHECK_MSG(
91+
context,
92+
resize_tensor(out, size) == Error::Ok,
93+
InvalidArgument,
94+
out,
95+
"Failed to resize output tensor.");
96+
97+
return out;
98+
}
99+
100+
// Convenience overload for callers without a runtime context: forwards to
// the context-taking variant with a default-constructed context.
Tensor& _empty_dim_order_out(
    IntArrayRef size,
    OptionalIntArrayRef dim_order,
    Tensor& out) {
  executorch::runtime::KernelRuntimeContext default_context{};
  return _empty_dim_order_out(default_context, size, dim_order, out);
}
107+
108+
} // namespace native
109+
} // namespace executor
110+
} // namespace torch

kernels/aten/cpu/targets.bzl

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,9 @@ load("@fbsource//xplat/executorch/kernels/portable:op_registration_util.bzl", "d
99
# ops, and must be split. They can, however, share common code via a library dep
1010
# if necessary.
1111
_EDGE_DIALECT_OPS = (
12+
op_target(
13+
name = "op__empty_dim_order",
14+
),
1215
op_target(
1316
name = "op__to_dim_order_copy",
1417
deps = [

kernels/aten/edge_dialect_aten_op.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,11 @@
22
#
33
# This yaml file contains operators that are defined by ExecuTorch and used in ATen mode.
44

5+
- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
6+
kernels:
7+
- arg_meta: null
8+
kernel_name: torch::executor::_empty_dim_order_out
9+
510
- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
611
kernels:
712
- arg_meta: null
Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,82 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
10+
#include <executorch/runtime/kernel/kernel_includes.h>
11+
12+
#include <cstdint>
13+
#include <cstring>
14+
15+
namespace torch {
16+
namespace executor {
17+
namespace native {
18+
19+
using exec_aten::Tensor;
20+
using OptionalIntArrayRef = exec_aten::OptionalArrayRef<int64_t>;
21+
using DimOrderArrayRef = exec_aten::ArrayRef<executorch::aten::DimOrderType>;
22+
23+
namespace {
24+
25+
bool _check__empty_out_dim_order(OptionalIntArrayRef dim_order, Tensor& out) {
26+
DimOrderArrayRef out_dim_order = out.dim_order();
27+
28+
if (dim_order.has_value()) {
29+
// out tensor's dim order shall equal to input dim order
30+
IntArrayRef dim_order_ref = dim_order.value();
31+
32+
ET_LOG_AND_RETURN_IF_FALSE(
33+
is_channels_last_dim_order(
34+
dim_order.value().data(), dim_order.value().size()) ||
35+
is_contiguous_dim_order(
36+
dim_order.value().data(), dim_order.value().size()));
37+
38+
// Out tensor shall have same dim order as dim_order
39+
ET_LOG_AND_RETURN_IF_FALSE(out_dim_order.size() == dim_order_ref.size());
40+
for (size_t i = 0; i < dim_order_ref.size(); i++) {
41+
ET_LOG_AND_RETURN_IF_FALSE(out_dim_order[i] == dim_order_ref[i]);
42+
}
43+
} else { // dim_order is not set, out tensor should be contiguous memory
44+
// format
45+
ET_LOG_AND_RETURN_IF_FALSE(
46+
is_contiguous_dim_order(out_dim_order.data(), out_dim_order.size()));
47+
}
48+
return true;
49+
}
50+
51+
} // namespace
52+
53+
/*
54+
* Empty out tensor with specified dim order
55+
*
56+
* _empty_dim_order.out(SymInt[] size, *, int[]? dim_order=None, Tensor(a!) out)
57+
* -> Tensor(a!)
58+
*/
59+
Tensor& _empty_dim_order_out(
60+
KernelRuntimeContext& context,
61+
IntArrayRef size,
62+
OptionalIntArrayRef dim_order,
63+
Tensor& out) {
64+
(void)context;
65+
66+
// Check if dim_order is valid
67+
_check__empty_out_dim_order(dim_order, out);
68+
69+
// Resize for dynamic shape
70+
ET_KERNEL_CHECK_MSG(
71+
context,
72+
resize_tensor(out, size) == Error::Ok,
73+
InvalidArgument,
74+
out,
75+
"Failed to resize output tensor.");
76+
77+
return out;
78+
}
79+
80+
} // namespace native
81+
} // namespace executor
82+
} // namespace torch

kernels/portable/functions.yaml

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -937,6 +937,11 @@
937937
- arg_meta: null
938938
kernel_name: torch::executor::zeros_out
939939

940+
- func: dim_order_ops::_empty_dim_order.out(int[] size, *, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
941+
kernels:
942+
- arg_meta: null
943+
kernel_name: torch::executor::_empty_dim_order_out
944+
940945
- func: dim_order_ops::_to_dim_order_copy.out(Tensor self, *, bool non_blocking=False, int[]? dim_order=None, Tensor(a!) out) -> Tensor(a!)
941946
kernels:
942947
- arg_meta: null
Lines changed: 161 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,161 @@
1+
/*
2+
* Copyright (c) Meta Platforms, Inc. and affiliates.
3+
* All rights reserved.
4+
*
5+
* This source code is licensed under the BSD-style license found in the
6+
* LICENSE file in the root directory of this source tree.
7+
*/
8+
9+
#include <executorch/kernels/test/FunctionHeaderWrapper.h> // Declares the operator
10+
#include <executorch/kernels/test/TestUtil.h>
11+
#include <executorch/kernels/test/supported_features.h>
12+
#include <executorch/runtime/core/exec_aten/exec_aten.h>
13+
#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
14+
#include <executorch/runtime/core/exec_aten/testing_util/tensor_util.h>
15+
16+
#include <gtest/gtest.h>
17+
18+
using namespace ::testing;
19+
using exec_aten::DimOrderType;
20+
using exec_aten::IntArrayRef;
21+
using exec_aten::optional;
22+
using exec_aten::OptionalArrayRef;
23+
using exec_aten::ScalarType;
24+
using exec_aten::Tensor;
25+
using torch::executor::testing::TensorFactory;
26+
27+
// Test fixture for the dim_order_ops::_empty_dim_order.out operator.
class OpEmptyDimOrderOutTest : public OperatorTest {
 protected:
  // Thin wrapper forwarding to the generated out-variant entry point,
  // supplying the fixture's kernel runtime context.
  Tensor& op_empty_dim_order_out(
      IntArrayRef size,
      OptionalArrayRef<int64_t> dim_order,
      Tensor& out) {
    return torch::executor::dim_order_ops::_empty_dim_order_outf(
        context_, size, dim_order, out);
  }

  // Smoke test: call the op with an unset dim_order (contiguous assumed)
  // on an out tensor that already has the requested shape.
  template <ScalarType DTYPE>
  void test_op_empty_dim_order_out(std::vector<int32_t>&& size_int32_t) {
    TensorFactory<DTYPE> tf;
    std::vector<int64_t> sizes(size_int32_t.begin(), size_int32_t.end());
    auto aref = exec_aten::ArrayRef<int64_t>(sizes.data(), sizes.size());
    OptionalArrayRef<int64_t> dim_order;
    Tensor out = tf.ones(size_int32_t);

    op_empty_dim_order_out(aref, dim_order, out);
  }

  // NOTE(review): the three *_die helpers below are not invoked by any
  // TEST_F visible in this chunk — confirm they are exercised elsewhere.

  // A dim_order with fewer entries than the tensor rank (2 vs 3) must
  // fail the kernel.
  void too_short_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[3] = {3, 2, 4};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    int64_t raw_dim_order[2] = {0, 1};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out =
        tf.ones({3, 2, 4}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }

  // A dim_order containing out-of-range values ({1, 2} for a rank-2
  // tensor) must fail the kernel.
  void illegal_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[2] = {3, 2};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    int64_t raw_dim_order[2] = {1, 2};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out =
        tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }

  // A legal dim_order that does not match the out tensor's actual layout
  // (contiguous requested, channels-last out) must fail the kernel.
  void wrong_dim_order_die() {
    TensorFactory<ScalarType::Float> tf;

    int64_t sizes[4] = {3, 2, 4, 5};
    auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

    // should be {0, 2, 3, 1}
    int64_t raw_dim_order[4] = {0, 1, 2, 3};
    auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
    Tensor out = tf.full_channels_last(
        {3, 2, 4, 5}, 1, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
    ET_EXPECT_KERNEL_FAILURE(
        context_, op_empty_dim_order_out(sizes_aref, dim_order, out));
  }
};
91+
92+
// Instantiates the default-dim-order smoke test for one dtype, covering a
// normal shape, a zero-sized dimension, and a zero-dim (scalar) tensor.
#define GENERATE_TEST(_, DTYPE)                                  \
  TEST_F(OpEmptyDimOrderOutTest, DTYPE##Tensors) {               \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({2, 3, 4});   \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({2, 0, 4});   \
    test_op_empty_dim_order_out<ScalarType::DTYPE>({});          \
  }

// Expands GENERATE_TEST for every real scalar type plus Bool.
ET_FORALL_REAL_TYPES_AND(Bool, GENERATE_TEST)
100+
101+
// Unset dim_order with an out tensor whose bound shape already equals the
// requested size: the op should succeed without resizing.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUpperBoundSameAsExpected) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);
  OptionalArrayRef<int64_t> dim_order;
  Tensor out =
      tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

// Explicit contiguous dim order {0, 1} matching a contiguous out tensor
// should be accepted.
// NOTE(review): "Succees" in the test name looks like a typo for "Success";
// left unchanged to avoid renaming a registered test.
TEST_F(OpEmptyDimOrderOutTest, ContiguousDimOrderSuccees) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

  int64_t raw_dim_order[2] = {0, 1};
  auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
  Tensor out =
      tf.ones({3, 2}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

// Explicit channels-last dim order {0, 2, 3, 1} matching a channels-last
// out tensor should be accepted.
// NOTE(review): same "Succees" typo as above.
TEST_F(OpEmptyDimOrderOutTest, ChannelsLastsDimOrderSuccees) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[4] = {3, 2, 4, 5};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);

  int64_t raw_dim_order[4] = {0, 2, 3, 1};
  auto dim_order = OptionalArrayRef<int64_t>(raw_dim_order);
  Tensor out = tf.full_channels_last(
      {3, 2, 4, 5}, 1, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

// Out tensor's bound shape (10x10) is larger than the requested size (3x2):
// resize_tensor should shrink it within the bound.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUpperBoundLargerThanExpected) {
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);
  OptionalArrayRef<int64_t> dim_order;
  Tensor out =
      tf.ones({10, 10}, torch::executor::TensorShapeDynamism::DYNAMIC_BOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

// Fully dynamic (unbound) out tensor: requires the kernel variant to
// support output resizing, otherwise the test is skipped.
TEST_F(OpEmptyDimOrderOutTest, DynamicShapeUnbound) {
  if (!torch::executor::testing::SupportedFeatures::get()->output_resize) {
    GTEST_SKIP() << "Dynamic shape unbound not supported";
  }
  TensorFactory<ScalarType::Float> tf;

  int64_t sizes[2] = {3, 2};
  auto sizes_aref = exec_aten::ArrayRef<int64_t>(sizes);
  OptionalArrayRef<int64_t> dim_order;
  Tensor out =
      tf.ones({1, 1}, torch::executor::TensorShapeDynamism::DYNAMIC_UNBOUND);
  op_empty_dim_order_out(sizes_aref, dim_order, out);
}

kernels/test/targets.bzl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -174,6 +174,7 @@ def define_common_targets():
174174
codegen_function_header_wrapper("executorch/kernels/test/custom_kernel_example", "custom_kernel_example")
175175

176176
_common_op_test("op__to_dim_order_copy_test", ["aten", "portable"])
177+
_common_op_test("op__empty_dim_order_test", ["aten", "portable"])
177178
_common_op_test("op_abs_test", ["aten", "portable"])
178179
_common_op_test("op_acos_test", ["aten", "portable"])
179180
_common_op_test("op_acosh_test", ["aten", "portable"])

0 commit comments

Comments
 (0)