
Commit 5fb15f7

Songhao Jia authored and facebook-github-bot committed
introduce dim order tests to op test (#2637)
Summary:
Pull Request resolved: #2637

This diff introduces dim order sanity-check utilities and adds dim-order-related tests to the operator tests, to help our system maintain correctness as we introduce the new dim order ([0, 2, 3, 1]), which was never supported before. The goal is to check whether every operator supports its inputs' memory formats, and to run these checks alongside the regular tests. The high-level sanity checks and tests are:

1. The dim order of an operator's input and output should be the same.
2. The dim order of all input tensors should be the same, unless there is an operator-specific requirement for some input (e.g., an operator may require a particular input to be contiguous, although I haven't found an actual example yet).
3. An operator should support as many dim orders as possible (e.g., if an operator can support both contiguous and channels last, the sanity check has to accept both as valid input).

I also updated `op_abs` in this diff to demonstrate how the sanity check, as well as the tests, will be inserted.

Differential Revision: https://internalfb.com/D55227304
1 parent 6dbb4dc commit 5fb15f7

File tree

9 files changed (+459, -63 lines)
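For readers new to the concept: a dim order lists a tensor's dimensions from outermost to innermost in memory, so contiguous NCHW is [0, 1, 2, 3] and channels last is [0, 2, 3, 1]. The sketch below mirrors the stride computation of the `strides_from_dim_order` helper touched in this diff (the helper name comes from the diff; the standalone program around it is illustrative):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Walk the dim order from innermost to outermost dimension, accumulating
// sizes, as strides_from_dim_order does in tensor_factory.h.
std::vector<int64_t> strides_from_dim_order(
    const std::vector<int32_t>& sizes,
    const std::vector<uint8_t>& dim_order) {
  std::vector<int64_t> strides(sizes.size());
  strides[dim_order.back()] = 1;
  for (int i = static_cast<int>(sizes.size()) - 2; i >= 0; --i) {
    strides[dim_order[i]] =
        strides[dim_order[i + 1]] * sizes[dim_order[i + 1]];
  }
  return strides;
}

int main() {
  // The NCHW sizes {2, 3, 1, 5} used by the op_abs test below:
  // contiguous dim order [0, 1, 2, 3] gives strides {15, 5, 5, 1};
  // channels-last dim order [0, 2, 3, 1] gives strides {15, 1, 15, 3}.
  for (int64_t s : strides_from_dim_order({2, 3, 1, 5}, {0, 2, 3, 1})) {
    printf("%lld ", static_cast<long long>(s));
  }
  printf("\n"); // prints: 15 1 15 3
  return 0;
}
```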

kernels/portable/cpu/op_abs.cpp

Lines changed: 2 additions & 0 deletions
@@ -28,6 +28,8 @@ Tensor& abs_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
       "Failed to resize output tensor.");

   ET_KERNEL_CHECK(ctx, tensors_have_same_dtype(in, out), InvalidArgument, out);
+  ET_KERNEL_CHECK(
+      ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);

   ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "abs.out", CTYPE, [&] {
     apply_unary_map_fn(
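For context, the new `tensors_have_same_dim_order` check compares the two tensors' dim-order arrays element by element. A minimal sketch of those semantics, assuming each tensor exposes its dim order as a `uint8_t` array (this is an illustration, not the actual ExecuTorch helper):

```cpp
#include <cstddef>
#include <cstdint>

// Two tensors share a dim order iff they have the same rank and their
// dim-order arrays match position by position.
bool same_dim_order(
    const uint8_t* a_order, size_t a_dim,
    const uint8_t* b_order, size_t b_dim) {
  if (a_dim != b_dim) {
    return false;
  }
  for (size_t i = 0; i < a_dim; ++i) {
    if (a_order[i] != b_order[i]) {
      return false;
    }
  }
  return true;
}
```

With this check in place, `abs_out` rejects, say, a contiguous input paired with a channels-last output instead of silently writing elements to the wrong offsets.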

kernels/test/TestUtil.h

Lines changed: 38 additions & 0 deletions
@@ -30,6 +30,24 @@
 #define ET_EXPECT_KERNEL_FAILURE_WITH_MSG(_context, _statement, _matcher) \
   EXPECT_ANY_THROW(_statement)

+#define ET_TEST_OP_SUPPORTS_MEMORY_FORMATS(                                  \
+    tf, op, input_contiguous, expected_contiguous, channels_last_support)    \
+  Tensor input_channels_last = tf.channels_last_like(input_contiguous);      \
+  Tensor expected_channel_last = tf.channels_last_like(expected_contiguous); \
+                                                                             \
+  Tensor output_contiguous = tf.zeros_like(expected_contiguous);             \
+  Tensor output_channels_last = tf.channels_last_like(output_contiguous);    \
+                                                                             \
+  Tensor ret = op(input_channels_last, output_channels_last);                \
+  if (channels_last_support) {                                               \
+    EXPECT_TENSOR_EQ(output_channels_last, expected_channel_last);           \
+  } else {                                                                   \
+    EXPECT_TENSOR_NE(output_channels_last, expected_channel_last);           \
+  }                                                                          \
+  EXPECT_TENSOR_EQ(output_channels_last, ret);                               \
+  ET_EXPECT_KERNEL_FAILURE(                                                  \
+      context_, op(input_channels_last, output_contiguous));
+
 #else

 #define ET_EXPECT_KERNEL_FAILURE(_context, _statement) \
@@ -52,6 +70,26 @@
   } \
 } while (false)

+#define ET_TEST_OP_SUPPORTS_MEMORY_FORMATS(                                  \
+    tf, op, input_contiguous, expected_contiguous, channels_last_support)    \
+  Tensor input_channels_last = tf.channels_last_like(input_contiguous);      \
+  Tensor expected_channel_last = tf.channels_last_like(expected_contiguous); \
+                                                                             \
+  Tensor output_contiguous = tf.zeros_like(expected_contiguous);             \
+  Tensor output_channels_last = tf.channels_last_like(output_contiguous);    \
+                                                                             \
+  Tensor ret = op(input_channels_last, output_channels_last);                \
+  if (channels_last_support) {                                               \
+    EXPECT_TENSOR_EQ(output_channels_last, expected_channel_last);           \
+  } else {                                                                   \
+    EXPECT_TENSOR_NE(output_channels_last, expected_channel_last);           \
+  }                                                                          \
+  EXPECT_TENSOR_EQ(output_channels_last, ret);                               \
+  ET_EXPECT_KERNEL_FAILURE(                                                  \
+      context_, op(input_channels_last, output_contiguous));                 \
+  ET_EXPECT_KERNEL_FAILURE(                                                  \
+      context_, op(input_contiguous, output_channels_last));
+
 #endif // USE_ATEN_LIB

 /*
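As a usage sketch, a kernel test invokes the macro once with contiguous input and expected tensors, and the macro derives the channels-last and mixed-format cases itself. `OpFooTest` and `op_foo_out` are placeholders here; the real example is the op_abs test below:

```cpp
// Hypothetical test; OpFooTest, op_foo_out, and the data are placeholders.
TEST_F(OpFooTest, MemoryFormatCheck) {
  TensorFactory<ScalarType::Float> tf;

  // Both tensors must be 4-D and contiguous.
  Tensor input_contiguous = tf.ones({2, 3, 4, 5});
  Tensor expected_contiguous = tf.ones({2, 3, 4, 5});

  ET_TEST_OP_SUPPORTS_MEMORY_FORMATS(
      tf,
      op_foo_out,
      input_contiguous,
      expected_contiguous,
      /*channels_last_support=*/true);
}
```

Note how the two macro variants differ in their last expectations: the portable build additionally requires that a contiguous input paired with a channels-last output fails, while the ATen build only exercises the reverse mismatch.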

kernels/test/op_abs_test.cpp

Lines changed: 25 additions & 0 deletions
@@ -38,3 +38,28 @@ TEST_F(OpAbsTest, SanityCheck) {
   EXPECT_TENSOR_EQ(out, ret);
   EXPECT_TENSOR_EQ(out, expected);
 }
+
+TEST_F(OpAbsTest, MemoryFormatCheck) {
+  TensorFactory<ScalarType::Float> tf;
+
+  std::vector<int32_t> sizes = {2, 3, 1, 5};
+
+  Tensor input_contiguous =
+      tf.make(sizes, {0.8737,  0.5359,  0.3743,  -0.3040, -0.7800, -0.2306,
+                      -0.7684, -0.5364, 0.3478,  -0.3289, 0.0829,  0.2939,
+                      -0.8211, 0.8572,  -0.0802, 0.9252,  -0.2093, 0.9013,
+                      -0.4197, 0.3987,  -0.5291, -0.5567, 0.2691,  0.7819,
+                      -0.8009, -0.4286, -0.9299, 0.2143,  0.2565,  -0.5701});
+  Tensor expected_contiguous = tf.make(
+      sizes, {0.8737, 0.5359, 0.3743, 0.3040, 0.7800, 0.2306, 0.7684, 0.5364,
+              0.3478, 0.3289, 0.0829, 0.2939, 0.8211, 0.8572, 0.0802, 0.9252,
+              0.2093, 0.9013, 0.4197, 0.3987, 0.5291, 0.5567, 0.2691, 0.7819,
+              0.8009, 0.4286, 0.9299, 0.2143, 0.2565, 0.5701});
+
+  ET_TEST_OP_SUPPORTS_MEMORY_FORMATS(
+      tf,
+      op_abs_out,
+      input_contiguous,
+      expected_contiguous,
+      /*channels_last_support=*/true);
+}

runtime/core/exec_aten/testing_util/tensor_factory.h

Lines changed: 140 additions & 11 deletions
@@ -3,8 +3,10 @@
 #pragma once

 #include <algorithm>
+#include <cstdint>

 #include <executorch/runtime/core/exec_aten/exec_aten.h>
+#include <executorch/runtime/core/exec_aten/util/dim_order_util.h>
 #include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
 #include <executorch/runtime/core/tensor_shape_dynamism.h>
 #include <executorch/runtime/platform/assert.h>
@@ -54,7 +56,7 @@ inline size_t sizes_to_numel(const std::vector<int32_t>& sizes) {

 inline bool check_strides(
     const std::vector<int32_t> sizes,
-    const std::vector<int32_t> strides) {
+    const std::vector<exec_aten::StridesType> strides) {
   if (sizes.size() != strides.size()) {
     // The length of stride vector shall equal to size vector.
     return false;
@@ -147,14 +149,14 @@ inline bool check_dim_order(
   return true;
 }

-inline std::vector<int32_t> strides_from_dim_order(
+inline std::vector<exec_aten::StridesType> strides_from_dim_order(
     const std::vector<int32_t>& sizes,
     const std::vector<uint8_t>& dim_order) {
   bool legal = check_dim_order(sizes, dim_order);
   ET_CHECK_MSG(legal, "The input dim_order variable is illegal.");

   size_t ndim = sizes.size();
-  std::vector<int32_t> strides(ndim);
+  std::vector<exec_aten::StridesType> strides(ndim);
   strides[dim_order[ndim - 1]] = 1;
   for (int i = ndim - 2; i >= 0; --i) {
     uint8_t cur_dim = dim_order[i];
@@ -258,7 +260,7 @@
   at::Tensor make(
       const std::vector<int32_t>& sizes,
       const std::vector<ctype>& data,
-      const std::vector<int32_t> strides = {},
+      const std::vector<exec_aten::StridesType> strides = {},
       __ET_UNUSED TensorShapeDynamism dynamism =
           TensorShapeDynamism::DYNAMIC_UNBOUND) {
     auto expected_numel = internal::sizes_to_numel(sizes);
@@ -344,6 +346,67 @@
         sizes, data, internal::channels_last_dim_order(sizes.size()), dynamism);
   }

+  /**
+   * Given a tensor in contiguous memory format, returns a new Tensor with
+   * the same shape and the same data but in channels last memory format.
+   *
+   * @param[in] input The tensor to copy; its data must be in contiguous
+   * memory format.
+   *
+   * @return A new Tensor with the same shape and data in channels last
+   * memory format.
+   */
+  at::Tensor channels_last_like(
+      const Tensor& input,
+      TensorShapeDynamism dynamism = TensorShapeDynamism::STATIC) {
+    const std::vector<int32_t> sizes(
+        input.sizes().begin(), input.sizes().end());
+
+    std::vector<uint8_t> channels_last_dim_order =
+        internal::channels_last_dim_order(sizes.size());
+    std::vector<exec_aten::StridesType> channels_last_strides =
+        internal::strides_from_dim_order(sizes, channels_last_dim_order);
+
+    for (int32_t i = 0; i < input.dim(); i++) {
+      ET_CHECK_MSG(
+          input.strides()[i] == channels_last_strides[i],
+          "Input tensor is not contiguous");
+    }
+
+    int32_t N = sizes[0];
+    int32_t C = sizes[1];
+    int32_t H = sizes[2];
+    int32_t W = sizes[3];
+
+    std::vector<ctype> contiguous_data(
+        input.data_ptr<ctype>(), input.data_ptr<ctype>() + input.numel());
+    std::vector<ctype> channels_last_data(
+        N * C * H * W); // Create a new blob with the same total size to
+                        // contain channels_last data
+    for (int32_t n = 0; n < N; ++n) {
+      for (int32_t c = 0; c < C; ++c) {
+        for (int32_t h = 0; h < H; ++h) {
+          for (int32_t w = 0; w < W; ++w) {
+            // Calculate the index in the original blob
+            int32_t old_index = ((n * C + c) * H + h) * W + w;
+            // Calculate the index in the new blob
+            int32_t new_index = ((n * H + h) * W + w) * C + c;
+            // Copy the data
+            channels_last_data[new_index] = contiguous_data[old_index];
+          }
+        }
+      }
+    }
+
+    return make_with_dimorder(
+        sizes,
+        channels_last_data,
+        internal::channels_last_dim_order(sizes.size()),
+        dynamism);
+  }
+
   /**
    * Returns a new Tensor with the specified shape, containing contiguous
    * data will all elements set to `value`.
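To make the index arithmetic in `channels_last_like` concrete, here is a standalone check of the same NCHW-to-NHWC mapping (illustrative program; only the two index formulas are taken from the diff):

```cpp
#include <cassert>
#include <vector>

int main() {
  const int N = 1, C = 2, H = 2, W = 2;
  // Contiguous (NCHW) data: each value equals its own NCHW index.
  std::vector<int> contiguous = {0, 1, 2, 3, 4, 5, 6, 7};
  std::vector<int> channels_last(N * C * H * W);
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int h = 0; h < H; ++h)
        for (int w = 0; w < W; ++w)
          channels_last[((n * H + h) * W + w) * C + c] = // new NHWC index
              contiguous[((n * C + c) * H + h) * W + w]; // old NCHW index
  // Element (n=0, c=1, h=0, w=0) sits at NCHW index 4 and NHWC index 1.
  assert(channels_last[1] == 4);
  return 0;
}
```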
@@ -459,14 +522,13 @@
    */
   at::Tensor empty_strided(
       const std::vector<int32_t>& sizes,
-      const std::vector<int32_t>& strides,
+      const std::vector<exec_aten::StridesType>& strides,
       __ET_UNUSED TensorShapeDynamism dynamism =
           TensorShapeDynamism::DYNAMIC_UNBOUND) {
     auto sizes64 = vec_32_to_64(sizes);
-    auto strides64 = vec_32_to_64(strides);
     return at::empty_strided(
         sizes64,
-        strides64,
+        strides,
         DTYPE,
         /*layout_opt=*/at::Layout::Strided,
         /*device_opt=*/at::Device(at::DeviceType::CPU),
@@ -665,7 +727,7 @@
   torch::executor::Tensor make(
       const std::vector<int32_t>& sizes,
       const std::vector<ctype>& data,
-      const std::vector<int32_t> strides = {},
+      const std::vector<exec_aten::StridesType> strides = {},
       TensorShapeDynamism dynamism = TensorShapeDynamism::STATIC) {
     std::vector<int32_t> default_strides;
     // Generate strides from the tensor dimensions, assuming contiguous data if
@@ -745,7 +807,7 @@

   /**
    * Returns a new Tensor with the specified shape and data in channels last
-   * memory layout.
+   * memory format.
    *
    * @param[in] sizes The sizes of the dimensions of the Tensor.
    * @param[in] data The data that the Tensor should be initialized with. The
@@ -763,6 +825,60 @@
         sizes, data, internal::channels_last_dim_order(sizes.size()), dynamism);
   }

+  /**
+   * Given a tensor in contiguous memory format, returns a new Tensor with
+   * the same shape and the same data but in channels last memory format.
+   *
+   * @param[in] input The tensor to copy; its data must be in contiguous
+   * memory format.
+   *
+   * @return A new Tensor with the same shape and data in channels last
+   * memory format.
+   */
+  torch::executor::Tensor channels_last_like(
+      const Tensor& input,
+      TensorShapeDynamism dynamism = TensorShapeDynamism::STATIC) {
+    const std::vector<int32_t> sizes(
+        input.sizes().begin(), input.sizes().end());
+
+    ET_CHECK_MSG(sizes.size() == 4, "Only 4D tensors can be channels last");
+    ET_CHECK_MSG(
+        is_contiguous_dim_order(input.dim_order().data(), input.dim()) == true,
+        "Input tensor is not contiguous");
+    int32_t N = sizes[0];
+    int32_t C = sizes[1];
+    int32_t H = sizes[2];
+    int32_t W = sizes[3];
+
+    std::vector<ctype> contiguous_data(
+        input.data_ptr<ctype>(), input.data_ptr<ctype>() + input.numel());
+    std::vector<ctype> channels_last_data(
+        N * C * H * W); // Create a new blob with the same total size to
+                        // contain channels_last data
+    for (int32_t n = 0; n < N; ++n) {
+      for (int32_t c = 0; c < C; ++c) {
+        for (int32_t h = 0; h < H; ++h) {
+          for (int32_t w = 0; w < W; ++w) {
+            // Calculate the index in the original blob
+            int32_t old_index = ((n * C + c) * H + h) * W + w;
+            // Calculate the index in the new blob
+            int32_t new_index = ((n * H + h) * W + w) * C + c;
+            // Copy the data
+            channels_last_data[new_index] = contiguous_data[old_index];
+          }
+        }
+      }
+    }
+
+    return make_with_dimorder(
+        sizes,
+        channels_last_data,
+        internal::channels_last_dim_order(sizes.size()),
+        dynamism);
+  }
+
   /**
    * Returns a new Tensor with the specified shape, containing contiguous data
    * will all elements set to `value`.
@@ -798,7 +914,20 @@

   /**
    * Returns a new Tensor with the specified shape, containing contiguous data
-   * with all `0` elements.
+   * in channels last memory format with all `0` elements.
+   *
+   * @param[in] sizes The sizes of the dimensions of the Tensor.
+   * @return A new Tensor with the specified shape.
+   */
+  torch::executor::Tensor zeros_channels_last(
+      const std::vector<int32_t>& sizes,
+      TensorShapeDynamism dynamism = TensorShapeDynamism::STATIC) {
+    return full_channels_last(sizes, 0, dynamism);
+  }
+
+  /**
+   * Returns a new Tensor with the specified shape, containing contiguous data
+   * in contiguous memory format with all `0` elements.
    *
    * @param[in] sizes The sizes of the dimensions of the Tensor.
    * @return A new Tensor with the specified shape.
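A quick usage note: since `zeros_channels_last` simply delegates to `full_channels_last` with value 0, a zeroed channels-last tensor is one call away (sketch, assuming a portable-mode factory in scope):

```cpp
TensorFactory<ScalarType::Float> tf;
// 4-D NCHW sizes; the result carries dim order [0, 2, 3, 1].
Tensor zeros_cl = tf.zeros_channels_last({2, 3, 4, 5});
```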
@@ -877,7 +1006,7 @@
   std::vector<int32_t> sizes_;
   std::vector<ctype> data_;
   std::vector<uint8_t> dim_order_;
-  std::vector<int32_t> strides_;
+  std::vector<exec_aten::StridesType> strides_;
   TensorImpl impl_;
 };