Skip to content

Commit a658082

Browse files
Gasoonjia authored and facebook-github-bot committed
q to s start ops | add dim order sanity check (#4332)
Summary: Pull Request resolved: #4332. This diff updates the sanity checks on operators starting with q to s for the dim order regulation. Tracking table: https://docs.google.com/spreadsheets/d/1Gttxkur8H6QnNfiCGfSAKwtBqdL6MSxn9eJ62bVYS_w/edit?gid=0#gid=0. Reviewed By: digantdesai. Differential Revision: D59984028
1 parent b52d4b6 commit a658082

23 files changed

+110
-0
lines changed

kernels/portable/cpu/op_reflection_pad1d.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,11 @@ Tensor& reflection_pad1d_out(
2828
InvalidArgument,
2929
out);
3030

31+
ET_KERNEL_CHECK(
32+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
33+
34+
ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);
35+
3136
Tensor::SizesType target_sizes[kTensorDimensionLimit];
3237
size_t target_ndim = 0;
3338
get_padding_out_target_size(1, in, padding, target_sizes, &target_ndim);

kernels/portable/cpu/op_reflection_pad2d.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,11 @@ Tensor& reflection_pad2d_out(
2828
InvalidArgument,
2929
out);
3030

31+
ET_KERNEL_CHECK(
32+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
33+
34+
ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);
35+
3136
Tensor::SizesType target_sizes[kTensorDimensionLimit];
3237
size_t target_ndim = 0;
3338
get_padding_out_target_size(2, in, padding, target_sizes, &target_ndim);

kernels/portable/cpu/op_reflection_pad3d.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,11 @@ Tensor& reflection_pad3d_out(
2828
InvalidArgument,
2929
out);
3030

31+
ET_KERNEL_CHECK(
32+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
33+
34+
ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);
35+
3136
Tensor::SizesType target_sizes[kTensorDimensionLimit];
3237
size_t target_ndim = 0;
3338
get_padding_out_target_size(3, in, padding, target_sizes, &target_ndim);

kernels/portable/cpu/op_relu.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,9 @@ Tensor& relu_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
3535

3636
ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out);
3737

38+
ET_KERNEL_CHECK(
39+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
40+
3841
ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "relu.out", CTYPE, [&]() {
3942
apply_unary_map_fn(
4043
[](const CTYPE val_in) {

kernels/portable/cpu/op_remainder.cpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,9 @@ Tensor& remainder_Tensor_out(
8080
InvalidArgument,
8181
out);
8282

83+
ET_KERNEL_CHECK(
84+
ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);
85+
8386
ScalarType a_type = a.scalar_type();
8487
ScalarType b_type = b.scalar_type();
8588
ScalarType common_type = promoteTypes(a_type, b_type);
@@ -124,6 +127,9 @@ Tensor& remainder_Scalar_out(
124127
out,
125128
"Failed to resize output tensor.");
126129

130+
ET_KERNEL_CHECK(
131+
ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
132+
127133
ScalarType a_type = a.scalar_type();
128134
ScalarType b_type = utils::get_scalar_dtype(b);
129135
ScalarType common_type = utils::promote_type_with_scalar(a_type, b);

kernels/portable/cpu/op_repeat.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,11 @@ Tensor& repeat_out(
6262
InvalidArgument,
6363
out);
6464

65+
ET_KERNEL_CHECK(
66+
ctx, tensors_have_same_dim_order(self, out), InvalidArgument, out);
67+
68+
ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(self), InvalidArgument, out);
69+
6570
// Resize for dynamic shape
6671
ET_KERNEL_CHECK_MSG(
6772
ctx,

kernels/portable/cpu/op_roll.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,6 +60,9 @@ Tensor& roll_out(
6060
ET_KERNEL_CHECK(
6161
ctx, check_roll_args(in, shifts, dims, out), InvalidArgument, out);
6262

63+
ET_KERNEL_CHECK(
64+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
65+
6366
if (in.numel() == 0) {
6467
return out;
6568
}

kernels/portable/cpu/op_round.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,9 @@ Tensor& round_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
4545
ctx, tensors_have_same_shape_and_dtype(in, out), InvalidArgument, out);
4646
ET_KERNEL_CHECK(ctx, tensor_is_real_type(out), InvalidArgument, out);
4747

48+
ET_KERNEL_CHECK(
49+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
50+
4851
auto in_scalar_type = in.scalar_type();
4952

5053
ET_SWITCH_REAL_TYPES(in.scalar_type(), ctx, "round.out", CTYPE, [&] {

kernels/portable/cpu/op_rsub.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,9 @@ Tensor& rsub_scalar_out(
3131
out,
3232
"Failed to resize output tensor.");
3333

34+
ET_KERNEL_CHECK(
35+
ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
36+
3437
ET_KERNEL_CHECK(ctx, tensor_is_realhb_type(out), InvalidArgument, out);
3538

3639
ScalarType a_type = a.scalar_type();

kernels/portable/cpu/op_scatter_add.cpp

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,15 @@ Tensor& scatter_add_out(
6565
InvalidArgument,
6666
out);
6767

68+
ET_KERNEL_CHECK(
69+
context,
70+
tensors_have_same_dim_order(self, src, out),
71+
InvalidArgument,
72+
out);
73+
74+
ET_KERNEL_CHECK(
75+
context, tensor_is_default_dim_order(index), InvalidArgument, out);
76+
6877
if (dim < 0) {
6978
dim += nonzero_dim(self);
7079
}

kernels/portable/cpu/op_select_scatter.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,9 @@ Tensor& select_scatter_out(
3333
ET_KERNEL_CHECK(
3434
ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out);
3535

36+
ET_KERNEL_CHECK(
37+
ctx, tensors_have_same_dim_order(in, src, out), InvalidArgument, out);
38+
3639
// Account for negative indices
3740
if (dim < 0) {
3841
dim += in.dim();

kernels/portable/cpu/op_sigmoid.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,9 @@ Tensor& sigmoid_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
2424
ctx, in.scalar_type() != ScalarType::Bool, InvalidArgument, out);
2525
ET_KERNEL_CHECK(ctx, tensor_is_floating_type(out), InvalidArgument, out);
2626

27+
ET_KERNEL_CHECK(
28+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
29+
2730
// Resize for dynamic shape
2831
ET_KERNEL_CHECK_MSG(
2932
ctx,

kernels/portable/cpu/op_sign.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,9 @@ Tensor& sign_out(RuntimeContext& ctx, const Tensor& in, Tensor& out) {
3030
out,
3131
"Failed to resize output tensor.");
3232

33+
ET_KERNEL_CHECK(
34+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
35+
3336
ET_KERNEL_CHECK(
3437
ctx, tensors_have_same_shape_and_dtype(in, out), InvalidArgument, out);
3538

kernels/portable/cpu/op_slice_copy.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,9 @@ Tensor& slice_copy_Tensor_out(
3333
dim += in.dim();
3434
}
3535

36+
ET_KERNEL_CHECK(
37+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
38+
3639
// If user do not set value to end_val, set end to in.size(dim) (largest
3740
// value available)
3841
int64_t end = end_val.has_value() ? end_val.value() : in.size(dim);

kernels/portable/cpu/op_slice_scatter.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,6 +40,9 @@ Tensor& slice_scatter_out(
4040
InvalidArgument,
4141
out);
4242

43+
ET_KERNEL_CHECK(
44+
ctx, tensors_have_same_dim_order(input, out), InvalidArgument, out);
45+
4346
if (input.numel() == 0) {
4447
return out;
4548
}

kernels/portable/cpu/op_softmax.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,9 @@ Tensor& softmax_out(
3636
ET_KERNEL_CHECK(
3737
ctx, resize_tensor(out, in.sizes()) == Error::Ok, InvalidArgument, out);
3838

39+
ET_KERNEL_CHECK(
40+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
41+
3942
// Adjust for negative dim
4043
dim = dim < 0 ? dim + nonzero_dim(in) : dim;
4144

kernels/portable/cpu/op_split_copy.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,11 @@ void split_copy_Tensor_out(
4646
check_split_copy_args(input, split_size, dim, out),
4747
InvalidArgument, );
4848

49+
for (size_t i = 0; i < out.size(); ++i) {
50+
ET_KERNEL_CHECK(
51+
ctx, tensors_have_same_dim_order(input, out[i]), InvalidArgument, );
52+
}
53+
4954
const size_t leading_dims = getLeadingDims(input, dim);
5055
const size_t trailing_dims = getTrailingDims(input, dim);
5156
const size_t step = input.size(dim) * trailing_dims;

kernels/portable/cpu/op_split_with_sizes_copy.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,11 @@ void split_with_sizes_copy_out(
3838
check_split_with_sizes_copy_args(in, split_sizes, dim, out),
3939
InvalidArgument, );
4040

41+
for (size_t i = 0; i < out.size(); ++i) {
42+
ET_KERNEL_CHECK(
43+
ctx, tensors_have_same_dim_order(in, out[i]), InvalidArgument, );
44+
}
45+
4146
// If out is empty, then nothing needs to be done after checking the args.
4247
// Valid args implies that in.size(dim) == 0 and split_sizes is also empty.
4348
if (out.size() == 0) {

kernels/portable/cpu/op_squeeze_copy.cpp

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,11 @@ Tensor& squeeze_copy_dim_out(
2929
ET_KERNEL_CHECK(
3030
ctx, check_squeeze_copy_dim_args(in, dim, out), InvalidArgument, out);
3131

32+
ET_KERNEL_CHECK(
33+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
34+
35+
ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);
36+
3237
if (dim < 0) {
3338
dim += nonzero_dim(in);
3439
}
@@ -62,6 +67,11 @@ Tensor& squeeze_copy_dims_out(
6267
ET_KERNEL_CHECK(
6368
ctx, check_squeeze_copy_dims_args(in, dims, out), InvalidArgument, out);
6469

70+
ET_KERNEL_CHECK(
71+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
72+
73+
ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);
74+
6575
Tensor::SizesType expected_out_size[kTensorDimensionLimit];
6676
size_t expected_out_dim = 0;
6777
get_squeeze_copy_dims_out_target_size(

kernels/portable/cpu/op_stack.cpp

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,16 @@ Tensor& stack_out(
3131
ET_KERNEL_CHECK(
3232
ctx, check_stack_args(tensors, dim, out), InvalidArgument, out);
3333

34+
for (size_t i = 0; i < tensors.size(); ++i) {
35+
ET_KERNEL_CHECK(
36+
ctx,
37+
tensors_have_same_dim_order(tensors[i], out),
38+
InvalidArgument,
39+
out);
40+
}
41+
42+
ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(out), InvalidArgument, out);
43+
3444
Tensor::SizesType expected_out_size[kTensorDimensionLimit];
3545
size_t expected_out_dim = 0;
3646
get_stack_out_target_size(tensors, dim, expected_out_size, &expected_out_dim);

kernels/portable/cpu/op_sub.cpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -78,6 +78,9 @@ Tensor& sub_out(
7878
InvalidArgument,
7979
out);
8080

81+
ET_KERNEL_CHECK(
82+
ctx, tensors_have_same_dim_order(a, b, out), InvalidArgument, out);
83+
8184
ET_KERNEL_CHECK(ctx, tensor_is_realh_type(out), InvalidArgument, out);
8285

8386
ScalarType a_type = a.scalar_type();
@@ -131,6 +134,9 @@ Tensor& sub_scalar_out(
131134

132135
ET_KERNEL_CHECK(ctx, tensor_is_realh_type(out), InvalidArgument, out);
133136

137+
ET_KERNEL_CHECK(
138+
ctx, tensors_have_same_dim_order(a, out), InvalidArgument, out);
139+
134140
ScalarType a_type = a.scalar_type();
135141
ScalarType b_type = utils::get_scalar_dtype(b);
136142
ScalarType alpha_type = utils::get_scalar_dtype(alpha);

kernels/portable/cpu/op_sum.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,11 @@ Tensor& sum_dim_out(
3838
InvalidArgument,
3939
out);
4040

41+
ET_KERNEL_CHECK(
42+
ctx, tensors_have_same_dim_order(in, out), InvalidArgument, out);
43+
44+
ET_KERNEL_CHECK(ctx, tensor_is_default_dim_order(in), InvalidArgument, out);
45+
4146
ET_SWITCH_REAL_TYPES_AND(
4247
Bool, in.scalar_type(), ctx, "sum.IntList_out", CTYPE_IN, [&] {
4348
ET_SWITCH_REAL_TYPES_AND(

kernels/portable/cpu/util/select_copy_util.cpp

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,10 @@ Error select_copy_util(
3838
return Error::InvalidArgument;
3939
}
4040

41+
if (!tensors_have_same_dim_order(in, out)) {
42+
return Error::InvalidArgument;
43+
}
44+
4145
// If the input is a empty tensor, no other operation could be done. We just
4246
// return the output.
4347
if (in.numel() == 0) {

0 commit comments

Comments (0)