Commit cb22b75

manuelcandales authored and facebook-github-bot committed
fix resizing scalar tensor with empty sizes (#548)
Summary: Resizing a scalar tensor by providing empty sizes should succeed. The resizing logic assumed the sizes were non-empty, resulting in a crash.

Reviewed By: JacobSzwejbka, dbort

Differential Revision: D49789758
1 parent 1de10f4 commit cb22b75
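
Below is a minimal sketch of the scenario this commit addresses, adapted from the TensorUtilTest.ResizeZeroDimTensor test added in this change. The include paths and the testing::TensorFactory helper are assumptions about the surrounding test setup, not part of the commit:

#include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

using namespace torch::executor;

// Sketch only: mirrors the new ResizeZeroDimTensor test.
void resize_scalar_tensor_sketch() {
  testing::TensorFactory<ScalarType::Float> tf_float;  // assumed test helper
  Tensor a = tf_float.ones({});      // rank-0 (scalar) tensor
  Error err = resize_tensor(a, {});  // resize with an empty sizes list
  // Before this fix the empty sizes list crashed the resizing logic; with the
  // fix the call returns Error::Ok and the tensor stays at dim() == 0.
  (void)err;
}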

File tree

runtime/core/exec_aten/util/test/tensor_util_test.cpp
runtime/core/portable_type/tensor_impl.cpp
runtime/core/portable_type/test/tensor_impl_test.cpp

3 files changed: +44 −0 lines changed

runtime/core/exec_aten/util/test/tensor_util_test.cpp

Lines changed: 8 additions & 0 deletions
@@ -543,3 +543,11 @@ TEST_F(TensorUtilTest, TensorIsContiguous) {
   EXPECT_TRUE(tensor_is_contiguous(c));
   EXPECT_TRUE(tensor_is_contiguous(d));
 }
+
+TEST_F(TensorUtilTest, ResizeZeroDimTensor) {
+  using namespace torch::executor;
+  Tensor a = tf_float_.ones({});
+
+  EXPECT_EQ(resize_tensor(a, {}), Error::Ok);
+  EXPECT_EQ(a.dim(), 0);
+}

runtime/core/portable_type/tensor_impl.cpp

Lines changed: 12 additions & 0 deletions
@@ -113,6 +113,18 @@ Error TensorImpl::internal_resize_contiguous(ArrayRef<SizesType> new_sizes) {
       dim_,
       new_sizes.size());
 
+  // Kernels don't check that the provided out tensors have the right size.
+  // Instead they always attempt to resize the out tensor to the right size,
+  // even when the out tensor already had the right size. Therefore, if we call
+  // an op with inputs that will produce a zero-dimensional output, and the out
+  // tensor that we pass has non-STATIC dynamism, then we will end up here.
+  // Since we have already checked above that the out tensor has the right
+  // number of dimensions, it must be that the provided out tensor has zero
+  // rank, therefore it already has the right size and we should just return.
+  if (dim_ == 0) {
+    return Error::Ok;
+  }
+
   // Can only resize a StaticShape Tensor to the same size
   if (shape_dynamism_ == TensorShapeDynamism::STATIC) {
     for (int i = 0; i < new_sizes.size(); i++) {
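
To illustrate the situation the new comment describes, here is a hypothetical kernel-style sketch (the op name and its structure are illustrative, not part of this commit): portable kernels resize their out tensor to the computed output shape on every call, so an op whose inputs produce a zero-dimensional result ends up passing an empty sizes list into this resize path, which must now report success for a rank-0 out tensor.

#include <executorch/runtime/core/exec_aten/util/tensor_util.h>

using namespace torch::executor;

// Hypothetical unary op, sketched to show why the dim_ == 0 early return is
// needed: the out tensor is resized unconditionally, even when its shape
// already matches. For a rank-0 input, in.sizes() is empty.
Tensor& hypothetical_unary_op_out(const Tensor& in, Tensor& out) {
  Error err = resize_tensor(out, in.sizes());
  if (err != Error::Ok) {
    // A real kernel would surface this through its error-handling macros;
    // the point here is only that the resize must succeed for empty sizes.
    return out;
  }
  // ... elementwise computation writing from in to out would go here ...
  return out;
}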

runtime/core/portable_type/test/tensor_impl_test.cpp

Lines changed: 24 additions & 0 deletions
@@ -181,6 +181,30 @@ TEST_F(TensorImplTest, TestSetSizesContigUpperBounded) {
   ET_EXPECT_DEATH(t.set_sizes_contiguous({new_sizes_4, 1}), "");
 }
 
+TEST_F(TensorImplTest, TestZeroDimSetEmptySizesContig) {
+  SizesType sizes[0] = {};
+  DimOrderType dim_order[0] = {};
+  StridesType strides[0] = {};
+  float data[1] = {1.0};
+  TensorImpl t(
+      ScalarType::Float,
+      0,
+      sizes,
+      data,
+      dim_order,
+      strides,
+      TensorShapeDynamism::DYNAMIC_BOUND);
+
+  ArrayRef<SizesType> new_sizes_empty{};
+  // Can resize with empty sizes
+  t.set_sizes_contiguous(new_sizes_empty);
+  EXPECT_EQ(t.dim(), 0);
+
+  SizesType new_sizes_1[1] = {1};
+  // Can't change rank of tensor
+  ET_EXPECT_DEATH(t.set_sizes_contiguous({new_sizes_1, 1}), "");
+}
+
 TEST_F(TensorImplTest, TestWriteRead) {
   SizesType sizes[1] = {1};
   DimOrderType dim_order[1] = {0};
