fix resizing scalar tensor with empty sizes #548

Closed · wants to merge 1 commit
8 changes: 8 additions & 0 deletions runtime/core/exec_aten/util/test/tensor_util_test.cpp
@@ -543,3 +543,11 @@ TEST_F(TensorUtilTest, TensorIsContiguous) {
  EXPECT_TRUE(tensor_is_contiguous(c));
  EXPECT_TRUE(tensor_is_contiguous(d));
}

TEST_F(TensorUtilTest, ResizeZeroDimTensor) {
  using namespace torch::executor;
  Tensor a = tf_float_.ones({});

  EXPECT_EQ(resize_tensor(a, {}), Error::Ok);
  EXPECT_EQ(a.dim(), 0);
}
12 changes: 12 additions & 0 deletions runtime/core/portable_type/tensor_impl.cpp
@@ -113,6 +113,18 @@ Error TensorImpl::internal_resize_contiguous(ArrayRef<SizesType> new_sizes) {
      dim_,
      new_sizes.size());

  // Kernels don't check that the provided out tensors have the right size.
  // Instead, they always attempt to resize the out tensor to the right size,
  // even when it already has the right size. Therefore, if we call an op with
  // inputs that will produce a zero-dimensional output, and the out tensor we
  // pass has non-STATIC dynamism, we will end up here. Since we have already
  // checked above that the out tensor has the right number of dimensions, the
  // provided out tensor must have zero rank; it therefore already has the
  // right size, and we can simply return.
  if (dim_ == 0) {
    return Error::Ok;
  }

  // Can only resize a StaticShape Tensor to the same size
  if (shape_dynamism_ == TensorShapeDynamism::STATIC) {
    for (int i = 0; i < new_sizes.size(); i++) {
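
For context on the comment in the hunk above: portable kernels resize their out tensor to the computed output shape before writing into it, which is how a zero-dim (scalar) out tensor with non-STATIC dynamism reaches internal_resize_contiguous with empty new sizes. The sketch below is a hypothetical illustration of that calling pattern, not code from this PR; the function name scalar_add_out, the include path, and the data-accessor calls are assumptions, while the resize_tensor usage mirrors the new test in tensor_util_test.cpp.

// Hypothetical sketch (not part of this PR): a kernel-style out-variant that
// unconditionally resizes `out` to the computed output shape. For zero-dim
// inputs that shape is {}, so a DYNAMIC_BOUND out tensor hits the new
// `dim_ == 0` early return in TensorImpl::internal_resize_contiguous().
#include <executorch/runtime/core/exec_aten/util/tensor_util.h>  // assumed path

using torch::executor::Error;
using torch::executor::Tensor;
using torch::executor::resize_tensor;

Error scalar_add_out(const Tensor& a, const Tensor& b, Tensor& out) {
  // Adding two zero-dim tensors yields a zero-dim result, so new_sizes is {}.
  Error err = resize_tensor(out, {});
  if (err != Error::Ok) {
    return err;
  }
  // Data accessors are assumed for illustration; the exact API is not shown
  // in this diff.
  out.mutable_data_ptr<float>()[0] =
      a.const_data_ptr<float>()[0] + b.const_data_ptr<float>()[0];
  return Error::Ok;
}
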
24 changes: 24 additions & 0 deletions runtime/core/portable_type/test/tensor_impl_test.cpp
@@ -181,6 +181,30 @@ TEST_F(TensorImplTest, TestSetSizesContigUpperBounded) {
  ET_EXPECT_DEATH(t.set_sizes_contiguous({new_sizes_4, 1}), "");
}

TEST_F(TensorImplTest, TestZeroDimSetEmptySizesContig) {
  SizesType sizes[0] = {};
  DimOrderType dim_order[0] = {};
  StridesType strides[0] = {};
  float data[1] = {1.0};
  TensorImpl t(
      ScalarType::Float,
      0,
      sizes,
      data,
      dim_order,
      strides,
      TensorShapeDynamism::DYNAMIC_BOUND);

  ArrayRef<SizesType> new_sizes_empty{};
  // Can resize with empty sizes
  t.set_sizes_contiguous(new_sizes_empty);
  EXPECT_EQ(t.dim(), 0);

  SizesType new_sizes_1[1] = {1};
  // Can't change rank of tensor
  ET_EXPECT_DEATH(t.set_sizes_contiguous({new_sizes_1, 1}), "");
}

TEST_F(TensorImplTest, TestWriteRead) {
  SizesType sizes[1] = {1};
  DimOrderType dim_order[1] = {0};