Provide more options to create an owning tensor. #5169

Merged · 1 commit · Sep 8, 2024
2 changes: 1 addition & 1 deletion extension/tensor/targets.bzl
@@ -27,10 +27,10 @@ def define_common_targets():
],
deps = [
"//executorch/runtime/core/exec_aten/util:dim_order_util" + aten_suffix,
"//executorch/runtime/core/exec_aten/util:scalar_type_util" + aten_suffix,
"//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix,
],
exported_deps = [
"//executorch/runtime/core/exec_aten:lib" + aten_suffix,
"//executorch/runtime/core/exec_aten/util:scalar_type_util" + aten_suffix,
],
)
23 changes: 23 additions & 0 deletions extension/tensor/tensor_impl_ptr.cpp
@@ -121,5 +121,28 @@ TensorImplPtr make_tensor_impl_ptr(
#endif // USE_ATEN_LIB
}

TensorImplPtr make_tensor_impl_ptr(
exec_aten::ScalarType scalar_type,
std::vector<exec_aten::SizesType> sizes,
std::vector<uint8_t> data,
std::vector<exec_aten::DimOrderType> dim_order,
std::vector<exec_aten::StridesType> strides,
exec_aten::TensorShapeDynamism dynamism) {
ET_CHECK_MSG(
data.size() >= exec_aten::compute_numel(sizes.data(), sizes.size()) *
exec_aten::elementSize(scalar_type),
"Data size is smaller than required by sizes and scalar type.");
auto raw_data_ptr = data.data();
auto data_ptr = std::make_shared<std::vector<uint8_t>>(std::move(data));
return make_tensor_impl_ptr(
scalar_type,
std::move(sizes),
raw_data_ptr,
std::move(dim_order),
std::move(strides),
dynamism,
[data_ptr = std::move(data_ptr)](void*) {});
}

} // namespace extension
} // namespace executorch
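
The overload above keeps the byte buffer alive by moving it into a std::shared_ptr and capturing that pointer in an otherwise empty deleter. Below is a minimal, self-contained sketch of that idiom outside of ExecuTorch; every name in it is illustrative, not part of the API in this PR.

#include <cstdint>
#include <functional>
#include <memory>
#include <vector>

int main() {
  std::vector<uint8_t> data(16, 0xAB);
  uint8_t* raw = data.data(); // moving a vector keeps its buffer, so `raw` stays valid

  auto owner = std::make_shared<std::vector<uint8_t>>(std::move(data));
  std::function<void(void*)> deleter = [owner](void*) {
    // Intentionally empty: the lambda only exists to hold `owner`.
    // When the last copy of this deleter is destroyed, `owner` is released
    // and the buffer is freed.
  };

  deleter(raw); // a no-op call, mirroring the TensorImpl teardown path
  return 0;
}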
47 changes: 36 additions & 11 deletions extension/tensor/tensor_impl_ptr.h
@@ -74,32 +74,32 @@ TensorImplPtr make_tensor_impl_ptr(
* specified properties.
*
* This template overload is specialized for cases where the tensor data is
* provided as a vector of a specific scalar type, rather than a raw pointer.
* The deleter ensures that the data vector is properly managed and its
* lifetime is tied to the TensorImpl.
* provided as a vector. The scalar type is automatically deduced from the
* vector's data type. The deleter ensures that the data vector is properly
* managed and its lifetime is tied to the TensorImpl.
*
* @tparam T The scalar type of the tensor elements.
* @tparam T The C++ type of the tensor elements, deduced from the vector.
* @param sizes A vector specifying the size of each dimension.
* @param data A vector containing the tensor's data.
* @param dim_order A vector specifying the order of dimensions.
* @param strides A vector specifying the strides of each dimension.
* @param dynamism Specifies the mutability of the tensor's shape.
* @return A TensorImplPtr managing the newly created TensorImpl.
* @return A TensorImplPtr that manages the newly created TensorImpl.
*/
template <exec_aten::ScalarType T = exec_aten::ScalarType::Float>
template <typename T = float>
TensorImplPtr make_tensor_impl_ptr(
std::vector<exec_aten::SizesType> sizes,
std::vector<typename runtime::ScalarTypeToCppType<T>::type> data,
std::vector<T> data,
std::vector<exec_aten::DimOrderType> dim_order = {},
std::vector<exec_aten::StridesType> strides = {},
exec_aten::TensorShapeDynamism dynamism =
exec_aten::TensorShapeDynamism::STATIC) {
constexpr exec_aten::ScalarType scalar_type =
runtime::CppTypeToScalarType<T>::value;
auto raw_data_ptr = data.data();
auto data_ptr = std::make_shared<
std::vector<typename runtime::ScalarTypeToCppType<T>::type>>(
std::move(data));
auto data_ptr = std::make_shared<std::vector<T>>(std::move(data));
return make_tensor_impl_ptr(
T,
scalar_type,
std::move(sizes),
raw_data_ptr,
std::move(dim_order),
@@ -108,5 +108,30 @@ TensorImplPtr make_tensor_impl_ptr(
[data_ptr = std::move(data_ptr)](void*) {});
}

/**
* Creates a TensorImplPtr that manages a newly created TensorImpl with the
* specified properties.
*
* This overload accepts a raw memory buffer stored in a std::vector<uint8_t>
* and a scalar type to interpret the data. The vector is managed, and the
* memory's lifetime is tied to the TensorImpl.
*
* @param scalar_type The scalar type of the tensor elements.
* @param sizes A vector specifying the size of each dimension.
* @param data A vector containing the raw memory for the tensor's data.
* @param dim_order A vector specifying the order of dimensions.
* @param strides A vector specifying the strides of each dimension.
* @param dynamism Specifies the mutability of the tensor's shape.
* @return A TensorImplPtr managing the newly created TensorImpl.
*/
TensorImplPtr make_tensor_impl_ptr(
exec_aten::ScalarType scalar_type,
std::vector<exec_aten::SizesType> sizes,
std::vector<uint8_t> data,
std::vector<exec_aten::DimOrderType> dim_order = {},
std::vector<exec_aten::StridesType> strides = {},
exec_aten::TensorShapeDynamism dynamism =
exec_aten::TensorShapeDynamism::STATIC);

} // namespace extension
} // namespace executorch
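
For reference, a short usage sketch of the two creation paths declared above: one deduces the scalar type from the element type, the other takes raw bytes plus an explicit scalar type. Only the make_tensor_impl_ptr signatures come from this PR; the include path and surrounding function are assumptions.

#include <cstdint>
#include <utility>
#include <vector>

#include <executorch/extension/tensor/tensor_impl_ptr.h> // path is an assumption

using namespace ::executorch::extension;

void example() {
  // Scalar type deduced from the element type (double -> ScalarType::Double).
  std::vector<double> typed = {1.0, 2.0, 3.0, 4.0};
  auto a = make_tensor_impl_ptr({2, 2}, std::move(typed));

  // Raw byte buffer plus an explicit scalar type: 16 zero bytes back a
  // 2x2 Float tensor, and the vector's lifetime is tied to the TensorImpl.
  std::vector<uint8_t> bytes(2 * 2 * sizeof(float), 0);
  auto b = make_tensor_impl_ptr(
      exec_aten::ScalarType::Float, {2, 2}, std::move(bytes));
}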
48 changes: 40 additions & 8 deletions extension/tensor/tensor_ptr.h
@@ -141,27 +141,59 @@ inline TensorPtr make_tensor_ptr(
* Creates a TensorPtr that manages a Tensor with the specified properties.
*
* This template overload is specialized for cases where the tensor data is
* provided as a vector of a specific scalar type, rather than a raw pointer.
* The deleter ensures that the data vector is properly managed and its
* lifetime is tied to the TensorImpl.
* provided as a vector. The scalar type is automatically deduced from the
* vector's data type. The deleter ensures that the data vector is properly
* managed and its lifetime is tied to the TensorImpl.
*
* @tparam T The scalar type of the tensor elements.
* @tparam T The C++ type of the tensor elements, deduced from the vector.
* @param sizes A vector specifying the size of each dimension.
* @param data A vector containing the tensor's data.
* @param dim_order A vector specifying the order of dimensions.
* @param strides A vector specifying the strides of each dimension.
* @param dynamism Specifies the mutability of the tensor's shape.
* @return A TensorImplPtr managing the newly created TensorImpl.
* @return A TensorPtr that manages the newly created TensorImpl.
*/
template <exec_aten::ScalarType T = exec_aten::ScalarType::Float>
template <typename T = float>
TensorPtr make_tensor_ptr(
std::vector<exec_aten::SizesType> sizes,
std::vector<typename runtime::ScalarTypeToCppType<T>::type> data,
std::vector<T> data,
std::vector<exec_aten::DimOrderType> dim_order = {},
std::vector<exec_aten::StridesType> strides = {},
exec_aten::TensorShapeDynamism dynamism =
exec_aten::TensorShapeDynamism::STATIC) {
return make_tensor_ptr(make_tensor_impl_ptr<T>(
return make_tensor_ptr(make_tensor_impl_ptr(
std::move(sizes),
std::move(data),
std::move(dim_order),
std::move(strides),
dynamism));
}

/**
* Creates a TensorPtr that manages a Tensor with the specified properties.
*
* This overload accepts a raw memory buffer stored in a std::vector<uint8_t>
* and a scalar type to interpret the data. The vector is managed, and the
* memory's lifetime is tied to the TensorImpl.
*
* @param scalar_type The scalar type of the tensor elements.
* @param sizes A vector specifying the size of each dimension.
* @param data A vector containing the raw memory for the tensor's data.
* @param dim_order A vector specifying the order of dimensions.
* @param strides A vector specifying the strides of each dimension.
* @param dynamism Specifies the mutability of the tensor's shape.
* @return A TensorPtr managing the newly created Tensor.
*/
inline TensorPtr make_tensor_ptr(
exec_aten::ScalarType scalar_type,
std::vector<exec_aten::SizesType> sizes,
std::vector<uint8_t> data,
std::vector<exec_aten::DimOrderType> dim_order = {},
std::vector<exec_aten::StridesType> strides = {},
exec_aten::TensorShapeDynamism dynamism =
exec_aten::TensorShapeDynamism::STATIC) {
return make_tensor_ptr(make_tensor_impl_ptr(
scalar_type,
std::move(sizes),
std::move(data),
std::move(dim_order),
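
A usage sketch of the new raw-buffer make_tensor_ptr overload: only its signature comes from this PR, while the include path, the memcpy packing, and the variable names below are assumptions.

#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>

#include <executorch/extension/tensor/tensor_ptr.h> // path is an assumption

using namespace ::executorch::extension;

TensorPtr float_tensor_from_bytes() {
  // Pack four floats into a byte buffer, then hand ownership to the tensor.
  const float values[] = {1.f, 2.f, 3.f, 4.f};
  std::vector<uint8_t> bytes(sizeof(values));
  std::memcpy(bytes.data(), values, sizeof(values));

  // The returned TensorPtr owns the buffer and interprets it as a 2x2 Float tensor.
  return make_tensor_ptr(
      exec_aten::ScalarType::Float, {2, 2}, std::move(bytes));
}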
1 change: 0 additions & 1 deletion extension/tensor/test/targets.bzl
@@ -19,6 +19,5 @@ def define_common_targets():
],
deps = [
"//executorch/extension/tensor:tensor" + aten_suffix,
"//executorch/runtime/core/exec_aten/testing_util:tensor_util" + aten_suffix,
],
)
62 changes: 62 additions & 0 deletions extension/tensor/test/tensor_impl_ptr_test.cpp
@@ -224,3 +224,65 @@ TEST_F(TensorImplPtrTest, CustomDeleterWithSharedData) {
EXPECT_TRUE(deleter_called);
EXPECT_EQ(data.use_count(), 1);
}

TEST_F(TensorImplPtrTest, TensorImplDeducedScalarType) {
std::vector<double> data = {1.0, 2.0, 3.0, 4.0};
auto tensor_impl = make_tensor_impl_ptr({2, 2}, std::move(data));

EXPECT_EQ(tensor_impl->dim(), 2);
EXPECT_EQ(tensor_impl->size(0), 2);
EXPECT_EQ(tensor_impl->size(1), 2);
EXPECT_EQ(tensor_impl->strides()[0], 2);
EXPECT_EQ(tensor_impl->strides()[1], 1);
EXPECT_EQ(((double*)tensor_impl->data())[0], 1.0);
EXPECT_EQ(((double*)tensor_impl->data())[3], 4.0);
}

TEST_F(TensorImplPtrTest, TensorImplUint8BufferWithFloatScalarType) {
std::vector<uint8_t> data(
4 * exec_aten::elementSize(exec_aten::ScalarType::Float));

float* float_data = reinterpret_cast<float*>(data.data());
float_data[0] = 1.0f;
float_data[1] = 2.0f;
float_data[2] = 3.0f;
float_data[3] = 4.0f;

auto tensor_impl = make_tensor_impl_ptr(
exec_aten::ScalarType::Float, {2, 2}, std::move(data));

EXPECT_EQ(tensor_impl->dim(), 2);
EXPECT_EQ(tensor_impl->size(0), 2);
EXPECT_EQ(tensor_impl->size(1), 2);
EXPECT_EQ(tensor_impl->strides()[0], 2);
EXPECT_EQ(tensor_impl->strides()[1], 1);

EXPECT_EQ(((float*)tensor_impl->data())[0], 1.0f);
EXPECT_EQ(((float*)tensor_impl->data())[1], 2.0f);
EXPECT_EQ(((float*)tensor_impl->data())[2], 3.0f);
EXPECT_EQ(((float*)tensor_impl->data())[3], 4.0f);
}

TEST_F(TensorImplPtrTest, TensorImplUint8BufferTooSmallExpectDeath) {
std::vector<uint8_t> data(
2 * exec_aten::elementSize(exec_aten::ScalarType::Float));
ET_EXPECT_DEATH(
{
auto tensor_impl = make_tensor_impl_ptr(
exec_aten::ScalarType::Float, {2, 2}, std::move(data));
},
"");
}

TEST_F(TensorImplPtrTest, TensorImplUint8BufferTooLarge) {
std::vector<uint8_t> data(
4 * exec_aten::elementSize(exec_aten::ScalarType::Float));
auto tensor_impl = make_tensor_impl_ptr(
exec_aten::ScalarType::Float, {2, 2}, std::move(data));

EXPECT_EQ(tensor_impl->dim(), 2);
EXPECT_EQ(tensor_impl->size(0), 2);
EXPECT_EQ(tensor_impl->size(1), 2);
EXPECT_EQ(tensor_impl->strides()[0], 2);
EXPECT_EQ(tensor_impl->strides()[1], 1);
}
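
The accepted and rejected buffer sizes in the last two tests follow directly from the check added in tensor_impl_ptr.cpp: the buffer must hold at least numel(sizes) * elementSize(scalar_type) bytes. A small sketch of that arithmetic; the helper name is illustrative, not part of the API under test.

#include <cstddef>
#include <vector>

// Illustrative helper, not part of the API under test.
std::size_t required_bytes(const std::vector<int>& sizes, std::size_t element_size) {
  std::size_t numel = 1;
  for (int size : sizes) {
    numel *= static_cast<std::size_t>(size);
  }
  return numel * element_size;
}

// For the 2x2 Float cases above: required_bytes({2, 2}, sizeof(float)) == 16.
// The 16-byte buffer passes (larger buffers are accepted too), while the
// 8-byte buffer in the death test fails the ET_CHECK_MSG.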