Skip to content

Add an overload to skip dtype and sizes. #5177

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Sep 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 35 additions & 1 deletion extension/tensor/tensor_impl_ptr.h
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ TensorImplPtr make_tensor_impl_ptr(
exec_aten::TensorShapeDynamism::STATIC) {
constexpr exec_aten::ScalarType scalar_type =
runtime::CppTypeToScalarType<T>::value;
auto raw_data_ptr = data.data();
const auto raw_data_ptr = data.data();
auto data_ptr = std::make_shared<std::vector<T>>(std::move(data));
return make_tensor_impl_ptr(
scalar_type,
Expand All @@ -108,6 +108,40 @@ TensorImplPtr make_tensor_impl_ptr(
[data_ptr = std::move(data_ptr)](void*) {});
}

/**
 * Creates a TensorImplPtr that manages a newly created rank-1 TensorImpl
 * owning the given data.
 *
 * This overload takes only the data vector: the scalar type is deduced from
 * the vector's element type, and the single dimension is the vector's length.
 * The vector is moved into a shared holder whose lifetime is tied to the
 * TensorImpl through the deleter.
 *
 * @tparam T The C++ type of the tensor elements, deduced from the vector.
 * @param data A vector containing the tensor's data.
 * @param dynamism Specifies the mutability of the tensor's shape.
 * @return A TensorImplPtr that manages the newly created TensorImpl.
 */
template <typename T = float>
TensorImplPtr make_tensor_impl_ptr(
    std::vector<T> data,
    exec_aten::TensorShapeDynamism dynamism =
        exec_aten::TensorShapeDynamism::STATIC) {
  constexpr exec_aten::ScalarType deduced_type =
      runtime::CppTypeToScalarType<T>::value;
  const auto element_count = static_cast<exec_aten::SizesType>(data.size());
  auto owned = std::make_shared<std::vector<T>>(std::move(data));
  // Moving a std::vector preserves its buffer, so owned->data() is the same
  // pointer the caller's vector held.
  return make_tensor_impl_ptr(
      deduced_type,
      std::vector<exec_aten::SizesType>{element_count},
      owned->data(),
      {0},
      {1},
      dynamism,
      [owned = std::move(owned)](void*) {});
}

/**
* Creates a TensorImplPtr that manages a newly created TensorImpl with the
* specified properties.
Expand Down
21 changes: 21 additions & 0 deletions extension/tensor/tensor_ptr.h
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,27 @@ TensorPtr make_tensor_ptr(
dynamism));
}

/**
 * Creates a TensorPtr that manages a rank-1 Tensor owning the given data.
 *
 * Convenience overload taking only the data vector; the scalar type is
 * deduced from the vector's element type and the data's lifetime is tied to
 * the underlying TensorImpl.
 *
 * @tparam T The C++ type of the tensor elements, deduced from the vector.
 * @param data A vector containing the tensor's data.
 * @param dynamism Specifies the mutability of the tensor's shape.
 * @return A TensorPtr that manages the newly created TensorImpl.
 */
template <typename T = float>
TensorPtr make_tensor_ptr(
    std::vector<T> data,
    exec_aten::TensorShapeDynamism dynamism =
        exec_aten::TensorShapeDynamism::STATIC) {
  auto impl = make_tensor_impl_ptr(std::move(data), dynamism);
  return make_tensor_ptr(std::move(impl));
}

/**
* Creates a TensorPtr that manages a Tensor with the specified properties.
*
Expand Down
70 changes: 69 additions & 1 deletion extension/tensor/test/tensor_impl_ptr_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,7 @@ TEST_F(TensorImplPtrTest, TensorImplOwningData) {
}

TEST_F(TensorImplPtrTest, TensorImplOwningEmptyData) {
auto tensor_impl = make_tensor_impl_ptr({0, 5}, {});
auto tensor_impl = make_tensor_impl_ptr({0, 5}, std::vector<float>());

EXPECT_EQ(tensor_impl->dim(), 2);
EXPECT_EQ(tensor_impl->size(0), 0);
Expand All @@ -182,6 +182,74 @@ TEST_F(TensorImplPtrTest, TensorImplOwningEmptyData) {
EXPECT_EQ(tensor_impl->data(), nullptr);
}

TEST_F(TensorImplPtrTest, TensorImplDataOnlyDoubleType) {
  // The data-only overload should deduce ScalarType::Double and build a 1-D
  // tensor whose single dimension is the vector's length.
  std::vector<double> data = {1.0, 2.0, 3.0, 4.0};
  auto tensor_impl = make_tensor_impl_ptr(std::move(data));

  EXPECT_EQ(tensor_impl->dim(), 1);
  EXPECT_EQ(tensor_impl->size(0), 4);
  EXPECT_EQ(tensor_impl->strides()[0], 1);
  // Use a named cast instead of a C-style cast on the untyped data pointer.
  const auto* ptr = static_cast<const double*>(tensor_impl->data());
  EXPECT_EQ(ptr[0], 1.0);
  EXPECT_EQ(ptr[3], 4.0);
}

TEST_F(TensorImplPtrTest, TensorImplDataOnlyInt32Type) {
  // The data-only overload should deduce ScalarType::Int from int32_t data.
  std::vector<int32_t> data = {10, 20, 30, 40};
  auto tensor_impl = make_tensor_impl_ptr(std::move(data));

  EXPECT_EQ(tensor_impl->dim(), 1);
  EXPECT_EQ(tensor_impl->size(0), 4);
  EXPECT_EQ(tensor_impl->strides()[0], 1);
  // Use a named cast instead of a C-style cast on the untyped data pointer.
  const auto* ptr = static_cast<const int32_t*>(tensor_impl->data());
  EXPECT_EQ(ptr[0], 10);
  EXPECT_EQ(ptr[3], 40);
}

TEST_F(TensorImplPtrTest, TensorImplDataOnlyInt64Type) {
  // The data-only overload should deduce ScalarType::Long from int64_t data.
  std::vector<int64_t> data = {100, 200, 300, 400};
  auto tensor_impl = make_tensor_impl_ptr(std::move(data));

  EXPECT_EQ(tensor_impl->dim(), 1);
  EXPECT_EQ(tensor_impl->size(0), 4);
  EXPECT_EQ(tensor_impl->strides()[0], 1);
  // Use a named cast instead of a C-style cast on the untyped data pointer.
  const auto* ptr = static_cast<const int64_t*>(tensor_impl->data());
  EXPECT_EQ(ptr[0], 100);
  EXPECT_EQ(ptr[3], 400);
}

TEST_F(TensorImplPtrTest, TensorImplDataOnlyUint8Type) {
  // The data-only overload should deduce ScalarType::Byte from uint8_t data.
  std::vector<uint8_t> data = {10, 20, 30, 40};
  auto tensor_impl = make_tensor_impl_ptr(std::move(data));

  EXPECT_EQ(tensor_impl->dim(), 1);
  EXPECT_EQ(tensor_impl->size(0), 4);
  EXPECT_EQ(tensor_impl->strides()[0], 1);
  // Use a named cast instead of a C-style cast on the untyped data pointer.
  const auto* ptr = static_cast<const uint8_t*>(tensor_impl->data());
  EXPECT_EQ(ptr[0], 10);
  EXPECT_EQ(ptr[3], 40);
}

TEST_F(TensorImplPtrTest, TensorImplAmbiguityWithMixedVectors) {
  // With both a sizes vector and a data vector present, overload resolution
  // must pick the (sizes, data) overload rather than the data-only one.
  std::vector<exec_aten::SizesType> sizes = {2, 2};
  std::vector<float> data = {1.0f, 2.0f, 3.0f, 4.0f};
  auto tensor_impl = make_tensor_impl_ptr(std::move(sizes), std::move(data));

  EXPECT_EQ(tensor_impl->dim(), 2);
  EXPECT_EQ(tensor_impl->size(0), 2);
  EXPECT_EQ(tensor_impl->size(1), 2);
  EXPECT_EQ(tensor_impl->strides()[0], 2);
  EXPECT_EQ(tensor_impl->strides()[1], 1);
  // Use named casts instead of C-style casts on the untyped data pointer.
  const auto* ptr = static_cast<const float*>(tensor_impl->data());
  EXPECT_EQ(ptr[0], 1.0f);
  EXPECT_EQ(ptr[3], 4.0f);

  // Braced-init-list arguments must also resolve unambiguously.
  auto tensor_impl2 = make_tensor_impl_ptr({2, 2}, {1.0f, 2.0f, 3.0f, 4.0f});

  EXPECT_EQ(tensor_impl2->dim(), 2);
  EXPECT_EQ(tensor_impl2->size(0), 2);
  EXPECT_EQ(tensor_impl2->size(1), 2);
  EXPECT_EQ(tensor_impl2->strides()[0], 2);
  EXPECT_EQ(tensor_impl2->strides()[1], 1);
  const auto* ptr2 = static_cast<const float*>(tensor_impl2->data());
  EXPECT_EQ(ptr2[0], 1.0f);
  EXPECT_EQ(ptr2[3], 4.0f);
}

TEST_F(TensorImplPtrTest, SharedDataManagement) {
auto data = std::make_shared<std::vector<float>>(100, 1.0f);
auto tensor_impl1 = make_tensor_impl_ptr(
Expand Down
70 changes: 69 additions & 1 deletion extension/tensor/test/tensor_ptr_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ TEST_F(TensorPtrTest, TensorOwningData) {
}

TEST_F(TensorPtrTest, TensorOwningEmptyData) {
auto tensor = make_tensor_ptr({0, 5}, {});
auto tensor = make_tensor_ptr({0, 5}, std::vector<float>());

EXPECT_EQ(tensor->dim(), 2);
EXPECT_EQ(tensor->size(0), 0);
Expand All @@ -177,6 +177,74 @@ TEST_F(TensorPtrTest, TensorOwningEmptyData) {
EXPECT_EQ(tensor->data_ptr<float>(), nullptr);
}

TEST_F(TensorPtrTest, TensorImplDataOnlyDoubleType) {
  // A data-only make_tensor_ptr call should yield a 1-D double tensor.
  std::vector<double> values = {1.0, 2.0, 3.0, 4.0};
  auto tensor = make_tensor_ptr(std::move(values));

  EXPECT_EQ(tensor->dim(), 1);
  EXPECT_EQ(tensor->size(0), 4);
  EXPECT_EQ(tensor->strides()[0], 1);
  const auto* elems = tensor->const_data_ptr<double>();
  EXPECT_EQ(elems[0], 1.0);
  EXPECT_EQ(elems[3], 4.0);
}

TEST_F(TensorPtrTest, TensorImplDataOnlyInt32Type) {
  // A data-only make_tensor_ptr call should yield a 1-D int32 tensor.
  std::vector<int32_t> values = {10, 20, 30, 40};
  auto tensor = make_tensor_ptr(std::move(values));

  EXPECT_EQ(tensor->dim(), 1);
  EXPECT_EQ(tensor->size(0), 4);
  EXPECT_EQ(tensor->strides()[0], 1);
  const auto* elems = tensor->const_data_ptr<int32_t>();
  EXPECT_EQ(elems[0], 10);
  EXPECT_EQ(elems[3], 40);
}

TEST_F(TensorPtrTest, TensorImplDataOnlyInt64Type) {
  // A data-only make_tensor_ptr call should yield a 1-D int64 tensor.
  std::vector<int64_t> values = {100, 200, 300, 400};
  auto tensor = make_tensor_ptr(std::move(values));

  EXPECT_EQ(tensor->dim(), 1);
  EXPECT_EQ(tensor->size(0), 4);
  EXPECT_EQ(tensor->strides()[0], 1);
  const auto* elems = tensor->const_data_ptr<int64_t>();
  EXPECT_EQ(elems[0], 100);
  EXPECT_EQ(elems[3], 400);
}

TEST_F(TensorPtrTest, TensorImplDataOnlyUint8Type) {
  // A data-only make_tensor_ptr call should yield a 1-D uint8 tensor.
  std::vector<uint8_t> values = {10, 20, 30, 40};
  auto tensor = make_tensor_ptr(std::move(values));

  EXPECT_EQ(tensor->dim(), 1);
  EXPECT_EQ(tensor->size(0), 4);
  EXPECT_EQ(tensor->strides()[0], 1);
  const auto* elems = tensor->const_data_ptr<uint8_t>();
  EXPECT_EQ(elems[0], 10);
  EXPECT_EQ(elems[3], 40);
}

TEST_F(TensorPtrTest, TensorImplAmbiguityWithMixedVectors) {
  // With both a sizes vector and a data vector present, overload resolution
  // must pick the (sizes, data) overload rather than the data-only one.
  std::vector<exec_aten::SizesType> shape = {2, 2};
  std::vector<float> values = {1.0f, 2.0f, 3.0f, 4.0f};
  auto tensor = make_tensor_ptr(std::move(shape), std::move(values));

  EXPECT_EQ(tensor->dim(), 2);
  EXPECT_EQ(tensor->size(0), 2);
  EXPECT_EQ(tensor->size(1), 2);
  EXPECT_EQ(tensor->strides()[0], 2);
  EXPECT_EQ(tensor->strides()[1], 1);
  const auto* elems = tensor->const_data_ptr<float>();
  EXPECT_EQ(elems[0], 1.0f);
  EXPECT_EQ(elems[3], 4.0f);

  // Braced-init-list arguments must also resolve unambiguously.
  auto tensor2 = make_tensor_ptr({2, 2}, {1.0f, 2.0f, 3.0f, 4.0f});

  EXPECT_EQ(tensor2->dim(), 2);
  EXPECT_EQ(tensor2->size(0), 2);
  EXPECT_EQ(tensor2->size(1), 2);
  EXPECT_EQ(tensor2->strides()[0], 2);
  EXPECT_EQ(tensor2->strides()[1], 1);
  const auto* elems2 = tensor2->const_data_ptr<float>();
  EXPECT_EQ(elems2[0], 1.0f);
  EXPECT_EQ(elems2[3], 4.0f);
}

TEST_F(TensorPtrTest, TensorSharingImplModifiesSharedDataVector) {
std::vector<float> data = {1, 2, 3, 4, 5, 6};

Expand Down
Loading