Skip to content

Commit 0fa2da2

Browse files
dbort authored and facebook-github-bot committed
Fix non-kernel refs to the deprecated Tensor::data_ptr()
Summary: Plain `Tensor::data_ptr()` is deprecated, so we should stop using it internally.

Reviewed By: larryliu0820

Differential Revision: D48076575

fbshipit-source-id: 0f2fda783a86f1e673494700a79fb61faef229f7
1 parent 0b576d9 commit 0fa2da2

File tree

10 files changed

+33
-32
lines changed

10 files changed

+33
-32
lines changed

backends/xnnpack/runtime/XNNExecutor.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -56,12 +56,12 @@ Error XNNExecutor::set_external_input(uint32_t id, Tensor* input) {
5656
e == Error::Ok, Internal, "QuantizePerTensor() failed");
5757
externals_.emplace_back(xnn_external_value{
5858
id,
59-
qinput.data_ptr(),
59+
qinput.mutable_data_ptr(),
6060
{static_cast<float>(input_qparam.scale),
6161
static_cast<int8_t>(input_qparam.zero_point)},
6262
batch_size});
6363
} else {
64-
externals_.emplace_back(xnn_external_value{id, input->data_ptr()});
64+
externals_.emplace_back(xnn_external_value{id, input->mutable_data_ptr()});
6565
}
6666
return Error::Ok;
6767
}

backends/xnnpack/runtime/XNNExecutor.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -111,8 +111,8 @@ class XNNExecutor {
111111
outputs.size());
112112

113113
for (int i = 0; i < outputs.size(); i++) {
114-
externals_.emplace_back(
115-
xnn_external_value{output_ids_[i], outputs[i]->data_ptr<float>()});
114+
externals_.emplace_back(xnn_external_value{
115+
output_ids_[i], outputs[i]->mutable_data_ptr<float>()});
116116
}
117117

118118
return Error::Ok;

extension/aten_util/aten_bridge.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -135,15 +135,15 @@ void alias_etensor_to_attensor(
135135
// then in the next call copying will not be the correct behavior.
136136
ET_CHECK_MSG(aten_tensor.is_contiguous(), "Input tensor must be contiguous");
137137
check_tensor_meta(aten_tensor, mutable_et);
138-
mutable_et.unsafeGetTensorImpl()->set_data(aten_tensor.data_ptr());
138+
mutable_et.unsafeGetTensorImpl()->set_data(aten_tensor.mutable_data_ptr());
139139
}
140140

141141
at::Tensor alias_attensor_to_etensor(const torch::executor::Tensor& etensor) {
142142
c10::ScalarType dtype = execuTorchtoTorchScalarType(etensor.scalar_type());
143143
std::vector<int64_t> at_tensor_sizes(
144144
etensor.sizes().begin(), etensor.sizes().end());
145145
at::Tensor t = at::from_blob(
146-
etensor.data_ptr(), at_tensor_sizes, at::TensorOptions(dtype));
146+
etensor.mutable_data_ptr(), at_tensor_sizes, at::TensorOptions(dtype));
147147
check_tensor_meta(t, etensor);
148148
return t;
149149
}
@@ -164,7 +164,7 @@ std::unique_ptr<torch::executor::TensorImpl> eTensorFromAtTensor(
164164
torchToExecuTorchScalarType(options.dtype());
165165

166166
return std::make_unique<torch::executor::TensorImpl>(
167-
edtype, sizes32.size(), sizes32.data(), tensor.data_ptr());
167+
edtype, sizes32.size(), sizes32.data(), tensor.mutable_data_ptr());
168168
}
169169

170170
at::Tensor atTensorFromETensor(

extension/aten_util/test/aten_bridge_test.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ TEST(ATenBridgeTest, AliasETensorToATenTensor) {
4747
strides.data());
4848
torch::executor::Tensor etensor(&tensor_impl);
4949
alias_etensor_to_attensor(at_tensor, etensor);
50-
EXPECT_EQ(at_tensor.data_ptr(), etensor.data_ptr());
50+
EXPECT_EQ(at_tensor.const_data_ptr(), etensor.const_data_ptr());
5151
}
5252

5353
TEST(ATenBridgeTest, AliasETensorToATenTensorFail) {
@@ -99,8 +99,8 @@ TEST(ATenBridgeTest, AliasETensorToATenTensorNonContiguous) {
9999
strides.data());
100100
torch::executor::Tensor etensor(&tensor_impl);
101101
alias_etensor_to_attensor(sliced_tensor_contig, etensor);
102-
EXPECT_EQ(sliced_tensor_contig.data_ptr(), etensor.data_ptr());
103-
EXPECT_NE(sliced_tensor.data_ptr(), etensor.data_ptr());
102+
EXPECT_EQ(sliced_tensor_contig.const_data_ptr(), etensor.const_data_ptr());
103+
EXPECT_NE(sliced_tensor.const_data_ptr(), etensor.const_data_ptr());
104104
}
105105

106106
TEST(ATenBridgeTest, AliasETensorToATenTensorNonContiguousFail) {
@@ -144,5 +144,5 @@ TEST(ATenBridgeTest, AliasATTensorToETensor) {
144144
strides.data());
145145
torch::executor::Tensor etensor(&tensor_impl);
146146
auto aliased_at_tensor = alias_attensor_to_etensor(etensor);
147-
EXPECT_EQ(aliased_at_tensor.data_ptr(), etensor_data.data());
147+
EXPECT_EQ(aliased_at_tensor.const_data_ptr(), etensor_data.data());
148148
}

runtime/core/exec_aten/testing_util/tensor_util.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -86,8 +86,8 @@ bool tensors_are_close(
8686
// Since the two tensors have same shape and strides, any two elements that
8787
// share same index from underlying data perspective will also share same
8888
// index from tensor perspective, whatever the size and strides really are.
89-
// e.g. if a[i_1, i_2, ... i_n] = a.data_ptr()[m], we can assert
90-
// b[i_1, i_2, ... i_n] = b.data_ptr()[m])
89+
// e.g. if a[i_1, i_2, ... i_n] = a.const_data_ptr()[m], we can assert
90+
// b[i_1, i_2, ... i_n] = b.const_data_ptr()[m])
9191
// So we can just compare the two underlying data sequentially to figure out
9292
// if the two tensors are same.
9393

@@ -112,7 +112,7 @@ bool tensors_are_close(
112112
atol);
113113
} else {
114114
// Non-floating-point types can be compared bitwise.
115-
return memcmp(a.data_ptr(), b.data_ptr(), a.nbytes()) == 0;
115+
return memcmp(a.const_data_ptr(), b.const_data_ptr(), a.nbytes()) == 0;
116116
}
117117
}
118118

@@ -152,7 +152,7 @@ bool tensor_data_is_close(
152152
atol);
153153
} else {
154154
// Non-floating-point types can be compared bitwise.
155-
return memcmp(a.data_ptr(), b.data_ptr(), a.nbytes()) == 0;
155+
return memcmp(a.const_data_ptr(), b.const_data_ptr(), a.nbytes()) == 0;
156156
}
157157
}
158158

runtime/core/exec_aten/util/scalar_type_util.h

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -732,14 +732,15 @@ inline size_t sizeof_scalar_type(exec_aten::ScalarType type) {
732732
// An example usage is:
733733
//
734734
// ET_SWITCH_REAL_TYPES(input.scalar_type(), "example", CTYPE, [&]() {
735-
// output.const_data_ptr<CTYPE>[0] = input.data_ptr<CTYPE>[0];
735+
// output.mutable_data_ptr<CTYPE>[0] = input.const_data_ptr<CTYPE>[0];
736736
// });
737737
//
738738
// Note that these can be nested as well:
739739
//
740740
// ET_SWITCH_REAL_TYPES(input.scalar_type(), "example", CTYPE_IN, [&]() {
741741
// ET_SWITCH_REAL_TYPES(output.scalar_type(), "example", CTYPE_OUT, [&]() {
742-
// output.const_data_ptr<CTYPE_OUT>[0] = input.data_ptr<CTYPE_IN>[0];
742+
// output.mutable_data_ptr<CTYPE_OUT>[0] =
743+
// input.const_data_ptr<CTYPE_IN>[0];
743744
// });
744745
// });
745746
//

runtime/core/exec_aten/util/tensor_util_aten.cpp

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -51,35 +51,35 @@ Error share_tensor_data(const at::Tensor& t_dst, const at::Tensor& t_src) {
5151
InvalidArgument,
5252
"Source tensor should have data_ptr not being nullptr.");
5353
// Assign the dataptr as the input tensor dataptr
54-
storage->set_data_ptr(at::DataPtr(t_src.data_ptr(), DeviceType::CPU));
54+
storage->set_data_ptr(at::DataPtr(t_src.mutable_data_ptr(), DeviceType::CPU));
5555
storage->set_nbytes(t_src.nbytes());
5656

5757
return Error::Ok;
5858
}
5959

6060
Error copy_tensor_data(const at::Tensor& t_dst, const at::Tensor& t_src) {
61-
at::StorageImpl* storage =
62-
t_dst.unsafeGetTensorImpl()->unsafe_storage().unsafeGetStorageImpl();
63-
void* data_ptr = storage->data_ptr().get();
61+
void* dst_data_ptr = t_dst.mutable_data_ptr();
6462

6563
// Currently even 0 sized tensors receive a dataptr in pre_allocated
6664
// memory planning so we can do this check.
6765
// TODO(jakeszwe, shunting, gasoonjia): this should be clear in design if
6866
// other people make their own memory plans
6967
ET_CHECK_OR_RETURN_ERROR(
70-
data_ptr != nullptr,
68+
dst_data_ptr != nullptr,
7169
InvalidArgument,
72-
"Source tensor should have data_ptr not being nullptr.");
73-
// memcpy the data of given tensor list to preallocated memory of input
74-
// inputs with a size 0 dimension can be nullptr
75-
if (t_src.data_ptr() != nullptr) {
70+
"Destination tensor data pointer must not be null.");
71+
72+
// Sources with a size 0 dimension can be nullptr
73+
if (t_src.const_data_ptr() != nullptr) {
7674
ET_CHECK_OR_RETURN_ERROR(
7775
t_dst.nbytes() == t_src.nbytes(),
7876
InvalidArgument,
7977
"t_dst.nbytes() %lu != t_src.nbytes(). %lu",
8078
t_dst.nbytes(),
8179
t_src.nbytes());
82-
std::memcpy(data_ptr, t_src.data_ptr(), t_src.nbytes());
80+
// Copy the source data to the preallocated memory of the destination, which
81+
// must be the same size as the source.
82+
std::memcpy(dst_data_ptr, t_src.const_data_ptr(), t_src.nbytes());
8383
}
8484

8585
return Error::Ok;

runtime/executor/method.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -624,7 +624,8 @@ Error Method::init(executorch_flatbuffer::ExecutionPlan* s_plan) {
624624
// Get pre_allocation info for input tensors
625625
for (int i = 0; i < inputs_size(); i++) {
626626
if (get_input(i).isTensor()) {
627-
pre_allocated_input_ = get_input(i).toTensor().data_ptr() != nullptr;
627+
pre_allocated_input_ =
628+
get_input(i).toTensor().const_data_ptr() != nullptr;
628629
break;
629630
}
630631
}

util/bundled_program_verification.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ at::Tensor tensor_like(executorch_flatbuffer::BundledTensor* bundled_tensor) {
4646
{ret_t_sizes, bundled_tensor->sizes()->size()},
4747
at::dtype(static_cast<ScalarType>(bundled_tensor->scalar_type())));
4848
memcpy(
49-
ret_tensor.data_ptr(),
49+
ret_tensor.mutable_data_ptr(),
5050
static_cast<const void*>(bundled_tensor->data()->Data()),
5151
ret_tensor.nbytes());
5252
return ret_tensor;

util/util.h

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -75,8 +75,7 @@ inline exec_aten::ArrayRef<void*> PrepareInputTensors(const Method& method) {
7575
const auto& t = method.get_input(i).toTensor();
7676
at::StorageImpl* storage =
7777
t.unsafeGetTensorImpl()->unsafe_storage().unsafeGetStorageImpl();
78-
auto& data_ptr = storage->data_ptr();
79-
if (data_ptr.get() == nullptr) {
78+
if (storage->data_ptr().get() == nullptr) {
8079
ET_LOG(Info, "input not initialized.");
8180
inputs[num_allocated++] = malloc(t.nbytes());
8281
storage->set_data_ptr(at::DataPtr(
@@ -97,7 +96,7 @@ inline exec_aten::ArrayRef<void*> PrepareInputTensors(const Method& method) {
9796
continue;
9897
}
9998
const auto& t = method.get_input(i).toTensor();
100-
if (t.data_ptr() == nullptr) {
99+
if (t.const_data_ptr() == nullptr) {
101100
ET_LOG(Info, "input not initialized.");
102101
inputs[num_allocated++] = malloc(t.nbytes());
103102
t.set_data(inputs[num_allocated - 1]);

0 commit comments

Comments (0)