Skip to content

Commit ca45bbf

Browse files
JacobSzwejbka authored and facebook-github-bot committed
update set_tensor_data to use MethodMeta and set_input (#524)
Summary: Pull Request resolved: #524 Switch the util used in all of our examples to use "set_input" instead of get_input, set_data. Theres still more work to be done on this util like make it follow our style guide, put it in a more specific file, use RAII but thatll be after MVP. Reviewed By: dbort Differential Revision: D49747239 fbshipit-source-id: 8124b7941e4a6a9232f1ce173e4aeab9b4d2572b
1 parent c216552 commit ca45bbf

File tree

4 files changed

+77
-46
lines changed

4 files changed

+77
-46
lines changed

runtime/executor/method.h

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -89,17 +89,18 @@ class Method final {
8989
}
9090

9191
/**
92-
* Sets a specific method input to the provided value.
92+
* Sets the internal input value to be equivalent to the provided
93+
* value.
9394
*
94-
* NOTE: Based on the memory plan of the method, the inputs may not have
95-
* buffer space pre-allocated for them, in this case the executor will alias
96-
* the memory of the tensors provided as inputs here, so the user should take
97-
* care that the life span of this memory outlasts the executor forward.
95+
* @param[in] input_evalue The evalue to copy into the method input. If the
96+
* evalue is a tensor, the data is copied in most cases, so the tensor
97+
* passed in here does not always need to outlive this call. But there is
98+
* a case where the Method will keep a pointer to the tensor's data.
99+
* Based on the memory plan of the method, the inputs may not have
100+
* buffer space pre-allocated for them. In this case the executor will
101+
* alias the memory of the tensors provided as inputs here rather than
102+
* deepcopy the input into the memory planned arena.
98103
*
99-
* @param[in] input_evalue The value to set the input to. The type of this
100-
* must match the type of the corresponding input. If this value is a
101-
* tensor, attempts to allow dynamic shape, but the dtype must always
102-
* agree.
103104
* @param[in] input_idx Zero-based index of the input to set. Must be less
104105
* than the value returned by inputs_size().
105106
*
@@ -110,7 +111,7 @@ class Method final {
110111
/**
111112
* Sets the values of all method inputs.
112113
*
113-
* See NOTE on set_input().
114+
* See set_input() for a more detailed description of the behavior.
114115
*
115116
* @param[in] input_evalues The new values for all of the method inputs. The
116117
* type of each element must match the type of corresponding input. If the

test/size_test.cpp

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
#include <executorch/runtime/platform/log.h>
1313
#include <executorch/runtime/platform/profiler.h>
1414
#include <executorch/runtime/platform/runtime.h>
15-
#include <executorch/util/util.h>
1615
#include <stdio.h>
1716

1817
using namespace torch::executor;
@@ -65,7 +64,13 @@ int main(int argc, char** argv) {
6564

6665
// Prepare for inputs
6766
// It assumes the input is one tensor.
68-
auto inputs = torch::executor::util::PrepareInputTensors(*method);
67+
float data[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0};
68+
Tensor::SizesType sizes[] = {6};
69+
Tensor::DimOrderType dim_order[] = {0};
70+
TensorImpl impl(ScalarType::Float, 1, sizes, data, dim_order);
71+
Tensor t(&impl);
72+
Error set_input_error = method->set_input(t, 0);
73+
ET_CHECK(set_input_error == Error::Ok);
6974

7075
ET_LOG(Info, "Inputs prepared.");
7176

@@ -90,7 +95,6 @@ int main(int argc, char** argv) {
9095
ET_LOG(Info, "%f", data_output[j]);
9196
}
9297
}
93-
torch::executor::util::FreeInputs(inputs);
9498
prof_result_t prof_result;
9599
EXECUTORCH_DUMP_PROFILE_RESULTS(&prof_result);
96100
if (prof_result.num_bytes != 0) {

test/targets.bzl

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@ SIZE_TEST_SOURCES = [
77
SIZE_TEST_DEPS = [
88
"//executorch/runtime/executor:program",
99
"//executorch/extension/data_loader:file_data_loader",
10-
"//executorch/util:util",
1110
]
1211

1312
def define_common_targets():

util/util.h

Lines changed: 59 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212

1313
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1414
#include <executorch/runtime/executor/method.h>
15+
#include <executorch/runtime/executor/method_meta.h>
1516
#include <executorch/runtime/platform/log.h>
1617
#ifdef USE_ATEN_LIB
1718
#include <ATen/ATen.h> // @manual=//caffe2/aten:ATen-core
@@ -61,51 +62,77 @@ inline void FillOnes(Tensor tensor) {
6162
* @returns An array of pointers that must be passed to `FreeInputs()` after
6263
* the Method is no longer needed.
6364
*/
64-
inline exec_aten::ArrayRef<void*> PrepareInputTensors(const Method& method) {
65+
inline exec_aten::ArrayRef<void*> PrepareInputTensors(Method& method) {
66+
auto method_meta = method.method_meta();
6567
size_t input_size = method.inputs_size();
6668
size_t num_allocated = 0;
6769
void** inputs = (void**)malloc(input_size * sizeof(void*));
68-
#ifdef USE_ATEN_LIB
69-
auto deleteByNone = [](void* p) {};
70+
7071
for (size_t i = 0; i < input_size; i++) {
71-
if (!method.get_input(i).isTensor()) {
72+
if (*method_meta.input_tag(i) != Tag::Tensor) {
7273
ET_LOG(Info, "input %zu is not a tensor, skipping", i);
7374
continue;
7475
}
75-
const auto& t = method.get_input(i).toTensor();
76-
at::StorageImpl* storage =
77-
t.unsafeGetTensorImpl()->unsafe_storage().unsafeGetStorageImpl();
78-
if (storage->data_ptr().get() == nullptr) {
79-
ET_LOG(Info, "input not initialized.");
80-
inputs[num_allocated++] = malloc(t.nbytes());
81-
storage->set_data_ptr(at::DataPtr(
82-
inputs[num_allocated - 1],
83-
inputs[num_allocated - 1],
84-
deleteByNone,
85-
DeviceType::CPU));
86-
storage->set_nbytes(t.nbytes());
87-
} else {
88-
ET_LOG(Info, "input already initialized, refilling.");
76+
77+
// Tensor Input. Grab meta data and allocate buffer
78+
auto tensor_meta = method_meta.input_tensor_meta(i);
79+
inputs[num_allocated++] = malloc(tensor_meta->nbytes());
80+
81+
#ifdef USE_ATEN_LIB
82+
std::vector<int64_t> at_tensor_sizes;
83+
for (auto s : tensor_meta->sizes()) {
84+
at_tensor_sizes.push_back(s);
8985
}
86+
at::Tensor t = at::from_blob(
87+
inputs[num_allocated - 1],
88+
at_tensor_sizes,
89+
at::TensorOptions(tensor_meta->scalar_type()));
9090
t.fill_(1.0f);
91-
}
92-
#else
93-
for (size_t i = 0; i < input_size; i++) {
94-
if (!method.get_input(i).isTensor()) {
95-
ET_LOG(Info, "input %zu is not a tensor, skipping", i);
96-
continue;
91+
92+
#else // Portable Tensor
93+
// The only memory that needs to persist after set_input is called is the
94+
// data ptr of the input tensor, and that is only if the Method did not
95+
// memory plan buffer space for the inputs and instead is expecting the user
96+
// to provide them. Meta data like sizes and dim order are used to ensure
97+
// the input aligns with the values expected by the plan, but references to
98+
// them are not held onto.
99+
100+
TensorImpl::SizesType* sizes = static_cast<TensorImpl::SizesType*>(
101+
malloc(sizeof(TensorImpl::SizesType) * tensor_meta->sizes().size()));
102+
TensorImpl::DimOrderType* dim_order =
103+
static_cast<TensorImpl::DimOrderType*>(malloc(
104+
sizeof(TensorImpl::DimOrderType) *
105+
tensor_meta->dim_order().size()));
106+
107+
for (size_t size_idx = 0; size_idx < tensor_meta->sizes().size();
108+
size_idx++) {
109+
sizes[size_idx] = tensor_meta->sizes()[size_idx];
97110
}
98-
const auto& t = method.get_input(i).toTensor();
99-
if (t.const_data_ptr() == nullptr) {
100-
ET_LOG(Info, "input not initialized.");
101-
inputs[num_allocated++] = malloc(t.nbytes());
102-
t.set_data(inputs[num_allocated - 1]);
103-
} else {
104-
ET_LOG(Info, "input already initialized, refilling.");
111+
for (size_t dim_idx = 0; dim_idx < tensor_meta->dim_order().size();
112+
dim_idx++) {
113+
dim_order[dim_idx] = tensor_meta->dim_order()[dim_idx];
105114
}
115+
116+
TensorImpl impl = TensorImpl(
117+
tensor_meta->scalar_type(),
118+
tensor_meta->sizes().size(),
119+
sizes,
120+
inputs[num_allocated - 1],
121+
dim_order);
122+
Tensor t(&impl);
106123
FillOnes(t);
107-
}
108124
#endif
125+
auto error = method.set_input(t, i);
126+
ET_CHECK_MSG(
127+
error == Error::Ok,
128+
"Error: 0x%" PRIx32 " setting input %zu.",
129+
error,
130+
i);
131+
#ifndef USE_ATEN_LIB // Portable Tensor
132+
free(sizes);
133+
free(dim_order);
134+
#endif
135+
}
109136
return {inputs, num_allocated};
110137
}
111138

0 commit comments

Comments
 (0)