Skip to content

Commit f7cd7c3

Browse files
JacobSzwejbka authored and
facebook-github-bot committed
update set_tensor_data to use MethodMeta and set_input (#524)
Summary: Pull Request resolved: #524 Switch the util used in all of our examples to use "set_input" instead of get_input, set_data. There's still more work to be done on this util — like making it follow our style guide, putting it in a more specific file, and using RAII — but that'll be after MVP. Differential Revision: D49747239 fbshipit-source-id: 1a8920044627573726862da6d02d85f80da845f0
1 parent f09426c commit f7cd7c3

File tree

10 files changed

+68
-45
lines changed

10 files changed

+68
-45
lines changed

examples/bundled_executor_runner/bundled_executor_runner.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,8 @@ int main(int argc, char** argv) {
212212
status);
213213
} else {
214214
// Use ones-initialized inputs.
215-
inputs = torch::executor::util::PrepareInputTensors(*method);
215+
inputs = torch::executor::util::PrepareInputTensors(
216+
*(program->method_meta(method_name)), *method);
216217
}
217218
ET_LOG(Info, "Inputs prepared.");
218219

examples/executor_runner/executor_runner.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,8 @@ int main(int argc, char** argv) {
157157

158158
// Prepare the inputs.
159159
// Use ones-initialized inputs.
160-
auto inputs = util::PrepareInputTensors(*method);
160+
auto inputs =
161+
util::PrepareInputTensors(*(program->method_meta(method_name)), *method);
161162
ET_LOG(Info, "Inputs prepared.");
162163

163164
// Run the model.

runtime/executor/test/allocation_failure_stress_test.cpp

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,8 @@ TEST_F(AllocationFailureStressTest, End2EndIncreaseRuntimeMemUntilSuccess) {
8484
// Execution does not use the runtime allocator, so it should always succeed
8585
// once load was successful.
8686
exec_aten::ArrayRef<void*> inputs =
87-
torch::executor::util::PrepareInputTensors(*method);
87+
torch::executor::util::PrepareInputTensors(
88+
*(program_->method_meta("forward")), *method);
8889
err = method->execute();
8990
torch::executor::util::FreeInputs(inputs);
9091
ASSERT_EQ(err, Error::Ok);
@@ -120,7 +121,8 @@ TEST_F(AllocationFailureStressTest, End2EndNonConstantMemUntilSuccess) {
120121
// Execution does not use the runtime allocator, so it should always succeed
121122
// once load was successful.
122123
exec_aten::ArrayRef<void*> inputs =
123-
torch::executor::util::PrepareInputTensors(*method);
124+
torch::executor::util::PrepareInputTensors(
125+
*(program_->method_meta("forward")), *method);
124126
err = method->execute();
125127
torch::executor::util::FreeInputs(inputs);
126128
ASSERT_EQ(err, Error::Ok);

runtime/executor/test/backend_integration_test.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -415,7 +415,8 @@ TEST_P(BackendIntegrationTest, EndToEndTestWithProcessedAsHandle) {
415415
auto method(std::move(method_res.get()));
416416
// Execute the model.
417417
exec_aten::ArrayRef<void*> inputs =
418-
torch::executor::util::PrepareInputTensors(method);
418+
torch::executor::util::PrepareInputTensors(
419+
program->method_meta("forward").get(), method);
419420
auto err = method.execute();
420421
torch::executor::util::FreeInputs(inputs);
421422
EXPECT_EQ(err, Error::Ok);

runtime/executor/test/kernel_integration_test.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -156,7 +156,8 @@ class KernelIntegrationTest : public ::testing::Test {
156156
method_ = std::make_unique<Method>(std::move(method.get()));
157157

158158
// Set up its inputs.
159-
inputs_ = torch::executor::util::PrepareInputTensors(*method_);
159+
inputs_ = torch::executor::util::PrepareInputTensors(
160+
*(program->method_meta("forward")), *method_);
160161
}
161162

162163
void TearDown() override {

runtime/executor/test/method_test.cpp

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,8 @@ TEST_F(MethodTest, MoveTest) {
7272

7373
// Can execute the method.
7474
exec_aten::ArrayRef<void*> inputs =
75-
torch::executor::util::PrepareInputTensors(*method);
75+
torch::executor::util::PrepareInputTensors(
76+
*(programs_["add"]->method_meta("forward")), *method);
7677
Error err = method->execute();
7778
ASSERT_EQ(err, Error::Ok);
7879

@@ -97,7 +98,8 @@ TEST_F(MethodTest, SetPrimInputTest) {
9798

9899
// Can execute the method.
99100
exec_aten::ArrayRef<void*> inputs =
100-
torch::executor::util::PrepareInputTensors(*method);
101+
torch::executor::util::PrepareInputTensors(
102+
*(programs_["add"]->method_meta("forward")), *method);
101103

102104
// The args to the method are x, y, alpha. x and y are tensors handled above
103105
// alpha is a prim.
@@ -191,12 +193,13 @@ TEST_F(MethodTest, AliasedIOTest) {
191193
// TEST_F(MethodTest, OptionalTensorListDeserialization) {
192194
// ManagedMemoryManager mmm(kDefaultNonConstMemBytes,
193195
// kDefaultRuntimeMemBytes); Result<Method> method =
194-
// index_program_->load_method("forward", &mmm.get());
196+
// programs_["index"]->load_method("forward", &mmm.get());
195197
// ASSERT_EQ(method.error(), Error::Ok);
196198

197199
// // Can execute the method.
198200
// exec_aten::ArrayRef<void*> inputs =
199-
// torch::executor::util::PrepareInputTensors(*method);
201+
// torch::executor::util::PrepareInputTensors(*(programs_["index"]->method_meta("forward")),
202+
// *method);
200203
// Error err = method->execute();
201204
// ASSERT_EQ(err, Error::Ok);
202205

sdk/runners/executor_runner.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,8 @@ int main(int argc, char** argv) {
363363
status);
364364
} else {
365365
// Use ones-initialized inputs.
366-
inputs = torch::executor::util::PrepareInputTensors(*method);
366+
inputs = torch::executor::util::PrepareInputTensors(
367+
*(program->method_meta(method_name)), *method);
367368
}
368369
ET_LOG(Info, "Inputs prepared.");
369370

test/multi_runner.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,8 @@ class PreparedModel final {
8989
memory_manager_(non_const_mem_bytes, runtime_mem_bytes),
9090
method_(load_method_or_die(program_, &memory_manager_.get())),
9191
has_run_(false) {
92-
inputs_ = torch::executor::util::PrepareInputTensors(method_);
92+
inputs_ = torch::executor::util::PrepareInputTensors(
93+
program_.method_meta(name.c_str()).get(), method_);
9394
}
9495

9596
void run() {

test/size_test.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,8 @@ int main(int argc, char** argv) {
6565

6666
// Prepare for inputs
6767
// It assumes the input is one tensor.
68-
auto inputs = torch::executor::util::PrepareInputTensors(*method);
68+
auto inputs = torch::executor::util::PrepareInputTensors(
69+
*(program->method_meta(method_name)), *method);
6970

7071
ET_LOG(Info, "Inputs prepared.");
7172

util/util.h

Lines changed: 43 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,11 @@
99
#pragma once
1010

1111
#include <algorithm>
12+
#include <functional>
1213

1314
#include <executorch/runtime/core/exec_aten/exec_aten.h>
1415
#include <executorch/runtime/executor/method.h>
16+
#include <executorch/runtime/executor/method_meta.h>
1517
#include <executorch/runtime/platform/log.h>
1618
#ifdef USE_ATEN_LIB
1719
#include <ATen/ATen.h> // @manual=//caffe2/aten:ATen-core
@@ -61,51 +63,60 @@ inline void FillOnes(Tensor tensor) {
6163
* @returns An array of pointers that must be passed to `FreeInputs()` after
6264
* the Method is no longer needed.
6365
*/
64-
inline exec_aten::ArrayRef<void*> PrepareInputTensors(const Method& method) {
66+
inline exec_aten::ArrayRef<void*> PrepareInputTensors(
67+
const MethodMeta& method_meta,
68+
Method& method) {
6569
size_t input_size = method.inputs_size();
6670
size_t num_allocated = 0;
6771
void** inputs = (void**)malloc(input_size * sizeof(void*));
68-
#ifdef USE_ATEN_LIB
69-
auto deleteByNone = [](void* p) {};
72+
7073
for (size_t i = 0; i < input_size; i++) {
71-
if (!method.get_input(i).isTensor()) {
74+
if (*method_meta.input_tag(i) != Tag::Tensor) {
7275
ET_LOG(Info, "input %zu is not a tensor, skipping", i);
7376
continue;
7477
}
75-
const auto& t = method.get_input(i).toTensor();
76-
at::StorageImpl* storage =
77-
t.unsafeGetTensorImpl()->unsafe_storage().unsafeGetStorageImpl();
78-
if (storage->data_ptr().get() == nullptr) {
79-
ET_LOG(Info, "input not initialized.");
80-
inputs[num_allocated++] = malloc(t.nbytes());
81-
storage->set_data_ptr(at::DataPtr(
82-
inputs[num_allocated - 1],
83-
inputs[num_allocated - 1],
84-
deleteByNone,
85-
DeviceType::CPU));
86-
storage->set_nbytes(t.nbytes());
87-
} else {
88-
ET_LOG(Info, "input already initialized, refilling.");
78+
79+
// Tensor Input. Grab meta data and allocate buffer
80+
auto input_meta = method_meta.input_tensor_meta(i);
81+
inputs[num_allocated++] = malloc(input_meta->nbytes());
82+
83+
#ifdef USE_ATEN_LIB
84+
std::vector<int64_t> at_tensor_sizes;
85+
for (auto s : input_meta->sizes()) {
86+
at_tensor_sizes.push_back(s);
8987
}
88+
at::Tensor t = at::from_blob(
89+
inputs[num_allocated - 1],
90+
at_tensor_sizes,
91+
at::TensorOptions(input_meta->scalar_type()));
9092
t.fill_(1.0f);
91-
}
92-
#else
93-
for (size_t i = 0; i < input_size; i++) {
94-
if (!method.get_input(i).isTensor()) {
95-
ET_LOG(Info, "input %zu is not a tensor, skipping", i);
96-
continue;
93+
94+
#else // Portable Tensor
95+
std::vector<TensorImpl::SizesType> sizes;
96+
std::vector<TensorImpl::DimOrderType> dim_order;
97+
for (auto s : input_meta->sizes()) {
98+
sizes.push_back(s);
9799
}
98-
const auto& t = method.get_input(i).toTensor();
99-
if (t.const_data_ptr() == nullptr) {
100-
ET_LOG(Info, "input not initialized.");
101-
inputs[num_allocated++] = malloc(t.nbytes());
102-
t.set_data(inputs[num_allocated - 1]);
103-
} else {
104-
ET_LOG(Info, "input already initialized, refilling.");
100+
for (auto d : input_meta->dim_order()) {
101+
dim_order.push_back(d);
105102
}
103+
104+
TensorImpl impl = TensorImpl(
105+
input_meta->scalar_type(),
106+
input_meta->sizes().size(),
107+
sizes.data(),
108+
inputs[num_allocated - 1],
109+
dim_order.data());
110+
Tensor t(&impl);
106111
FillOnes(t);
107-
}
108112
#endif
113+
auto error = method.set_input(t, i);
114+
ET_CHECK_MSG(
115+
error == Error::Ok,
116+
"Error: 0x%" PRIx32 " setting input %zu.",
117+
error,
118+
i);
119+
}
109120
return {inputs, num_allocated};
110121
}
111122

0 commit comments

Comments (0)