Skip to content

Commit 93bf6ce

Browse files
JacobSzwejbka authored
and facebook-github-bot committed
Remove strange helpers for python -> tensor conversions, and at -> et tensor conversions
Summary: KeepAlive was difficult to manage, and I'm not even sure how sound it was for things like strides and dim order. Now the run_method loop is a little grosser, but I think the intent is much clearer. We cast from Python types to at types, and then possibly have to do a hard conversion from at::Tensor to ETensor, which can be pretty gross since ETensor doesn't manage any memory. This highlights the need for something like an ETensor wrapper that manages a bunch of state, such as sizes and TensorImpl. Differential Revision: D48618092 fbshipit-source-id: 2b6075348c0afc2c55a8c45aaf12e29f9b22d293
1 parent 1523ddc commit 93bf6ce

File tree

3 files changed

+93
-123
lines changed

3 files changed

+93
-123
lines changed

extension/aten_util/aten_bridge.cpp

Lines changed: 0 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -147,39 +147,5 @@ at::Tensor alias_attensor_to_etensor(const torch::executor::Tensor& etensor) {
147147
check_tensor_meta(t, etensor);
148148
return t;
149149
}
150-
151-
std::unique_ptr<torch::executor::TensorImpl> eTensorFromAtTensor(
152-
const at::Tensor& tensor,
153-
KeepAliveSizes& keep_alive) {
154-
auto sizes = tensor.sizes();
155-
auto options = tensor.options();
156-
keep_alive.sizes32.emplace_back(sizes.size());
157-
auto& sizes32 = keep_alive.sizes32.back();
158-
for (size_t i = 0; i < sizes.size(); ++i) {
159-
// NOLINTNEXTLINE
160-
sizes32[i] = sizes[i];
161-
}
162-
163-
const torch::executor::ScalarType edtype =
164-
torchToExecuTorchScalarType(options.dtype());
165-
166-
return std::make_unique<torch::executor::TensorImpl>(
167-
edtype, sizes32.size(), sizes32.data(), tensor.mutable_data_ptr());
168-
}
169-
170-
at::Tensor atTensorFromETensor(
171-
torch::executor::TensorImpl* etensor,
172-
KeepAliveSizes& keep_alive) {
173-
c10::ScalarType dtype = execuTorchtoTorchScalarType(etensor->scalar_type());
174-
keep_alive.sizes64.emplace_back(etensor->sizes().size());
175-
auto& sizes64 = keep_alive.sizes64.back();
176-
for (size_t i = 0; i < etensor->sizes().size(); ++i) {
177-
// NOLINTNEXTLINE
178-
sizes64[i] = etensor->sizes()[i];
179-
}
180-
return at::from_blob(
181-
etensor->mutable_data(), sizes64, at::TensorOptions(dtype));
182-
}
183-
184150
} // namespace util
185151
} // namespace torch

extension/aten_util/aten_bridge.h

Lines changed: 0 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -21,22 +21,6 @@
2121
namespace torch {
2222
namespace util {
2323

24-
using sizes32_t = std::vector<int32_t>;
25-
using sizes64_t = std::vector<int64_t>;
26-
27-
struct KeepAliveSizes {
28-
std::vector<sizes32_t> sizes32;
29-
std::vector<sizes64_t> sizes64;
30-
};
31-
32-
// TODO: we should really remove this as
33-
__ET_DEPRECATED std::unique_ptr<torch::executor::TensorImpl>
34-
eTensorFromAtTensor(const at::Tensor& tensor, KeepAliveSizes& keep_alive);
35-
36-
__ET_DEPRECATED at::Tensor atTensorFromETensor(
37-
torch::executor::TensorImpl* etensor,
38-
KeepAliveSizes& keep_alive);
39-
4024
torch::executor::ScalarType torchToExecuTorchScalarType(caffe2::TypeMeta type);
4125

4226
c10::ScalarType execuTorchtoTorchScalarType(torch::executor::ScalarType type);

extension/pybindings/module.cpp

Lines changed: 93 additions & 73 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,6 @@
5555
})
5656

5757
namespace py = pybind11;
58-
using ATTensor = at::Tensor;
5958
namespace torch {
6059
namespace executor {
6160

@@ -134,15 +133,7 @@ class Module final {
134133

135134
/// Executes the specified method on the provided inputs and returns its
136135
/// outputs.
137-
template <typename... Types>
138136
std::vector<EValue> run_method(
139-
const std::string& method_name,
140-
Types&&... args) {
141-
return run_method_internal(method_name, std::vector<EValue>{args...});
142-
}
143-
144-
private:
145-
std::vector<EValue> run_method_internal(
146137
const std::string& method_name,
147138
const std::vector<EValue>& args) {
148139
auto& method = methods_[method_name];
@@ -187,6 +178,7 @@ class Module final {
187178
return result;
188179
}
189180

181+
private:
190182
/// A wrapper/util class for executorch memory allocations/manager.
191183
class Memory {
192184
public:
@@ -266,66 +258,6 @@ inline std::unique_ptr<Module> load_from_file(const std::string& path) {
266258
return std::make_unique<Module>(std::move(loader));
267259
}
268260

269-
// Struct used to manage the memory of tensors allocated in lean (not aten) mode
270-
#ifdef USE_ATEN_LIB
271-
struct KeepAlive {};
272-
#else
273-
struct KeepAlive {
274-
std::vector<std::unique_ptr<exec_aten::TensorImpl>> tensors;
275-
torch::util::KeepAliveSizes sizes;
276-
};
277-
#endif
278-
279-
EValue pyToEValue(py::handle h, KeepAlive& keep_alive) {
280-
const std::string& type_str = py::str(h.get_type());
281-
EXECUTORCH_SCOPE_PROF("pyToEValue");
282-
if (type_str == "<class 'torch.Tensor'>") {
283-
auto atTensor = h.cast<ATTensor>();
284-
#ifdef USE_ATEN_LIB
285-
EValue evalue(atTensor);
286-
#else
287-
auto etensorImpl =
288-
torch::util::eTensorFromAtTensor(atTensor, keep_alive.sizes);
289-
EValue evalue(torch::executor::Tensor(etensorImpl.get()));
290-
keep_alive.tensors.push_back(std::move(etensorImpl));
291-
#endif
292-
return evalue;
293-
} else if (py::isinstance<py::none>(h)) {
294-
return EValue();
295-
} else if (py::isinstance<py::bool_>(h)) {
296-
return EValue(py::cast<bool>(h));
297-
} else if (py::isinstance<py::int_>(h)) {
298-
return EValue(py::cast<int64_t>(h));
299-
} else {
300-
// Unsupported pytype
301-
ET_ASSERT_UNREACHABLE_MSG(type_str.c_str());
302-
}
303-
}
304-
305-
py::object pyFromEValue(const EValue& v, KeepAlive& keep_alive) {
306-
EXECUTORCH_SCOPE_PROF("pyFromEValue");
307-
if (Tag::None == v.tag) {
308-
return py::none();
309-
} else if (Tag::Int == v.tag) {
310-
return py::cast(v.toInt());
311-
} else if (Tag::Double == v.tag) {
312-
return py::cast(v.toDouble());
313-
} else if (Tag::Bool == v.tag) {
314-
return py::cast(v.toBool());
315-
} else if (Tag::Tensor == v.tag) {
316-
#ifdef USE_ATEN_LIB
317-
return py::cast(v.toTensor().clone());
318-
#else
319-
// Clone so the outputs in python do not share a lifetime with the module
320-
// object
321-
return py::cast(torch::util::atTensorFromETensor(
322-
v.toTensor().unsafeGetTensorImpl(), keep_alive.sizes)
323-
.clone());
324-
#endif
325-
}
326-
ET_ASSERT_UNREACHABLE();
327-
}
328-
329261
static constexpr size_t kDEFAULT_BUNDLED_INPUT_POOL_SIZE = 16 * 1024U;
330262

331263
struct PyBundledModule final {
@@ -406,19 +338,108 @@ struct PyModule final {
406338
py::list run_method(
407339
const std::string& method_name,
408340
const py::sequence& inputs) {
409-
std::vector<EValue> cpp_inputs;
410341
const auto inputs_size = py::len(inputs);
342+
std::vector<EValue> cpp_inputs;
411343
cpp_inputs.reserve(inputs_size);
344+
#ifndef USE_ATEN_LIB // Portable mode
345+
// So the ETensors and their metadata stay in scope for Module->run_method.
346+
std::vector<torch::executor::TensorImpl> input_tensors;
347+
std::vector<std::vector<torch::executor::Tensor::SizesType>> input_sizes;
348+
std::vector<std::vector<torch::executor::Tensor::StridesType>>
349+
input_strides;
350+
std::vector<std::vector<torch::executor::Tensor::DimOrderType>>
351+
input_dim_order;
352+
// Important so we dont lose the pointers on a vector resize.
353+
input_tensors.reserve(inputs_size);
354+
input_sizes.reserve(inputs_size);
355+
input_strides.reserve(inputs_size);
356+
input_dim_order.reserve(inputs_size);
357+
#endif
412358
for (size_t i = 0; i < inputs_size; ++i) {
413-
cpp_inputs.emplace_back(pyToEValue(inputs[i], keep_alive_));
359+
auto h = inputs[i];
360+
const std::string& type_str = py::str(h.get_type());
361+
if (type_str == "<class 'torch.Tensor'>") {
362+
auto at_tensor = h.cast<at::Tensor>();
363+
// alias_etensor_to_attensor will assert on this later, so to better
364+
// propogate up to python we check early and throw an exception.
365+
if (!at_tensor.is_contiguous()) {
366+
auto error_msg = "Input " + std::to_string(i) + "for method " +
367+
method_name + " is not contiguous.";
368+
throw std::runtime_error(error_msg);
369+
}
370+
#ifdef USE_ATEN_LIB
371+
EValue evalue(at_tensor);
372+
#else
373+
// convert at::Tensor to torch::executor::tensor
374+
auto type = torch::util::torchToExecuTorchScalarType(
375+
at_tensor.options().dtype());
376+
size_t dim = at_tensor.dim();
377+
// cant directly alias at::tensor sizes and strides due to int64 vs
378+
// int32 typing conflict
379+
input_sizes.emplace_back(
380+
at_tensor.sizes().begin(), at_tensor.sizes().end());
381+
input_strides.emplace_back(
382+
at_tensor.strides().begin(), at_tensor.strides().end());
383+
384+
// Only works for MemoryFormat::Contiguous inputs
385+
std::vector<torch::executor::Tensor::DimOrderType> dim_order;
386+
for (size_t cur_dim = 0; cur_dim < dim; cur_dim++) {
387+
dim_order.push_back(cur_dim);
388+
}
389+
input_dim_order.push_back(std::move(dim_order));
390+
input_tensors.emplace_back(
391+
type,
392+
dim,
393+
input_sizes[i].data(),
394+
nullptr,
395+
input_dim_order[i].data(),
396+
input_strides[i].data());
397+
398+
torch::executor::Tensor temp =
399+
torch::executor::Tensor(&input_tensors[i]);
400+
torch::util::alias_etensor_to_attensor(at_tensor, temp);
401+
EValue evalue(temp);
402+
#endif
403+
cpp_inputs.push_back(evalue);
404+
} else if (py::isinstance<py::none>(h)) {
405+
cpp_inputs.push_back(EValue());
406+
} else if (py::isinstance<py::bool_>(h)) {
407+
cpp_inputs.push_back(EValue(py::cast<bool>(h)));
408+
} else if (py::isinstance<py::int_>(h)) {
409+
cpp_inputs.push_back(EValue(py::cast<int64_t>(h)));
410+
} else {
411+
// Unsupported pytype
412+
ET_ASSERT_UNREACHABLE_MSG(type_str.c_str());
413+
}
414414
}
415415

416416
auto outputs = module_->run_method(method_name, cpp_inputs);
417417

418+
// Retrieve outputs
418419
const auto outputs_size = outputs.size();
419420
py::list list(outputs_size);
420421
for (size_t i = 0; i < outputs_size; ++i) {
421-
list[i] = pyFromEValue(outputs[i], keep_alive_);
422+
auto& v = outputs[i];
423+
if (Tag::None == v.tag) {
424+
list[i] = py::none();
425+
} else if (Tag::Int == v.tag) {
426+
list[i] = py::cast(v.toInt());
427+
} else if (Tag::Double == v.tag) {
428+
list[i] = py::cast(v.toDouble());
429+
} else if (Tag::Bool == v.tag) {
430+
list[i] = py::cast(v.toBool());
431+
} else if (Tag::Tensor == v.tag) {
432+
#ifdef USE_ATEN_LIB
433+
// Clone so the outputs in python do not share a lifetime with the
434+
// module object
435+
list[i] = py::cast(v.toTensor().clone());
436+
#else
437+
list[i] = py::cast(
438+
torch::util::alias_attensor_to_etensor(v.toTensor()).clone());
439+
#endif
440+
} else {
441+
ET_ASSERT_UNREACHABLE_MSG("Invalid model output type");
442+
}
422443
}
423444
return list;
424445
}
@@ -428,7 +449,6 @@ struct PyModule final {
428449
}
429450

430451
private:
431-
KeepAlive keep_alive_;
432452
std::unique_ptr<Module> module_;
433453
};
434454

0 commit comments

Comments
 (0)