Commit 5acd5c9

JacobSzwejbka authored and facebook-github-bot committed
Less Noisy Pybindings (#5828)
Summary: Pull Request resolved: #5828

Only log actual errors; no longer use error codes to check whether a tensor is memory planned. Also cleaned up some vector logic while I'm here.

Reviewed By: dbort

Differential Revision: D63786751

fbshipit-source-id: 1b4e29d307305afc825a6c95315926089a6472bf
1 parent dc24983 commit 5acd5c9
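
The core change: instead of calling set_output_data_ptr and treating Error::InvalidState as a benign signal that an output is memory planned, the bindings now ask MethodMeta up front which outputs are tensors and whether they are memory planned, so any error that does surface is real. A rough sketch of that query (not code from this commit; the namespace and include paths are assumptions and have moved between ExecuTorch releases):

```cpp
#include <executorch/runtime/executor/method.h>
#include <executorch/runtime/executor/method_meta.h>

// Assumed namespace; older releases exposed these types under torch::executor.
using executorch::runtime::Method;
using executorch::runtime::Tag;

// Illustrative helper, not part of this commit: returns true when output i
// is a tensor whose memory is NOT planned, i.e. the caller must provide a
// buffer for it.
bool needs_output_buffer(const Method& method, size_t i) {
  auto meta = method.method_meta();
  auto tag = meta.output_tag(i);
  if (!tag.ok() || tag.get() != Tag::Tensor) {
    return false;  // non-tensor outputs never take a caller buffer
  }
  auto tensor_meta = meta.output_tensor_meta(i);
  return tensor_meta.ok() && !tensor_meta.get().is_memory_planned();
}
```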

File tree

1 file changed: +31 −23 lines

extension/pybindings/pybindings.cpp

```diff
@@ -147,19 +147,19 @@ void setup_output_storage(
   }
   for (size_t i = 0; i < output_storages.size(); ++i) {
     if (output_storages[i].size() == 0) {
-      // Skip empty output storages, this would happen for non-tensor outputs.
+      // Skip empty output storages, this would happen for non-tensor outputs
+      // and memory planned outputs.
       continue;
     }
     Error output_status = method.set_output_data_ptr(
         output_storages[i].data(), output_storages[i].size(), i);
-    // InvalidState can be the status if outputs are already memory planned.
-    // That's fine and we don't need to alert the user to that error.
-    if (output_status != Error::Ok && output_status != Error::InvalidState) {
-      ET_LOG(
-          Error,
-          "Cannot set_output_data_ptr(): 0x%" PRIx32,
-          static_cast<uint32_t>(output_status));
-    }
+    // We already should be skipping non-tensor outputs, and memory planned
+    // outputs so any error is real.
+    THROW_IF_ERROR(
+        output_status,
+        "set_output_data_ptr failed for output %zu with error 0x%" PRIx32,
+        i,
+        static_cast<uint32_t>(output_status));
   }
 }
```

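THROW_IF_ERROR, used in both hunks, is a helper macro defined earlier in pybindings.cpp: it turns a non-Ok Error into a C++ exception, which pybind11 surfaces to Python as a RuntimeError instead of a log line. Its exact definition is not part of this diff; a minimal sketch of the pattern (buffer size arbitrary) might look like:

```cpp
#include <cstdio>     // snprintf
#include <stdexcept>  // std::runtime_error

// Sketch only; the real macro in pybindings.cpp may format its message
// differently.
#define THROW_IF_ERROR(error, message, ...)                        \
  do {                                                             \
    if ((error) != Error::Ok) {                                    \
      char msg_buf[128];                                           \
      snprintf(msg_buf, sizeof(msg_buf), message, ##__VA_ARGS__);  \
      throw std::runtime_error(msg_buf);                           \
    }                                                              \
  } while (false)
```
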
```diff
@@ -890,26 +890,34 @@ struct PyModule final {
 
   std::vector<std::vector<uint8_t>> make_output_storages(const Method& method) {
     const auto num_outputs = method.outputs_size();
-    // These output storages will not be used if the ExecuTorch program already
-    // pre-allocated output space. That is represented by an error from
-    // set_output_data_ptr.
-    std::vector<std::vector<uint8_t>> output_storages(num_outputs);
+    // Create a buffer for each output tensor. Memory planned outputs and non
+    // tensor outputs get an empty buffer in this list which is ignored later.
+    std::vector<std::vector<uint8_t>> output_storages;
+    output_storages_.reserve(num_outputs);
+    auto meta = method.method_meta();
     for (size_t i = 0; i < num_outputs; ++i) {
+      auto output_type = meta.output_tag(i);
+      THROW_IF_ERROR(
+          output_type.error(), "Failed to get output type for output %zu", i);
+      if (output_type.get() != Tag::Tensor) {
+        // Skip allocating storage for non-tensor outputs.
+        output_storages.emplace_back();
+        continue;
+      }
       const auto& output_tensor_meta =
           method.method_meta().output_tensor_meta(i);
-      if (!output_tensor_meta.ok()) {
-        // If the output isn't a tensor it won't have a tensor meta.
-        ET_LOG(
-            Error,
-            "Tensor meta doesn't exist for output %zu, error is 0x%" PRIx32
-            ", skipping allocating storage",
-            i,
-            static_cast<uint32_t>(output_tensor_meta.error()));
-        output_storages[i] = std::vector<uint8_t>();
+      THROW_IF_ERROR(
+          output_tensor_meta.error(),
+          "Failed to get output tensor meta for output %zu",
+          i);
+      if (output_tensor_meta.get().is_memory_planned()) {
+        // Skip allocating storage for planned memory outputs.
+        output_storages.emplace_back();
         continue;
       }
+      // Allocate storage for the output tensor.
       const size_t output_size = output_tensor_meta.get().nbytes();
-      output_storages[i] = std::vector<uint8_t>(output_size);
+      output_storages.emplace_back(output_size);
     }
     return output_storages;
   }
```

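Taken together, make_output_storages sizes one buffer per unplanned tensor output (leaving empty placeholders elsewhere), and setup_output_storage hands those buffers to the Method before execution, skipping the placeholders. A hypothetical sketch of the call order (the wrapper name, the signature of setup_output_storage, and the error message are assumptions, not code from this commit):

```cpp
// Hypothetical member of PyModule illustrating how the two functions are
// meant to be used together.
void run_with_managed_outputs(Method& method) {
  // One buffer per output; empty for memory planned and non-tensor outputs.
  auto storages = make_output_storages(method);
  // Point each unplanned tensor output at its buffer; empty entries are
  // skipped inside setup_output_storage.
  setup_output_storage(method, storages);  // assumed signature
  // With the new behavior, any real failure throws rather than logging.
  THROW_IF_ERROR(method.execute(), "method execution failed");
}
```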