Skip to content

Loosen sdk example verification requirement #906

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion bundled_program/TARGETS
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ python_library(
deps = [
"fbsource//third-party/pypi/typing-extensions:typing-extensions",
"//caffe2:torch",
"//executorch/extension/pytree:pylib",
],
)

Expand Down
3 changes: 1 addition & 2 deletions bundled_program/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from typing import Any, get_args, List, Union

import torch
from executorch.extension.pytree import tree_flatten
from torch.utils._pytree import tree_flatten

from typing_extensions import TypeAlias

Expand Down Expand Up @@ -126,7 +126,6 @@ def _tree_flatten(unflatten_data: Any) -> List[ConfigValue]:
Returns:
flatten_data: Flatten data with legal type.
"""
# pyre-fixme[16]: Module `pytree` has no attribute `tree_flatten`.
flatten_data, _ = tree_flatten(unflatten_data)

for data in flatten_data:
Expand Down
15 changes: 0 additions & 15 deletions examples/portable/executor_runner/executor_runner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,6 @@
#include <executorch/runtime/executor/method.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/log.h>
#include <executorch/runtime/platform/profiler.h>
#include <executorch/runtime/platform/runtime.h>
#include <executorch/util/util.h>

Expand All @@ -37,10 +36,6 @@ DEFINE_string(
model_path,
"model.pte",
"Model serialized in flatbuffer format.");
DEFINE_string(
prof_result_path,
"prof_result.bin",
"ExecuTorch profiler output path.");

using namespace torch::executor;
using torch::executor::util::FileDataLoader;
Expand Down Expand Up @@ -113,7 +108,6 @@ int main(int argc, char** argv) {
// In this example we use a statically allocated memory pool.
MemoryAllocator method_allocator{
MemoryAllocator(sizeof(method_allocator_pool), method_allocator_pool)};
method_allocator.enable_profiling("method allocator");

// The memory-planned buffers will back the mutable tensors used by the
// method. The sizes of these buffers were determined ahead of time during the
Expand Down Expand Up @@ -180,15 +174,6 @@ int main(int argc, char** argv) {
std::cout << "Output " << i << ": " << outputs[i] << std::endl;
}

// Dump the profiling data to the specified file.
torch::executor::prof_result_t prof_result;
EXECUTORCH_DUMP_PROFILE_RESULTS(&prof_result);
if (prof_result.num_bytes != 0) {
FILE* ptr = fopen(FLAGS_prof_result_path.c_str(), "w+");
fwrite(prof_result.prof_data, 1, prof_result.num_bytes, ptr);
fclose(ptr);
}

util::FreeInputs(inputs);
return 0;
}
51 changes: 22 additions & 29 deletions examples/sdk/sdk_example_runner/sdk_example_runner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@
#include <executorch/runtime/executor/method.h>
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/log.h>
#include <executorch/runtime/platform/profiler.h>
#include <executorch/runtime/platform/runtime.h>
#include <executorch/sdk/etdump/etdump_flatcc.h>
#include <executorch/util/bundled_program_verification.h>
Expand All @@ -41,11 +40,6 @@ DEFINE_string(
"model_bundled.bp",
"Model serialized in flatbuffer format.");

DEFINE_string(
prof_result_path,
"prof_result.bin",
"ExecuTorch profiler output path.");

DEFINE_int32(
testset_idx,
0,
Expand All @@ -62,6 +56,11 @@ DEFINE_bool(
false,
"Compare the model output to the reference outputs present in the BundledProgram.");

DEFINE_bool(
print_output,
false,
"Print the output of the ET model to stdout, if needed.");

using namespace torch::executor;
using torch::executor::util::FileDataLoader;

Expand Down Expand Up @@ -158,7 +157,6 @@ int main(int argc, char** argv) {
// In this example we use a statically allocated memory pool.
MemoryAllocator method_allocator{
MemoryAllocator(sizeof(method_allocator_pool), method_allocator_pool)};
method_allocator.enable_profiling("method allocator");

// The memory-planned buffers will back the mutable tensors used by the
// method. The sizes of these buffers were determined ahead of time during the
Expand Down Expand Up @@ -230,29 +228,24 @@ int main(int argc, char** argv) {
ET_LOG(Info, "Model executed successfully.");

// Print the outputs.
std::vector<EValue> outputs(method->outputs_size());
status = method->get_outputs(outputs.data(), outputs.size());
ET_CHECK(status == Error::Ok);
for (EValue& output : outputs) {
// TODO(T159700776): This assumes that all outputs are fp32 tensors. Add
// support for other EValues and Tensor dtypes, and print tensors in a more
// readable way.
auto output_tensor = output.toTensor();
auto data_output = output_tensor.const_data_ptr<float>();
for (size_t j = 0; j < output_tensor.numel(); ++j) {
ET_LOG(Info, "%f", data_output[j]);
if (FLAGS_print_output) {
std::vector<EValue> outputs(method->outputs_size());
status = method->get_outputs(outputs.data(), outputs.size());
ET_CHECK(status == Error::Ok);
for (EValue& output : outputs) {
// TODO(T159700776): This assumes that all outputs are fp32 tensors. Add
// support for other EValues and Tensor dtypes, and print tensors in a
// more readable way.
auto output_tensor = output.toTensor();
auto data_output = output_tensor.const_data_ptr<float>();
for (size_t j = 0; j < output_tensor.numel(); ++j) {
ET_LOG(Info, "%f", data_output[j]);
}
}
}

// Dump the profiling data to the specified file.
torch::executor::prof_result_t prof_result;
EXECUTORCH_DUMP_PROFILE_RESULTS(&prof_result);
if (prof_result.num_bytes != 0) {
FILE* ptr = fopen(FLAGS_prof_result_path.c_str(), "w+");
fwrite(prof_result.prof_data, 1, prof_result.num_bytes, ptr);
fclose(ptr);
}

// Dump the etdump data containing profiling/debugging data to the specified
// file.
etdump_result result = etdump_gen.get_etdump_data();
if (result.buf != nullptr && result.size > 0) {
FILE* f = fopen(FLAGS_etdump_path.c_str(), "w+");
Expand All @@ -269,8 +262,8 @@ int main(int argc, char** argv) {
&bundled_input_allocator,
method_name,
FLAGS_testset_idx,
1e-4, // rtol
1e-8 // atol
1e-3, // rtol
1e-5 // atol
);
ET_CHECK_MSG(
status == Error::Ok,
Expand Down