make bundled_executor_runner only for bp #532

Closed · wants to merge 2 commits

92 changes: 44 additions & 48 deletions examples/bundled_executor_runner/bundled_executor_runner.cpp
@@ -28,6 +28,7 @@
 #include <executorch/runtime/platform/log.h>
 #include <executorch/runtime/platform/profiler.h>
 #include <executorch/runtime/platform/runtime.h>
+#include <executorch/schema/bundled_program_schema_generated.h>
 #include <executorch/util/bundled_program_verification.h>
 #include <executorch/util/util.h>

@@ -36,19 +37,14 @@ static constexpr size_t kBundledAllocatorPoolSize = 16 * 1024U;
 static uint8_t bundled_allocator_pool[kBundledAllocatorPoolSize];
 
 DEFINE_string(
-    model_path,
-    "model.pte",
+    bundled_program_path,
+    "model_bundled.bp",
     "Model serialized in flatbuffer format.");
 DEFINE_string(
     prof_result_path,
     "prof_result.bin",
     "ExecuTorch profiler output path.");
 
-DEFINE_bool(
-    bundled_program,
-    false,
-    "True for running bundled program, false for executorch_flatbuffer::program");
-
 DEFINE_int32(
     testset_idx,
     0,
@@ -74,8 +70,8 @@ int main(int argc, char** argv) {
   // Create a loader to get the data of the program file. There are other
   // DataLoaders that use mmap() or point to data that's already in memory, and
   // users can create their own DataLoaders to load from arbitrary sources.
-  const char* model_path = FLAGS_model_path.c_str();
-  Result<FileDataLoader> loader = FileDataLoader::from(model_path);
+  const char* bundled_program_path = FLAGS_bundled_program_path.c_str();
+  Result<FileDataLoader> loader = FileDataLoader::from(bundled_program_path);
   ET_CHECK_MSG(
       loader.ok(), "FileDataLoader::from() failed: 0x%" PRIx32, loader.error());

@@ -84,9 +80,16 @@ int main(int argc, char** argv) {
   ET_CHECK_MSG(
       file_data.ok(),
       "Could not load contents of file '%s': 0x%x",
-      model_path,
+      bundled_program_path,
       (unsigned int)file_data.error());
 
+  // Check whether the file is a bundled program.
+  ET_CHECK_MSG(
+      executorch_flatbuffer::BundledProgramBufferHasIdentifier(
+          file_data->data()),
+      "The file '%s' is not a bundled program.",
+      bundled_program_path);
+
   // Find the offset to the embedded Program.
   const void* program_data;
   size_t program_data_len;
@@ -98,7 +101,7 @@ int main(int argc, char** argv) {
   ET_CHECK_MSG(
       status == Error::Ok,
       "GetProgramData() failed on file '%s': 0x%x",
-      model_path,
+      bundled_program_path,
       (unsigned int)status);
 
   auto buffer_data_loader =
@@ -108,10 +111,10 @@ int main(int argc, char** argv) {
   // between multiple execution invocations across multiple threads.
   Result<Program> program = Program::load(&buffer_data_loader);
   if (!program.ok()) {
-    ET_LOG(Error, "Failed to parse model file %s", model_path);
+    ET_LOG(Error, "Failed to parse model file %s", bundled_program_path);
     return 1;
   }
-  ET_LOG(Info, "Model file %s is loaded.", model_path);
+  ET_LOG(Info, "Model file %s is loaded.", bundled_program_path);
 
   // Use the first method in the program.
   const char* method_name = nullptr;
@@ -198,22 +201,18 @@ int main(int argc, char** argv) {
   MemoryAllocator bundled_input_allocator{
       MemoryAllocator(kBundledAllocatorPoolSize, bundled_allocator_pool)};
   exec_aten::ArrayRef<void*> inputs;
-  if (FLAGS_bundled_program) {
-    // Use the inputs embedded in the bundled program.
-    status = torch::executor::util::LoadBundledInput(
-        *method,
-        file_data->data(),
-        &bundled_input_allocator,
-        0, // Using the 0th indexed program
-        FLAGS_testset_idx);
-    ET_CHECK_MSG(
-        status == Error::Ok,
-        "LoadBundledInput failed with status 0x%" PRIx32,
-        status);
-  } else {
-    // Use ones-initialized inputs.
-    inputs = torch::executor::util::PrepareInputTensors(*method);
-  }
+  // Use the inputs embedded in the bundled program.
+  status = torch::executor::util::LoadBundledInput(
+      *method,
+      file_data->data(),
+      &bundled_input_allocator,
+      method_name,
+      FLAGS_testset_idx);
+  ET_CHECK_MSG(
+      status == Error::Ok,
+      "LoadBundledInput failed with status 0x%" PRIx32,
+      status);
+
   ET_LOG(Info, "Inputs prepared.");
 
   // Run the model.
@@ -249,24 +248,21 @@ int main(int argc, char** argv) {
     fclose(ptr);
   }
 
-  // Handle the outputs.
-  if (FLAGS_bundled_program) {
-    status = torch::executor::util::VerifyResultWithBundledExpectedOutput(
-        *method,
-        file_data->data(),
-        &bundled_input_allocator,
-        0,
-        FLAGS_testset_idx,
-        1e-5, // rtol
-        1e-8 // atol
-    );
-    ET_CHECK_MSG(
-        status == Error::Ok,
-        "Bundle verification failed with status 0x%" PRIx32,
-        status);
-    ET_LOG(Info, "Model verified successfully.");
-  } else {
-    torch::executor::util::FreeInputs(inputs);
-  }
+  // Verify the outputs.
+  status = torch::executor::util::VerifyResultWithBundledExpectedOutput(
+      *method,
+      file_data->data(),
+      &bundled_input_allocator,
+      method_name,
+      FLAGS_testset_idx,
+      1e-5, // rtol
+      1e-8 // atol
+  );
+  ET_CHECK_MSG(
+      status == Error::Ok,
+      "Bundle verification failed with status 0x%" PRIx32,
+      status);
+  ET_LOG(Info, "Model verified successfully.");
+
   return 0;
 }
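
With the --bundled_program flag removed, the runner always treats its input as a bundled program: the new BundledProgramBufferHasIdentifier() guard checks the 4-byte flatbuffer file identifier up front, so a plain .pte model now fails fast with a clear message instead of a confusing parse error later. A hypothetical invocation after this change (the binary location and model file name are assumptions, not part of this PR; the flags come from the diff above):

    ./bundled_executor_runner --bundled_program_path=mv2_bundled.bp --testset_idx=0
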
1 change: 1 addition & 0 deletions examples/bundled_executor_runner/targets.bzl
@@ -19,6 +19,7 @@ def define_common_targets():
             "//executorch/runtime/executor:program",
             "//executorch/extension/data_loader:file_data_loader",
             "//executorch/extension/data_loader:buffer_data_loader",
+            "//executorch/schema:bundled_program_schema",
             "//executorch/util:util",
             "//executorch/util:bundled_program_verification",
         ],
25 changes: 15 additions & 10 deletions examples/export/export_bundled_program.py
@@ -21,6 +21,7 @@
 
 
 def save_bundled_program(
+    method_names,
     inputs,
     exec_prog,
     graph_module,
@@ -30,20 +31,20 @@ def save_bundled_program(
     # set for the model. If we wish to test the model with multiple inputs then they can be
     # appended to this list. len(inputs) == number of test sets we want to run.
     #
-    # If we have multiple execution plans in this program then we add another list of tuples
-    # to test that corresponding execution plan. Index of list of tuples will match the index
-    # of the execution plan against which it will be tested.
+    # If we have multiple methods in this program then we add another list of tuples to test
+    # the corresponding method. The index of each list of tuples matches the index of the
+    # method's name in the method_names list forwarded to BundledConfig.
     bundled_inputs = [inputs for _ in range(len(exec_prog.program.execution_plan))]
 
     # For each input tuple we run the graph module and put the resulting output in a list. This
-    # is repeated over all the tuples present in the input list and then repeated for each execution
-    # plan we want to test against.
+    # is repeated over all the tuples present in the input list and then repeated for each method
+    # name we want to test against.
     expected_outputs = [
         [[graph_module(*x)] for x in inputs]
         for i in range(len(exec_prog.program.execution_plan))
     ]
 
-    bundled_config = BundledConfig(bundled_inputs, expected_outputs)
+    bundled_config = BundledConfig(method_names, bundled_inputs, expected_outputs)
 
     bundled_program = create_bundled_program(exec_prog.program, bundled_config)
     bundled_program_buffer = serialize_from_bundled_program_to_flatbuffer(
@@ -54,16 +55,18 @@
         file.write(bundled_program_buffer)
 
 
-def export_to_pte(model_name, model, example_inputs):
+def export_to_pte(model_name, model, method_names, example_inputs):
     exec_prog = export_to_exec_prog(model, example_inputs)
     save_pte_program(exec_prog.buffer, model_name)
 
     # Just as an example to show how multiple input sets can be bundled along, here we
     # create a list with the example_inputs tuple used twice. Each instance of example_inputs
    # is a Tuple[Union[torch.tensor, int, bool]] which represents one test set for the model.
     bundled_inputs = [example_inputs, example_inputs]
-    print(f"Saving exported program to {model_name}_bundled.pte")
-    save_bundled_program(bundled_inputs, exec_prog, model, f"{model_name}_bundled.pte")
+    print(f"Saving exported program to {model_name}_bundled.bp")
+    save_bundled_program(
+        method_names, bundled_inputs, exec_prog, model, f"{model_name}_bundled.bp"
+    )
 
 
 if __name__ == "__main__":
@@ -87,4 +90,6 @@ def export_to_pte(model_name, model, example_inputs):
         *MODEL_NAME_TO_MODEL[args.model_name]
     )
 
-    export_to_pte(args.model_name, model, example_inputs)
+    method_names = ["forward"]
+
+    export_to_pte(args.model_name, model, method_names, example_inputs)
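
To make the new three-argument BundledConfig shape concrete, here is a minimal sketch of the structure the updated save_bundled_program() builds. The import path and the model / example_inputs placeholders are assumptions for illustration, not part of this PR:

    # Sketch only: bundle the single method "forward" with two identical test sets.
    # Import path is assumed; this PR only shows the BundledConfig call site.
    from executorch.bundled_program.config import BundledConfig

    method_names = ["forward"]
    inputs = [example_inputs, example_inputs]  # list of test-set input tuples
    bundled_inputs = [inputs]  # one list of test sets per entry in method_names
    expected_outputs = [[[model(*x)] for x in inputs]]  # eager outputs per test set
    bundled_config = BundledConfig(method_names, bundled_inputs, expected_outputs)
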
1 change: 1 addition & 0 deletions schema/targets.bzl
@@ -113,6 +113,7 @@ def define_common_targets():
         visibility = [
             "//executorch/bundled_program/...",
             "//executorch/extension/pybindings/...",
+            "//executorch/examples/bundled_executor_runner/...",
             "//executorch/util/...",  # bundled_program_verification
         ],
         exported_headers = {