Skip to content

Add event tracing and ETDumps to executor_runner #4502

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Copyright 2024 Arm Limited and/or its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
Expand Down Expand Up @@ -581,6 +582,10 @@ if(EXECUTORCH_BUILD_EXECUTOR_RUNNER)
list(APPEND _executor_runner_libs portable_ops_lib)
endif()

if(EXECUTORCH_ENABLE_EVENT_TRACER)
list(APPEND _executor_runner_libs etdump flatccrt)
endif()

# Generate lib to register quantized ops
if(EXECUTORCH_BUILD_KERNELS_QUANTIZED)
list(APPEND _executor_runner_libs quantized_ops_lib)
Expand Down
8 changes: 7 additions & 1 deletion backends/xnnpack/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# Copyright 2024 Arm Limited and/or its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
Expand Down Expand Up @@ -113,8 +114,13 @@ if(NOT CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\.toolchain)\.cmake$")
#
list(TRANSFORM _xnn_executor_runner__srcs PREPEND "${EXECUTORCH_ROOT}/")
add_executable(xnn_executor_runner ${_xnn_executor_runner__srcs})

if(EXECUTORCH_ENABLE_EVENT_TRACER)
list(APPEND xnn_executor_runner_libs etdump)
endif()

target_link_libraries(
xnn_executor_runner xnnpack_backend gflags portable_ops_lib
xnn_executor_runner gflags portable_ops_lib ${xnn_executor_runner_libs}
)
target_compile_options(xnn_executor_runner PUBLIC ${_common_compile_options})
endif()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ Since weight packing creates an extra copy of the weights inside XNNPACK, We fre
When executing the XNNPACK subgraphs, we prepare the tensor inputs and outputs and feed them to the XNNPACK runtime graph. After executing the runtime graph, the output pointers are filled with the computed tensors.

#### **Profiling**
We have enabled basic profiling for XNNPACK delegate that can be enabled with the following compiler flag `-DENABLE_XNNPACK_PROFILING`. With ExecuTorch's SDK integration, you can also now use the SDK tools to profile the model. You can follow the steps in [Using the ExecuTorch SDK to Profile a Model](./tutorials/sdk-integration-tutorial) on how to profile ExecuTorch models and use SDK's Inspector API to view XNNPACK's internal profiling information.
Basic profiling for the XNNPACK delegate can be enabled with the compiler flag `-DEXECUTORCH_ENABLE_EVENT_TRACER` (add `-DENABLE_XNNPACK_PROFILING` for additional details). With ExecuTorch's SDK integration, you can also now use the SDK tools to profile the model. You can follow the steps in [Using the ExecuTorch SDK to Profile a Model](./tutorials/sdk-integration-tutorial) on how to profile ExecuTorch models and use SDK's Inspector API to view XNNPACK's internal profiling information. An example implementation is available in the `xnn_executor_runner` (see [tutorial here](tutorial-xnnpack-delegate-lowering.md#profiling)).


[comment]: <> (TODO: Refactor quantizer to a more official quantization doc)
Expand Down
3 changes: 3 additions & 0 deletions docs/source/tutorial-xnnpack-delegate-lowering.md
Original file line number Diff line number Diff line change
Expand Up @@ -171,3 +171,6 @@ Now you should be able to find the executable built at `./cmake-out/backends/xnn

## Building and Linking with the XNNPACK Backend
You can build the XNNPACK backend [CMake target](https://github.com/pytorch/executorch/blob/main/backends/xnnpack/CMakeLists.txt#L83), and link it with your application binary such as an Android or iOS application. For more information on this you may take a look at this [resource](demo-apps-android.md) next.

## Profiling
To enable profiling in the `xnn_executor_runner`, pass the flags `-DEXECUTORCH_ENABLE_EVENT_TRACER=ON` and `-DEXECUTORCH_BUILD_SDK=ON` to the build command (add `-DENABLE_XNNPACK_PROFILING=ON` for additional details). This will enable ETDump generation when running the inference and enable command-line flags for profiling (see `xnn_executor_runner --help` for details).
58 changes: 48 additions & 10 deletions examples/portable/executor_runner/executor_runner.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* Copyright 2024 Arm Limited and/or its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
Expand Down Expand Up @@ -29,13 +30,23 @@
#include <executorch/runtime/executor/program.h>
#include <executorch/runtime/platform/log.h>
#include <executorch/runtime/platform/runtime.h>
#ifdef ET_EVENT_TRACER_ENABLED
#include <executorch/sdk/etdump/etdump_flatcc.h>
#endif // ET_EVENT_TRACER_ENABLED

static uint8_t method_allocator_pool[4 * 1024U * 1024U]; // 4 MB

DEFINE_string(
model_path,
"model.pte",
"Model serialized in flatbuffer format.");
DEFINE_uint32(num_executions, 1, "Number of times to run the model.");
#ifdef ET_EVENT_TRACER_ENABLED
DEFINE_string(
etdump_path,
"model.etdump",
"If ETDump generation is enabled an ETDump will be written out to this path.");
#endif // ET_EVENT_TRACER_ENABLED

using namespace torch::executor;
using torch::executor::util::FileDataLoader;
Expand Down Expand Up @@ -142,8 +153,20 @@ int main(int argc, char** argv) {
// the method can mutate the memory-planned buffers, so the method should only
// be used by a single thread at at time, but it can be reused.
//

Result<Method> method = program->load_method(method_name, &memory_manager);
EventTracer* event_tracer_ptr = nullptr;
#ifdef ET_EVENT_TRACER_ENABLED
std::unique_ptr<FILE, decltype(&fclose)> etdump_file(
fopen(FLAGS_etdump_path.c_str(), "w+"), fclose);
ET_CHECK_MSG(
etdump_file,
"Failed to open ETDump file at %s.",
FLAGS_etdump_path.c_str());

torch::executor::ETDumpGen etdump_gen = torch::executor::ETDumpGen();
event_tracer_ptr = &etdump_gen;
#endif // ET_EVENT_TRACER_ENABLED
Result<Method> method =
program->load_method(method_name, &memory_manager, event_tracer_ptr);
ET_CHECK_MSG(
method.ok(),
"Loading of method %s failed with status 0x%" PRIx32,
Expand All @@ -162,24 +185,39 @@ int main(int argc, char** argv) {
ET_LOG(Info, "Inputs prepared.");

// Run the model.
Error status = method->execute();
ET_CHECK_MSG(
status == Error::Ok,
"Execution of method %s failed with status 0x%" PRIx32,
method_name,
(uint32_t)status);
ET_LOG(Info, "Model executed successfully.");
for (uint32_t i = 0; i < FLAGS_num_executions; i++) {
Error status = method->execute();
ET_CHECK_MSG(
status == Error::Ok,
"Execution of method %s failed with status 0x%" PRIx32,
method_name,
(uint32_t)status);
}
ET_LOG(Info, "Model executed successfully %i time(s).", FLAGS_num_executions);

// Print the outputs.
std::vector<EValue> outputs(method->outputs_size());
ET_LOG(Info, "%zu outputs: ", outputs.size());
status = method->get_outputs(outputs.data(), outputs.size());
Error status = method->get_outputs(outputs.data(), outputs.size());
ET_CHECK(status == Error::Ok);
// Print the first and last 100 elements of long lists of scalars.
std::cout << torch::executor::util::evalue_edge_items(100);
for (int i = 0; i < outputs.size(); ++i) {
std::cout << "Output " << i << ": " << outputs[i] << std::endl;
}

#ifdef ET_EVENT_TRACER_ENABLED
// Dump the ETDump data containing profiling/debugging data to the specified
// file.
etdump_result result = etdump_gen.get_etdump_data();
if (result.buf != nullptr && result.size > 0) {
fwrite((uint8_t*)result.buf, 1, result.size, etdump_file.get());
free(result.buf);
ET_LOG(Info, "ETDump written to file '%s'.", FLAGS_etdump_path.c_str());
} else {
ET_LOG(Error, "No ETDump data available!");
}
#endif // ET_EVENT_TRACER_ENABLED

return 0;
}
Loading