
Commit 76a393a

tidied delegate_runner output
Signed-off-by: Rob Elliott <[email protected]>
1 parent e6ede01 commit 76a393a

File tree

1 file changed: +41 additions, -43 deletions

examples/backend/arm/ethos-u-setup/core_platform/patches/0007-Add-delegate-runner-test.patch

Lines changed: 41 additions & 43 deletions
@@ -1,4 +1,4 @@
-From 8201e36f90fed6e80fea7021ec4bad325d329bae Mon Sep 17 00:00:00 2001
+From 0fe8caba3068da05021232912c069124a81e0d94 Mon Sep 17 00:00:00 2001
 From: Rob Elliott <[email protected]>
 Date: Wed, 4 Oct 2023 13:31:33 +0000
 Subject: [PATCH] Add delegate runner test
@@ -7,9 +7,9 @@ Signed-off-by: Rob Elliott <[email protected]>
 ---
  applications/executorch_tests/CMakeLists.txt | 27 ++-
  .../executorch_tests/pte_to_header.py | 11 +-
- .../executorch_tests/runner_delegate.cpp | 162 ++++++++++++++++++
+ .../executorch_tests/runner_delegate.cpp | 160 ++++++++++++++++++
  cmake/toolchain/arm-none-eabi-gcc.cmake | 6 +-
- 4 files changed, 197 insertions(+), 9 deletions(-)
+ 4 files changed, 195 insertions(+), 9 deletions(-)
  create mode 100644 applications/executorch_tests/runner_delegate.cpp
 
 diff --git a/applications/executorch_tests/CMakeLists.txt b/applications/executorch_tests/CMakeLists.txt
@@ -116,10 +116,10 @@ index 37d88aa..be3282d 100644
 with open(args.pte, "rb") as fr, open(
 diff --git a/applications/executorch_tests/runner_delegate.cpp b/applications/executorch_tests/runner_delegate.cpp
 new file mode 100644
-index 0000000..6af6a92
+index 0000000..ff40084
 --- /dev/null
 +++ b/applications/executorch_tests/runner_delegate.cpp
-@@ -0,0 +1,162 @@
+@@ -0,0 +1,160 @@
 +/*
 + * SPDX-FileCopyrightText: Copyright 2021-2023 Arm Limited and/or its affiliates <[email protected]>
 + *
@@ -167,12 +167,12 @@ index 0000000..6af6a92
 +void et_pal_init(void) {}
 +
 +__ET_NORETURN void et_pal_abort(void) {
-+ __builtin_trap();
++ __builtin_trap();
 +}
 +
 +et_timestamp_t et_pal_current_ticks(void) {
-+ // libc.a - warning: _gettimeofday is not implemented and will always fail
-+ return 11223344;
++ // libc.a - warning: _gettimeofday is not implemented and will always fail
++ return 11223344;
 +}
 +
 +/**
@@ -186,46 +186,45 @@ index 0000000..6af6a92
 + size_t line,
 + const char* message,
 + __ET_UNUSED size_t length) {
-+ fprintf(
-+ stderr,
-+ "%c executorch:%s:%zu] %s\n",
-+ level,
-+ filename,
-+ line,
-+ message);
++ fprintf(
++ stderr,
++ "%c executorch:%s:%zu] %s\n",
++ level,
++ filename,
++ line,
++ message);
 +}
 +
 +int main()
 +{
-+ printf("test test test NG ^2 22\n");
-+ printf("main: Initialising runtime\n");
++ ET_LOG(Info, "Initialising runtime");
 + torch::executor::runtime_init();
 +
 + using torch::executor::Result;
 + using torch::executor::Error;
 +
-+ // Load pte from the global model_pte .pte file loaded into SRAM.
++ // Load pte from the global model_pte .pte file loaded into SRAM.
 + auto loader = torch::executor::util::BufferDataLoader(model_pte, sizeof(model_pte));
 + Result<torch::executor::Program> program = torch::executor::Program::load(&loader);
 + if(!program.ok()) {
-+ printf("main: Program loading failed @ 0x%p: 0x%x", model_pte, (int)program.error());
++ ET_LOG(Info, "Program loading failed @ 0x%p: 0x%x", model_pte, (int)program.error());
 + }
-+ printf("main: Model buffer loaded, has %u methods\n", program->num_methods());
++ ET_LOG(Info, "Model buffer loaded, has %u methods", program->num_methods());
 +
-+ // Find our entrypoint in the .pte program
++ // Find our entrypoint in the .pte program
 + const char* method_name = nullptr;
-+ const auto method_name_result = program->get_method_name(0);
-+ ET_CHECK_MSG(method_name_result.ok(), "Program has no methods");
-+ method_name = *method_name_result;
-+ printf("main: Found (and will run) method '%s'\n", method_name);
++ const auto method_name_result = program->get_method_name(0);
++ ET_CHECK_MSG(method_name_result.ok(), "Program has no methods");
++ method_name = *method_name_result;
++ ET_LOG(Info, "Found (and will run) method '%s'", method_name);
 +
-+ // Allocate necessary memories for this method
++ // Allocate necessary memories for this method
 + Result<torch::executor::MethodMeta> method_meta = program->method_meta(method_name);
 + if (!method_meta.ok()) {
-+ printf("main: Failed to get method_meta for %s: 0x%x",
++ ET_LOG(Info, "Failed to get method_meta for %s: 0x%x",
 + method_name, (unsigned int)method_meta.error());
 + }
-+
++
 + torch::executor::MemoryAllocator method_allocator{
 + torch::executor::MemoryAllocator(sizeof(method_allocator_pool), method_allocator_pool)};
 +
@@ -235,7 +234,7 @@ index 0000000..6af6a92
 +
 + for (size_t id = 0; id < num_memory_planned_buffers; ++id) {
 + size_t buffer_size = static_cast<size_t>(method_meta->memory_planned_buffer_size(id).get());
-+ printf("main: Setting up planned buffer %zu, size %zu.\n", id, buffer_size);
++ ET_LOG(Info, "Setting up planned buffer %zu, size %zu.", id, buffer_size);
 +
 + planned_buffers.push_back(std::make_unique<uint8_t[]>(buffer_size));
 + planned_spans.push_back({planned_buffers.back().get(), buffer_size});
@@ -249,36 +248,35 @@ index 0000000..6af6a92
 + Result<torch::executor::Method> method = program->load_method(method_name, &memory_manager);
 +
 + if(!method.ok()) {
-+ printf("main: Loading of method %s failed with status 0x%x\n", method_name, (int)method.error());
++ ET_LOG(Info, "Loading of method %s failed with status 0x%x", method_name, (int)method.error());
 + }
-+ printf("main: Loading of method '%s' succesful\n", method_name);
++ ET_LOG(Info, "Loading of method '%s' succesful", method_name);
 +
-+ printf("main: Preparing inputs...\n");
 + auto inputs = torch::executor::util::PrepareInputTensors(*method);
 +
-+ printf("main: Starting the model execution...\n");
++ ET_LOG(Info, "Starting the model execution...");
 + Error status = method->execute();
 + if(status != Error::Ok){
-+ printf("main: Execution of method %s failed with status 0x%x\n", method_name, (int)status);
++ ET_LOG(Info, "Execution of method %s failed with status 0x%x", method_name, (int)status);
 + } else {
-+ printf("main: Model executed successfully.\n");
++ ET_LOG(Info, "Model executed successfully.");
 + }
 +
 + // Print the outputs.
 + std::vector<torch::executor::EValue> outputs(method->outputs_size());
-+ printf("main: %d outputs - ", outputs.size());
++ ET_LOG(Info, "%d outputs - ", outputs.size());
 + status = method->get_outputs(outputs.data(), outputs.size());
 + ET_CHECK(status == Error::Ok);
 + for (size_t i = 0; i < outputs.size(); ++i)
-+ {
-+ printf("main: Output %d numel %d\n", i, outputs[i].toTensor().numel());
-+ for (size_t j = 0; j < outputs[i].toTensor().numel(); ++j)
-+ {
-+ printf("main: Output[%d]: %d\n", j, outputs[i].toTensor().const_data_ptr<int>()[j]);
-+ }
++ {
++ ET_LOG(Info, "Output %d numel %d", i, outputs[i].toTensor().numel());
++ for (size_t j = 0; j < outputs[i].toTensor().numel(); ++j)
++ {
++ ET_LOG(Info, " Output[%d]: %d", j, outputs[i].toTensor().const_data_ptr<int>()[j]);
++ }
 + }
 +
-+ return 0;
++ return 0;
 +}
 +
 +
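The net effect of the commit is that the delegate runner's ad-hoc printf("main: ...\n") output now goes through ExecuTorch's ET_LOG macro, which routes every message to the et_pal_emit_log_message() override defined earlier in runner_delegate.cpp and adds the level/file/line prefix itself. A minimal sketch of the resulting call-site pattern is shown below; the header path and the logged variable are assumptions for illustration, not part of this patch.

```cpp
// Sketch only: assumes the ExecuTorch platform log header is available at
// this path; `num_methods` is a stand-in value, not taken from the patch.
#include <cstddef>
#include <executorch/runtime/platform/log.h>

void report_model_loaded(size_t num_methods) {
  // Old style in this runner:
  //   printf("main: Model buffer loaded, has %u methods\n", num_methods);
  // New style after this commit: no "main:" tag and no trailing "\n",
  // since ET_LOG prefixes the level, file, and line and terminates the
  // line via the et_pal_emit_log_message() hook (an fprintf to stderr on
  // this bare-metal target).
  ET_LOG(Info, "Model buffer loaded, has %zu methods", num_methods);
}
```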
