15 | 15 | #include <stdio.h>
16 | 16 | #include <cstddef>
17 | 17 | #include <string>
18 |    | -//
19 |    | -#include <executorch/extension/data_loader/file_data_loader.h>
   | 18 | +
   | 19 | +#include <executorch/extension/data_loader/shared_ptr_data_loader.h>
20 | 20 | #include <executorch/runtime/core/exec_aten/testing_util/tensor_factory.h>
21 | 21 | #include <executorch/runtime/executor/method.h>
22 | 22 | #include <executorch/runtime/executor/program.h>
33 | 33 | #include <string>
34 | 34 |
35 | 35 | using namespace torch::executor;
36 |    | -using torch::executor::util::FileDataLoader;
   | 36 | +
   | 37 | +using torch::executor::util::SharedPtrDataLoader;
37 | 38 | using torch::executor::testing::TensorFactory;
38 | 39 |
39 | 40 | static constexpr size_t kRuntimeMemorySize = 64021120;
@@ -120,37 +121,21 @@ - (nullable instancetype)initWithFileAtPath:(NSString*)filePath {
120 | 121 | - (char*)segmentImage:(void*)imageBuffer
121 | 122 | withWidth:(int)width
122 | 123 | withHeight:(int)height {
123 |     | - printf("running segmentImage...");
124 | 124 | float* floatData = static_cast<float*>(imageBuffer);
125 |     | - for (int i = 0; i < 10; i++) {
126 |     | - printf("float Data first item: %f: ", floatData[i]);
127 |     | - }
128 |     | -
129 |     | - printf("last element: %f: ", floatData[1 * 3 * 224 * 224 - 1]);
130 | 125 |
131 | 126 | runtime_init();
132 | 127 | Error status;
133 | 128 |
134 | 129 | TensorFactory<ScalarType::Float> tensor_inputs;
135 |     | - // const std::vector<int32_t> sizes = {1};
136 |     | - // EValue evalue_inputs(tensor_inputs.make(sizes, /*data=*/{0.2}));
137 |     | -
138 | 130 | const std::vector<int32_t> sizes = {1, 3, 224, 224};
139 | 131 | std::vector<float> floatVector(floatData, floatData + 1 * 3 * 224 * 224);
140 | 132 | EValue evalue_inputs(tensor_inputs.make(sizes, /*data=*/floatVector));
141 | 133 |
142 |     | - // NSString *filePath = [[NSBundle mainBundle]
143 |     | - // pathForResource:@"lowered_sin" ofType:@"ff"]; executorch_module_name =
144 |     | - // filePath.UTF8String;
145 | 134 | NSString* filePath = [[NSBundle mainBundle] pathForResource:@"mv2_softmax"
146 | 135 | ofType:@"pte"];
147 |     | - // NSString *filePath = [[NSBundle mainBundle] pathForResource:@"mv3"
148 |     | - // ofType:@"ff"]; NSString *filePath = [[NSBundle mainBundle]
149 |     | - // pathForResource:@"lowered_sin" ofType:@"ff"];
150 | 136 | NSLog(@" filePath: %@", filePath);
151 | 137 | std::string log_message = "Start logging...\n";
152 | 138 | ET_LOG(Info, "Hello world.");
153 |     | - NSString* objcString = @(log_message.c_str());
154 | 139 |
155 | 140 | MemoryAllocator const_allocator{MemoryAllocator(0, nullptr)};
156 | 141 | const_allocator.enable_profiling("const allocator");
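The surviving input-preparation path in the hunk above condenses to the sketch below. It is a reading aid, not part of the change: every name is taken from this diff, and TensorFactory is ExecuTorch's testing utility, so a production integration would likely build the input Tensor another way.

    // Reinterpret the incoming image buffer as floats and copy 1 x 3 x 224 x 224 values.
    float* floatData = static_cast<float*>(imageBuffer);
    TensorFactory<ScalarType::Float> tensor_inputs;
    const std::vector<int32_t> sizes = {1, 3, 224, 224};
    std::vector<float> floatVector(floatData, floatData + 1 * 3 * 224 * 224);
    // Wrap the copy in an EValue; it is bound to the method's input 0 once the Method is loaded.
    EValue evalue_inputs(tensor_inputs.make(sizes, /*data=*/floatVector));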
@@ -191,40 +176,38 @@ - (char*)segmentImage:(void*)imageBuffer
191 | 176 | // Allocate memory
192 | 177 | std::shared_ptr<char> file_data = std::shared_ptr<char>(
193 | 178 | new char[file_length + 1], std::default_delete<char[]>());
    | 179 | +
194 | 180 | if (!file_data) {
195 | 181 | ET_LOG(Error, "Unable to allocate memory to read file %s\n", file_name);
196 | 182 | fclose(file);
197 | 183 | }
198 | 184 | ET_LOG(Info, "Allocate memory Finish.");
199 |     | - //
200 |     | - // // Read file contents into buffer
    | 185 | +
201 | 186 | fread(file_data.get(), file_length, 1, file);
202 | 187 | ET_LOG(Info, "Load file Finish.");
203 |     | - //
204 |     | - const void* program_data = file_data.get();
205 | 188 |
206 |     | - const auto program = torch::executor::Program(program_data);
    | 189 | + // TODO(chenlai): use FileDataLoader or MmapDataLoader to load model
    | 190 | + SharedPtrDataLoader data_loader(file_data, file_length);
    | 191 | + Result<Program> program = Program::load(&data_loader);
207 | 192 |
208 |     | - if (!program.is_valid()) {
209 |     | - ET_LOG(Info, "Failed to parse model file %s", file_name);
    | 193 | + if (!program.ok()) {
    | 194 | + ET_LOG(Error, "Failed to parse model file %s", file_name);
    | 195 | + return nil;
210 | 196 | }
211 | 197 |
212 |     | - // Use the first method in the program.
213 | 198 | const char* method_name = nullptr;
214 | 199 | {
215 |     | - const auto method_name_result = program.get_method_name(0);
    | 200 | + const auto method_name_result = program->get_method_name(0);
216 | 201 | ET_CHECK_MSG(method_name_result.ok(), "Program has no methods");
217 | 202 | method_name = *method_name_result;
218 | 203 | }
219 |     | - ET_LOG(Info, "Loading method %s", method_name);
220 |     | - log_message = log_message + "Loading method " + method_name + "\n";
    | 204 | + ET_LOG(Info, "Using method %s", method_name);
221 | 205 |
222 |     | - Result<Method> method = program.load_method(method_name, &memory_manager);
    | 206 | + Result<Method> method = program->load_method(method_name, &memory_manager);
223 | 207 |
224 | 208 | ET_CHECK(method.ok());
225 | 209 | ET_LOG(Info, "Method loaded.");
226 | 210 | method->set_input(evalue_inputs, 0);
227 |     | - // auto inputs = torch::executor::util::PrepareInputTensors(*method);
228 | 211 |
229 | 212 | ET_LOG(Info, "Inputs prepared.");
230 | 213 |
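Taken together, the loading path introduced by this change reads roughly as follows. The sketch is condensed from the hunks above and sticks to the APIs the diff itself uses; the in-code TODO points at FileDataLoader or MmapDataLoader as the eventual replacement for the manual fread-into-shared_ptr step, but their factory functions are not shown in this change, so they are not guessed at here.

    // Hand the already-read .pte bytes to the runtime via the shared_ptr-backed loader.
    SharedPtrDataLoader data_loader(file_data, file_length);
    Result<Program> program = Program::load(&data_loader);
    if (!program.ok()) {
      ET_LOG(Error, "Failed to parse model file %s", file_name);
      return nil;
    }

    // Use the first method in the program.
    const char* method_name = nullptr;
    {
      const auto method_name_result = program->get_method_name(0);
      ET_CHECK_MSG(method_name_result.ok(), "Program has no methods");
      method_name = *method_name_result;
    }

    // Instantiate the method against the caller-supplied memory manager and bind the input.
    Result<Method> method = program->load_method(method_name, &memory_manager);
    ET_CHECK(method.ok());
    method->set_input(evalue_inputs, 0);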