@@ -75,14 +75,21 @@ int main(int argc, char** argv) {
   ET_LOG(Info, "Model file %s is loaded.", model_path);
 
   // Use the first method in the program.
-  const size_t plan_index = 0;
   const char* method_name = nullptr;
   {
-    const auto method_name_result = program->get_method_name(plan_index);
+    const auto method_name_result = program->get_method_name(0);
     ET_CHECK_MSG(method_name_result.ok(), "Program has no methods");
     method_name = *method_name_result;
   }
-  ET_LOG(Info, "Running method %s", method_name);
+  ET_LOG(Info, "Using method %s", method_name);
+
+  // MethodMeta describes the memory requirements of the method.
+  Result<MethodMeta> method_meta = program->method_meta(method_name);
+  ET_CHECK_MSG(
+      method_meta.ok(),
+      "Failed to get method_meta for %s: 0x%x",
+      method_name,
+      (unsigned int)method_meta.error());
 
   //
   // The runtime does not use malloc/new; it allocates all memory using the
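Everything the loader returns above is wrapped in Result<T>, and the value is only dereferenced after ok() succeeds. The sketch below restates that access pattern; it reuses only names that appear in this diff (Result, MethodMeta, program, method_name), while the non-fatal branch and the Error log level are illustrative assumptions rather than part of the commit.

// Sketch (not part of the commit): the Result<T> access pattern used above.
Result<MethodMeta> method_meta = program->method_meta(method_name);
if (!method_meta.ok()) {
  // error() is meaningful only when ok() is false; the commit logs it as hex.
  ET_LOG(
      Error,
      "method_meta() failed for %s: 0x%x",
      method_name,
      (unsigned int)method_meta.error());
  return -1;
}
// operator-> and operator* forward to the wrapped MethodMeta value.
size_t num_non_const_buffers = method_meta->num_non_const_buffers();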
@@ -116,20 +123,9 @@ int main(int argc, char** argv) {
   // have more than one for, e.g., slow/large DRAM and fast/small SRAM.
   std::vector<std::unique_ptr<uint8_t[]>> non_const_buffers;
   std::vector<MemoryAllocator> non_const_allocators;
-  size_t num_non_const_buffers = 0;
-  {
-    auto result = program->num_non_const_buffers(method_name);
-    ET_CHECK_MSG(
-        result.ok(),
-        "Failed to get number of non-const buffers for method %s: 0x%x",
-        method_name,
-        (unsigned int)result.error());
-    num_non_const_buffers = *result;
-  }
-  // Note that this loop starts at ID 1, because ID 0 is reserved. But, the
-  // HierarchicalAllocator indices are zero-based, so it's later adjusted by -1.
-  for (size_t id = 1; id < num_non_const_buffers; ++id) {
-    auto buffer_size = program->get_non_const_buffer_size(id, method_name);
+  size_t num_non_const_buffers = method_meta->num_non_const_buffers();
+  for (size_t id = 0; id < num_non_const_buffers; ++id) {
+    auto buffer_size = method_meta->non_const_buffer_size(id);
     ET_CHECK_MSG(
         buffer_size.ok(),
         "Failed to get size of non-const buffer %zu for method %s: 0x%x",
@@ -139,8 +135,6 @@ int main(int argc, char** argv) {
     ET_LOG(
         Info, "Setting up non-const buffer %zu, size %zu.", id, *buffer_size);
     non_const_buffers.push_back(std::make_unique<uint8_t[]>(*buffer_size));
-    // Since the list of allocators began empty, buffer ID N will live at index
-    // N-1.
     non_const_allocators.push_back(
         MemoryAllocator(*buffer_size, non_const_buffers.back().get()));
     non_const_allocators.back().enable_profiling("non_const_allocators");
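Because the new planned-buffer setup is split across the two hunks above, here is the block as it reads once the patch is applied. It is assembled only from the + and context lines shown; the ET_CHECK_MSG arguments that fall between the hunks are inferred from the format string and marked as such.

// Post-patch buffer setup, assembled from the hunks above for readability.
std::vector<std::unique_ptr<uint8_t[]>> non_const_buffers;
std::vector<MemoryAllocator> non_const_allocators;
size_t num_non_const_buffers = method_meta->num_non_const_buffers();
for (size_t id = 0; id < num_non_const_buffers; ++id) {
  auto buffer_size = method_meta->non_const_buffer_size(id);
  ET_CHECK_MSG(
      buffer_size.ok(),
      "Failed to get size of non-const buffer %zu for method %s: 0x%x",
      // The next three arguments sit between the hunks; inferred from the
      // format string rather than copied from the diff.
      id,
      method_name,
      (unsigned int)buffer_size.error());
  ET_LOG(
      Info, "Setting up non-const buffer %zu, size %zu.", id, *buffer_size);
  non_const_buffers.push_back(std::make_unique<uint8_t[]>(*buffer_size));
  non_const_allocators.push_back(
      MemoryAllocator(*buffer_size, non_const_buffers.back().get()));
  non_const_allocators.back().enable_profiling("non_const_allocators");
}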
@@ -194,19 +188,17 @@ int main(int argc, char** argv) {
       status);
   ET_LOG(Info, "Model executed successfully.");
 
-  auto output_list =
-      runtime_allocator.allocateList<EValue>(method->outputs_size());
-  status = method->get_outputs(output_list, method->outputs_size());
+  // Print the outputs.
+  std::vector<EValue> outputs(method->outputs_size());
+  status = method->get_outputs(outputs.data(), outputs.size());
   ET_CHECK(status == Error::Ok);
-  // The following code assumes all output EValues are floating point
-  // tensors. We need to handle other types of EValues and tensor
-  // dtypes. Furthermore, we need a util to print tensors in a more
-  // interpretable (e.g. size, dtype) and readable way.
-  // TODO for the above at T159700776
-  for (size_t i = 0; i < method->outputs_size(); i++) {
-    auto output_tensor = output_list[i].toTensor();
+  for (EValue& output : outputs) {
+    // TODO(T159700776): This assumes that all outputs are fp32 tensors. Add
+    // support for other EValues and Tensor dtypes, and print tensors in a more
+    // readable way.
+    auto output_tensor = output.toTensor();
     auto data_output = output_tensor.const_data_ptr<float>();
-    for (size_t j = 0; j < output_list[i].toTensor().numel(); ++j) {
+    for (size_t j = 0; j < output_tensor.numel(); ++j) {
       ET_LOG(Info, "%f", data_output[j]);
     }
   }
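The TODO in the final hunk flags that the printing loop assumes every output is an fp32 tensor. A more defensive variant might check the EValue and dtype before dereferencing; the sketch below assumes EValue::isTensor(), Tensor::scalar_type(), and a ScalarType::Float enumerator, ATen-style accessors that do not appear in this diff.

// Sketch (assumed API, see note above): skip outputs that are not fp32 tensors
// instead of unconditionally calling toTensor()/const_data_ptr<float>().
for (EValue& output : outputs) {
  if (!output.isTensor()) {
    ET_LOG(Info, "Skipping non-tensor output.");
    continue;
  }
  auto output_tensor = output.toTensor();
  if (output_tensor.scalar_type() != ScalarType::Float) {
    ET_LOG(Info, "Skipping non-float tensor output.");
    continue;
  }
  auto data_output = output_tensor.const_data_ptr<float>();
  for (size_t j = 0; j < output_tensor.numel(); ++j) {
    ET_LOG(Info, "%f", data_output[j]);
  }
}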