 from executorch.bundled_program.serialize import (
     serialize_from_bundled_program_to_flatbuffer,
 )
+from executorch.exir import ExecutorchProgram, ExirExportedProgram
 from executorch.exir.backend.backend_api import to_backend, validation_disabled

 from executorch.exir.passes.spec_prop_pass import SpecPropPass
-from executorch.exir.serialize import serialize_to_flatbuffer
 from executorch.exir.tracer import _default_decomposition_table

 # pyre-ignore[21]: Could not find module `executorch.extension.pybindings.portable`.

@@ -158,16 +158,16 @@ def lower_module_and_test_output(
         quantized_dynamic: bool = False,
         # TODO: remove this after we migrate to use long term flow
         quantizer_api_test: bool = False,
-        dump_ff: bool = False,  # for debugging, dump the generated flatbuffer file
-    ) -> exir.ExirExportedProgram:
+        dump_bundled_program: bool = False,  # for debugging, dump the generated bundled program file
+    ) -> ExirExportedProgram:
         """
         Helper testing function that takes a torch.nn.Module and lowers it to XNNPACK with
         the given sample inputs. It then runs the lowered module and compares its
         outputs with the outputs of the eager module.
         """

         if quantizer_api_test:
-            assert isinstance(module, exir.ExirExportedProgram)
+            assert isinstance(module, ExirExportedProgram)
             edge_program = module
         else:

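For context, here is a hypothetical call into this helper from a test method, exercising the renamed flag. The wrapper model, the test method name, and the positional argument order are illustrative assumptions; only `lower_module_and_test_output`, its `dump_bundled_program` flag, and the lower/compare behavior come from the code above (torch is assumed to be imported at the top of the file).

# Hypothetical test method inside this TestCase class.
def test_linear_lowering_with_dump(self):
    module = torch.nn.Linear(4, 2).eval()   # placeholder eager model, not from the diff
    sample_inputs = (torch.randn(1, 4),)
    # Lowers the module to XNNPACK, runs the lowered program, compares with eager output,
    # and additionally writes a bundled program under /tmp because dump_bundled_program=True.
    self.lower_module_and_test_output(
        module,
        sample_inputs,
        dump_bundled_program=True,
    )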

@@ -197,46 +197,42 @@ def forward(self, *args):
                     edge_program.exported_program, partitioner
                 )

-            program = delegated_program.to_executorch(
+            executorch_program: ExecutorchProgram = delegated_program.to_executorch(
                 get_xnnpack_executorch_backend_config([SpecPropPass()]),
-            ).program
+            )
         else:
             delegated_program = to_backend(
                 "XnnpackBackend", edge_program.exported_program, []
             )

-            exported_program = capture_graph_for_xnnpack(
+            exported_program: ExirExportedProgram = capture_graph_for_xnnpack(
                 delegated_program, sample_inputs
             )
-            program = exported_program.to_executorch(
+            executorch_program: ExecutorchProgram = exported_program.to_executorch(
                 get_xnnpack_executorch_backend_config(),
-            ).program
+            )

         # print("Graph Module with delegate:")
         # delegated_module.print_readable()

         # Assert the backend name is xnnpack
         self.assertEqual(
-            program.execution_plan[0].delegates[0].id,
+            executorch_program.program.execution_plan[0].delegates[0].id,
             XnnpackBackend.__name__,
         )

-        buffer = serialize_to_flatbuffer(program)

         ref_output = delegated_program(*sample_inputs)
-        if dump_ff:
-            filename = f"/tmp/xnnpack_test_{randint(1, 99999)}.pte"
-            print(f"Writing flatbuffer to {filename} ...")
-
+        if dump_bundled_program:
             save_bundled_program(
                 representative_inputs=sample_inputs,
-                program=program,
+                program=executorch_program.program,
                 ref_output=ref_output,
-                output_path=filename,
+                output_path=f"/tmp/xnnpack_test_{randint(1, 99999)}",
             )

         # Test the model with executor
-        # pyre-ignore
-        executorch_module = _load_for_executorch_from_buffer(buffer)
+        # pyre-ignore[16]: Module `executorch.extension.pybindings` has no attribute `portable`.
+        executorch_module = _load_for_executorch_from_buffer(executorch_program.buffer)
         # pyre-fixme[16]: Module `pytree` has no attribute `tree_flatten`.
         inputs_flattened, _ = tree_flatten(sample_inputs)

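The core of this hunk is an API migration: the result of `to_executorch()` is now kept as an `ExecutorchProgram` instead of being unwrapped to its raw `.program`, and the standalone `serialize_to_flatbuffer` call goes away because the serialized flatbuffer is already available as `executorch_program.buffer`. A condensed before/after sketch, reusing the helpers imported and defined in this test file and assuming `exported_program` came from `capture_graph_for_xnnpack` as above:

# Before this change: unwrap the program and serialize it by hand.
# program = exported_program.to_executorch(get_xnnpack_executorch_backend_config()).program
# buffer = serialize_to_flatbuffer(program)

# After this change: keep the ExecutorchProgram wrapper; it carries both views.
executorch_program: ExecutorchProgram = exported_program.to_executorch(
    get_xnnpack_executorch_backend_config(),
)
program = executorch_program.program   # emitted program, used for the delegate-id assert
buffer = executorch_program.buffer     # serialized flatbuffer, fed to the runtime bindings
executorch_module = _load_for_executorch_from_buffer(buffer)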

@@ -353,7 +349,7 @@ def _test_xnnpack_dqlinear(
         self,
         weight_qconfig,
         use_bias: bool,
-        dump_ff: bool = False,
+        dump_bundled_program: bool = False,
     ):
         assert weight_qconfig in [
             weight_observer_range_neg_127_to_127,

@@ -413,39 +409,39 @@ def forward(self, x):
         composite_model = CompositeModule()
         composite_model(*example_inputs)

-        exported_program = capture_graph_for_xnnpack(composite_model, example_inputs)
-        program = exported_program.to_executorch(
+        exported_program: ExirExportedProgram = capture_graph_for_xnnpack(
+            composite_model, example_inputs
+        )
+        executorch_program: ExecutorchProgram = exported_program.to_executorch(
             get_xnnpack_executorch_backend_config(),
-        ).program
+        )

         self.assertEqual(
-            program.execution_plan[0].delegates[0].id,
+            executorch_program.program.execution_plan[0].delegates[0].id,
             XnnpackBackend.__name__,
         )

         ref_output = captured_dqlinear(*example_inputs)
         ref_output = composite_model(*example_inputs)
         print("ref_output:", ref_output)

-        if dump_ff:
+        if dump_bundled_program:
             mm_str = "addmm" if use_bias else "mm"
             filename = f"/tmp/dqlinear_{mm_str}"
             if weight_qconfig == weight_observer_range_neg_127_to_127:
                 filename = f"{filename}_per_tensor"
             else:
                 filename = f"{filename}_per_channel"
-            print(f"Writing flatbuffer to {filename} ...")

             save_bundled_program(
                 representative_inputs=example_inputs,
-                program=program,
+                program=executorch_program.program,
                 ref_output=ref_output,
                 output_path=filename,
             )

-        buffer = serialize_to_flatbuffer(program)
         # pyre-ignore
-        executorch_module = _load_for_executorch_from_buffer(buffer)
+        executorch_module = _load_for_executorch_from_buffer(executorch_program.buffer)
         # pyre-fixme[16]: Module `pytree` has no attribute `tree_flatten`.
         inputs_flattened, _ = tree_flatten(example_inputs)

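Both test paths now hand `executorch_program.buffer` straight to the portable pybindings loader. The hunks end right after flattening the inputs; the remainder of each test presumably runs the loaded module and compares against `ref_output`. A sketch of that unchanged tail, where the `forward` call on the pybindings module and the tolerances are assumptions rather than something this diff shows:

# Assumed continuation (not part of this diff): run the lowered program through the
# runtime bindings and compare against the eager reference output.
executorch_module = _load_for_executorch_from_buffer(executorch_program.buffer)
inputs_flattened, _ = tree_flatten(example_inputs)
executorch_output = executorch_module.forward(inputs_flattened)[0]  # `forward` is an assumption
self.assertTrue(torch.allclose(ref_output, executorch_output, atol=1e-3, rtol=1e-3))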