@@ -340,10 +340,8 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         exir.print_program.pretty_print(program)

         deboxed_int_list = []
-        for item in program.execution_plan[0].values[5].val.items:  # pyre-ignore[16]
-            deboxed_int_list.append(
-                program.execution_plan[0].values[item].val.int_val  # pyre-ignore[16]
-            )
+        for item in program.execution_plan[0].values[5].val.items:
+            deboxed_int_list.append(program.execution_plan[0].values[item].val.int_val)

         self.assertEqual(IntList(deboxed_int_list), IntList([2, 0, 1]))

@@ -459,11 +457,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # Check the mul operator's stack trace contains f -> g -> h
         self.assertTrue(
             "return torch.mul(x, torch.randn(3, 2))"
-            in program.execution_plan[0]  # pyre-ignore[16]
-            .chains[0]
-            .stacktrace[1]
-            .items[-1]
-            .context
+            in program.execution_plan[0].chains[0].stacktrace[1].items[-1].context
         )
         self.assertEqual(
             program.execution_plan[0].chains[0].stacktrace[1].items[-1].name, "f"
@@ -616,11 +610,7 @@ def false_fn(y: torch.Tensor) -> torch.Tensor:
             if not isinstance(inst.instr_args, KernelCall):
                 continue

-            op = (
-                program.execution_plan[0]
-                .operators[inst.instr_args.op_index]  # pyre-ignore[16]
-                .name
-            )
+            op = program.execution_plan[0].operators[inst.instr_args.op_index].name

             if "mm" in op:
                 num_mm += 1
@@ -657,19 +647,13 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         # generate the tensor on which this iteration will operate on.
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[0]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[0].instr_args.op_index
             ].name,
             "aten::sym_size",
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[1]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[1].instr_args.op_index
             ].name,
             "aten::select_copy",
         )
@@ -681,28 +665,19 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         # We check here that both of these have been generated.
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[-5]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[-5].instr_args.op_index
             ].name,
             "executorch_prim::et_copy_index",
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[-4]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[-4].instr_args.op_index
             ].name,
             "executorch_prim::add",
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[-3]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[-3].instr_args.op_index
             ].name,
             "executorch_prim::eq",
         )
@@ -716,10 +691,7 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
-                .chains[0]
-                .instructions[-1]
-                .instr_args.op_index
+                program.execution_plan[0].chains[0].instructions[-1].instr_args.op_index
             ].name,
             "executorch_prim::sub",
         )
@@ -1300,9 +1272,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # this triggers the actual emission of the graph
         program = program_mul._emitter_output.program
         node = None
-        program.execution_plan[0].chains[0].instructions[  # pyre-ignore[16]
-            0
-        ].instr_args.op_index
+        program.execution_plan[0].chains[0].instructions[0].instr_args.op_index

         # Find the multiplication node in the graph that was emitted.
         for node in program_mul.exported_program().graph.nodes:
@@ -1314,7 +1284,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # Find the multiplication instruction in the program that was emitted.
         for idx in range(len(program.execution_plan[0].chains[0].instructions)):
             instruction = program.execution_plan[0].chains[0].instructions[idx]
-            op_index = instruction.instr_args.op_index  # pyre-ignore[16]
+            op_index = instruction.instr_args.op_index
             if "mul" in program.execution_plan[0].operators[op_index].name:
                 break

@@ -1453,9 +1423,7 @@ def forward(self, x, y):
         exec_prog._emitter_output.program
         self.assertIsNotNone(exec_prog.delegate_map)
         self.assertIsNotNone(exec_prog.delegate_map.get("forward"))
-        self.assertIsNotNone(
-            exec_prog.delegate_map.get("forward").get(0)  # pyre-ignore[16]
-        )
+        self.assertIsNotNone(exec_prog.delegate_map.get("forward").get(0))
         self.assertEqual(
             exec_prog.delegate_map.get("forward").get(0).get("name"),
             "BackendWithCompilerExample",
@@ -1568,9 +1536,7 @@ def forward(self, x):
         model = model.to_executorch()
         model.dump_executorch_program(True)
         self.assertTrue(
-            model.executorch_program.execution_plan[0]  # pyre-ignore[16]
-            .values[0]
-            .val.allocation_info
+            model.executorch_program.execution_plan[0].values[0].val.allocation_info
             is not None
         )
         executorch_module = _load_for_executorch_from_buffer(model.buffer)
@@ -1611,9 +1577,7 @@ def forward(self, x):
         )
         model.dump_executorch_program(True)
         self.assertTrue(
-            model.executorch_program.execution_plan[0]  # pyre-ignore[16]
-            .values[0]
-            .val.allocation_info
+            model.executorch_program.execution_plan[0].values[0].val.allocation_info
             is not None
         )
         executorch_module = _load_for_executorch_from_buffer(model.buffer)