
Commit b2a0ee4

tarun292 authored and facebook-github-bot committed
Add scuba logging to edge API's (#7103)
Summary: Adding Scuba logging to the Edge AOT APIs. Whenever the APIs are used, this logs some basic details to Scuba; if the API call fails, it also logs the failure reason and stack trace to Scuba. This is only enabled for internal users; for OSS users it is essentially a no-op.

Logging is done in the following cases:
- The user calls the API in a notebook
- The user calls the API via `buck run` or `buck test` on a devserver

Logging is not done if the API is called via automated tests.

Reviewed By: JacobSzwejbka

Differential Revision: D66385141
1 parent c726a9b commit b2a0ee4
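
The Scuba-backed logger itself lives in an internal-only module (executorch/exir/program/fb/logger) and is not part of this diff. As a rough, hypothetical sketch only — with Python's standard logging module standing in for Scuba, and made-up message fields — a decorator of the kind described above could look roughly like this:

import functools
import logging
import traceback
from typing import Any, Callable

logger = logging.getLogger("executorch.api_usage")


def et_logger(api_name: str) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        @functools.wraps(func)
        def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
            try:
                result = func(self, *args, **kwargs)
                # Record basic details about the successful call.
                logger.info("API %s called on %s", api_name, type(self).__name__)
                return result
            except Exception as e:
                # On failure, record the reason and the stack trace, then re-raise.
                logger.error(
                    "API %s failed: %s\n%s", api_name, e, traceback.format_exc()
                )
                raise

        return wrapper

    return decorator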

6 files changed: +54 −33 lines

exir/emit/test/test_emit.py

Lines changed: 15 additions & 15 deletions
@@ -340,9 +340,9 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         exir.print_program.pretty_print(program)

         deboxed_int_list = []
-        for item in program.execution_plan[0].values[5].val.items:  # pyre-ignore[16]
+        for item in program.execution_plan[0].values[5].val.items:
             deboxed_int_list.append(
-                program.execution_plan[0].values[item].val.int_val  # pyre-ignore[16]
+                program.execution_plan[0].values[item].val.int_val
             )

         self.assertEqual(IntList(deboxed_int_list), IntList([2, 0, 1]))
@@ -459,7 +459,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # Check the mul operator's stack trace contains f -> g -> h
         self.assertTrue(
             "return torch.mul(x, torch.randn(3, 2))"
-            in program.execution_plan[0]  # pyre-ignore[16]
+            in program.execution_plan[0]
             .chains[0]
             .stacktrace[1]
             .items[-1]
@@ -618,7 +618,7 @@ def false_fn(y: torch.Tensor) -> torch.Tensor:

             op = (
                 program.execution_plan[0]
-                .operators[inst.instr_args.op_index]  # pyre-ignore[16]
+                .operators[inst.instr_args.op_index]
                 .name
             )

@@ -657,7 +657,7 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         # generate the tensor on which this iteration will operate on.
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
+                program.execution_plan[0]
                 .chains[0]
                 .instructions[0]
                 .instr_args.op_index
@@ -666,7 +666,7 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
+                program.execution_plan[0]
                 .chains[0]
                 .instructions[1]
                 .instr_args.op_index
@@ -681,7 +681,7 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         # We check here that both of these have been generated.
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
+                program.execution_plan[0]
                 .chains[0]
                 .instructions[-5]
                 .instr_args.op_index
@@ -690,7 +690,7 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
+                program.execution_plan[0]
                 .chains[0]
                 .instructions[-4]
                 .instr_args.op_index
@@ -699,7 +699,7 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
+                program.execution_plan[0]
                 .chains[0]
                 .instructions[-3]
                 .instr_args.op_index
@@ -716,7 +716,7 @@ def map_fn(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
         )
         self.assertEqual(
             op_table[
-                program.execution_plan[0]  # pyre-ignore[16]
+                program.execution_plan[0]
                 .chains[0]
                 .instructions[-1]
                 .instr_args.op_index
@@ -1300,7 +1300,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # this triggers the actual emission of the graph
         program = program_mul._emitter_output.program
         node = None
-        program.execution_plan[0].chains[0].instructions[  # pyre-ignore[16]
+        program.execution_plan[0].chains[0].instructions[
             0
         ].instr_args.op_index

@@ -1314,7 +1314,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
         # Find the multiplication instruction in the program that was emitted.
         for idx in range(len(program.execution_plan[0].chains[0].instructions)):
             instruction = program.execution_plan[0].chains[0].instructions[idx]
-            op_index = instruction.instr_args.op_index  # pyre-ignore[16]
+            op_index = instruction.instr_args.op_index
             if "mul" in program.execution_plan[0].operators[op_index].name:
                 break

@@ -1454,7 +1454,7 @@ def forward(self, x, y):
         self.assertIsNotNone(exec_prog.delegate_map)
         self.assertIsNotNone(exec_prog.delegate_map.get("forward"))
         self.assertIsNotNone(
-            exec_prog.delegate_map.get("forward").get(0)  # pyre-ignore[16]
+            exec_prog.delegate_map.get("forward").get(0)
         )
         self.assertEqual(
             exec_prog.delegate_map.get("forward").get(0).get("name"),
@@ -1568,7 +1568,7 @@ def forward(self, x):
         model = model.to_executorch()
         model.dump_executorch_program(True)
         self.assertTrue(
-            model.executorch_program.execution_plan[0]  # pyre-ignore[16]
+            model.executorch_program.execution_plan[0]
            .values[0]
            .val.allocation_info
            is not None
@@ -1611,7 +1611,7 @@ def forward(self, x):
         )
         model.dump_executorch_program(True)
         self.assertTrue(
-            model.executorch_program.execution_plan[0]  # pyre-ignore[16]
+            model.executorch_program.execution_plan[0]
            .values[0]
            .val.allocation_info
            is not None

exir/program/TARGETS

Lines changed: 2 additions & 1 deletion
@@ -1,4 +1,5 @@
 load("@fbcode_macros//build_defs:python_library.bzl", "python_library")
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")

 oncall("executorch")

@@ -43,7 +44,7 @@ python_library(
        "//executorch/exir/passes:spec_prop_pass",
        "//executorch/exir/passes:weights_to_outputs_pass",
        "//executorch/exir/verification:verifier",
-    ],
+    ] + (["//executorch/exir/program/fb:logger"] if not runtime.is_oss else [])
 )

 python_library(

exir/program/_program.py

Lines changed: 22 additions & 0 deletions
@@ -75,8 +75,24 @@

 Val = Any

+from typing import Any, Callable
+
 from torch.library import Library

+try:
+    from executorch.exir.program.fb.logger import et_logger
+except ImportError:
+    # Define a stub decorator that does nothing
+    def et_logger(api_name: str) -> Callable[[Any], Any]:
+        def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
+            def wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
+                return func(self, *args, **kwargs)
+
+            return wrapper
+
+        return decorator
+
+
 # This is the reserved namespace that is used to register ops to that will
 # be prevented from being decomposed during to_edge_transform_and_lower.
 edge_no_decomp_namespace = "EDGE_DO_NOT_DECOMP"
@@ -957,6 +973,7 @@ def _gen_edge_manager_for_partitioners(
     return edge_manager


+@et_logger("to_edge_transform_and_lower")
 def to_edge_transform_and_lower(
     programs: Union[ExportedProgram, Dict[str, ExportedProgram]],
     transform_passes: Optional[
@@ -1110,6 +1127,7 @@ def to_edge_with_preserved_ops(
     )


+@et_logger("to_edge")
 def to_edge(
     programs: Union[ExportedProgram, Dict[str, ExportedProgram]],
     constant_methods: Optional[Dict[str, Any]] = None,
@@ -1204,8 +1222,10 @@ def exported_program(self, method_name: str = "forward") -> ExportedProgram:
         """
         Returns the ExportedProgram specified by 'method_name'.
         """
+
         return self._edge_programs[method_name]

+    @et_logger("transform")
     def transform(
         self,
         passes: Union[Sequence[PassType], Dict[str, Sequence[PassType]]],
@@ -1253,6 +1273,7 @@ def transform(
             new_programs, copy.deepcopy(self._config_methods), compile_config
         )

+    @et_logger("to_backend")
     def to_backend(
         self, partitioner: Union[Partitioner, Dict[str, Partitioner]]
     ) -> "EdgeProgramManager":
@@ -1296,6 +1317,7 @@ def to_backend(
             new_edge_programs, copy.deepcopy(self._config_methods), config
         )

+    @et_logger("to_executorch")
     def to_executorch(
         self,
         config: Optional[ExecutorchBackendConfig] = None,
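
In OSS builds the try/except ImportError above resolves et_logger to the no-op stub, so the decorated entry points are called exactly as before. A minimal, illustrative usage (the toy module and inputs below are examples, not part of this commit):

import torch

from executorch.exir import to_edge


class AddOne(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + 1


exported = torch.export.export(AddOne(), (torch.randn(2),))
edge = to_edge(exported)  # wrapped by @et_logger("to_edge")
executorch_program = edge.to_executorch()  # wrapped by @et_logger("to_executorch")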

exir/tests/test_joint_graph.py

Lines changed: 8 additions & 8 deletions
@@ -73,25 +73,25 @@ def forward(self, x, y):

         # assert that the weight and bias have proper data_buffer_idx and allocation_info
         self.assertEqual(
-            et.executorch_program.execution_plan[0]  # pyre-ignore
+            et.executorch_program.execution_plan[0]
            .values[0]
            .val.data_buffer_idx,
            1,
         )
         self.assertEqual(
-            et.executorch_program.execution_plan[0]  # pyre-ignore
+            et.executorch_program.execution_plan[0]
            .values[1]
            .val.data_buffer_idx,
            2,
         )
         self.assertEqual(
-            et.executorch_program.execution_plan[0]  # pyre-ignore
+            et.executorch_program.execution_plan[0]
            .values[0]
            .val.allocation_info.memory_offset_low,
            0,
         )
         self.assertEqual(
-            et.executorch_program.execution_plan[0]  # pyre-ignore
+            et.executorch_program.execution_plan[0]
            .values[1]
            .val.allocation_info.memory_offset_low,
            48,
@@ -106,7 +106,7 @@ def forward(self, x, y):

         self.assertTrue(torch.allclose(loss, et_outputs[0]))
         self.assertTrue(
-            torch.allclose(m.linear.weight.grad, et_outputs[1])  # pyre-ignore[6]
+            torch.allclose(m.linear.weight.grad, et_outputs[1])  # pyre-ignore
         )
         self.assertTrue(torch.allclose(m.linear.bias.grad, et_outputs[2]))
         self.assertTrue(torch.allclose(m.linear.weight, et_outputs[3]))
@@ -118,22 +118,22 @@ def forward(self, x, y):

         # gradient outputs start at index 1
         self.assertEqual(
-            et.executorch_program.execution_plan[1]  # pyre-ignore
+            et.executorch_program.execution_plan[1]
            .values[0]
            .val.int_val,
            1,
         )

         self.assertEqual(
-            et.executorch_program.execution_plan[2]  # pyre-ignore
+            et.executorch_program.execution_plan[2]
            .values[0]
            .val.string_val,
            "linear.weight",
         )

         # parameter outputs start at index 3
         self.assertEqual(
-            et.executorch_program.execution_plan[3]  # pyre-ignore
+            et.executorch_program.execution_plan[3]
            .values[0]
            .val.int_val,
            3,

exir/tests/test_remove_view_copy.py

Lines changed: 7 additions & 7 deletions
@@ -197,23 +197,23 @@ def test_spec(self) -> None:
         self.assertEqual(len(instructions), 7)

         self.assertEqual(
-            instructions[0].instr_args.op_index, 0  # pyre-ignore
+            instructions[0].instr_args.op_index, 0
         )  # view @ idx2
         self.assertEqual(
-            instructions[1].instr_args.op_index, 0  # pyre-ignore
+            instructions[1].instr_args.op_index, 0
         )  # view @ idx3
         self.assertEqual(
-            instructions[2].instr_args.op_index, 1  # pyre-ignore
+            instructions[2].instr_args.op_index, 1
         )  # aten:mul @ idx6
         self.assertEqual(
-            instructions[3].instr_args.op_index, 0  # pyre-ignore
+            instructions[3].instr_args.op_index, 0
         )  # view @ idx7
         self.assertEqual(
-            instructions[4].instr_args.op_index, 1  # pyre-ignore
+            instructions[4].instr_args.op_index, 1
         )  # aten:mul @ idx9
         self.assertEqual(
-            instructions[5].instr_args.op_index, 2  # pyre-ignore
+            instructions[5].instr_args.op_index, 2
         )  # aten:view_copy @ idx11
         self.assertEqual(
-            instructions[6].instr_args.op_index, 2  # pyre-ignore
+            instructions[6].instr_args.op_index, 2
         )  # aten:view_copy @ idx11

extension/llm/export/quantizer_lib.py

Lines changed: 0 additions & 2 deletions
@@ -184,14 +184,12 @@ def get_qnn_quantizer(
        )
        qnn_quantizer.set_per_channel_conv_quant(enable=False)
        qnn_quantizer.set_per_channel_linear_quant(enable=False)
-        # pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `qualcomm`.
        qnn_quantizer.set_quant_config(
            quant_dtype, is_qat=is_qat, act_observer=MinMaxObserver
        )
    elif quant_config == "16a4w":
        # pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `qualcomm`.
        quant_dtype = QuantDtype.use_16a4w
-        # pyre-ignore: Undefined attribute [16]: Module `executorch.backends` has no attribute `qualcomm`.
        qnn_quantizer.set_quant_config(
            quant_dtype, is_qat=is_qat, act_observer=MinMaxObserver
        )
