Add support for outputting constants #1774

Closed · wants to merge 1 commit
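For context, here is a minimal sketch of the kind of module this change enables. Before this PR, Python scalar outputs tripped the `assert isinstance(arg, _AbstractValue)` check in the emitter; with this change they are emitted as constant EValues. The `to_edge` import path below is an assumption; the calls mirror the new tests in this PR.

```python
import torch

# Assumed import location; the new tests below call to_edge() the same way.
from executorch.exir import to_edge


class ReturnsConstants(torch.nn.Module):
    def forward(self, x):
        # Python constants returned alongside tensor results.
        return [((1, 3, 1.2), True, [x + x, x * x])]


ep = torch.export.export(ReturnsConstants(), (torch.ones(2, 3),))
program = to_edge(ep).to_executorch().executorch_program

# The constant leaves (1, 3, 1.2, True) are emitted as EValues and appear in
# the plan's outputs next to the two tensor outputs, six entries in total.
print(len(program.execution_plan[0].outputs))  # 6
```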
16 changes: 13 additions & 3 deletions exir/emit/_emitter.py
@@ -1369,9 +1369,19 @@ def output(
            self.outputs.append(args_tuple.id)
        else:
            for arg in args_tuple:
-                # Every output should already have its value emitted outputs should only be abstract
-                # IDs at this point.
-                assert isinstance(arg, _AbstractValue)
+                if isinstance(arg, (int, float, bool)):
+                    arg = self._emit_evalue(self._constant_to_evalue(arg, None))
+                elif isinstance(arg, (type(None), str)):
+                    raise InternalError(
+                        self._emit_node_specific_error(
+                            self.node,
+                            f"Returning {arg} is not yet supported in the emitter.",
+                        )
+                    )
+                else:
+                    # Every other output should already have its value emitted.
+                    # They should only be abstract IDs at this point.
+                    assert isinstance(arg, _AbstractValue)
                self.outputs.append(arg.id)

    def plan(self) -> ExecutionPlan:
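Conceptually, the new branch turns each scalar constant into an entry in the program's value table and records that entry's index as an output, just as tensor outputs are recorded by id. A toy sketch of that bookkeeping is below; `TinyEmitter` is illustrative only, and the real work is done by `_constant_to_evalue` and `_emit_evalue` as shown in the diff above.

```python
from dataclasses import dataclass, field
from typing import Any, List


@dataclass
class TinyEmitter:
    """Toy model of the emitter's value-table bookkeeping (illustration only)."""

    values: List[Any] = field(default_factory=list)
    outputs: List[int] = field(default_factory=list)

    def emit_constant_output(self, arg: Any) -> None:
        if isinstance(arg, (int, float, bool)):
            # Stands in for _emit_evalue(self._constant_to_evalue(arg, None)).
            self.values.append(arg)
            self.outputs.append(len(self.values) - 1)
        elif isinstance(arg, (type(None), str)):
            raise NotImplementedError(f"Returning {arg} is not yet supported")
        else:
            raise TypeError("non-constant outputs must already be abstract values")


e = TinyEmitter()
for out in (1, 3, 1.2, True):
    e.emit_constant_output(out)
assert e.outputs == [0, 1, 2, 3]
```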
33 changes: 33 additions & 0 deletions exir/emit/test/test_emit.py
@@ -209,6 +209,39 @@ def forward(
"T2#1#0(T1#1($),D0())",
)

def test_constant_output(self):
class M(torch.nn.Module):
def forward(self, x):
return [((1, 3, 1.2), True, [x + x, x * x])]

ep = torch.export.export(M(), (torch.ones(2, 3),))
res = ep(torch.ones(2, 3))
self.assertEqual(res[0][0], (1, 3, 1.2))
program = to_edge(ep).to_executorch().executorch_program
outputs = program.execution_plan[0].outputs
self.assertEqual(len(outputs), 6)
self.assertEqual(program.execution_plan[0].values[outputs[0]].val.int_val, 1)
self.assertEqual(program.execution_plan[0].values[outputs[1]].val.int_val, 3)
self.assertEqual(
program.execution_plan[0].values[outputs[2]].val.double_val, 1.2
)
self.assertEqual(
program.execution_plan[0].values[outputs[3]].val.bool_val, True
)

def test_int_list_input(self):
class M(torch.nn.Module):
def forward(self, x, y, z):
return x + y, x + x, x + y + z

ep = torch.export.export(M(), (torch.ones(2, 3), 2, True))
ep(torch.ones(2, 3), 2, True)
program = to_edge(ep).to_executorch().executorch_program
inputs = program.execution_plan[0].inputs
self.assertEqual(len(inputs), 3)
self.assertEqual(program.execution_plan[0].values[inputs[1]].val.int_val, 2)
self.assertEqual(program.execution_plan[0].values[inputs[2]].val.bool_val, True)

def test_buffers_with_perfect_alignment(self) -> None:
class Foo(torch.nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
6 changes: 5 additions & 1 deletion exir/memory_planning.py
@@ -495,7 +495,11 @@ def get_node_tensor_specs(
    if not isinstance(specs, (list, tuple)):
        return []
    else:
-        return specs
+        return [
+            spec
+            for spec in specs
+            if not isinstance(spec, (int, float, bool, str, type(None)))
+        ]


@register_algo
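Constant outputs are materialized as EValues at emit time and never need buffer allocation, so `get_node_tensor_specs` now filters them out before memory planning. A small illustration of the same predicate follows; `FakeTensorSpec` is a stand-in for exir's `TensorSpec`, not the real class.

```python
class FakeTensorSpec:
    """Stand-in for exir's TensorSpec, for illustration only."""


# Hypothetical spec list for a node whose outputs mix tensors and constants.
specs = [FakeTensorSpec(), 1, 3, 1.2, True, None, FakeTensorSpec()]

# Same predicate as the new return statement in get_node_tensor_specs().
tensor_only = [
    spec
    for spec in specs
    if not isinstance(spec, (int, float, bool, str, type(None)))
]
assert len(tensor_only) == 2  # only tensor specs get memory planned
```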