Skip to content

Commit d519b4d

Browse files
authored
[executorch] Add logs for helping debug address space overflow issue
Differential Revision: D62142486 Pull Request resolved: #5035
1 parent 23f03b9 commit d519b4d

File tree

4 files changed

+25
-6
lines changed

4 files changed

+25
-6
lines changed

exir/emit/_emitter.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,7 @@
7979
TensorShapeDynamism,
8080
)
8181
from executorch.exir.tensor import (
82+
AddressSpaceOverflowException,
8283
layout_enum,
8384
make_allocation_info,
8485
make_tensor_value,
@@ -349,7 +350,20 @@ def _tensor_spec_to_evalue(self, spec: TensorSpec) -> EValue:
349350
self.node,
350351
f"Non-const tensor should be an activation tensor: mem_offset {spec.mem_offset}",
351352
)
352-
allocation_info = make_allocation_info(spec.mem_id, spec.mem_offset)
353+
try:
354+
allocation_info = make_allocation_info(spec.mem_id, spec.mem_offset)
355+
except AddressSpaceOverflowException as e:
356+
raise InternalError(
357+
self._emit_node_specific_error(
358+
self.node,
359+
(
360+
f"{e}\nHint: If you are using a memory pass based on dynamic shape bounds, "
361+
f"such as ConstraintBasedSymShapeEvalPass, this may be caused by an "
362+
f"unbacked SymInt with its upper bound lazily set to 2^64-1 (uint64 max) "
363+
"during torch.export()."
364+
),
365+
)
366+
)
353367

354368
if spec.const:
355369
# Tensor with a blob we need to serialize. May not actually be constant at runtime
@@ -1527,7 +1541,6 @@ def placeholder(
15271541
is_user_input = True
15281542

15291543
if isinstance(target, str) and isinstance(spec, TensorSpec):
1530-
15311544
fqn, is_mutable_buffer = self._find_fqn_for_placeholder(target, spec)
15321545

15331546
# From the fqn find the corresponding tensor

exir/passes/sym_shape_eval_pass.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ class HintBasedSymShapeEvalPass(PassBase):
196196
197197
Warning: if you're using torch.export with constrain API, this method doesn't respect the input constraints.
198198
199-
Not inherit from ExportPass since we simply need a way to iterate thru
199+
Not inherited from ExportPass since we simply need a way to iterate through
200200
every node's output. PassBase is easier for that purpose.
201201
"""
202202

@@ -260,7 +260,7 @@ class ConstraintBasedSymShapeEvalPass(PassBase):
260260
formula. We should convert those symbolic formula to concrete value for
261261
static/upperbound tensors so we can properly do memory planning for them.
262262
263-
Not inherit from ExportPass since we simply need a way to iterate thru
263+
Not inherited from ExportPass since we simply need a way to iterate through
264264
every node's output. PassBase is easier for that purpose.
265265
"""
266266

exir/tensor.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,10 @@
2222
from executorch.exir.sym_util import eval_shape
2323

2424

25+
class AddressSpaceOverflowException(Exception):
26+
pass
27+
28+
2529
def num_bytes_from_shape_and_dtype(shape: torch.Size, dtype: torch.dtype) -> int:
2630
"""
2731
Assume the tensor is a contiguous one.
@@ -297,7 +301,9 @@ def make_allocation_info(mem_id: int, mem_offset: int) -> schema.AllocationDetai
297301
memory_offset_low = mem_offset & ((1 << 32) - 1)
298302
memory_offset_high = mem_offset >> 32
299303
if memory_offset_high >= 1 << 32:
300-
raise ValueError(f"mem_offset {mem_offset} does not fit in 64 bits")
304+
raise AddressSpaceOverflowException(
305+
f"mem_offset {mem_offset} does not fit in 64 bits"
306+
)
301307

302308
allocation_info = schema.AllocationDetails(
303309
memory_id=mem_id,

exir/tests/test_tensor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ def test_allocation_info_fails(self) -> None:
171171
)
172172
for test_case in test_cases:
173173
kwargs = test_case[0]
174-
with self.assertRaisesRegex(ValueError, test_case[1], msg=f"{kwargs}"):
174+
with self.assertRaisesRegex(Exception, test_case[1], msg=f"{kwargs}"):
175175
make_allocation_info(**kwargs)
176176

177177
def test_contiguous_stride_from_shape(self) -> None:

0 commit comments

Comments
 (0)