Fix memory profiling for memory.view ops #7925

Merged · 3 commits · Jan 28, 2025
exir/memory_planning.py (1 change: 1 addition & 0 deletions)

@@ -747,6 +747,7 @@ def apply_algo(
     storage with tensors in the outer module.
     TODO: make these optimizations once we have some baseline working.
     """
+
     specs = update_all_tensors_lifetime(graph_module, graph_signature)
     bufsizes: List[int] = algo(
         graph_module, alignment, graph_signature, alloc_graph_input, alloc_graph_output
util/activation_memory_profiler.py (26 changes: 17 additions & 9 deletions)

@@ -15,7 +15,8 @@
 import torch
 from executorch.exir import ExecutorchProgramManager
 from executorch.exir.memory_planning import get_node_tensor_specs
-from executorch.exir.tensor import num_bytes_from_shape_and_dtype
+
+from executorch.exir.tensor import num_bytes_from_shape_and_dtype, TensorSpec
 from torch.export import ExportedProgram

@@ -53,10 +54,11 @@ def create_tensor_allocation_info(graph: torch.fx.Graph) -> List[MemoryTimeline]
     """
     nodes = graph.nodes
     memory_timeline: List[Optional[MemoryTimeline]] = [None for _ in range(len(nodes))]
+    unique_specs: set[TensorSpec] = set()
     for _, node in enumerate(nodes):
         if node.op == "output":
             continue
-        if node.target == memory.alloc:
+        if node.target == memory.alloc or node.target == memory.view:
             continue
         tensor_specs = get_node_tensor_specs(node)
         if tensor_specs is None:

@@ -65,6 +67,9 @@
             # TODO: Make use of mem_id in the allocation info
             if tensor_spec is None or tensor_spec.mem_id is None or tensor_spec.const:
                 continue
+            if tensor_spec in unique_specs:
+                continue
+            unique_specs.add(tensor_spec)
             start, end = tensor_spec.lifetime
             size = num_bytes_from_shape_and_dtype(
                 typing.cast(torch.Size, tensor_spec.shape), tensor_spec.dtype
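The `unique_specs` set is what actually stops the double counting: a `memory.view` node reuses the `TensorSpec` of the tensor it aliases, so a per-node walk that sums sizes would charge the same buffer once for the allocation and again for the view. A minimal sketch of the idea, using a hypothetical stand-in class rather than the real `TensorSpec`:

# Stand-in for TensorSpec (hypothetical class): a view shares its source's
# spec object, so deduplicating on the spec counts each buffer exactly once.
class FakeSpec:
    def __init__(self, nbytes: int) -> None:
        self.nbytes = nbytes

base = FakeSpec(1024)  # spec backing the real allocation
view = base            # a memory.view node reuses the same spec

seen: set = set()
total = 0
for spec in (base, view):
    if spec in seen:   # mirrors the `tensor_spec in unique_specs` check
        continue
    seen.add(spec)
    total += spec.nbytes

assert total == 1024   # the view no longer inflates the total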
@@ -75,6 +80,7 @@
                 memory_timeline_j = memory_timeline[j]
                 if memory_timeline_j is None:
                     memory_timeline_j = MemoryTimeline()
+                    memory_timeline[j] = memory_timeline_j
                 assert memory_timeline_j
                 memory_timeline_j.allocations.append(
                     Allocation(
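Separately, the added `memory_timeline[j] = memory_timeline_j` line fixes a local-rebinding slip: binding a fresh `MemoryTimeline()` to the local name never touched the list slot, so the allocations appended afterwards were lost. The pattern in miniature, with plain lists standing in for `MemoryTimeline`:

# Rebinding a local name does not update the list it was read from;
# the fresh object must be written back explicitly.
timeline: list = [None, None, None]

slot = timeline[1]
if slot is None:
    slot = []            # fresh object, bound only to the local name
    timeline[1] = slot   # the fix: persist it in the list slot
slot.append("allocation")

assert timeline[1] == ["allocation"]  # stays None without the write-back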
@@ -106,6 +112,7 @@ def generate_memory_trace(
     chrome_trace_filename: str,
     enable_memory_offsets: bool = False,
     method_name: str = "forward",
+    ommit_metadata: bool = False,
 ):
     """
     Generate the memory timeline from the given ExecuTorch program.
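A call that exercises the new flag might look like the sketch below. The import path is inferred from the file's location in the repository, `prog` stands for an `ExecutorchProgramManager` produced elsewhere, and the keyword keeps the `ommit_metadata` spelling used in this diff.

# Sketch under stated assumptions; `prog` is an ExecutorchProgramManager
# obtained from an export/to_executorch pipeline (not shown here).
from executorch.util.activation_memory_profiler import generate_memory_trace

generate_memory_trace(
    prog,
    chrome_trace_filename="memory_trace.json",
    enable_memory_offsets=False,
    method_name="forward",
    ommit_metadata=True,  # skip per-event args to shrink the trace file
)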
@@ -151,13 +158,14 @@
             e["pid"] = int(allocation.memory_id)
             e["tid"] = tid
             e["args"] = {}
-            e["args"]["op_name"] = f"{allocation.op_name}"
-            # ID refers to memory space, typically from 1 to N.
-            # For CPU, everything is allocated on one "space", other backends may have multiple.
-            e["args"]["Memory ID"] = allocation.memory_id
-            e["args"]["fqn"] = f"{allocation.fqn}"
-            e["args"]["source"] = f"{allocation.file_and_line_num}"
-            e["args"]["bytes"] = allocation.size_bytes
+            if not ommit_metadata:
+                e["args"]["op_name"] = f"{allocation.op_name}"
+                # ID refers to memory space, typically from 1 to N.
+                # For CPU, everything is allocated on one "space", other backends may have multiple.
+                e["args"]["Memory ID"] = allocation.memory_id
+                e["args"]["fqn"] = f"{allocation.fqn}"
+                e["args"]["source"] = f"{allocation.file_and_line_num}"
+                e["args"]["bytes"] = allocation.size_bytes
             start_time += allocation_size_kb
             trace_events.append(e)
         tid += 1
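Each `e` assembled in this loop becomes one event in the emitted Chrome trace (loadable in chrome://tracing or Perfetto); with `ommit_metadata=True` the `args` dict simply stays empty. For orientation, one event might look roughly like this, with all values invented for illustration:

# Illustrative only: values are made up, and any fields beyond pid/tid/args
# are set in parts of the function this diff does not show.
event = {
    "pid": 1,  # memory ID the allocation lives in
    "tid": 0,  # one track per timeline row
    "args": {  # left empty when ommit_metadata=True
        "op_name": "aten::add.out",
        "Memory ID": 1,
        "fqn": "layers.0.linear",
        "source": "model.py:42",
        "bytes": 4096,
    },
}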