Commit ce4917c

remove pad custom op (#4801)
1 parent ea4a187 · commit ce4917c

5 files changed: +0 additions, -137 deletions

examples/models/flamingo/export_preprocess_lib.py

Lines changed: 0 additions & 7 deletions
@@ -15,10 +15,6 @@
 from torch.export import Dim, ExportedProgram
 from torchtune.models.clip.inference._transforms import _CLIPImageTransform
 
-from .passes.replace_custom_ops_with_aten_ops_pass import (
-    ReplaceCustomOpsWithAtenOpsPass,
-)
-
 
 def get_example_inputs() -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
     image = torch.ones(3, 800, 600)

@@ -59,7 +55,6 @@ def export_preprocess(
     )
 
     # Replace non-exportable ops with custom ops.
-    image_transform_model.pad = torch.ops.preprocess.pad.default
     image_transform_model.tile_crop = torch.ops.preprocess.tile_crop.default
 
     # Export.

@@ -80,8 +75,6 @@ def lower_to_executorch_preprocess(
     edge_program = to_edge(
         exported_program, compile_config=EdgeCompileConfig(_check_ir_validity=False)
     )
-    # Replace custom ops with aten ops.
-    edge_program = edge_program.transform([ReplaceCustomOpsWithAtenOpsPass()])
 
     et_program = edge_program.to_executorch(ExecutorchBackendConfig())
     return et_program
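
The swap that survives this change follows the usual torch.library pattern: define a custom op, register an implementation under CompositeExplicitAutograd, and point the module's method at the op before export. A minimal sketch of that pattern, for context only: the demo_preprocess namespace, the toy module, and the toy tile-crop behavior are invented for illustration (they are not the real tile_crop kernel), and the sketch stops short of the torch.export / to_edge / to_executorch calls shown in the hunks above.

import torch
from torch.library import Library, impl

# Hypothetical namespace for illustration; the real op lives in the
# "preprocess" library defined in preprocess_custom_ops.py.
demo_lib = Library("demo_preprocess", "DEF")
demo_lib.define("tile_crop(Tensor input, int tile_size) -> Tensor")


@impl(demo_lib, "tile_crop", dispatch_key="CompositeExplicitAutograd")
def tile_crop_impl(input: torch.Tensor, tile_size: int) -> torch.Tensor:
    # Toy behavior: keep only the top-left tile. The real kernel tiles the
    # whole image; this is just enough to show the dispatch.
    return input[..., :tile_size, :tile_size].clone()


class ToyImageTransform(torch.nn.Module):
    def tile_crop(self, x: torch.Tensor, tile_size: int) -> torch.Tensor:
        raise NotImplementedError("eager implementation is not exportable")

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.tile_crop(x, 224)


model = ToyImageTransform()
# The swap performed in export_preprocess(): the method now dispatches to the
# registered custom op, which shows up in the exported graph as a single call.
model.tile_crop = torch.ops.demo_preprocess.tile_crop.default
out = model(torch.ones(3, 800, 600))
assert out.shape == (3, 224, 224)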

examples/models/flamingo/passes/__init__.py

Whitespace-only changes.

examples/models/flamingo/passes/replace_custom_ops_with_aten_ops_pass.py

Lines changed: 0 additions & 31 deletions
This file was deleted.

examples/models/flamingo/passes/test_passes.py

Lines changed: 0 additions & 50 deletions
This file was deleted.

extension/llm/custom_ops/preprocess_custom_ops.py

Lines changed: 0 additions & 49 deletions
@@ -7,61 +7,12 @@
 # pyre-unsafe
 
 
-from typing import List
-
 import torch
 
 from torch.library import impl, Library
 
 preprocess_op_lib = Library("preprocess", "DEF")
 
-# Register and define pad and out variant.
-# Note: pad doesn't require an explicit meta kernel because
-# CompositeExplicitAutograd automatically registers the implementation to meta,
-# and meta kernels do not go through functionalization. The implementation
-# does not export due to issues during functionalization.
-# See: https://github.com/pytorch/pytorch/issues/120288
-preprocess_op_lib.define("pad(Tensor image, SymInt[] padding) -> Tensor")
-
-
-@impl(preprocess_op_lib, "pad", dispatch_key="CompositeExplicitAutograd")
-def pad_impl(
-    image: torch.Tensor,
-    padding: List[int],
-) -> torch.Tensor:
-    output = torch.empty(
-        [image.shape[0], image.shape[1] + padding[3], image.shape[2] + padding[1]],
-        dtype=image.dtype,
-        device=image.device,
-        requires_grad=False,
-    )
-    output = torch.fill(output, 0)
-    output.narrow(1, 0, image.shape[1]).narrow(2, 0, image.shape[2]).copy_(image)
-    return output
-
-
-preprocess_op_lib.define(
-    "pad.out(Tensor image, SymInt[] padding, *, Tensor(a!) out) -> Tensor(a!)"
-)
-
-
-@impl(preprocess_op_lib, "pad.out", dispatch_key="CompositeExplicitAutograd")
-def pad_out_impl(
-    image: torch.Tensor,
-    padding: List[int],
-    out: torch.Tensor,
-) -> torch.Tensor:
-    out = torch.empty(
-        [image.shape[0], image.shape[1] + padding[3], image.shape[2] + padding[1]],
-        dtype=image.dtype,
-        device=image.device,
-        requires_grad=False,
-    )
-    out = torch.fill(out, 0)
-    out.narrow(1, 0, image.shape[1]).narrow(2, 0, image.shape[2]).copy_(image)
-    return out
-
-
 # Register and define tile_crop and out variant.
 preprocess_op_lib.define("tile_crop(Tensor input, int tile_size) -> Tensor")
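
For reference, the deleted pad_impl only zero-padded the bottom and right edges of a [C, H, W] image, which a plain aten pad can reproduce; presumably that is what makes the custom op (and the ReplaceCustomOpsWithAtenOpsPass that rewrote it back to aten) unnecessary. A minimal sketch of that equivalence, assuming the padding layout of the deleted implementation and made-up example values:

from typing import List

import torch
import torch.nn.functional as F


def pad_like_removed_op(image: torch.Tensor, padding: List[int]) -> torch.Tensor:
    # Same semantics as the deleted pad_impl: zero-fill an output of shape
    # [C, H + padding[3], W + padding[1]] and copy the image into the top-left.
    output = torch.zeros(
        image.shape[0],
        image.shape[1] + padding[3],
        image.shape[2] + padding[1],
        dtype=image.dtype,
        device=image.device,
    )
    output.narrow(1, 0, image.shape[1]).narrow(2, 0, image.shape[2]).copy_(image)
    return output


image = torch.ones(3, 800, 600)
padding = [0, 8, 0, 16]  # example values; index layout follows the deleted impl
custom = pad_like_removed_op(image, padding)
# F.pad takes [left, right, top, bottom] for the last two dims; zeros by default.
aten = F.pad(image, [0, padding[1], 0, padding[3]])
assert torch.equal(custom, aten)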
