Commit ec31ecf

update constant pad
1 parent fed0533 commit ec31ecf

File tree

  • py/torch_tensorrt/dynamo/conversion/impl

1 file changed (+33, -51 lines)


py/torch_tensorrt/dynamo/conversion/impl/pad.py

Lines changed: 33 additions & 51 deletions
@@ -6,10 +6,7 @@
 from torch.fx.node import Target
 from torch_tensorrt.dynamo._SourceIR import SourceIR
 from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
-from torch_tensorrt.fx.converters.converter_utils import (
-    has_dynamic_shape,
-    set_layer_name,
-)
+from torch_tensorrt.fx.converters.converter_utils import has_dynamic_shape
 from torch_tensorrt.fx.types import TRTTensor


@@ -22,58 +19,43 @@ def constant_padNd(
     pad: Sequence[int],
     value: Union[int, float] = 0,
 ) -> TRTTensor:
+    """
+    Note: IPaddingLayer is deprecated in TensorRT 8.2 and will be removed in TensorRT 10.0.
+    Use ISliceLayer to pad the tensor, which supports new non-constant, reflects padding
+    mode and clamp, and supports padding output with dynamic shape.
+    """
     if has_dynamic_shape(input.shape):
         assert input.shape[1] != -1, "Channel dim can't be dynamic for padding."

-    pad_len = len(pad)
-
-    if pad_len == 4 and value == 0:
-        pre_padding = (pad[2], pad[0])
-        post_padding = (pad[3], pad[1])
-
-        # add padding layer
-        pad_layer = ctx.net.add_padding_nd(
-            input=input,
-            pre_padding=pre_padding,
-            post_padding=post_padding,
+    # Implement constant padding via concat
+    curr_dim = len(input.shape) - 1
+
+    for i in range(0, len(pad), 2):
+        input_shape = list(input.shape)
+
+        pre_pad = pad[i]
+        post_pad = pad[i + 1]
+        pre_pad_shape = copy.deepcopy(input_shape)
+        pre_pad_shape[curr_dim] = pre_pad
+        pre_pad_tensor = torch.full(pre_pad_shape, float(value))
+        if pre_pad == post_pad:
+            post_pad_tensor = pre_pad_tensor
+        else:
+            post_pad_shape = copy.deepcopy(input_shape)
+            post_pad_shape[curr_dim] = post_pad
+            post_pad_tensor = torch.full(post_pad_shape, float(value))
+        output = impl.cat.cat(
+            ctx,
+            target,
+            source_ir,
+            f"{name}_concat{curr_dim}",
+            input=(pre_pad_tensor, input, post_pad_tensor),
+            dim=curr_dim,
         )
+        curr_dim -= 1
+        input = output

-        pad_layer.pre_padding_nd = pre_padding
-        pad_layer.post_padding_nd = post_padding
-
-        set_layer_name(pad_layer, target, name, source_ir)
-        return pad_layer.get_output(0)
-
-    else:
-        # Implement constant padding via concat
-        curr_dim = len(input.shape) - 1
-
-        for i in range(0, pad_len, 2):
-            input_shape = list(input.shape)
-
-            pre_pad = pad[i]
-            post_pad = pad[i + 1]
-            pre_pad_shape = copy.deepcopy(input_shape)
-            pre_pad_shape[curr_dim] = pre_pad
-            pre_pad_tensor = torch.full(pre_pad_shape, float(value))
-            if pre_pad == post_pad:
-                post_pad_tensor = pre_pad_tensor
-            else:
-                post_pad_shape = copy.deepcopy(input_shape)
-                post_pad_shape[curr_dim] = post_pad
-                post_pad_tensor = torch.full(post_pad_shape, float(value))
-            output = impl.cat.cat(
-                ctx,
-                target,
-                source_ir,
-                f"{name}_concat{curr_dim}",
-                input=(pre_pad_tensor, input, post_pad_tensor),
-                dim=curr_dim,
-            )
-            curr_dim -= 1
-            input = output
-
-        return output
+    return output


 def reflection_padNd(
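
For readers skimming the change: the converter now builds constant padding out of plain concatenation, walking the (pre, post) pad pairs from the last dimension backwards and stitching constant-filled blocks around the input, instead of special-casing the deprecated IPaddingLayer. The snippet below is a minimal pure-PyTorch sketch of that same scheme, assuming the usual torch.nn.functional.pad pad-pair convention; the helper name constant_pad_via_cat and the example shapes are illustrative only and are not part of the commit.

import torch

def constant_pad_via_cat(x: torch.Tensor, pad, value=0.0) -> torch.Tensor:
    # `pad` holds (pre, post) pairs starting from the last dimension,
    # matching the torch.nn.functional.pad convention used by the converter.
    curr_dim = x.dim() - 1
    for i in range(0, len(pad), 2):
        pre_pad, post_pad = pad[i], pad[i + 1]
        shape = list(x.shape)
        shape[curr_dim] = pre_pad
        pre = torch.full(shape, float(value), dtype=x.dtype)
        shape[curr_dim] = post_pad
        post = torch.full(shape, float(value), dtype=x.dtype)
        # Stitch the constant blocks around the input along the current dimension.
        x = torch.cat((pre, x, post), dim=curr_dim)
        curr_dim -= 1
    return x

# Illustrative check against torch.nn.functional.pad:
x = torch.arange(6.0).reshape(1, 2, 3)
pad = (1, 2, 0, 1)  # last dim gets (1, 2), second-to-last gets (0, 1)
expected = torch.nn.functional.pad(x, pad, mode="constant", value=0.0)
assert torch.equal(constant_pad_via_cat(x, pad), expected)

In the committed converter the torch.cat call is replaced by impl.cat.cat, so the constant blocks become constants concatenated with the network tensor inside the TensorRT graph, which is what removes the dependence on add_padding_nd and the set_layer_name import.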

0 commit comments
