
Commit fab3d79

apbose authored and gs-olive committed

converter reorg

1 parent 12f545c · commit fab3d79

File tree

5 files changed: +133 -7 lines changed

py/torch_tensorrt/fx/converters/acc_ops_converters.py

Lines changed: 4 additions & 7 deletions

@@ -1023,17 +1023,14 @@ def acc_ops_leaky_relu(
     kwargs: Dict[str, Argument],
     name: str,
 ) -> Union[TRTTensor, Sequence[TRTTensor]]:
-    input_val = kwargs["input"]
-    negative_slope = kwargs["negative_slope"]
-    operation_type = trt.ActivationType.LEAKY_RELU
-    return activation.convert_activation(
+
+    return activation.leaky_relu(
         network,
         target,
         SourceIR.ACC,
         name,
-        operation_type,
-        input_val,
-        alpha=negative_slope,
+        kwargs["input"],
+        kwargs["negative_slope"]
     )

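The hunk above is the heart of the reorg: the acc frontend no longer builds the LEAKY_RELU activation inline, but delegates to a shared activation.leaky_relu helper (added in impl/activation.py below), tagging the call with SourceIR.ACC so the shared implementation knows which frontend produced it. A minimal sketch of this delegation pattern, with simplified stand-in names rather than the library's actual signatures:

    from enum import Enum, auto

    class SourceIR(Enum):
        ACC = auto()
        ATEN = auto()
        NN = auto()

    def leaky_relu_impl(source_ir: SourceIR, input_val, alpha):
        # One shared lowering reused by every frontend; source_ir only
        # affects layer naming/diagnostics, not the computation.
        return f"leaky_relu(alpha={alpha}) lowered from {source_ir.name}"

    # Each frontend converter is now a thin adapter over the shared impl:
    def acc_ops_leaky_relu(kwargs):
        return leaky_relu_impl(SourceIR.ACC, kwargs["input"], kwargs["negative_slope"])

    def aten_ops_leaky_relu(args):
        return leaky_relu_impl(SourceIR.ATEN, args[0], args[1])
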
py/torch_tensorrt/fx/converters/aten_ops_converters.py

Lines changed: 35 additions & 0 deletions

@@ -201,6 +201,41 @@ def aten_ops_fmod(
     return acc_ops_converters.acc_ops_fmod(network, target, None, kwargs_new, name)


+@tensorrt_converter(torch.ops.aten.fmod.Tensor)
+def aten_ops_fmod(
+    network: TRTNetwork,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+    kwargs_new = {
+        "input": args[0],
+        "other": args[1],
+    }
+    return acc_ops_converters.acc_ops_fmod(network, target, None, kwargs_new, name)
+
+
+
+@tensorrt_converter(torch.ops.aten.leaky_relu.default)
+def aten_ops_leaky_relu(
+    network: TRTNetwork,
+    target: Target,
+    args: Tuple[Argument, ...],
+    kwargs: Dict[str, Argument],
+    name: str,
+) -> Union[TRTTensor, Sequence[TRTTensor]]:
+
+    return activation.leaky_relu(
+        network,
+        target,
+        SourceIR.ATEN,
+        name,
+        args[0],
+        args[1]
+    )
+
+
 @tensorrt_converter(torch.ops.aten.linear)
 def aten_ops_linear(
     network: TRTNetwork,

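Both new aten converters register themselves with the @tensorrt_converter decorator, keyed on the target op. A minimal sketch of the registry idea, assuming a plain dict (illustrative only; the real decorator and registry live in torch_tensorrt.fx.converter_registry):

    from typing import Any, Callable, Dict

    CONVERTERS: Dict[Any, Callable] = {}

    def tensorrt_converter(target: Any) -> Callable:
        # Decorator factory: maps a target op (e.g. torch.ops.aten.leaky_relu.default)
        # to the function that knows how to lower it to TensorRT layers.
        def register(converter: Callable) -> Callable:
            CONVERTERS[target] = converter
            return converter
        return register

    @tensorrt_converter("aten.leaky_relu.default")  # stand-in key for the real op
    def demo_converter(network, target, args, kwargs, name):
        ...

    assert CONVERTERS["aten.leaky_relu.default"] is demo_converter
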
py/torch_tensorrt/fx/converters/impl/activation.py

Lines changed: 25 additions & 0 deletions

@@ -90,3 +90,28 @@ def relu_dyn_range_fn(dyn_range):
         input_val,
         dyn_range_fn=relu_dyn_range_fn,
     )
+
+
+def leaky_relu(
+    network: TRTNetwork,
+    target: Target,
+    source_ir: Optional[SourceIR],
+    name: str,
+    input_val: TRTTensor,
+    alpha: Optional[Any]
+):
+    operation_type = trt.ActivationType.LEAKY_RELU
+
+    def leaky_relu_dyn_range_fn(dyn_range):
+        return (max(0, dyn_range[0]) + alpha * min(0, dyn_range[0])), (max(0, dyn_range[1]) + alpha * min(0, dyn_range[1]))
+
+    return convert_activation(
+        network,
+        target,
+        source_ir,
+        name,
+        operation_type,
+        input_val,
+        alpha,
+        dyn_range_fn=leaky_relu_dyn_range_fn,
+    )

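The leaky_relu_dyn_range_fn above propagates a tensor's dynamic range through the activation: leaky_relu is monotonically increasing (for positive alpha), so applying it to each endpoint of the input range yields the output range. A quick standalone check of that arithmetic (the numbers are illustrative):

    alpha = 0.05

    def leaky_relu_dyn_range_fn(dyn_range):
        # leaky_relu(x) = x for x >= 0 and alpha * x for x < 0, so each
        # endpoint maps to max(0, x) + alpha * min(0, x).
        return (
            max(0, dyn_range[0]) + alpha * min(0, dyn_range[0]),
            max(0, dyn_range[1]) + alpha * min(0, dyn_range[1]),
        )

    print(leaky_relu_dyn_range_fn((-2.0, 3.0)))  # (-0.1, 3.0)
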
py/torch_tensorrt/fx/converters/nn_ops_converters.py

Lines changed: 16 additions & 0 deletions

@@ -22,3 +22,19 @@ def relu(network, submod, args, kwargs, layer_name):
         name=layer_name,
         input_val=kwargs["input"],
     )
+
+
+@tensorrt_converter(torch.nn.functional.leaky_relu)
+@tensorrt_converter(torch.nn.modules.activation.LeakyReLU)
+def leaky_relu(network, submod, args, kwargs, layer_name):
+    # args/kwargs should have already been normalized to kwargs
+    assert len(args) == 0
+
+    return activation.leaky_relu(
+        network=network,
+        target="torch.nn.functional.leaky_relu",
+        source_ir=SourceIR.NN,
+        name=layer_name,
+        input_val=kwargs["input"],
+        alpha=kwargs["negative_slope"]
+    )
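
The nn converters assume positional args have already been normalized into kwargs (hence the assert len(args) == 0). A sketch of what that normalization amounts to, using inspect.signature purely for illustration (in the library it is done by the tracer's normalization passes, not like this):

    import inspect
    import torch

    sig = inspect.signature(torch.nn.functional.leaky_relu)
    bound = sig.bind(torch.randn(2, 3), 0.05)  # a positional call site
    bound.apply_defaults()
    # Positional arguments are now addressable by name, which is what lets
    # the converter read kwargs["input"] and kwargs["negative_slope"].
    print(list(bound.arguments))  # ['input', 'negative_slope', 'inplace']
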
Lines changed: 53 additions & 0 deletions

@@ -0,0 +1,53 @@
+import torch
+import torch.nn as nn
+from torch.testing._internal.common_utils import run_tests
+from torch_tensorrt.fx.tools.common_fx2trt import DispatchTestCase, InputTensorSpec
+
+
+class TestLeakyReLUConverter(DispatchTestCase):
+    def test_leaky_relu(self):
+        class TestModule(nn.Module):
+            def forward(self, x):
+                return nn.functional.leaky_relu(x, negative_slope=0.05)
+
+        inputs = [torch.randn(1, 10)]
+        self.run_test(
+            TestModule(), inputs, expected_ops={torch.ops.aten.leaky_relu.default}
+        )
+
+    def test_leaky_relu_with_dynamic_shape(self):
+        class TestModule(nn.Module):
+            def forward(self, x):
+                return nn.functional.leaky_relu(x, negative_slope=0.05)
+
+        input_specs = [
+            InputTensorSpec(
+                shape=(-1, -1, -1),
+                dtype=torch.float32,
+                shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
+            ),
+        ]
+        self.run_test_with_dynamic_shape(
+            TestModule(), input_specs, expected_ops={torch.ops.aten.leaky_relu.default}
+        )
+
+    def test_leaky_relu_with_dynamic_shape_four_dimensions(self):
+        class TestModule(nn.Module):
+            def forward(self, x):
+                return nn.functional.leaky_relu(x, negative_slope=0.05)
+
+        input_specs = [
+            InputTensorSpec(
+                shape=(-1, -1, -1, -1),
+                dtype=torch.float32,
+                shape_ranges=[((1, 1, 1, 5), (1, 2, 3, 5), (3, 3, 3, 5))],
+            ),
+        ]
+
+        self.run_test_with_dynamic_shape(
+            TestModule(), input_specs, expected_ops={torch.ops.aten.leaky_relu.default}
+        )
+
+
+if __name__ == "__main__":
+    run_tests()

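The new test file exercises the aten.leaky_relu.default path end to end through DispatchTestCase. As a quick eager-mode sanity check of the module under test, independent of TensorRT: with negative_slope=0.05, positive values pass through and negative values are scaled by 0.05:

    import torch
    import torch.nn as nn

    x = torch.tensor([-2.0, 0.0, 3.0])
    print(nn.functional.leaky_relu(x, negative_slope=0.05))
    # tensor([-0.1000,  0.0000,  3.0000])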