Skip to content

Commit e8a8e38

Browse files
committed
fx2trt: fix the add converter and apply Python linting changes
1 parent 038520d commit e8a8e38

File tree

9 files changed

+130
-73
lines changed

9 files changed

+130
-73
lines changed

py/torch_tensorrt/fx/converters/acc_ops_converters.py

Lines changed: 50 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -680,7 +680,8 @@ def acc_ops_batch_norm(
680680
@tensorrt_converter(acc_ops.layer_norm)
681681
def acc_ops_layer_norm(network, target, args, kwargs, name):
682682
return add_layer_norm(network, target, kwargs, name)
683-
683+
684+
684685
@tensorrt_converter(acc_ops.softmax)
685686
def acc_ops_softmax(
686687
network: TRTNetwork,
@@ -731,7 +732,8 @@ def acc_ops_tile(
731732
name: str,
732733
) -> Union[TRTTensor, Sequence[TRTTensor]]:
733734
return add_tile(network, target, kwargs, name)
734-
735+
736+
735737
@tensorrt_converter(acc_ops.sign)
736738
def acc_ops_sign(
737739
network: TRTNetwork,
@@ -760,6 +762,7 @@ def acc_ops_relu(
760762
) -> Union[TRTTensor, Sequence[TRTTensor]]:
761763
return add_relu(network, target, kwargs, name)
762764

765+
763766
@tensorrt_converter(acc_ops.leaky_relu)
764767
def acc_ops_leaky_relu(
765768
network: TRTNetwork,
@@ -769,7 +772,8 @@ def acc_ops_leaky_relu(
769772
name: str,
770773
) -> Union[TRTTensor, Sequence[TRTTensor]]:
771774
return add_leaky_relu(network, target, kwargs, name)
772-
775+
776+
773777
@tensorrt_converter(acc_ops.elu)
774778
def acc_ops_elu(
775779
network: TRTNetwork,
@@ -779,7 +783,8 @@ def acc_ops_elu(
779783
name: str,
780784
) -> Union[TRTTensor, Sequence[TRTTensor]]:
781785
return add_elu(network, target, kwargs, name)
782-
786+
787+
783788
@tensorrt_converter(acc_ops.selu)
784789
def acc_ops_selu(
785790
network: TRTNetwork,
@@ -790,6 +795,7 @@ def acc_ops_selu(
790795
) -> Union[TRTTensor, Sequence[TRTTensor]]:
791796
return add_selu(network, target, kwargs, name)
792797

798+
793799
@tensorrt_converter(acc_ops.softsign)
794800
def acc_ops_softsign(
795801
network: TRTNetwork,
@@ -799,7 +805,8 @@ def acc_ops_softsign(
799805
name: str,
800806
) -> Union[TRTTensor, Sequence[TRTTensor]]:
801807
return add_softsign(network, target, kwargs, name)
802-
808+
809+
803810
@tensorrt_converter(acc_ops.sin)
804811
def acc_ops_sin(
805812
network: TRTNetwork,
@@ -875,6 +882,7 @@ def acc_ops_tanh(
875882
) -> Union[TRTTensor, Sequence[TRTTensor]]:
876883
return add_tanh(network, target, kwargs, name)
877884

885+
878886
@tensorrt_converter(acc_ops.asin)
879887
def acc_ops_asin(
880888
network: TRTNetwork,
@@ -1191,7 +1199,8 @@ def acc_ops_maximum(
11911199
name: str,
11921200
) -> Union[TRTTensor, Sequence[TRTTensor]]:
11931201
return add_maximum(network, target, kwargs, name)
1194-
1202+
1203+
11951204
@tensorrt_converter(acc_ops.minimum)
11961205
def acc_ops_minimum(
11971206
network: TRTNetwork,
@@ -1201,7 +1210,8 @@ def acc_ops_minimum(
12011210
name: str,
12021211
) -> Union[TRTTensor, Sequence[TRTTensor]]:
12031212
return add_minimum(network, target, kwargs, name)
1204-
1213+
1214+
12051215
@tensorrt_converter(acc_ops.dtype)
12061216
def acc_ops_dtype(
12071217
network: TRTNetwork,
@@ -1271,6 +1281,7 @@ def acc_ops_logical_and(
12711281
) -> Union[TRTTensor, Sequence[TRTTensor]]:
12721282
return add_logical_and(network, target, kwargs, name)
12731283

1284+
12741285
@tensorrt_converter(acc_ops.ne, no_implicit_batch_dim=True)
12751286
def acc_ops_ne(
12761287
network: TRTNetwork,
@@ -1280,7 +1291,8 @@ def acc_ops_ne(
12801291
name: str,
12811292
) -> Union[TRTTensor, Sequence[TRTTensor]]:
12821293
return add_ne(network, target, kwargs, name)
1283-
1294+
1295+
12841296
@tensorrt_converter(acc_ops.eq, no_implicit_batch_dim=True)
12851297
def acc_ops_eq(
12861298
network: TRTNetwork,
@@ -1290,7 +1302,8 @@ def acc_ops_eq(
12901302
name: str,
12911303
) -> Union[TRTTensor, Sequence[TRTTensor]]:
12921304
return add_eq(network, target, kwargs, name)
1293-
1305+
1306+
12941307
@tensorrt_converter(acc_ops.gt, no_implicit_batch_dim=True)
12951308
def acc_ops_gt(
12961309
network: TRTNetwork,
@@ -1300,7 +1313,8 @@ def acc_ops_gt(
13001313
name: str,
13011314
) -> Union[TRTTensor, Sequence[TRTTensor]]:
13021315
return add_gt(network, target, kwargs, name)
1303-
1316+
1317+
13041318
@tensorrt_converter(acc_ops.lt, no_implicit_batch_dim=True)
13051319
def acc_ops_lt(
13061320
network: TRTNetwork,
@@ -1310,7 +1324,7 @@ def acc_ops_lt(
13101324
name: str,
13111325
) -> Union[TRTTensor, Sequence[TRTTensor]]:
13121326
return add_lt(network, target, kwargs, name)
1313-
1327+
13141328

13151329
@tensorrt_converter(acc_ops.logical_or, no_implicit_batch_dim=True)
13161330
def acc_ops_logical_or(
@@ -1321,7 +1335,8 @@ def acc_ops_logical_or(
13211335
name: str,
13221336
) -> Union[TRTTensor, Sequence[TRTTensor]]:
13231337
return add_logical_or(network, target, kwargs, name)
1324-
1338+
1339+
13251340
@tensorrt_converter(acc_ops.logical_xor, no_implicit_batch_dim=True)
13261341
def acc_ops_logical_xor(
13271342
network: TRTNetwork,
@@ -1331,7 +1346,8 @@ def acc_ops_logical_xor(
13311346
name: str,
13321347
) -> Union[TRTTensor, Sequence[TRTTensor]]:
13331348
return add_logical_xor(network, target, kwargs, name)
1334-
1349+
1350+
13351351
# T113156424 Have some accuracy problems in hf_T5.
13361352
# [TRT] [W] Weights [name=isinf_1_inf_t]: Converted FP32 value in weights (either FP32 infinity or FP32 value outside FP16 range) to corresponding FP16 infinity. If this is not the desired behavior, please modify the weights or retrain with regularization to reduce the magnitude of the weights.
13371353
# @tensorrt_converter(acc_ops.isinf)
@@ -1425,6 +1441,7 @@ def acc_ops_fmod(
14251441
) -> Union[TRTTensor, Sequence[TRTTensor]]:
14261442
return add_fmod(network, target, kwargs, name)
14271443

1444+
14281445
# T113156424 embedding implementation is very limited and shows no usage in hf models because the indices are int64.
14291446
# if we cast to int32, it will create accuracy issues. We'd better leave it to future implementation.
14301447
# @tensorrt_converter(acc_ops.embedding, no_implicit_batch_dim=True)
@@ -1653,6 +1670,7 @@ def acc_ops_add(
16531670
) -> Union[TRTTensor, Sequence[TRTTensor]]:
16541671
return add_add(network, target, kwargs, name)
16551672

1673+
16561674
@tensorrt_converter(acc_ops.sub)
16571675
def acc_ops_sub(
16581676
network: TRTNetwork,
@@ -1663,6 +1681,7 @@ def acc_ops_sub(
16631681
) -> Union[TRTTensor, Sequence[TRTTensor]]:
16641682
return add_sub(network, target, kwargs, name)
16651683

1684+
16661685
@tensorrt_converter(acc_ops.div)
16671686
def acc_ops_div(
16681687
network: TRTNetwork,
@@ -1673,6 +1692,7 @@ def acc_ops_div(
16731692
) -> Union[TRTTensor, Sequence[TRTTensor]]:
16741693
return add_div(network, target, kwargs, name)
16751694

1695+
16761696
@tensorrt_converter(acc_ops.floor_div)
16771697
def acc_ops_floor_div(
16781698
network: TRTNetwork,
@@ -1682,7 +1702,8 @@ def acc_ops_floor_div(
16821702
name: str,
16831703
) -> Union[TRTTensor, Sequence[TRTTensor]]:
16841704
return add_floor_div(network, target, kwargs, name)
1685-
1705+
1706+
16861707
@tensorrt_converter(acc_ops.trunc_div)
16871708
def acc_ops_trunc_div(
16881709
network: TRTNetwork,
@@ -1692,7 +1713,8 @@ def acc_ops_trunc_div(
16921713
name: str,
16931714
) -> Union[TRTTensor, Sequence[TRTTensor]]:
16941715
return add_trunc_div(network, target, kwargs, name)
1695-
1716+
1717+
16961718
@tensorrt_converter(acc_ops.mul)
16971719
def acc_ops_mul(
16981720
network: TRTNetwork,
@@ -1702,7 +1724,8 @@ def acc_ops_mul(
17021724
name: str,
17031725
) -> Union[TRTTensor, Sequence[TRTTensor]]:
17041726
return add_mul(network, target, kwargs, name)
1705-
1727+
1728+
17061729
@tensorrt_converter(acc_ops.pow)
17071730
def acc_ops_pow(
17081731
network: TRTNetwork,
@@ -1713,6 +1736,7 @@ def acc_ops_pow(
17131736
) -> Union[TRTTensor, Sequence[TRTTensor]]:
17141737
return add_pow(network, target, kwargs, name)
17151738

1739+
17161740
@tensorrt_converter(acc_ops.unsqueeze)
17171741
def acc_ops_unsqueeze(
17181742
network: TRTNetwork,
@@ -1982,7 +2006,7 @@ def acc_ops_slice_tensor(
19822006
name: str,
19832007
) -> Union[TRTTensor, Sequence[TRTTensor]]:
19842008
return add_slice(network, target, kwargs, name)
1985-
2009+
19862010

19872011
@tensorrt_converter(acc_ops.expand)
19882012
def acc_ops_expand_tensor(
@@ -1993,7 +2017,7 @@ def acc_ops_expand_tensor(
19932017
name: str,
19942018
) -> Union[TRTTensor, Sequence[TRTTensor]]:
19952019
return add_expand(network, target, kwargs, name)
1996-
2020+
19972021

19982022
@tensorrt_converter(acc_ops.where)
19992023
def acc_ops_where(
@@ -2215,7 +2239,8 @@ def acc_ops_linear(
22152239
name: str,
22162240
) -> Union[TRTTensor, Sequence[TRTTensor]]:
22172241
return add_linear(network, target, kwargs, name)
2218-
2242+
2243+
22192244
def add_clamp(network, input, val, op, name):
22202245
if not len(input.shape):
22212246
# clamping scalar
@@ -2468,7 +2493,8 @@ def acc_ops_matmul(
24682493
name: str,
24692494
) -> Union[TRTTensor, Sequence[TRTTensor]]:
24702495
return add_matmul(network, target, kwargs, name)
2471-
2496+
2497+
24722498
@tensorrt_converter(acc_ops.hardsigmoid)
24732499
def acc_ops_hard_sigmoid(
24742500
network: TRTNetwork,
@@ -2690,7 +2716,7 @@ def acc_ops_gelu(
26902716
name: str,
26912717
) -> Union[TRTTensor, Sequence[TRTTensor]]:
26922718
return add_gelu(network, target, kwargs, name)
2693-
2719+
26942720

26952721
@tensorrt_converter(acc_ops.chunk)
26962722
def acc_ops_chunk(
@@ -2767,7 +2793,8 @@ def acc_ops_cumsum(
27672793
name: str,
27682794
) -> Union[TRTTensor, Sequence[TRTTensor]]:
27692795
return add_cumsum(network, target, kwargs, name)
2770-
2796+
2797+
27712798
@tensorrt_converter(acc_ops.hardtanh)
27722799
def acc_ops_hardtanh(
27732800
network: TRTNetwork,
@@ -2778,6 +2805,7 @@ def acc_ops_hardtanh(
27782805
) -> Union[TRTTensor, Sequence[TRTTensor]]:
27792806
return add_hardtanh(network, target, kwargs, name)
27802807

2808+
27812809
@tensorrt_converter(acc_ops.interpolate)
27822810
def acc_ops_interpolate(
27832811
network: TRTNetwork,

0 commit comments

Comments
 (0)