Commit 4c23ed2

Merge branch 'master' of https://github.com/NVIDIA/TRTorch
2 parents: 7aa57c3 + 8d5b123

File tree: 23 files changed (+1163, -70 lines)

Lines changed: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
+---
+name: "\U00002194 Op Converter Request"
+about: Submit a proposal/request to support a new PyTorch operator in TRTorch
+
+---
+
+##
+<!-- Name of operator to add support for -->
+
+- **Function Schema**:
+
+- **Original PyTorch API**:
+
+- **Relevant TensorRT Documentation**:
+
+## Alternatives
+
+<!-- A clear and concise description of any alternative solutions you've considered, if any. -->
+
+## Additional context
+
+<!-- Add any other context or screenshots about the operator request here. -->
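
For illustration, a hypothetical request filed with this template might read as follows; the operator chosen here is only an example and is not part of this commit:

## aten::sigmoid

- **Function Schema**: aten::sigmoid(Tensor self) -> (Tensor)

- **Original PyTorch API**: torch.sigmoid / torch.nn.Sigmoid

- **Relevant TensorRT Documentation**: IActivationLayer with ActivationType::kSIGMOID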

BUILD

Lines changed: 1 addition & 1 deletion
@@ -11,6 +11,7 @@ pkg_tar(
         "//core/conversion/var:include",
         "//core/conversion/tensorcontainer:include",
         "//core/conversion/evaluators:include",
+        "//core/conversion/converters/impl/plugins:include",
         "//core/execution:include",
         "//core/lowering:include",
         "//core/lowering/passes:include",
@@ -47,7 +48,6 @@ pkg_tar(
 )


-
 pkg_tar(
     name = "libtrtorch",
     extension = "tar.gz",

README.md

Lines changed: 0 additions & 4 deletions
@@ -203,10 +203,6 @@ Thanks for wanting to contribute! There are two main ways to handle supporting a

 You can register a converter for your op using the `NodeConverterRegistry` inside your application.

-## Known Limitations
-
-- You cannot use both Adaptive Pooling in PyTorch and also use TRTorch Dynamic input shape (follow [#49](https://github.com/NVIDIA/TRTorch/issues/49) for the latest on the issue)
-
 ## Structure of the repo

 | Component | Description |

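The README hunk above notes that converters for new ops can be registered through the `NodeConverterRegistry`. Below is a minimal sketch of what such a registration might look like in application code, modeled on the interpolate converters added in this commit; the op (aten::relu), the include paths, and the using-directives are illustrative assumptions, not part of this commit.

// Minimal sketch of registering a converter from application code, modeled on the
// interpolate converters in this commit. The op (aten::relu), the include paths and
// the using-directives are illustrative assumptions, not part of this commit.
#include "core/util/prelude.h"
#include "core/conversion/converters/converters.h"
#include "NvInfer.h"

namespace {
using namespace trtorch::core::conversion;
using namespace trtorch::core::conversion::converters;

auto my_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
    .pattern({
        "aten::relu(Tensor self) -> (Tensor)",
        [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
            // Fetch the TensorRT tensor backing the node's input
            auto in = args[0].ITensor();
            // Add the equivalent TensorRT layer to the network being built
            auto relu_layer = ctx->net->addActivation(*in, nvinfer1::ActivationType::kRELU);
            TRTORCH_CHECK(relu_layer, "Unable to create ReLU layer from node" << *n);
            relu_layer->setName(trtorch::core::util::node_info(n).c_str());
            // Associate the layer output with the TorchScript value it produces
            ctx->AssociateValueAndTensor(n->outputs()[0], relu_layer->getOutput(0));
            return true;
        }
    });
} // namespace
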
core/conversion/converters/BUILD

Lines changed: 5 additions & 2 deletions
@@ -10,7 +10,7 @@ config_setting(
 cc_library(
     name = "converters",
     hdrs = [
-        "converters.h",
+        "converters.h"
     ],
     srcs = [
         "NodeConverterRegistry.cpp",
@@ -28,14 +28,17 @@ cc_library(
         "impl/shuffle.cpp",
         "impl/softmax.cpp",
         "impl/unary.cpp",
-        "impl/interpolate.cpp"
+        "impl/interpolate.cpp",
+        "impl/select.cpp",
+        "impl/stack.cpp"
     ],
     deps = [
         "@tensorrt//:nvinfer",
         "//core/util:prelude",
         "//core/conversion/var",
         "//core/conversion/tensorcontainer",
         "//core/conversion/conversionctx",
+        "//core/conversion/converters/impl/plugins"
     ] + select({
         ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
         "//conditions:default": ["@libtorch//:libtorch"],

core/conversion/converters/impl/interpolate.cpp

Lines changed: 138 additions & 29 deletions
@@ -1,8 +1,9 @@
 #include "torch/torch.h"
 #include "core/util/prelude.h"
 #include "core/conversion/converters/converters.h"
-
-#include <csignal>
+#include "plugins/interpolate_plugin.h"
+#include "NvInfer.h"
+#include "NvInferRuntimeCommon.h"

 namespace trtorch {
 namespace core {
@@ -11,6 +12,54 @@ namespace converters {
 namespace impl {
 namespace {

+/*
+ * Helper functions
+ */
+
+void create_plugin(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* in, const char* name,
+                   std::vector<int64_t> in_shape,
+                   std::vector<int64_t> out_shape,
+                   std::vector<int64_t> out_size,
+                   std::string mode) {
+    LOG_WARNING("Interpolation layer will be run through ATen, not TensorRT. Performance may differ.");
+
+    auto creator = new plugins::InterpolatePluginCreator();
+    auto plugin = creator->createPlugin(name, in_shape, out_shape, out_size, mode, false);
+
+    auto resize_layer = ctx->net->addPluginV2(reinterpret_cast<nvinfer1::ITensor* const*>(&in), 1, *plugin);
+    TRTORCH_CHECK(resize_layer, "Unable to create interpolation plugin from node" << *n);
+
+    resize_layer->setName(util::node_info(n).c_str());
+
+    auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
+
+    LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
+}
+
+void resize_layer_size(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* in, std::vector<int64_t> out_shape,
+                       nvinfer1::ResizeMode mode) {
+    auto resize_layer = ctx->net->addResize(*in);
+    TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);
+
+    resize_layer->setOutputDimensions(util::toDims(out_shape));
+    resize_layer->setResizeMode(mode);
+    resize_layer->setName(util::node_info(n).c_str());
+
+    // if interpolation mode is linear, align corners must have been set to true. else, don't use align corners.
+    if (mode == nvinfer1::ResizeMode::kLINEAR) {
+        resize_layer->setAlignCorners(true);
+    }
+
+    auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
+
+    LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
+}
+
+
+/*
+ * Interpolate Converter
+ */
+
 auto interpolate_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
     .pattern({
         "aten::upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> (Tensor)",
@@ -27,15 +76,7 @@ auto interpolate_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
                 auto out_shape = in_shape;
                 std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));

-                auto resize_layer = ctx->net->addResize(*in);
-                TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);
-
-                resize_layer->setOutputDimensions(util::toDims(out_shape));
-                resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
-                resize_layer->setName(util::node_info(n).c_str());
-
-                auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
-                LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
+                resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kNEAREST);
             } else {
                 TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nScale factor parameter for upsample_nearest1d not supported yet.");
             }
@@ -57,15 +98,7 @@ auto interpolate_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
                 auto out_shape = in_shape;
                 std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));

-                auto resize_layer = ctx->net->addResize(*in);
-                TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);
-
-                resize_layer->setOutputDimensions(util::toDims(out_shape));
-                resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
-                resize_layer->setName(util::node_info(n).c_str());
-
-                auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
-                LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
+                resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kNEAREST);
             } else {
                 TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nScale factor parameter for upsample_nearest2d not supported yet.");
             }
@@ -86,18 +119,94 @@ auto interpolate_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()

                 auto out_shape = in_shape;
                 std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));
+
+                resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kNEAREST);
+            } else {
+                TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nScale factor parameter for upsample_nearest3d not supported yet.");
+            }
+
+            return true;
+        }
+    }).pattern({
+        "aten::upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> (Tensor)",
+        [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+            auto in = args[0].ITensor();
+            auto in_shape = util::toVec(in->getDimensions());
+            bool align_corners = args[2].unwrapToBool();
+
+            // Case 1: user uses output size and not scales
+            if (!args[1].IValue()->isNone() && args[3].IValue()->isNone()) {
+                auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));
+
+                TRTORCH_ASSERT(out_size.size() == 1, "aten::upsample_linear1d input Tensor and output size dimension mismatch");

-                auto resize_layer = ctx->net->addResize(*in);
-                TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);
+                auto out_shape = in_shape;
+                std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));

-                resize_layer->setOutputDimensions(util::toDims(out_shape));
-                resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
-                resize_layer->setName(util::node_info(n).c_str());
+                if (!align_corners) {
+                    // align_corners not supported in TensorRT, create plugin and run layer through PyTorch
+                    create_plugin(ctx, n, in, "linear1d", in_shape, out_shape, out_size, std::string("linear"));
+                } else {
+                    resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR);
+                }
+            } else {
+                TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nScale factor parameter for upsample_linear1d not supported yet.");
+            }
+
+            return true;
+        }
+    }).pattern({
+        "aten::upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> (Tensor)",
+        [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+            auto in = args[0].ITensor();
+            auto in_shape = util::toVec(in->getDimensions());
+            bool align_corners = args[2].unwrapToBool();

-                auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
-                LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
+            // Case 1: user uses output size and not scales_h, scales_w
+            if (!args[1].IValue()->isNone() && args[3].IValue()->isNone() && args[4].IValue()->isNone()) {
+                auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));
+
+                TRTORCH_ASSERT(out_size.size() == 2, "aten::upsample_bilinear2d input Tensor and output size dimension mismatch");
+
+                auto out_shape = in_shape;
+                std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));
+
+                if (!align_corners) {
+                    // align_corners not supported in TensorRT, create plugin and run layer through PyTorch
+                    create_plugin(ctx, n, in, "bilinear2d", in_shape, out_shape, out_size, std::string("bilinear"));
+                } else {
+                    resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR);
+                }
             } else {
-                TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nScale factor parameter for upsample_nearest3d not supported yet.");
+                TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nScale factor parameter for upsample_bilinear2d not supported yet.");
+            }
+
+            return true;
+        }
+    }).pattern({
+        "aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> (Tensor)",
+        [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+            auto in = args[0].ITensor();
+            auto in_shape = util::toVec(in->getDimensions());
+            bool align_corners = args[2].unwrapToBool();
+
+            // Case 1: user uses output size and not scales_d, scales_h, scales_w
+            if (!args[1].IValue()->isNone() && args[3].IValue()->isNone() && args[4].IValue()->isNone() && args[5].IValue()->isNone()) {
+                auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));
+
+                TRTORCH_ASSERT(out_size.size() == 3, "aten::upsample_trilinear3d input Tensor and output size dimension mismatch");
+
+                auto out_shape = in_shape;
+                std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));
+
+                if (!align_corners) {
+                    // align_corners not supported in TensorRT, create plugin and run layer through PyTorch
+                    create_plugin(ctx, n, in, "trilinear3d", in_shape, out_shape, out_size, std::string("trilinear"));
+                } else {
+                    resize_layer_size(ctx, n, in, out_shape, nvinfer1::ResizeMode::kLINEAR);
+                }
+            } else {
+                TRTORCH_THROW_ERROR("Unable to convert node: " << util::node_info(n) << "\nScale factor parameter for upsample_trilinear3d not supported yet.");
             }

             return true;
@@ -110,4 +219,4 @@ auto interpolate_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns()
 } // namespace converters
 } // namespace conversion
 } // namespace core
-} // namespace trtorch
+} // namespace trtorch
Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
+package(default_visibility = ["//visibility:public"])
+
+config_setting(
+    name = "use_pre_cxx11_abi",
+    values = {
+        "define": "abi=pre_cxx11_abi",
+    }
+)
+
+cc_library(
+    name = "plugins",
+    hdrs = [
+        "interpolate_plugin.h"
+    ],
+    srcs = [
+        "interpolate_plugin.cpp"
+    ],
+    deps = [
+        "@tensorrt//:nvinfer",
+        "//core/util:prelude",
+        "//core/conversion/conversionctx",
+    ] + select({
+        ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+        "//conditions:default": ["@libtorch//:libtorch"],
+    }),
+    alwayslink = True,
+)
+
+load("@rules_pkg//:pkg.bzl", "pkg_tar")
+
+pkg_tar(
+    name = "include",
+    package_dir = "core/conversion/converters/impl/plugins",
+    srcs = ["interpolate_plugin.h"],
+)
