
Commit f9d29d0 (2 parents: 65c8e0a + c73d0d1)

Fix merge conflicts

Signed-off-by: Dheeraj Peri <[email protected]>

30 files changed: +1164 −21 lines

.github/workflows/stale.yml

Lines changed: 4 additions & 4 deletions
@@ -13,11 +13,11 @@ jobs:
       - uses: actions/stale@v1
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
-          stale-issue-message: 'This issue has not seen activity for 30 days, Remove stale label or comment or this will be closed in 5 days'
-          stale-pr-message: 'This PR has not seen activity for 30 days, Remove stale label or comment or this will be closed in 5 days'
+          stale-issue-message: 'This issue has not seen activity for 90 days, Remove stale label or comment or this will be closed in 10 days'
+          stale-pr-message: 'This PR has not seen activity for 90 days, Remove stale label or comment or this will be closed in 10 days'
           stale-issue-label: 'No Activity'
           exempt-issue-labels: 'feature request'
           stale-pr-label: 'No Activity'
           exempt-pr-labels: 'WIP'
-          days-before-stale: 30
-          days-before-close: 5
+          days-before-stale: 90
+          days-before-close: 10

core/conversion/converters/BUILD

Lines changed: 5 additions & 1 deletion
@@ -39,6 +39,7 @@ cc_library(
         "impl/constant.cpp",
         "impl/conv_deconv.cpp",
         "impl/element_wise.cpp",
+        "impl/expand.cpp",
         "impl/linear.cpp",
         "impl/matrix_multiply.cpp",
         "impl/pooling.cpp",
@@ -48,8 +49,11 @@ cc_library(
         "impl/unary.cpp",
         "impl/interpolate.cpp",
         "impl/select.cpp",
+        "impl/squeeze.cpp",
         "impl/stack.cpp",
-        "impl/lstm_cell.cpp"
+        "impl/lstm_cell.cpp",
+        "impl/unsqueeze.cpp",
+        "impl/topk.cpp",
     ],
     deps = [
         "@tensorrt//:nvinfer",

core/conversion/converters/impl/activation.cpp

Lines changed: 29 additions & 1 deletion
@@ -124,7 +124,35 @@ auto acthardtanh TRTORCH_UNUSED =
                    out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
                    return true;
-                  }});
+                  }})
+        .pattern({"aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> (Tensor)",
+                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+                    auto self = args[0].ITensorOrFreeze(ctx);
+                    auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();
+
+                    auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
+                    new_layer->setAlpha(negative_slopeScalar);
+
+                    new_layer->setName(util::node_info(n).c_str());
+                    auto out_tensor = new_layer->getOutput(0);
+                    out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
+                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+                    return true;
+                  }})
+        .pattern({"aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)",
+                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+                    auto self = args[0].ITensorOrFreeze(ctx);
+                    auto negative_slopeScalar = args[1].unwrapToScalar().to<float>();
+
+                    auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
+                    new_layer->setAlpha(negative_slopeScalar);
+
+                    new_layer->setName(util::node_info(n).c_str());
+                    auto out_tensor = new_layer->getOutput(0);
+                    out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
+                    LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
+                    return true;
+                  }});
 
 } // namespace
 } // namespace impl
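
Note that the aten::leaky_relu and aten::leaky_relu_ converter bodies added above are identical, so the duplication could be factored into a shared helper. The following is a sketch of that refactor using only identifiers that appear in this diff; it is not code from the commit, and add_leaky_relu is a hypothetical name:

// Hypothetical helper (not in this commit): shared body for the out-of-place
// and in-place leaky_relu converters registered above.
bool add_leaky_relu(ConversionCtx* ctx, const torch::jit::Node* n, args& args) {
  auto self = args[0].ITensorOrFreeze(ctx);
  auto negative_slope = args[1].unwrapToScalar().to<float>();

  auto new_layer = ctx->net->addActivation(*self, nvinfer1::ActivationType::kLEAKY_RELU);
  new_layer->setAlpha(negative_slope);
  new_layer->setName(util::node_info(n).c_str());

  auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
  LOG_DEBUG("Output shape: " << out_tensor->getDimensions());
  return true;
}

Each .pattern lambda would then reduce to a one-line call: return add_leaky_relu(ctx, n, args);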

core/conversion/converters/impl/element_wise.cpp

Lines changed: 238 additions & 3 deletions
(Large diff not rendered by default.)
core/conversion/converters/impl/expand.cpp (new file)

Lines changed: 152 additions & 0 deletions
#include "NvInfer.h"
#include "core/conversion/converters/converters.h"
#include "core/conversion/tensorcontainer/TensorContainer.h"
#include "core/util/prelude.h"
#include "core/util/trt_util.h"
#include "torch/torch.h"

#include <ATen/ATen.h>
#include <vector>

namespace trtorch {
namespace core {
namespace conversion {
namespace converters {
namespace impl {
namespace {

bool add_expand(ConversionCtx* ctx, const torch::jit::Node* n, nvinfer1::ITensor* in, nvinfer1::Dims expandedDims) {
  auto input_dims = in->getDimensions();
  TRTORCH_CHECK(
      input_dims.nbDims <= expandedDims.nbDims,
      "Number of dimensions of the desired expansion must be greater than or equal to the number of input dimensions");

  // Validate the expansion. E.g., an input of [3, 1] can be expanded to [1, 3, 4] but not [3, 4, 1]
  for (int i = expandedDims.nbDims - 1; i >= 0; --i) {
    int64_t offset = expandedDims.nbDims - 1 - i;
    int64_t dim = input_dims.nbDims - 1 - offset;
    int64_t size = (dim >= 0) ? input_dims.d[dim] : 1;
    int64_t targetSize = expandedDims.d[i];
    if (size != targetSize) {
      if (size != 1) {
        TRTORCH_THROW_ERROR(
            "The expanded size of tensor (" << targetSize << ")"
                                            << " must match the existing size (" << size << ")"
                                            << " at dimension " << i);
      }
    }
  }

  auto num_expand_dims = expandedDims.nbDims - input_dims.nbDims;
  if (num_expand_dims > 0) {
    nvinfer1::Dims reshape_dims;
    reshape_dims.nbDims = expandedDims.nbDims;
    for (int i = 0; i < num_expand_dims; i++) {
      reshape_dims.d[i] = 1;
    }
    for (int i = 0; i < input_dims.nbDims; i++) {
      reshape_dims.d[num_expand_dims + i] = input_dims.d[i];
    }
    // Add a reshape layer to expand dims
    auto reshape_layer = ctx->net->addShuffle(*in);
    reshape_layer->setReshapeDimensions(reshape_dims);
    in = reshape_layer->getOutput(0);
    LOG_DEBUG("Input reshaped to : " << in->getDimensions() << " from " << input_dims);
  }

  // Start the slicing from the beginning of the tensor since this is an expand layer
  std::vector<int64_t> start_vec(expandedDims.nbDims, 0);
  auto start_offset = util::toDims(c10::IntArrayRef(start_vec));

  // Set the stride of non-singleton dimensions to 1 (singleton dimensions keep stride 0, i.e. broadcast)
  std::vector<int64_t> strides_vec(expandedDims.nbDims, 0);
  for (int i = 0; i < expandedDims.nbDims; i++) {
    strides_vec[i] = (in->getDimensions().d[i] != 1);
  }

  auto strides = util::toDims(c10::IntArrayRef(strides_vec));
  // The slice layer does the expansion in TRT. The desired output size is specified by expandedDims
  auto slice_layer = ctx->net->addSlice(*in, start_offset, expandedDims, strides);
  slice_layer->setName(util::node_info(n).c_str());

  auto out = ctx->AssociateValueAndTensor(n->outputs()[0], slice_layer->getOutput(0));

  LOG_DEBUG("Expand layer output tensor shape: " << out->getDimensions());

  return true;
}

auto expand_registrations TRTORCH_UNUSED =
    RegisterNodeConversionPatterns()
        .pattern({"aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> (Tensor(a))",
                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
                    auto in = args[0].ITensor();
                    auto input_dims = in->getDimensions();
                    auto expanded_size = args[1].unwrapToIntList();
                    auto expandedDims = util::toDims(expanded_size);
                    LOG_DEBUG("(expand layer) Expand input from " << input_dims << " to " << expandedDims);
                    return add_expand(ctx, n, in, expandedDims);
                  }})
        .pattern({"aten::expand_as(Tensor(a) self, Tensor other) -> (Tensor(a))",
                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
                    // TODO: Currently expand supports only static shapes. Need to explore whether the same
                    // code can be extended to dynamic expansion.
                    auto in = args[0].ITensor();
                    auto input_dims = in->getDimensions();
                    auto targetTensor = args[1].ITensor();
                    auto targetDims = targetTensor->getDimensions();
                    LOG_DEBUG("(expand_as layer) Expand input from " << input_dims << " to " << targetDims);
                    return add_expand(ctx, n, in, targetDims);
                  }})
        .pattern({"aten::repeat(Tensor self, int[] repeats) -> (Tensor)",
                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
                    auto in = args[0].ITensor();
                    auto input_dims = in->getDimensions();
                    auto repeats = args[1].unwrapToIntList().vec();
                    TRTORCH_CHECK(
                        repeats.size() >= input_dims.nbDims,
                        "Number of repeat dimensions cannot be smaller than number of input dimensions");
                    auto num_expand_dims = repeats.size() - input_dims.nbDims;
                    if (num_expand_dims > 0) {
                      nvinfer1::Dims reshape_dims;
                      reshape_dims.nbDims = repeats.size();
                      for (int i = 0; i < num_expand_dims; i++) {
                        reshape_dims.d[i] = 1;
                      }
                      for (int i = 0; i < input_dims.nbDims; i++) {
                        reshape_dims.d[num_expand_dims + i] = input_dims.d[i];
                      }
                      // Add a reshape layer to expand dims
                      auto reshape_layer = ctx->net->addShuffle(*in);
                      reshape_layer->setReshapeDimensions(reshape_dims);
                      in = reshape_layer->getOutput(0);
                      LOG_DEBUG("Input reshaped to : " << in->getDimensions() << " from " << input_dims);
                    }

                    LOG_DEBUG("Repeats: " << repeats);

                    // Concat across all repeat axes.
                    // TODO: This implementation might not be performant. Explore other strategies to improve performance.
                    for (int i = repeats.size() - 1; i >= 0; --i) {
                      std::vector<nvinfer1::ITensor*> tensors_vec;
                      for (int j = 0; j < repeats[i]; j++) {
                        tensors_vec.push_back(in);
                      }
                      auto concat_layer = ctx->net->addConcatenation(tensors_vec.data(), tensors_vec.size());
                      concat_layer->setAxis(i);
                      in = concat_layer->getOutput(0);
                    }

                    auto out = ctx->AssociateValueAndTensor(n->outputs()[0], in);

                    LOG_DEBUG("Repeat layer output tensor shape: " << in->getDimensions());

                    return true;
                  }});

} // namespace
} // namespace impl
} // namespace converters
} // namespace conversion
} // namespace core
} // namespace trtorch
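
To see the concat-based aten::repeat strategy concretely, here is a minimal standalone libtorch sketch of the same reshape-then-concatenate loop, with at::cat playing the role of the TensorRT concatenation layer. This is an illustration only, not part of the commit; the main function and the [2, 3] / {2, 2} shapes are chosen for the example:

#include <ATen/ATen.h>
#include <iostream>
#include <vector>

int main() {
  at::Tensor in = at::rand({2, 3});
  std::vector<int64_t> repeats = {2, 2};
  // repeats.size() == in.dim() here, so no leading singleton dims are needed.
  // Walk the axes from last to first, concatenating `repeats[i]` copies along
  // axis i -- this mirrors addConcatenation + setAxis(i) in the converter.
  for (int64_t i = static_cast<int64_t>(repeats.size()) - 1; i >= 0; --i) {
    std::vector<at::Tensor> parts(repeats[i], in);
    in = at::cat(parts, i);
  }
  std::cout << in.sizes() << std::endl;  // prints [4, 6]
  return 0;
}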
core/conversion/converters/impl/squeeze.cpp (new file)

Lines changed: 44 additions & 0 deletions
#include "NvInfer.h"
#include "core/conversion/converters/converters.h"
#include "core/conversion/tensorcontainer/TensorContainer.h"
#include "core/util/prelude.h"
#include "torch/torch.h"

#include <ATen/ATen.h>
#include <vector>

namespace trtorch {
namespace core {
namespace conversion {
namespace converters {
namespace impl {
namespace {

auto squeeze_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().pattern(
    {"aten::squeeze.dim(Tensor(a) self, int dim) -> (Tensor(a))",
     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
       auto self = args[0].ITensorOrFreeze(ctx);
       auto dim = args[1].unwrapToInt();

       auto selfDim = util::toVec(self->getDimensions());
       if (dim < 0) {
         dim = selfDim.size() + dim;
       }

       auto shuffle_layer = ctx->net->addShuffle(*self);
       TRTORCH_CHECK(shuffle_layer, "Unable to create shuffle layer from node: " << *n);
       shuffle_layer->setReshapeDimensions(util::squeezeDims(self->getDimensions(), dim));

       auto out = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle_layer->getOutput(0));

       LOG_DEBUG("Output tensor shape: " << out->getDimensions());

       return true;
     }});

} // namespace
} // namespace impl
} // namespace converters
} // namespace conversion
} // namespace core
} // namespace trtorch
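
For reference, this is a hedged sketch of what a squeezeDims-style helper can look like, assuming it simply drops the dimension at position pos; the actual util::squeezeDims lives in core/util and may differ:

// Illustrative only, not the real util::squeezeDims: drops dimension `pos`
// from `d`, assuming d.d[pos] == 1 has been validated by the caller.
nvinfer1::Dims squeeze_dims_sketch(const nvinfer1::Dims& d, int pos) {
  nvinfer1::Dims out{};
  out.nbDims = d.nbDims - 1;
  for (int i = 0, j = 0; i < d.nbDims; i++) {
    if (i != pos) {
      out.d[j++] = d.d[i];
    }
  }
  return out;
}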
core/conversion/converters/impl/topk.cpp (new file)

Lines changed: 62 additions & 0 deletions
#include "NvInfer.h"
#include "core/conversion/converters/converters.h"
#include "core/conversion/tensorcontainer/TensorContainer.h"
#include "core/util/prelude.h"
#include "torch/torch.h"

#include <ATen/ATen.h>
#include <vector>

namespace trtorch {
namespace core {
namespace conversion {
namespace converters {
namespace impl {
namespace {

auto topk_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().pattern(
    {"aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)",
     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
       auto self = args[0].ITensorOrFreeze(ctx);
       auto k = args[1].unwrapToInt();
       auto dim = args[2].unwrapToInt();
       auto largest = args[3].unwrapToBool();
       LOG_DEBUG(
           "Note: the sorted argument is not used in TensorRT for aten::topk; results will depend on the value of largest");
       // auto sorted = args[4].unwrapToBool(); // Currently unused

       auto selfDim = util::toVec(self->getDimensions());

       // reduceAxes is the reduction-dimension bitmask. The bit in position i of reduceAxes corresponds to
       // explicit dimension i of the result, e.g. the least significant bit corresponds to the first explicit
       // dimension and the next-to-least significant bit corresponds to the second explicit dimension.

       if (dim < 0) {
         dim = selfDim.size() + dim;
       }

       uint32_t shiftDim = 1 << dim;

       LOG_DEBUG("Output topk reduce dim: " << dim);

       auto TopKOperation = largest ? (nvinfer1::TopKOperation::kMAX) : (nvinfer1::TopKOperation::kMIN);

       auto new_layer = ctx->net->addTopK(*self, TopKOperation, k, shiftDim);

       TRTORCH_CHECK(new_layer, "Unable to create topk layer from node: " << *n);

       auto out0 = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
       auto out1 = ctx->AssociateValueAndTensor(n->outputs()[1], new_layer->getOutput(1));

       LOG_DEBUG("Output tensor(0) shape: " << out0->getDimensions());
       LOG_DEBUG("Output tensor(1) shape: " << out1->getDimensions());

       return true;
     }});

} // namespace
} // namespace impl
} // namespace converters
} // namespace conversion
} // namespace core
} // namespace trtorch
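
The reduceAxes bitmask is easiest to see with a worked example. This small standalone sketch (an illustration, not code from this commit) shows the mask that addTopK receives for dim = 2 on a 4-D tensor:

#include <cstdint>
#include <cstdio>

int main() {
  // For a [N, C, H, W] tensor, selecting dim = 2 (the H axis) sets only
  // bit 2 of the reduce-axes mask: 1 << 2 == 4 == 0b0100, so the top-k
  // reduction runs along H and all other axes are left untouched.
  int dim = 2;
  uint32_t shiftDim = 1u << dim;
  std::printf("reduceAxes mask for dim %d: %u\n", dim, shiftDim);  // prints 4
  return 0;
}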
core/conversion/converters/impl/unsqueeze.cpp (new file)

Lines changed: 44 additions & 0 deletions
#include "NvInfer.h"
#include "core/conversion/converters/converters.h"
#include "core/conversion/tensorcontainer/TensorContainer.h"
#include "core/util/prelude.h"
#include "torch/torch.h"

#include <ATen/ATen.h>
#include <vector>

namespace trtorch {
namespace core {
namespace conversion {
namespace converters {
namespace impl {
namespace {

auto unsqueeze_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().pattern(
    {"aten::unsqueeze(Tensor(a) self, int dim) -> (Tensor(a))",
     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
       auto self = args[0].ITensorOrFreeze(ctx);
       auto dim = args[1].unwrapToInt();

       auto selfDim = util::toVec(self->getDimensions());
       if (dim < 0) {
         dim = selfDim.size() + dim;
       }

       auto shuffle_layer = ctx->net->addShuffle(*self);
       TRTORCH_CHECK(shuffle_layer, "Unable to create shuffle layer from node: " << *n);
       shuffle_layer->setReshapeDimensions(util::unsqueezeDims(self->getDimensions(), dim));

       auto out = ctx->AssociateValueAndTensor(n->outputs()[0], shuffle_layer->getOutput(0));

       LOG_DEBUG("Output tensor shape: " << out->getDimensions());

       return true;
     }});

} // namespace
} // namespace impl
} // namespace converters
} // namespace conversion
} // namespace core
} // namespace trtorch
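
As with squeeze, a hedged sketch of an unsqueezeDims-style helper, assuming it inserts a size-1 dimension at position pos; the real util::unsqueezeDims may differ:

// Illustrative only, not the real util::unsqueezeDims: inserts a dimension
// of size 1 at position `pos`, shifting the remaining dimensions right.
nvinfer1::Dims unsqueeze_dims_sketch(const nvinfer1::Dims& d, int pos) {
  nvinfer1::Dims out{};
  out.nbDims = d.nbDims + 1;
  for (int i = 0, j = 0; i < out.nbDims; i++) {
    out.d[i] = (i == pos) ? 1 : d.d[j++];
  }
  return out;
}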

core/conversion/evaluators/aten.cpp

Lines changed: 2 additions & 1 deletion
@@ -426,7 +426,8 @@ auto aten_registrations TRTORCH_UNUSED =
                       }
                     },
                     EvalOptions().validSchemas({
-                        "aten::div.Scalar(Scalar a, Scalar b) -> (float)",
+                        "aten::div.float(float a, float b) -> (float)",
+                        "aten::div.int(int a, int b) -> (float)",
                     })})
         .evaluator({c10::Symbol::fromQualString("aten::floordiv"),
                     [](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
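
Both replacement schemas return a float, matching TorchScript's Python-style true division. A minimal sketch of the implied semantics (assumed behavior for illustration; div_int_sketch is a hypothetical name, not the evaluator's code):

// Assumed semantics of aten::div.int: true division with a float result,
// so div_int_sketch(7, 2) evaluates to 3.5 rather than 3.
double div_int_sketch(int64_t a, int64_t b) {
  return static_cast<double>(a) / static_cast<double>(b);
}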
