Commit edf6e60

Rebasing to master
2 parents 74150d5 + 356301b commit edf6e60

17 files changed: +559 −446 lines changed

core/conversion/converters/BUILD

Lines changed: 3 additions & 1 deletion
@@ -29,10 +29,12 @@ cc_library(
 cc_library(
     name = "converters",
     hdrs = [
-        "converters.h"
+        "converters.h",
+        "converter_util.h",
     ],
     srcs = [
         "NodeConverterRegistry.cpp",
+        "converter_util.cpp",
         "impl/activation.cpp",
         "impl/batch_norm.cpp",
         "impl/concat.cpp",
core/conversion/converters/converter_util.cpp

Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
+#include "core/conversion/converters/converter_util.h"
+#include "core/conversion/converters/converters.h"
+#include "core/util/prelude.h"
+
+namespace trtorch {
+namespace core {
+namespace conversion {
+namespace converters {
+
+nvinfer1::ITensor* addPadding(
+    ConversionCtx* ctx,
+    const torch::jit::Node* n,
+    nvinfer1::ITensor* tensor,
+    int nDim,
+    bool trailing,
+    bool use_zeros) {
+  const auto dims = tensor->getDimensions();
+
+  if (dims.nbDims < nDim) {
+    auto newDims = dims;
+    for (int dim = dims.nbDims; dim < nDim; ++dim) {
+      newDims = util::unsqueezeDims(newDims, trailing ? dim : 0, 1, use_zeros);
+    }
+
+    LOG_DEBUG("Original shape: " << dims << ", reshaping to: " << newDims);
+    auto shuffle_layer = ctx->net->addShuffle(*tensor);
+    TRTORCH_CHECK(shuffle_layer, "Unable to create shuffle layer");
+    shuffle_layer->setReshapeDimensions(newDims);
+    shuffle_layer->setZeroIsPlaceholder(use_zeros);
+    shuffle_layer->setName((util::node_info(n) + " [Reshape to " + util::toStr(newDims) + ']').c_str());
+    return shuffle_layer->getOutput(0);
+  } else {
+    return tensor;
+  }
+}
+
+nvinfer1::ITensor* addUnpadding(
+    ConversionCtx* ctx,
+    const torch::jit::Node* n,
+    nvinfer1::ITensor* tensor,
+    int nDim,
+    bool trailing,
+    bool use_zeros) {
+  const auto dims = tensor->getDimensions();
+  if (dims.nbDims > nDim) {
+    auto newDims = dims;
+    for (int dim = dims.nbDims; dim > nDim; --dim) {
+      newDims = util::squeezeDims(newDims, trailing ? dim - 1 : 0);
+    }
+    LOG_DEBUG("Original shape: " << dims << ", reshaping to: " << newDims);
+    auto shuffle_layer = ctx->net->addShuffle(*tensor);
+    TRTORCH_CHECK(shuffle_layer, "Unable to create shuffle layer");
+    shuffle_layer->setReshapeDimensions(newDims);
+    shuffle_layer->setZeroIsPlaceholder(use_zeros);
+    shuffle_layer->setName((util::node_info(n) + " [Reshape to " + util::toStr(newDims) + ']').c_str());
+    return shuffle_layer->getOutput(0);
+  } else {
+    return tensor;
+  }
+}
+
+} // namespace converters
+} // namespace conversion
+} // namespace core
+} // namespace trtorch
core/conversion/converters/converter_util.h

Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "core/conversion/conversionctx/ConversionCtx.h"
+#include "core/conversion/converters/Weights.h"
+#include "core/conversion/var/Var.h"
+#include "core/util/prelude.h"
+
+namespace trtorch {
+namespace core {
+namespace conversion {
+namespace converters {
+
+// If the tensor has fewer than nDim dimensions, adds a shuffle layer that pads the shape with 1s (at the end if
+// trailing) and returns the (nDim-dimensional) shuffle layer's output. Otherwise, does nothing and passes the tensor
+// through. use_zeros controls whether 0 is used instead of -1 as the placeholder in the reshape dimensions.
+nvinfer1::ITensor* addPadding(
+    ConversionCtx* ctx,
+    const torch::jit::Node* n,
+    nvinfer1::ITensor* tensor,
+    int nDim,
+    bool trailing = true,
+    bool use_zeros = true);
+
+// If the tensor has more than nDim dimensions, adds a shuffle layer that removes the padded trailing (or leading)
+// dimensions and returns the (nDim-dimensional) shuffle layer's output. Otherwise, does nothing and passes the tensor
+// through. use_zeros controls whether 0 is used instead of -1 as the placeholder in the reshape dimensions.
+nvinfer1::ITensor* addUnpadding(
+    ConversionCtx* ctx,
+    const torch::jit::Node* n,
+    nvinfer1::ITensor* tensor,
+    int nDim,
+    bool trailing = true,
+    bool use_zeros = true);
+
+} // namespace converters
+} // namespace conversion
+} // namespace core
+} // namespace trtorch
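
Both helpers only change tensor rank, so their effect is easiest to see on bare shapes. Below is a minimal standalone sketch (plain std::vector instead of nvinfer1::Dims, illustrative sizes) of the shape rewrites addPadding and addUnpadding perform, including the effect of the trailing flag; in the converters themselves the reshape is carried out by a TensorRT shuffle layer rather than by mutating a vector, and the unpad is assumed to remove only the previously padded unit dims.

#include <cassert>
#include <vector>

// Pad a shape up to nDim by inserting 1s, at the back if trailing, else at the front.
std::vector<int> pad_shape(std::vector<int> shape, int nDim, bool trailing = true) {
  while ((int)shape.size() < nDim)
    shape.insert(trailing ? shape.end() : shape.begin(), 1);
  return shape;
}

// Remove the padded dims again, from the back if trailing, else from the front.
std::vector<int> unpad_shape(std::vector<int> shape, int nDim, bool trailing = true) {
  while ((int)shape.size() > nDim)
    shape.erase(trailing ? shape.end() - 1 : shape.begin());
  return shape;
}

int main() {
  std::vector<int> s = {8, 16, 128}; // e.g. an N x C x L input
  assert(pad_shape(s, 4) == (std::vector<int>{8, 16, 128, 1}));                     // trailing pad
  assert(pad_shape(s, 4, /*trailing=*/false) == (std::vector<int>{1, 8, 16, 128})); // leading pad
  assert(unpad_shape(pad_shape(s, 4), 3) == s);                                     // round trip
  return 0;
}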

core/conversion/converters/impl/batch_norm.cpp

Lines changed: 8 additions & 21 deletions
@@ -1,3 +1,4 @@
+#include "core/conversion/converters/converter_util.h"
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"
 #include "torch/torch.h"
@@ -40,17 +41,11 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
   LOG_DEBUG("training disregarded");
   LOG_DEBUG("cudnn disregarded");
 
-  auto should_unpack = util::toVec(orig_shape).size() < 4;
-  if (should_unpack) {
-    // expand spatial dims from 1D to 2D
-    auto new_shape = util::toDimsTailPad(util::toVec(orig_shape), 4);
-    LOG_DEBUG(
-        "Input shape is less than 4D got: "
-        << orig_shape << ", inserting shuffle layer to reshape to 4D tensor shape: " << new_shape);
-    auto in_shuffle = ctx->net->addShuffle(*input);
-    in_shuffle->setReshapeDimensions(new_shape);
-    in_shuffle->setName(std::string("[Reshape input to " + util::toStr(new_shape) + ']').c_str());
-    input = in_shuffle->getOutput(0);
+  // Expand spatial dims from 1D to 2D if needed
+  bool expandDims = (orig_shape.nbDims < 4);
+
+  if (expandDims) {
+    input = addPadding(ctx, n, input, 4);
   }
 
   auto scale = gamma / torch::sqrt(var + eps);
@@ -63,16 +58,8 @@ auto batch_norm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().
   auto bn = ctx->net->addScaleNd(
       *input, nvinfer1::ScaleMode::kCHANNEL, bias_weights.data, scale_weights.data, power.data, 1);
   bn->setName(util::node_info(n).c_str());
-  auto out_tensor = bn->getOutput(0);
-
-  if (should_unpack) {
-    LOG_DEBUG("Inserting shuffle layer to reshape to back to original shape: " << orig_shape);
-    auto out_shuffle = ctx->net->addShuffle(*out_tensor);
-    out_shuffle->setReshapeDimensions(orig_shape);
-    out_shuffle->setName(std::string("[Reshape output to " + util::toStr(orig_shape) + ']').c_str());
-    out_tensor = out_shuffle->getOutput(0);
-  }
-
+  // Un-pad bn output if needed
+  auto out_tensor = addUnpadding(ctx, n, bn->getOutput(0), orig_shape.nbDims);
   ctx->AssociateValueAndTensor(n->outputs()[0], out_tensor);
   return true;
 }});
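
For context on the addScaleNd call above: a TensorRT scale layer in kCHANNEL mode computes (x * scale + shift)^power per channel, and inference-time batch norm folds into exactly such an affine transform. A minimal standalone arithmetic check of that fold follows; the shift computation is not visible in this hunk, so the shift line is an assumption about how the converter folds beta and the running mean, not code from this commit.

#include <cmath>
#include <cstdio>

// Batch norm at inference time is an affine transform per channel:
//   y = gamma * (x - mean) / sqrt(var + eps) + beta
// which folds into y = scale * x + shift with
//   scale = gamma / sqrt(var + eps)   (matches the line "auto scale = gamma / torch::sqrt(var + eps);")
//   shift = beta - mean * scale       (assumption: not shown in this hunk)
int main() {
  float gamma = 1.5f, beta = 0.1f, mean = 0.2f, var = 4.0f, eps = 1e-5f;
  float x = 0.7f;
  float scale = gamma / std::sqrt(var + eps);
  float shift = beta - mean * scale;
  std::printf("direct: %f, folded: %f\n",
              gamma * (x - mean) / std::sqrt(var + eps) + beta,
              scale * x + shift);
  return 0;
}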

core/conversion/converters/impl/conv_deconv.cpp

Lines changed: 53 additions & 12 deletions
@@ -1,7 +1,7 @@
-#include "torch/torch.h"
-
+#include "core/conversion/converters/converter_util.h"
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"
+#include "torch/torch.h"
 
 namespace trtorch {
 namespace core {
@@ -14,29 +14,66 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
   auto in = args[0].ITensor(); // assumes non-static input Tensor
   auto w = Weights(ctx, args[1].unwrapToTensor());
   auto stride = util::toDims(args[3].unwrapToIntList());
-  LOG_DEBUG("stride: " << stride);
   auto padding = util::toDims(args[4].unwrapToIntList());
-  LOG_DEBUG("padding: " << padding);
   auto dilation = util::toDims(args[5].unwrapToIntList());
-  LOG_DEBUG("dilation: " << dilation);
   bool transposed = args[6].unwrapToBool();
   auto out_padding = util::toDims(args[7].unwrapToIntList());
-  LOG_DEBUG("out_padding: " << out_padding);
   int64_t groups = args[8].unwrapToInt();
+
+  auto dims = in->getDimensions();
+  auto orig_dims = dims;
+  LOG_DEBUG("Input dims: " << orig_dims);
+  LOG_DEBUG("Weights: " << w);
+  LOG_DEBUG("stride: " << stride);
+  LOG_DEBUG("padding: " << padding);
+  LOG_DEBUG("dilation: " << dilation);
+  LOG_DEBUG("out_padding: " << out_padding);
   LOG_DEBUG("groups: " << groups);
 
+  // Expand spatial dims from 1D to 2D if needed
+  bool expandDims = (orig_dims.nbDims < 4);
+  if (expandDims) {
+    in = addPadding(ctx, n, in, 4);
+    dims = in->getDimensions();
+    LOG_DEBUG("Reshaped Input dims: " << dims);
+  }
+  if (w.shape.nbDims < 4) {
+    for (int i = w.shape.nbDims; i < 4; ++i) {
+      w.shape.d[i] = 1;
+    }
+    w.shape.nbDims = 4;
+    w.kernel_shape.nbDims = 2;
+    w.kernel_shape.d[1] = 1;
+    LOG_DEBUG("Reshaped Weights: " << w);
+  }
+  if (stride.nbDims == 1) {
+    stride = util::unsqueezeDims(stride, 1, 1);
+    LOG_DEBUG("Reshaped stride: " << stride);
+  }
+  if (dilation.nbDims == 1) {
+    dilation = util::unsqueezeDims(dilation, 1, 1);
+    LOG_DEBUG("Reshaped dilation: " << dilation);
+  }
+  if (padding.nbDims == 1) {
+    padding = util::unsqueezeDims(padding, 1, 0);
+    LOG_DEBUG("Reshaped padding: " << padding);
+  }
+  if (out_padding.nbDims == 1) {
+    out_padding = util::unsqueezeDims(out_padding, 1, 0);
+    LOG_DEBUG("Reshaped out_padding: " << out_padding);
+  }
+
   nvinfer1::ILayer* new_layer;
   if (transposed) {
     Weights bias;
     if (args[2].IValue()->isTensor()) {
       bias = Weights(ctx, args[2].unwrapToTensor());
     } else {
-      bias = Weights(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[1] * groups));
+      bias = Weights(ctx, torch::zeros(w.shape.d[1] * groups));
     }
 
     // shape of deconvolution's weight: [in, out/groups, ...]
-    auto deconv = ctx->net->addDeconvolutionNd(
-        *in, args[1].unwrapToTensor().sizes()[1] * groups, w.kernel_shape, w.data, bias.data);
+    auto deconv = ctx->net->addDeconvolutionNd(*in, w.shape.d[1] * groups, w.kernel_shape, w.data, bias.data);
     TRTORCH_CHECK(deconv, "Unable to create deconvolution layer from node: " << *n);
 
     deconv->setStrideNd(stride);
@@ -56,11 +93,11 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
     if (args[2].IValue()->isTensor()) {
       bias = Weights(ctx, args[2].unwrapToTensor());
     } else {
-      bias = Weights(ctx, torch::zeros(args[1].unwrapToTensor().sizes()[0]));
+      bias = Weights(ctx, torch::zeros(w.shape.d[0]));
    }
 
     // shape of convolution's weight: [out, in/groups, ...]
-    auto conv = ctx->net->addConvolutionNd(*in, args[1].unwrapToTensor().sizes()[0], w.kernel_shape, w.data, bias.data);
+    auto conv = ctx->net->addConvolutionNd(*in, w.shape.d[0], w.kernel_shape, w.data, bias.data);
     TRTORCH_CHECK(conv, "Unable to create convolution layer from node: " << *n);
 
     conv->setStrideNd(stride);
@@ -71,9 +108,13 @@ bool add_conv_deconv(ConversionCtx* ctx, const torch::jit::Node* n, args& args)
     conv->setNbGroups(groups);
     new_layer = conv;
   }
+
   new_layer->setName(util::node_info(n).c_str());
 
-  auto out = ctx->AssociateValueAndTensor(n->outputs()[0], new_layer->getOutput(0));
+  // Un-expand spatial dims back to 1D if needed
+  auto out = addUnpadding(ctx, n, new_layer->getOutput(0), orig_dims.nbDims);
+
+  ctx->AssociateValueAndTensor(n->outputs()[0], out);
 
   LOG_DEBUG("Output tensor shape: " << out->getDimensions());
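
To see why appending a unit spatial dimension is behavior-preserving, here is a minimal standalone arithmetic check (illustrative sizes, plain ints rather than nvinfer1::Dims): the extra axis, convolved with kernel 1, stride 1, padding 0 and dilation 1 as set up above, always produces an output extent of 1, while the original axis is untouched.

#include <cstdio>

// Output extent of a convolution along one axis:
//   floor((in + 2*pad - dilation*(kernel - 1) - 1) / stride) + 1
static int conv_out(int in, int kernel, int stride, int pad, int dilation) {
  return (in + 2 * pad - dilation * (kernel - 1) - 1) / stride + 1;
}

int main() {
  // Example 1D conv: length 128, kernel 3, stride 2, padding 1, dilation 1 (illustrative values).
  int L = 128, K = 3, s = 2, p = 1, d = 1;
  std::printf("1D output length: %d\n", conv_out(L, K, s, p, d));

  // After the expansion above, the same op runs as a 2D conv with kernel {K, 1},
  // stride {s, 1}, padding {p, 0}, dilation {d, 1} on an input of spatial size {L, 1}.
  std::printf("2D output extents: %d x %d\n",
              conv_out(L, K, s, p, d),  // unchanged along the original axis
              conv_out(1, 1, 1, 0, 1)); // the padded unit axis stays at 1
  return 0;
}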

core/conversion/converters/impl/matrix_multiply.cpp

Lines changed: 60 additions & 18 deletions
@@ -8,24 +8,66 @@ namespace converters {
 namespace impl {
 namespace {
 
-auto mm_registrations TRTORCH_UNUSED = RegisterNodeConversionPatterns().pattern(
-    {"aten::matmul(Tensor self, Tensor other) -> (Tensor)",
-     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-       auto self = args[0].ITensorOrFreeze(ctx);
-       LOG_DEBUG("self tensor shape: " << self->getDimensions());
-
-       auto other = args[1].ITensorOrFreeze(ctx);
-       LOG_DEBUG("other tensor shape: " << other->getDimensions());
-
-       auto mm_layer = ctx->net->addMatrixMultiply(
-           *self, nvinfer1::MatrixOperation::kNONE, *other, nvinfer1::MatrixOperation::kNONE);
-       TRTORCH_CHECK(mm_layer, "Unable to create matrix multiplication node: " << *n);
-       mm_layer->setName(util::node_info(n).c_str());
-       auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], mm_layer->getOutput(0));
-
-       LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-       return true;
-     }});
+auto mm_registrations TRTORCH_UNUSED =
+    RegisterNodeConversionPatterns()
+        .pattern({"aten::matmul(Tensor self, Tensor other) -> (Tensor)",
+                  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+                    auto self = args[0].ITensorOrFreeze(ctx);
+                    LOG_DEBUG("self tensor shape: " << self->getDimensions());
+
+                    auto other = args[1].ITensorOrFreeze(ctx);
+                    LOG_DEBUG("other tensor shape: " << other->getDimensions());
+
+                    auto mm_layer = ctx->net->addMatrixMultiply(
+                        *self, nvinfer1::MatrixOperation::kNONE, *other, nvinfer1::MatrixOperation::kNONE);
+                    TRTORCH_CHECK(mm_layer, "Unable to create matrix multiplication node: " << *n);
+                    mm_layer->setName(util::node_info(n).c_str());
+                    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], mm_layer->getOutput(0));
+
+                    LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+                    return true;
+                  }})
+        .pattern(
+            {"aten::bmm(Tensor self, Tensor mat2) -> (Tensor)",
+             [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+               auto self = args[0].ITensorOrFreeze(ctx);
+               nvinfer1::Dims selfDims = self->getDimensions();
+               auto mat2 = args[1].ITensorOrFreeze(ctx);
+               nvinfer1::Dims mat2Dims = mat2->getDimensions();
+
+               // check dimensions
+               TRTORCH_CHECK(
+                   selfDims.nbDims == 3,
+                   "Expected 3-dimensional tensor, but got "
+                       << selfDims.nbDims
+                       << "-dimensional tensor for argument #1 'batch1' (while checking arguments for bmm)");
+               TRTORCH_CHECK(
+                   mat2Dims.nbDims == 3,
+                   "Expected 3-dimensional tensor, but got "
+                       << mat2Dims.nbDims
+                       << "-dimensional tensor for argument #2 'batch2' (while checking arguments for bmm)");
+
+               // Self and mat2 should have same size at dimension 0
+               TRTORCH_CHECK(
+                   selfDims.d[0] == mat2Dims.d[0],
+                   "Expected tensor to have size " << selfDims.d[0] << " at dimension 0, but got size " << mat2Dims.d[0]
+                       << " for argument #2 'batch2' (while checking arguments for bmm)");
+               // The size of mat2 at dimension 1 should be the same as that of self at dimension 2.
+               TRTORCH_CHECK(
+                   selfDims.d[2] == mat2Dims.d[1],
+                   "Expected tensor to have size " << selfDims.d[2] << " at dimension 1, but got size " << mat2Dims.d[1]
+                       << " for argument #2 'batch2' (while checking arguments for bmm)");
+
+               auto mm_layer = ctx->net->addMatrixMultiply(
+                   *self, nvinfer1::MatrixOperation::kNONE, *mat2, nvinfer1::MatrixOperation::kNONE);
+               TRTORCH_CHECK(mm_layer, "Unable to create matrix multiplication node: " << *n);
+
+               mm_layer->setName(util::node_info(n).c_str());
+               auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], mm_layer->getOutput(0));
+
+               LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+               return true;
+             }});
 } // namespace
 } // namespace impl
 } // namespace converters
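
The new aten::bmm converter enforces the standard batched-matmul shape contract: self is [B, N, M], mat2 is [B, M, P], and the product is [B, N, P]. A minimal standalone sketch of that contract with plain arrays (illustrative shapes, not TensorRT types) follows; it mirrors the two TRTORCH_CHECKs on dimension 0 and on self dim 2 vs. mat2 dim 1.

#include <array>
#include <cstdio>
#include <stdexcept>

// Shape of the batched matrix product self [B, N, M] x mat2 [B, M, P] -> [B, N, P].
std::array<long, 3> bmm_output_shape(const std::array<long, 3>& self, const std::array<long, 3>& mat2) {
  if (self[0] != mat2[0])
    throw std::invalid_argument("batch sizes must match (dimension 0)");
  if (self[2] != mat2[1])
    throw std::invalid_argument("self dim 2 must equal mat2 dim 1");
  return {self[0], self[1], mat2[2]};
}

int main() {
  auto out = bmm_output_shape({8, 4, 6}, {8, 6, 5}); // example shapes
  std::printf("bmm output shape: [%ld, %ld, %ld]\n", out[0], out[1], out[2]);
  return 0;
}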
