@@ -15,49 +15,62 @@ auto interpolate_registrations = RegisterNodeConversionPatterns()
  .pattern({
    "aten::upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> (Tensor)",
    [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-     TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");
+     std::cout << "GOT IN HERE!!!!!!" << std::endl;

      auto in = args[0].ITensor();
      auto in_shape = util::toVec(in->getDimensions());
-
+
      // Case 1: user uses output size and not scales
      if (!args[1].IValue()->isNone() && args[2].IValue()->isNone()) {
-       auto output_size = util::toDims(args[1].unwrapToIntList());
+       auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));

-       TRTORCH_ASSERT(output_size.nbDims == 1, "aten::upsample_nearest1d input Tensor and output size dimension mismatch");
+       TRTORCH_ASSERT(out_size.size() == 1, "aten::upsample_nearest1d input Tensor and output size dimension mismatch");
+
+       auto out_shape = in_shape;
+       std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));
+
+       // remove padding that TensorRT adds automatically
+       // out_shape.erase(out_shape.begin(), out_shape.begin()+1);

        auto resize_layer = ctx->net->addResize(*in);
        TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);

-       resize_layer->setOutputDimensions(output_size);
+       resize_layer->setOutputDimensions(util::toDims(out_shape));
        resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+       resize_layer->setName(util::node_info(n).c_str());
+
+       auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
+       LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
      } else {
-       LOG_DEBUG("scale factor parameters not supported yet.");
+       LOG_DEBUG("scale factor parameter not supported yet.");
      }

      return true;
    }
  }).pattern({
    "aten::upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> (Tensor)",
    [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-     // std::raise(SIGINT);
-
-     TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");
-
      auto in = args[0].ITensor();
      auto in_shape = util::toVec(in->getDimensions());

      // Case 1: user uses output_size and not scales_h, scales_w
      if (!args[1].IValue()->isNone() && args[2].IValue()->isNone() && args[3].IValue()->isNone()) {
-       auto output_size = util::toDims(args[1].unwrapToIntList());
-
-       TRTORCH_ASSERT((output_size.nbDims == 1 || output_size.nbDims == 2), "aten::upsample_nearest2d input Tensor and output size dimension mismatch");
+       auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));

+       TRTORCH_ASSERT(out_size.size() == 2, "aten::upsample_nearest2d input Tensor and output size dimension mismatch");
+
+       auto out_shape = in_shape;
+       std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));
+
        auto resize_layer = ctx->net->addResize(*in);
        TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);

-       resize_layer->setOutputDimensions(output_size);
+       resize_layer->setOutputDimensions(util::toDims(out_shape));
        resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+       resize_layer->setName(util::node_info(n).c_str());
+
+       auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
+       LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
      } else {
        LOG_DEBUG("scale factor parameters not supported yet.");
      }
@@ -67,22 +80,27 @@ auto interpolate_registrations = RegisterNodeConversionPatterns()
  }).pattern({
    "aten::upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> (Tensor)",
    [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-     TRTORCH_ASSERT(args[0].IValue()->isTensor(), "Input expected to be of type Tensor");
-
      auto in = args[0].ITensor();
      auto in_shape = util::toVec(in->getDimensions());

      // Case 1: user uses output size and not scales_d, scales_h, scales_w
      if (!args[1].IValue()->isNone() && args[2].IValue()->isNone() && args[3].IValue()->isNone() && args[4].IValue()->isNone()) {
-       auto output_size = util::toDims(args[1].unwrapToIntList());
+       auto out_size = util::toVec(util::toDims(args[1].unwrapToIntList()));

-       TRTORCH_ASSERT((output_size.nbDims == 1 || output_size.nbDims == 3), "aten::upsample_nearest3d input Tensor and output size dimension mismatch");
+       TRTORCH_ASSERT(out_size.size() == 3, "aten::upsample_nearest3d input Tensor and output size dimension mismatch");
+
+       auto out_shape = in_shape;
+       std::copy(out_size.begin(), out_size.end(), out_shape.begin() + (in_shape.size() - out_size.size()));

        auto resize_layer = ctx->net->addResize(*in);
        TRTORCH_CHECK(resize_layer, "Unable to create interpolation (resizing) layer from node" << *n);

-       resize_layer->setOutputDimensions(output_size);
+       resize_layer->setOutputDimensions(util::toDims(out_shape));
        resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+       resize_layer->setName(util::node_info(n).c_str());
+
+       auto layer_output = ctx->AssociateValueAndTensor(n->outputs()[0], resize_layer->getOutput(0));
+       LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
      } else {
        LOG_DEBUG("scale factor parameters not supported yet.");
      }
      }
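
The change shared by all three converters is the same: instead of handing TensorRT the user-supplied output_size (which covers only the spatial dimensions), the converter copies the input shape and overwrites its trailing entries with the requested size, so the batch and channel dimensions are preserved. A minimal standalone sketch of that merge, using hypothetical example shapes (the real converters obtain in_shape from the input ITensor and out_size from the op's arguments):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Hypothetical example: an NCHW input of shape [1, 3, 32, 32] being
  // upsampled with aten::upsample_nearest2d(output_size=[64, 64]).
  std::vector<int64_t> in_shape = {1, 3, 32, 32};
  std::vector<int64_t> out_size = {64, 64};

  // Same trick the converters use: start from the input shape and overwrite
  // only the trailing (spatial) dimensions with the requested output size.
  auto out_shape = in_shape;
  std::copy(out_size.begin(), out_size.end(),
            out_shape.begin() + (in_shape.size() - out_size.size()));

  for (auto d : out_shape) {
    std::cout << d << " ";  // prints: 1 3 64 64
  }
  std::cout << std::endl;
  return 0;
}

In the converters, out_shape is then converted back to an nvinfer1::Dims with util::toDims before being passed to setOutputDimensions.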
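For context on the TensorRT side, the added lines configure an IResizeLayer with an explicit output shape, nearest-neighbor mode, and a name derived from the JIT node. Below is a rough standalone sketch of that layer setup outside the converter, assuming the TensorRT 7.x C++ API that TRTorch targeted at the time; the input shape, tensor names, and logger are made up for illustration, and logger signatures and object ownership differ in newer TensorRT releases.

#include <iostream>
#include "NvInfer.h"

// Minimal logger required by the TensorRT builder API (TensorRT 7.x signature).
class SketchLogger : public nvinfer1::ILogger {
  void log(Severity severity, const char* msg) override {
    if (severity <= Severity::kWARNING) {
      std::cout << msg << std::endl;
    }
  }
};

int main() {
  SketchLogger logger;
  auto builder = nvinfer1::createInferBuilder(logger);
  auto network = builder->createNetworkV2(
      1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));

  // Hypothetical NCHW input; the converter instead receives the ITensor
  // already present in the network being built.
  auto in = network->addInput("input", nvinfer1::DataType::kFLOAT,
                              nvinfer1::Dims4{1, 3, 32, 32});

  // Same layer configuration the converter performs.
  auto resize_layer = network->addResize(*in);
  resize_layer->setOutputDimensions(nvinfer1::Dims4{1, 3, 64, 64});
  resize_layer->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
  resize_layer->setName("upsample_nearest2d_sketch");

  network->markOutput(*resize_layer->getOutput(0));
  std::cout << "layers in network: " << network->getNbLayers() << std::endl;

  // TensorRT 7.x-style cleanup; newer versions use plain delete instead.
  network->destroy();
  builder->destroy();
  return 0;
}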