Commit 09e032c

Merge remote-tracking branch 'origin/master' into squashed_collections

2 parents: 891440d + d0e471f
6 files changed: 69 additions, 5 deletions

.circleci/config.yml (1 addition, 1 deletion)

@@ -747,7 +747,7 @@ parameters:
   # Nightly platform config
   torch-nightly-build:
     type: string
-    default: "1.13.0.dev20220715+cu113"
+    default: "1.13.0.dev20220731+cu113"
   torch-nightly-build-index:
     type: string
     default: "https://download.pytorch.org/whl/nightly/cu113"

core/conversion/converters/impl/unary.cpp (42 additions, 1 deletion)

@@ -1,13 +1,55 @@
 #include "core/conversion/converters/converters.h"
 #include "core/util/prelude.h"

+#include "torch/torch.h"
+
 namespace torch_tensorrt {
 namespace core {
 namespace conversion {
 namespace converters {
 namespace impl {
 namespace {

+
+auto abs_registration TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern(
+    {"aten::abs (Tensor self) -> Tensor",
+     [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+       auto in = args[0].ITensor();
+       bool unary_supported_input = in->getType() == nvinfer1::DataType::kFLOAT
+           || in->getType() == nvinfer1::DataType::kHALF
+           || in->getType() == nvinfer1::DataType::kINT8;
+       if (unary_supported_input) {
+         auto unary_layer = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kABS);
+         TORCHTRT_CHECK(unary_layer, "Unable to create abs layer from node: " << *n);
+         unary_layer->setName(util::node_info(n).c_str());
+         auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], unary_layer->getOutput(0));
+         LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+         return true;
+       }
+       else {
+         // For types not supported by kABS, use an elementwise implementation abs(x) = max(x, -1 * x)
+         at::Tensor neg_one = torch::full({1}, -1).to(util::TRTDataTypeToScalarType(in->getType()));
+         auto neg_one_const = tensor_to_const(ctx, neg_one);
+         auto neg_layer = add_elementwise(
+             ctx,
+             nvinfer1::ElementWiseOperation::kPROD,
+             in,
+             neg_one_const,
+             util::node_info(n) + std::string("_Negation"));
+         TORCHTRT_CHECK(neg_layer, "Unable to create prod layer from node: " << *n);
+         auto max_layer = add_elementwise(
+             ctx,
+             nvinfer1::ElementWiseOperation::kMAX,
+             in,
+             neg_layer->getOutput(0),
+             util::node_info(n) + std::string("_Max"));
+         TORCHTRT_CHECK(max_layer, "Unable to create max layer from node: " << *n);
+         auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], max_layer->getOutput(0));
+         LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+         return true;
+       }
+     }});
+
 #define convert(unary, trt_type) \
   auto unary##_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern( \
     {"aten::" #unary "(Tensor self) -> Tensor", \

@@ -32,7 +74,6 @@ convert(asin, kASIN);
 convert(sinh, kSINH);
 convert(tan, kTAN);
 convert(atan, kATAN);
-convert(abs, kABS);
 convert(floor, kFLOOR);
 convert(reciprocal, kRECIP);
 convert(log, kLOG);
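
The else branch above handles input types that TensorRT's kABS unary layer does not accept (for example INT32), rebuilding abs from two elementwise layers via the identity abs(x) = max(x, -1 * x) stated in the comment. A minimal PyTorch sketch of that identity, included here only as an illustration; the sample values and dtype are arbitrary and not taken from the commit:

import torch

x = torch.tensor([-1, 1, -2, 2, -3, 3], dtype=torch.int32)

neg_one = torch.full((1,), -1, dtype=torch.int32)  # plays the role of the "-1" constant fed to the PROD layer
fallback = torch.maximum(x * neg_one, x)           # PROD with -1, then MAX against the original input

assert torch.equal(fallback, torch.abs(x))         # matches the native abs for every element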

core/lowering/register_trt_placeholder_ops.cpp (4 additions, 1 deletion)

@@ -10,7 +10,10 @@ c10::AliasAnalysisKind aliasAnalysisFromSchema() {
 RegisterOperators trt_placeholder_ops_reg({
     /// Op marks a Tensor to be conveted from an Torch Tensor
     /// to a TRT constant Tensor
-    Operator("trt::const(Tensor val) -> Tensor", [](Stack& stack) { /*noop*/ }, aliasAnalysisFromSchema()),
+    Operator(
+        "trt::const(Tensor val) -> Tensor",
+        [](Stack& stack) { /*noop*/ },
+        aliasAnalysisFromSchema()),
 });

 } // namespace jit

py/torch_tensorrt/fx/test/converters/acc_op/test_convolution.py (2 additions, 1 deletion)

@@ -144,7 +144,8 @@ def forward(self, x):
             ("tuple_parameters", 1, (1, 1, 1), (1, 1, 1)),
             param("non_zero_padding", 1, padding=1),
             param("dilation", 1, dilation=2),
-            param("groups", 1, groups=3),
+            # TODO TRT 8.4.1 will trigger issue with this test. T127981773
+            # param("groups", 1, groups=3),
         ]
     )
     def test_conv3d(

py/torch_tensorrt/fx/test/converters/acc_op/test_type_as.py (3 additions, 1 deletion)

@@ -1,4 +1,5 @@
 import torch
+import unittest
 import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
 from torch.testing._internal.common_utils import run_tests
 from torch_tensorrt.fx.tools.common_fx2trt import AccTestCase, InputTensorSpec

@@ -102,7 +103,8 @@ def forward(self, input):
             expected_ops={acc_ops.to_dtype},
             precision=LowerPrecision.FP16,
         )
-
+
+    @unittest.skip("Does not pass in TRT 8.4.1 T127981773")
     def test_type_tensor_with_dynamic_shape_four_dimensions(self):
         class Type_as(torch.nn.Module):
             def forward(self, input):

tests/core/conversion/converters/test_unary.cpp (17 additions, 0 deletions)

@@ -1,4 +1,5 @@
 #include <string>
+#include "torch/torch.h"
 #include "core/compiler.h"
 #include "gtest/gtest.h"
 #include "tests/util/util.h"

@@ -14,6 +15,22 @@ std::string gen_test_graph(const std::string& unary) {
 }
 } // namespace

+TEST(Converters, ATenAbsIntConvertsCorrectly) {
+  const auto graph = gen_test_graph("abs");
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto in = at::tensor({-1, 1, -2, 2, -3, 3}, {at::kCUDA}).to(torch::kInt32);
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});
+
+  in = at::clone(in);
+  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {in});
+
+  ASSERT_TRUE(torch_tensorrt::tests::util::exactlyEqual(jit_results[0], trt_results[0]));
+}
+
 #define test_unary(unary, name) \
   TEST(Converters, ATen##name##ConvertsCorrectly) { \
     const auto graph = gen_test_graph(#unary); \
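
The new ATenAbsIntConvertsCorrectly test runs an INT32 tensor through the graph produced by gen_test_graph("abs") both in the TorchScript interpreter and through the TensorRT engine, and asserts the results are exactly equal. The helper's body is not part of this diff; a hedged Python sketch of the kind of single-op IR string it would plausibly return, parsed with PyTorch's IR parser only to show the shape of such a graph:

import torch

# Guess at the sort of IR string gen_test_graph("abs") might produce; the real
# helper's exact output is not shown in this commit.
graph_str = """
graph(%0 : Tensor):
  %1 : Tensor = aten::abs(%0)
  return (%1)
"""
g = torch._C.parse_ir(graph_str)
print(g)  # prints the parsed single-op graph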
