
Commit 2878e39

fix(//tests/cpp): Fix the BERT C++ test
Signed-off-by: Naren Dasan <[email protected]>
Signed-off-by: Naren Dasan <[email protected]>

Parent: 842d7df

4 files changed: 7 additions, 6 deletions

core/conversion/converters/impl/layer_norm.cpp

Lines changed: 0 additions & 2 deletions

@@ -32,7 +32,6 @@ auto layer_norm_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns()
       gamma = tensor_to_const(ctx, gamma_torch_tensor);
     } else {
       gamma = args[2].ITensorOrFreeze(ctx);
-      // gamma = broadcast(ctx, n, gamma, input_shape_vec.size(), "gamma");
       gamma = add_expand(ctx, gamma, input_shape);
     }

@@ -43,7 +42,6 @@ auto layer_norm_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns()
       beta = tensor_to_const(ctx, beta_torch_tensor);
     } else {
       beta = args[3].ITensorOrFreeze(ctx);
-      // beta = broadcast(ctx, n, beta, input_shape_vec.size(), "beta");
       beta = add_expand(ctx, beta, input_shape);
     }
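
For context, a short standalone libtorch sketch of why gamma and beta are expanded before the scale and shift are applied: aten::layer_norm's weight and bias only span the normalized trailing dimensions, so they must be broadcast up to the input rank before they can be used in plain elementwise operations. The shapes below are illustrative, and the exact behavior of add_expand is assumed rather than quoted from the converter.

// Standalone libtorch sketch, not converter code; add_expand's semantics are assumed.
#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::randn({2, 14, 768});   // (batch, seq_len, hidden)
  auto gamma = torch::randn({768});      // layer_norm weight covers only the normalized dim
  auto beta = torch::randn({768});

  // Reference: aten::layer_norm applied with the rank-1 weight and bias.
  auto ref = torch::layer_norm(x, {768}, gamma, beta);

  // Broadcast gamma/beta to the full input rank (roughly what add_expand does),
  // then apply the scale and shift as ordinary elementwise operations.
  auto gamma_full = gamma.reshape({1, 1, 768}).expand_as(x);
  auto beta_full = beta.reshape({1, 1, 768}).expand_as(x);
  auto manual = torch::layer_norm(x, {768}) * gamma_full + beta_full;

  std::cout << (ref - manual).abs().max().item<float>() << std::endl;  // ~0
  return 0;
}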

core/runtime/execute_engine.cpp

Lines changed: 0 additions & 1 deletion

@@ -351,7 +351,6 @@ std::vector<at::Tensor> execute_engine(std::vector<at::Tensor> inputs, c10::intr
       // If in CUDAGraph mode, results need to be copied to the result buffers (on caller stream)
       for (size_t o = 0; o < compiled_engine->output_buffers.size(); o++) {
         outputs[o].copy_(compiled_engine->output_buffers[o], false);
-      }
     }

     if (compiled_engine->profile_execution) {

tests/cpp/test_compiled_modules.cpp

Lines changed: 5 additions & 1 deletion

@@ -5,7 +5,11 @@ TEST_P(CppAPITests, CompiledModuleIsClose) {
   std::vector<torch::jit::IValue> trt_inputs_ivalues;
   std::vector<torch_tensorrt::Input> shapes;
   for (uint64_t i = 0; i < input_shapes.size(); i++) {
-    auto in = at::randint(5, input_shapes[i], {at::kCUDA}).to(input_types[i]);
+    auto in = at::randn(input_shapes[i], {at::kCUDA}).to(input_types[i]);
+    if (input_types[i] == at::kInt || input_types[i] == at::kLong) {
+      auto in = at::randint(0, 2, input_shapes[i], {at::kCUDA}).to(input_types[i]);
+    }
+
     jit_inputs_ivalues.push_back(in.clone());
     trt_inputs_ivalues.push_back(in.clone());
     auto in_spec = torch_tensorrt::Input(input_shapes[i]);
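
One note on the new input generation: as committed, the `auto in` inside the if-block declares a second, block-scoped variable that shadows the outer one, so kInt/kLong inputs are still produced by at::randn. A minimal sketch, assuming the intent was to overwrite the outer variable (the helper name make_test_input is hypothetical, not part of the test):

// Sketch only; assumes the integer branch is meant to replace the randn tensor.
#include <ATen/ATen.h>
#include <vector>

at::Tensor make_test_input(const std::vector<int64_t>& shape, at::ScalarType dtype) {
  auto in = at::randn(shape, at::kCUDA).to(dtype);
  if (dtype == at::kInt || dtype == at::kLong) {
    // Assign to the existing variable so the {0, 1} tensor is what the test pushes
    // into jit_inputs_ivalues / trt_inputs_ivalues (e.g. BERT token/attention inputs).
    in = at::randint(0, 2, shape, at::kCUDA).to(dtype);
  }
  return in;
}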

tests/py/ts/models/test_models.py

Lines changed: 2 additions & 2 deletions

@@ -93,7 +93,7 @@ def test_efficientnet_b0(self):
         )

     def test_bert_base_uncased(self):
-        self.model = cm.BertModule().cuda()
+        self.model = cm.BertModule()
         self.input = torch.randint(0, 2, (1, 14), dtype=torch.int32).to("cuda")

         compile_spec = {
@@ -116,7 +116,7 @@ def test_bert_base_uncased(self):
             "enabled_precisions": {torch.float},
             "truncate_long_and_double": True,
         }
-        with torchtrt.logging.errors():
+        with torchtrt.logging.debug():
             trt_mod = torchtrt.ts.compile(self.model, **compile_spec)

         model_outputs = self.model(self.input, self.input)
