Skip to content

Commit 891440d

Browse files
committed
refactor: Disable input_signature in the TorchScript backend due to the lack of a
generic interface. Signed-off-by: Naren Dasan <[email protected]> Signed-off-by: Naren Dasan <[email protected]>
1 parent bce8464 commit 891440d

File tree

2 files changed

+9
-17
lines changed

2 files changed

+9
-17
lines changed

py/torch_tensorrt/csrc/register_tensorrt_classes.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,11 @@ void RegisterTRTCompileSpec() {
2626
static auto TORCHTRT_UNUSED TRTInputSignatureTSRegistration =
2727
torch::class_<torch_tensorrt::pyapi::InputSignature>("tensorrt", "_InputSignature")
2828
.def(torch::init<>())
29+
.def("_set_signature_ivalue_torchbind",
30+
[](const c10::intrusive_ptr<torch_tensorrt::pyapi::InputSignature>& self,
31+
torch::jit::IValue ival) {
32+
self->signature_ivalue = ival;
33+
})
2934
.def("__str__", &torch_tensorrt::pyapi::InputSignature::to_str);
3035

3136
ADD_FIELD_GET_SET_REGISTRATION(

py/torch_tensorrt/ts/_compile_spec.py

Lines changed: 4 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -327,20 +327,6 @@ def TensorRTCompileSpec(inputs=[],
327327
torch.randn((1, 3, 224, 244)) # Use an example tensor and let torch_tensorrt infer settings
328328
]
329329
330-
input_signature Union(List, Tuple, torch_tensorrt.Input, torch.Tensor): A formatted collection of input specifications for the module. Input Sizes can be specified as torch sizes, tuples or lists. dtypes can be specified using
331-
torch datatypes or torch_tensorrt datatypes and you can use either torch devices or the torch_tensorrt device type enum to select device type. **This API should be considered beta-level stable and may change in the future** ::
332-
333-
input_signature=([
334-
torch_tensorrt.Input((1, 3, 224, 224)), # Static NCHW input shape for input #1
335-
torch_tensorrt.Input(
336-
min_shape=(1, 224, 224, 3),
337-
opt_shape=(1, 512, 512, 3),
338-
max_shape=(1, 1024, 1024, 3),
339-
dtype=torch.int32
340-
format=torch.channel_last
341-
), # Dynamic input shape for input #2
342-
], torch.randn((1, 3, 224, 244))) # Use an example tensor and let torch_tensorrt infer settings for input #3
343-
344330
device (Union(torch_tensorrt.Device, torch.device, dict)): Target device for TensorRT engines to run on ::
345331
346332
device=torch_tensorrt.Device("dla:1", allow_gpu_fallback=True)
@@ -362,7 +348,7 @@ def TensorRTCompileSpec(inputs=[],
362348

363349
compile_spec = {
364350
"inputs": inputs,
365-
"input_signature": input_signature,
351+
#"input_signature": input_signature,
366352
"device": device,
367353
"disable_tf32":
368354
disable_tf32, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
@@ -384,12 +370,13 @@ def TensorRTCompileSpec(inputs=[],
384370

385371
backend_spec = torch.classes.tensorrt.CompileSpec()
386372

373+
if input_signature is not None:
374+
raise ValueError("Input signature parsing is not currently supported in the TorchScript backend integration")
375+
387376
for i in parsed_spec.inputs:
388377
clone = _internal_input_to_torch_class_input(i)
389378
backend_spec._append_input(clone)
390379

391-
backend_spec._set_input_signature(parsed_spec.input_signature)
392-
393380
d = torch.classes.tensorrt._Device()
394381
d._set_device_type(int(parsed_spec.device.device_type))
395382
d._set_gpu_id(parsed_spec.device.gpu_id)

0 commit comments

Comments (0)