@@ -81,13 +81,11 @@ def compile(
     engine_capability: EngineCapability = ENGINE_CAPABILITY,
     refit: bool = REFIT,
     debug: bool = DEBUG,
-    capability: EngineCapability = EngineCapability.default,
     num_avg_timing_iters: int = NUM_AVG_TIMING_ITERS,
     workspace_size: int = WORKSPACE_SIZE,
     dla_sram_size: int = DLA_SRAM_SIZE,
     dla_local_dram_size: int = DLA_LOCAL_DRAM_SIZE,
     dla_global_dram_size: int = DLA_GLOBAL_DRAM_SIZE,
-    calibrator: object = None,
     truncate_long_and_double: bool = TRUNCATE_LONG_AND_DOUBLE,
     require_full_compilation: bool = REQUIRE_FULL_COMPILATION,
     min_block_size: int = MIN_BLOCK_SIZE,
@@ -168,6 +166,12 @@ def compile(
     if debug:
         set_log_level(logger.parent, logging.DEBUG)

+    if torch_executed_modules is not None and torch_executed_modules:
+        logger.warning(
+            f"Detected torch_executed_modules was non-empty: {torch_executed_modules}"
+            "\nThis feature is unimplemented in Torch-TRT Dynamo currently."
+        )
+
     if not isinstance(inputs, collections.abc.Sequence):
         inputs = [inputs]

@@ -226,6 +230,7 @@ def compile(
         "use_python_runtime": use_python_runtime,
         "truncate_long_and_double": truncate_long_and_double,
         "use_fast_partitioner": use_fast_partitioner,
+        "num_avg_timing_iters": num_avg_timing_iters,
         "enable_experimental_decompositions": enable_experimental_decompositions,
         "require_full_compilation": require_full_compilation,
         "disable_tf32": disable_tf32,
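For context, a minimal usage sketch of the API after this change, assuming the public torch_tensorrt.compile entry point with the Dynamo IR; the module and input shape below are illustrative and not part of this PR:

import torch
import torch_tensorrt

# Illustrative module and input; any traceable nn.Module works.
model = torch.nn.Sequential(
    torch.nn.Conv2d(3, 16, 3, padding=1),
    torch.nn.ReLU(),
).eval().cuda()

inputs = [torch.randn(1, 3, 224, 224).cuda()]

# num_avg_timing_iters is now forwarded into the compilation settings,
# and with the duplicate `capability` keyword removed, engine_capability
# is the single way to select an engine capability level.
trt_model = torch_tensorrt.compile(
    model,
    ir="dynamo",
    inputs=inputs,
    num_avg_timing_iters=2,
)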