@@ -135,7 +135,6 @@ def __init__(
         amp_backend: str = 'native',
         amp_level: str = 'O2',
         distributed_backend: Optional[str] = None,
-        automatic_optimization: Optional[bool] = None,
         move_metrics_to_cpu: bool = False,
         multiple_trainloader_mode: str = 'max_size_cycle',
         stochastic_weight_avg: bool = False
@@ -212,10 +211,6 @@ def __init__(
 
             log_every_n_steps: How often to log within steps (defaults to every 50 steps).
 
-            automatic_optimization: If False you are responsible for calling .backward, .step, zero_grad
-                in LightningModule. This argument has been moved to LightningModule. It is deprecated
-                here in v1.1 and will be removed in v1.3.
-
             prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
                 Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
 
@@ -351,23 +346,12 @@ def __init__(
         self.training_tricks_connector.on_trainer_init(
             gradient_clip_val, track_grad_norm, accumulate_grad_batches, truncated_bptt_steps, terminate_on_nan
         )
-
-        # init train loop related flags
-        # TODO: remove in 1.3.0
-        if automatic_optimization is None:
-            automatic_optimization = True
-        else:
-            rank_zero_warn(
-                "Disable automatic optimization with the trainer flag is deprecated and will be removed in v1.3.0!"
-                "Please use the property on the LightningModule for disabling automatic optimization"
-            )
         self.train_loop.on_trainer_init(
             max_epochs,
             min_epochs,
             max_steps,
             min_steps,
             num_sanity_val_steps,
-            automatic_optimization,
             weights_summary,
         )
         self.evaluation_loop.on_trainer_init()
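
With the Trainer flag removed, manual optimization is opted into from the LightningModule itself, as the removed deprecation warning suggests. Below is a minimal sketch of that migration, assuming the LightningModule manual-optimization API of this release line (the `automatic_optimization` property, `self.optimizers()`, `self.manual_backward()`); the model, loss, and data shapes are placeholders, not part of this PR.

import torch
from torch import nn
import pytorch_lightning as pl


class ManualOptimModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = nn.Linear(32, 2)

    @property
    def automatic_optimization(self) -> bool:
        # previously Trainer(automatic_optimization=False); now controlled on the module
        return False

    def training_step(self, batch, batch_idx):
        x, y = batch
        opt = self.optimizers()  # optimizer returned by configure_optimizers
        loss = nn.functional.cross_entropy(self.layer(x), y)
        opt.zero_grad()
        self.manual_backward(loss)  # use instead of loss.backward() so precision/accelerator hooks still run
        opt.step()
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)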