Commit 295a70e

prune automatic_optimization
1 parent: cd306fb

File tree

3 files changed (+2, -19 lines)

CHANGELOG.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -18,7 +18,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Removed
 
-- Removed deprecated Trainer argument `enable_pl_optimizer`
+- Removed deprecated Trainer argument `enable_pl_optimizer` and `automatic_optimization` ([#6163](https://github.com/PyTorchLightning/pytorch-lightning/pull/6163))
 
 
 ### Fixed
```
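
For downstream code, this removal means `Trainer(automatic_optimization=...)` is no longer accepted at all; passing it now fails as an unexpected keyword argument. A minimal migration sketch (the class body is illustrative, not part of this commit), moving the flag to the LightningModule property that the deprecation message recommends:

```python
import pytorch_lightning as pl

# Before (deprecated in v1.1, removed by this commit):
#   trainer = pl.Trainer(automatic_optimization=False)  # TypeError: unexpected keyword argument

# After: expose the flag from the LightningModule instead.
class MyManualModel(pl.LightningModule):
    @property
    def automatic_optimization(self) -> bool:
        return False

trainer = pl.Trainer()  # the Trainer no longer knows about the flag
```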

pytorch_lightning/trainer/trainer.py

Lines changed: 0 additions & 16 deletions
```diff
@@ -135,7 +135,6 @@ def __init__(
         amp_backend: str = 'native',
         amp_level: str = 'O2',
         distributed_backend: Optional[str] = None,
-        automatic_optimization: Optional[bool] = None,
         move_metrics_to_cpu: bool = False,
         multiple_trainloader_mode: str = 'max_size_cycle',
         stochastic_weight_avg: bool = False
@@ -212,10 +211,6 @@ def __init__(
 
             log_every_n_steps: How often to log within steps (defaults to every 50 steps).
 
-            automatic_optimization: If False you are responsible for calling .backward, .step, zero_grad
-                in LightningModule. This argument has been moved to LightningModule. It is deprecated
-                here in v1.1 and will be removed in v1.3.
-
             prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
                 Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
 
@@ -351,23 +346,12 @@ def __init__(
         self.training_tricks_connector.on_trainer_init(
             gradient_clip_val, track_grad_norm, accumulate_grad_batches, truncated_bptt_steps, terminate_on_nan
         )
-
-        # init train loop related flags
-        # TODO: remove in 1.3.0
-        if automatic_optimization is None:
-            automatic_optimization = True
-        else:
-            rank_zero_warn(
-                "Disable automatic optimization with the trainer flag is deprecated and will be removed in v1.3.0!"
-                "Please use the property on the LightningModule for disabling automatic optimization"
-            )
         self.train_loop.on_trainer_init(
             max_epochs,
             min_epochs,
             max_steps,
             min_steps,
             num_sanity_val_steps,
-            automatic_optimization,
             weights_summary,
         )
         self.evaluation_loop.on_trainer_init()
```
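
The docstring pruned above described what opting out implies: the module itself must run backward, step, and zero_grad. A minimal sketch of that contract, assuming the 1.2-era manual-optimization helpers `self.optimizers()` and `self.manual_backward()` (the model and data shapes are invented for illustration):

```python
import torch
import torch.nn.functional as F
import pytorch_lightning as pl


class ManualOptModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    @property
    def automatic_optimization(self) -> bool:
        # The flag now lives on the LightningModule, not the Trainer.
        return False

    def training_step(self, batch, batch_idx):
        x, y = batch
        opt = self.optimizers()
        opt.zero_grad()
        loss = F.cross_entropy(self.layer(x), y)
        # manual_backward routes through the precision plugins,
        # unlike a bare loss.backward()
        self.manual_backward(loss)
        opt.step()

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)
```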

pytorch_lightning/trainer/training_loop.py

Lines changed: 1 addition & 2 deletions
```diff
@@ -58,7 +58,6 @@ def on_trainer_init(
         max_steps,
         min_steps,
         num_sanity_val_steps,
-        automatic_optimization,
         weights_summary,
     ):
         self.trainer.global_step = 0
@@ -71,7 +70,7 @@ def on_trainer_init(
         self.trainer.batch_idx = 0
         self.trainer.num_training_batches = 0
         self.trainer.train_dataloader = None
-        self.automatic_optimization = automatic_optimization
+        self.automatic_optimization: bool = ...
 
         # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000
         self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
```
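
Note the `...` placeholder left on `self.automatic_optimization`: the loop now only declares the attribute, implying it is assigned later from the attached model rather than from a Trainer argument. A hypothetical sketch of that wiring (the class stub and method name are assumptions, not shown in this diff):

```python
import pytorch_lightning as pl


class TrainLoop:
    """Hypothetical stand-in for the trainer's train loop."""

    automatic_optimization: bool

    def connect(self, model: pl.LightningModule) -> None:
        # Assumption: the flag is copied from the module once it is attached,
        # making the LightningModule the single source of truth.
        self.automatic_optimization = model.automatic_optimization
```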
