
Commit b67691b

prune automatic_optimization
1 parent a13a0d6 commit b67691b

File tree

- CHANGELOG.md
- pytorch_lightning/trainer/trainer.py
- pytorch_lightning/trainer/training_loop.py

3 files changed: +2 -19 lines changed

CHANGELOG.md

Lines changed: 1 addition & 1 deletion
```diff
@@ -21,7 +21,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 - Removed support for passing a bool value to `profiler` argument of Trainer ([#6164](https://github.com/PyTorchLightning/pytorch-lightning/pull/6164))
 
-- Removed deprecated Trainer argument `enable_pl_optimizer`
+- Removed deprecated Trainer argument `enable_pl_optimizer` and `automatic_optimization` ([#6163](https://github.com/PyTorchLightning/pytorch-lightning/pull/6163))
 
 
 ### Fixed
```
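As the changelog entry implies, the replacement for the pruned Trainer flag is the `automatic_optimization` property on the LightningModule itself. The sketch below is a minimal, illustrative example (the module, layer sizes, and optimizer choice are invented for this illustration, not taken from the commit) of disabling automatic optimization on the module and stepping the optimizer by hand in `training_step` via Lightning's `optimizers()` and `manual_backward()` helpers:

```python
import torch
import pytorch_lightning as pl


class ManualOptimModel(pl.LightningModule):
    """Illustrative module replacing the removed Trainer(automatic_optimization=False)."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    @property
    def automatic_optimization(self) -> bool:
        # The flag now lives on the LightningModule, not the Trainer.
        return False

    def training_step(self, batch, batch_idx):
        # With automatic optimization disabled, the module is responsible for
        # .backward(), .step() and .zero_grad(), as the removed docstring entry described.
        opt = self.optimizers()
        loss = self.layer(batch).pow(2).mean()
        opt.zero_grad()
        self.manual_backward(loss)
        opt.step()
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)
```

With this in place, the Trainer needs no optimization-related flag at all; the behaviour travels with the module.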

pytorch_lightning/trainer/trainer.py

Lines changed: 0 additions & 16 deletions
```diff
@@ -135,7 +135,6 @@ def __init__(
         amp_backend: str = 'native',
         amp_level: str = 'O2',
         distributed_backend: Optional[str] = None,
-        automatic_optimization: Optional[bool] = None,
         move_metrics_to_cpu: bool = False,
         multiple_trainloader_mode: str = 'max_size_cycle',
         stochastic_weight_avg: bool = False
@@ -212,10 +211,6 @@ def __init__(
 
             log_every_n_steps: How often to log within steps (defaults to every 50 steps).
 
-            automatic_optimization: If False you are responsible for calling .backward, .step, zero_grad
-                in LightningModule. This argument has been moved to LightningModule. It is deprecated
-                here in v1.1 and will be removed in v1.3.
-
             prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
                 Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
 
@@ -350,23 +345,12 @@ def __init__(
         self.training_tricks_connector.on_trainer_init(
             gradient_clip_val, track_grad_norm, accumulate_grad_batches, truncated_bptt_steps, terminate_on_nan
         )
-
-        # init train loop related flags
-        # TODO: remove in 1.3.0
-        if automatic_optimization is None:
-            automatic_optimization = True
-        else:
-            rank_zero_warn(
-                "Disable automatic optimization with the trainer flag is deprecated and will be removed in v1.3.0!"
-                "Please use the property on the LightningModule for disabling automatic optimization"
-            )
         self.train_loop.on_trainer_init(
             max_epochs,
             min_epochs,
             max_steps,
             min_steps,
             num_sanity_val_steps,
-            automatic_optimization,
             weights_summary,
         )
         self.evaluation_loop.on_trainer_init()
```
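Because both the keyword argument and the `rank_zero_warn` deprecation shim are removed here, passing the old flag to `Trainer` now fails like any unknown keyword instead of emitting a warning. A hypothetical regression check along these lines (the test name is invented, and it assumes `Trainer.__init__` accepts no catch-all `**kwargs`) could pin that behaviour down:

```python
import pytest
from pytorch_lightning import Trainer


def test_automatic_optimization_kwarg_removed():
    # The argument was deleted from Trainer.__init__ in this commit, so the
    # old deprecated call site now raises instead of warning.
    with pytest.raises(TypeError):
        Trainer(automatic_optimization=False)
```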

pytorch_lightning/trainer/training_loop.py

Lines changed: 1 addition & 2 deletions
```diff
@@ -58,7 +58,6 @@ def on_trainer_init(
         max_steps,
         min_steps,
         num_sanity_val_steps,
-        automatic_optimization,
         weights_summary,
     ):
         self.trainer.global_step = 0
@@ -71,7 +70,7 @@ def on_trainer_init(
         self.trainer.batch_idx = 0
         self.trainer.num_training_batches = 0
         self.trainer.train_dataloader = None
-        self.automatic_optimization = automatic_optimization
+        self.automatic_optimization: bool = ...
 
         # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000
         self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
```
