
Commit 1d9c553

prune deprecated Trainer arg enable_pl_optimizer (#6163)
* prune enable_pl_optimizer
* prune automatic_optimization
1 parent 09baf29 commit 1d9c553

File tree

9 files changed, +5 -43 lines changed

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
```diff
@@ -21,6 +21,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Removed support for passing a bool value to `profiler` argument of Trainer ([#6164](https://github.com/PyTorchLightning/pytorch-lightning/pull/6164))
 
 
+- Removed deprecated Trainer argument `enable_pl_optimizer` and `automatic_optimization` ([#6163](https://github.com/PyTorchLightning/pytorch-lightning/pull/6163))
+
+
 ### Fixed
 
 - Made the `Plugin.reduce` method more consistent across all Plugins to reflect a mean-reduction by default ([#6011](https://github.com/PyTorchLightning/pytorch-lightning/pull/6011))
```
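
For user code that still passes the pruned flag, a minimal migration sketch (the `model` object below is a hypothetical LightningModule instance, not something from this diff): the flag is simply dropped, since the wrapping it used to opt into is the default behaviour of `self.optimizers()` in current releases.

```python
from pytorch_lightning import Trainer

# before this commit (deprecated, now removed):
#   trainer = Trainer(enable_pl_optimizer=True)

# after: no flag needed; inside a LightningModule, self.optimizers()
# already returns LightningOptimizer-wrapped optimizers by default
trainer = Trainer(max_epochs=1)
trainer.fit(model)  # `model` is a hypothetical LightningModule instance
```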

docs/source/common/optimizers.rst

Lines changed: 0 additions & 2 deletions
```diff
@@ -300,8 +300,6 @@ override the :meth:`optimizer_step` function.
 
 For example, here step optimizer A every 2 batches and optimizer B every 4 batches
 
-.. note:: When using Trainer(enable_pl_optimizer=True), there is no need to call `.zero_grad()`.
-
 .. testcode::
 
     def optimizer_zero_grad(self, current_epoch, batch_idx, optimizer, opt_idx):
```
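
With the note gone, the `optimizer_zero_grad` override that the remaining diff context introduces is itself responsible for clearing gradients. A hedged sketch of one plausible body for the docs example (the alternating schedule is an assumption based on the surrounding sentence, not the literal docs text):

```python
# step optimizer A every 2 batches and optimizer B every 4 batches,
# so gradients are only cleared on the batches where that optimizer steps
def optimizer_zero_grad(self, current_epoch, batch_idx, optimizer, opt_idx):
    if opt_idx == 0 and batch_idx % 2 == 0:
        optimizer.zero_grad()
    if opt_idx == 1 and batch_idx % 4 == 0:
        optimizer.zero_grad()
```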

pytorch_lightning/core/lightning.py

Lines changed: 0 additions & 3 deletions
```diff
@@ -1324,9 +1324,6 @@ def optimizer_step(
         By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example
         once per optimizer.
 
-        .. tip:: With ``Trainer(enable_pl_optimizer=True)``, you can use ``optimizer.step()`` directly
-            and it will handle zero_grad, accumulated gradients, AMP, TPU and more automatically for you.
-
         Warning:
             If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter
             to ``optimizer.step()`` function as shown in the examples. This ensures that
```
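
The retained warning is the important constraint after this prune: a custom `optimizer_step` must forward `optimizer_closure` to `optimizer.step()` itself. A minimal sketch of such an override, assuming the hook signature current at the time of this commit:

```python
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                   optimizer_closure, on_tpu=False, using_native_amp=False, using_lbfgs=False):
    # the closure runs training_step + backward; skipping it silently breaks training
    optimizer.step(closure=optimizer_closure)
```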

pytorch_lightning/trainer/connectors/optimizer_connector.py

Lines changed: 1 addition & 6 deletions
```diff
@@ -20,12 +20,7 @@ class OptimizerConnector:
     def __init__(self, trainer):
         self.trainer = trainer
 
-    def on_trainer_init(self, enable_pl_optimizer):
-        if enable_pl_optimizer is not None:
-            rank_zero_warn(
-                "Trainer argument `enable_pl_optimizer` is deprecated in v1.1.3. It will be removed in v1.3.0",
-                DeprecationWarning
-            )
+    def on_trainer_init(self):
         self.trainer.lr_schedulers = []
         self.trainer.optimizers = []
         self.trainer.optimizer_frequencies = []
```

pytorch_lightning/trainer/trainer.py

Lines changed: 1 addition & 23 deletions
```diff
@@ -135,9 +135,7 @@ def __init__(
         amp_backend: str = 'native',
         amp_level: str = 'O2',
         distributed_backend: Optional[str] = None,
-        automatic_optimization: Optional[bool] = None,
         move_metrics_to_cpu: bool = False,
-        enable_pl_optimizer: bool = None,  # todo: remove in v1.3
         multiple_trainloader_mode: str = 'max_size_cycle',
         stochastic_weight_avg: bool = False
     ):
@@ -213,10 +211,6 @@ def __init__(
 
             log_every_n_steps: How often to log within steps (defaults to every 50 steps).
 
-            automatic_optimization: If False you are responsible for calling .backward, .step, zero_grad
-                in LightningModule. This argument has been moved to LightningModule. It is deprecated
-                here in v1.1 and will be removed in v1.3.
-
             prepare_data_per_node: If True, each LOCAL_RANK=0 will call prepare data.
                 Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare data
 
@@ -288,11 +282,6 @@ def __init__(
             move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.
                 This can save some gpu memory, but can make training slower. Use with attention.
 
-            enable_pl_optimizer: If True, each optimizer will be wrapped by
-                `pytorch_lightning.core.optimizer.LightningOptimizer`. It allows Lightning to
-                handle AMP, TPU, accumulated_gradients, etc.
-                .. warning:: Currently deprecated and it will be removed in v1.3
-
             multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.
                 In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,
                 and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets
@@ -345,7 +334,7 @@ def __init__(
         self.on_init_start()
 
         # init optimizer + lr scheduler related flags
-        self.optimizer_connector.on_trainer_init(enable_pl_optimizer)
+        self.optimizer_connector.on_trainer_init()
 
         # init data flags
         self.data_connector.on_trainer_init(
@@ -356,23 +345,12 @@ def __init__(
         self.training_tricks_connector.on_trainer_init(
             gradient_clip_val, track_grad_norm, accumulate_grad_batches, truncated_bptt_steps, terminate_on_nan
         )
-
-        # init train loop related flags
-        # TODO: remove in 1.3.0
-        if automatic_optimization is None:
-            automatic_optimization = True
-        else:
-            rank_zero_warn(
-                "Disable automatic optimization with the trainer flag is deprecated and will be removed in v1.3.0!"
-                "Please use the property on the LightningModule for disabling automatic optimization"
-            )
         self.train_loop.on_trainer_init(
             max_epochs,
             min_epochs,
             max_steps,
             min_steps,
             num_sanity_val_steps,
-            automatic_optimization,
             weights_summary,
         )
         self.evaluation_loop.on_trainer_init()
```
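
The removed warning text already pointed at the replacement: disable automatic optimization on the LightningModule, not on the Trainer. A hedged sketch of that pattern (the module body is illustrative and assumes the 1.2+ `manual_backward(loss)` signature):

```python
import torch
from pytorch_lightning import LightningModule


class ManualOptModel(LightningModule):  # hypothetical example module
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    @property
    def automatic_optimization(self) -> bool:
        # replaces the removed Trainer(automatic_optimization=False) flag
        return False

    def training_step(self, batch, batch_idx):
        opt = self.optimizers()
        opt.zero_grad()
        loss = self.layer(batch).sum()
        self.manual_backward(loss)
        opt.step()

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)
```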

pytorch_lightning/trainer/training_loop.py

Lines changed: 0 additions & 2 deletions
```diff
@@ -58,7 +58,6 @@ def on_trainer_init(
         max_steps,
         min_steps,
         num_sanity_val_steps,
-        automatic_optimization,
         weights_summary,
     ):
         self.trainer.global_step = 0
@@ -71,7 +70,6 @@ def on_trainer_init(
         self.trainer.batch_idx = 0
         self.trainer.num_training_batches = 0
         self.trainer.train_dataloader = None
-        self.automatic_optimization = automatic_optimization
 
         # If neither max_epochs or max_steps is set, then use existing default of max_epochs = 1000
         self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
```

tests/deprecated_api/test_remove_1-3.py

Lines changed: 0 additions & 5 deletions
```diff
@@ -106,8 +106,3 @@ def test_v1_3_0_deprecated_metrics():
         torch.randint(10, 20, (50, )).float(),
         torch.randint(1, 100, (50, )).float()
     )
-
-
-def test_trainer_enable_pl_optimizer(tmpdir):
-    with pytest.deprecated_call(match='will be removed in v1.3'):
-        Trainer(enable_pl_optimizer=True)
```
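
With the deprecation path gone, passing the old flag is no longer a warning but an ordinary unexpected-keyword error. A sketch of what a follow-up check could look like (this test is an assumption, not part of the commit):

```python
import pytest
from pytorch_lightning import Trainer


def test_trainer_enable_pl_optimizer_removed(tmpdir):
    # assumption: the argument was removed from the Trainer signature,
    # so Python itself rejects it as an unexpected keyword argument
    with pytest.raises(TypeError):
        Trainer(enable_pl_optimizer=True)
```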

tests/plugins/test_rpc_sequential_plugin.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -42,7 +42,6 @@ def test_rpc_sequential_plugin_manual(tmpdir, args=None):
         gpus=2,
         distributed_backend="ddp",
         plugins=[RPCSequentialPlugin(balance=[2, 1], rpc_timeout_sec=5 * 60)],
-        enable_pl_optimizer=True,
     )
 
     trainer.fit(model)
```

tests/utilities/test_all_gather_grad.py

Lines changed: 0 additions & 1 deletion
```diff
@@ -89,7 +89,6 @@ def training_epoch_end(self, outputs) -> None:
         max_epochs=1,
         log_every_n_steps=1,
         accumulate_grad_batches=2,
-        enable_pl_optimizer=True,
         gpus=2,
         accelerator="ddp",
     )
```
