
Commit a13a0d6

prune enable_pl_optimizer

1 parent 09baf29

File tree

4 files changed (+5, -18 lines)

CHANGELOG.md

Lines changed: 3 additions & 0 deletions

@@ -21,6 +21,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Removed support for passing a bool value to `profiler` argument of Trainer ([#6164](https://github.com/PyTorchLightning/pytorch-lightning/pull/6164))


+- Removed deprecated Trainer argument `enable_pl_optimizer`
+
+
 ### Fixed

 - Made the `Plugin.reduce` method more consistent across all Plugins to reflect a mean-reduction by default ([#6011](https://github.com/PyTorchLightning/pytorch-lightning/pull/6011))

pytorch_lightning/trainer/connectors/optimizer_connector.py

Lines changed: 1 addition & 6 deletions

@@ -20,12 +20,7 @@ class OptimizerConnector:
     def __init__(self, trainer):
         self.trainer = trainer

-    def on_trainer_init(self, enable_pl_optimizer):
-        if enable_pl_optimizer is not None:
-            rank_zero_warn(
-                "Trainer argument `enable_pl_optimizer` is deprecated in v1.1.3. It will be removed in v1.3.0",
-                DeprecationWarning
-            )
+    def on_trainer_init(self):
         self.trainer.lr_schedulers = []
         self.trainer.optimizers = []
         self.trainer.optimizer_frequencies = []
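For context, the deleted branch is the standard deprecation-shim pattern: keep accepting the old argument, warn when it is passed, and otherwise behave as if it were absent. A minimal standalone sketch of that pattern, using the stdlib `warnings` module rather than Lightning's `rank_zero_warn` (`LegacyConnector` is a hypothetical name, not part of the PL API):

import warnings


class LegacyConnector:
    # Hypothetical connector illustrating the shim this commit deletes.
    def __init__(self, trainer):
        self.trainer = trainer

    def on_trainer_init(self, enable_pl_optimizer=None):
        # The flag no longer changes behavior; passing any value only warns.
        if enable_pl_optimizer is not None:
            warnings.warn(
                "`enable_pl_optimizer` is deprecated and will be removed",
                DeprecationWarning,
            )
        # Initialization proceeds identically whether or not the flag was given.
        self.trainer.lr_schedulers = []
        self.trainer.optimizers = []
        self.trainer.optimizer_frequencies = []

Once the deprecation window closes, the commit simply drops the parameter and the warning, leaving only the unconditional initialization.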

pytorch_lightning/trainer/trainer.py

Lines changed: 1 addition & 7 deletions

@@ -137,7 +137,6 @@ def __init__(
         distributed_backend: Optional[str] = None,
         automatic_optimization: Optional[bool] = None,
         move_metrics_to_cpu: bool = False,
-        enable_pl_optimizer: bool = None,  # todo: remove in v1.3
         multiple_trainloader_mode: str = 'max_size_cycle',
         stochastic_weight_avg: bool = False
     ):
@@ -288,11 +287,6 @@ def __init__(
             move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.
                 This can save some gpu memory, but can make training slower. Use with attention.

-            enable_pl_optimizer: If True, each optimizer will be wrapped by
-                `pytorch_lightning.core.optimizer.LightningOptimizer`. It allows Lightning to
-                handle AMP, TPU, accumulated_gradients, etc.
-                .. warning:: Currently deprecated and it will be removed in v1.3
-
             multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.
                 In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,
                 and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets
@@ -345,7 +339,7 @@ def __init__(
         self.on_init_start()

         # init optimizer + lr scheduler related flags
-        self.optimizer_connector.on_trainer_init(enable_pl_optimizer)
+        self.optimizer_connector.on_trainer_init()

         # init data flags
         self.data_connector.on_trainer_init(
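The removed docstring records what the flag used to control: wrapping each optimizer in `pytorch_lightning.core.optimizer.LightningOptimizer` so Lightning can coordinate AMP, TPU, and gradient accumulation. That wrapping is now unconditional, so no Trainer flag is needed; inside a module the wrapped optimizer should still be reachable via `LightningModule.optimizers()`. A minimal sketch under that assumption (`TinyModel` is a hypothetical module, not from this commit):

import torch
import pytorch_lightning as pl


class TinyModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        # Fetched here only to show the wrapper is available: self.optimizers()
        # returns the LightningOptimizer wrapper by default, no flag required.
        opt = self.optimizers()
        loss = torch.nn.functional.mse_loss(self.layer(x), y)
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

Passing the removed keyword now fails at construction time with a TypeError, since `Trainer.__init__` no longer declares it.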

tests/deprecated_api/test_remove_1-3.py

Lines changed: 0 additions & 5 deletions

@@ -106,8 +106,3 @@ def test_v1_3_0_deprecated_metrics():
         torch.randint(10, 20, (50, )).float(),
         torch.randint(1, 100, (50, )).float()
     )
-
-
-def test_trainer_enable_pl_optimizer(tmpdir):
-    with pytest.deprecated_call(match='will be removed in v1.3'):
-        Trainer(enable_pl_optimizer=True)
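The deleted test asserted a DeprecationWarning; after the removal, a replacement test would assert outright rejection instead. A hypothetical sketch, not part of this commit (assumes pytest and a post-removal Lightning version):

import pytest

from pytorch_lightning import Trainer


def test_trainer_enable_pl_optimizer_removed():
    # The keyword is gone from Trainer.__init__, so passing it is a TypeError.
    with pytest.raises(TypeError):
        Trainer(enable_pl_optimizer=True)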
