
Commit cd306fb

prune enable_pl_optimizer
1 parent ebabe56 commit cd306fb

4 files changed: 4 additions & 18 deletions

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
@@ -18,6 +18,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Removed
 
+- Removed deprecated Trainer argument `enable_pl_optimizer`
+
 
 ### Fixed
 
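For reference, the user-facing effect of this entry is simply that the opt-in flag disappears: code that still passed it only needs to drop the argument. A minimal migration sketch (illustrative only, not part of this commit; `max_epochs=1` is just a placeholder option):

from pytorch_lightning import Trainer

# before (deprecated since v1.1.3): Trainer(enable_pl_optimizer=True, max_epochs=1)
# after this commit: the argument is gone, so it is simply omitted
trainer = Trainer(max_epochs=1)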

pytorch_lightning/trainer/connectors/optimizer_connector.py

Lines changed: 1 addition & 6 deletions
@@ -20,12 +20,7 @@ class OptimizerConnector:
     def __init__(self, trainer):
         self.trainer = trainer
 
-    def on_trainer_init(self, enable_pl_optimizer):
-        if enable_pl_optimizer is not None:
-            rank_zero_warn(
-                "Trainer argument `enable_pl_optimizer` is deprecated in v1.1.3. It will be removed in v1.3.0",
-                DeprecationWarning
-            )
+    def on_trainer_init(self):
         self.trainer.lr_schedulers = []
         self.trainer.optimizers = []
         self.trainer.optimizer_frequencies = []
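Put together from the hunk above, the pruned connector is now just a small state initializer. A sketch of the lines shown in the diff (the rest of the module is untouched by this commit and omitted here):

class OptimizerConnector:

    def __init__(self, trainer):
        self.trainer = trainer

    def on_trainer_init(self):
        # the deprecation shim for `enable_pl_optimizer` is gone; this now only
        # resets the optimizer / lr-scheduler bookkeeping on the Trainer
        self.trainer.lr_schedulers = []
        self.trainer.optimizers = []
        self.trainer.optimizer_frequencies = []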

pytorch_lightning/trainer/trainer.py

Lines changed: 1 addition & 7 deletions
@@ -137,7 +137,6 @@ def __init__(
         distributed_backend: Optional[str] = None,
         automatic_optimization: Optional[bool] = None,
         move_metrics_to_cpu: bool = False,
-        enable_pl_optimizer: bool = None,  # todo: remove in v1.3
         multiple_trainloader_mode: str = 'max_size_cycle',
         stochastic_weight_avg: bool = False
     ):
@@ -289,11 +288,6 @@ def __init__(
             move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.
                 This can save some gpu memory, but can make training slower. Use with attention.
 
-            enable_pl_optimizer: If True, each optimizer will be wrapped by
-                `pytorch_lightning.core.optimizer.LightningOptimizer`. It allows Lightning to
-                handle AMP, TPU, accumulated_gradients, etc.
-                .. warning:: Currently deprecated and it will be removed in v1.3
-
             multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.
                 In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,
                 and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets
@@ -346,7 +340,7 @@ def __init__(
         self.on_init_start()
 
         # init optimizer + lr scheduler related flags
-        self.optimizer_connector.on_trainer_init(enable_pl_optimizer)
+        self.optimizer_connector.on_trainer_init()
 
         # init data flags
         self.data_connector.on_trainer_init(
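Because the keyword is removed from `Trainer.__init__` (and the signature shown above takes no `**kwargs`), passing it now raises a `TypeError` at construction time rather than the old `DeprecationWarning`. A quick sketch, illustrative only; the exact error text may differ:

from pytorch_lightning import Trainer

try:
    Trainer(enable_pl_optimizer=True)  # keyword no longer exists
except TypeError as err:
    # e.g. "__init__() got an unexpected keyword argument 'enable_pl_optimizer'"
    print(err)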

tests/deprecated_api/test_remove_1-3.py

Lines changed: 0 additions & 5 deletions
@@ -141,8 +141,3 @@ def test_v1_3_0_trainer_cli_profiler(cli_args, expected_parsed_arg, expected_profiler):
     assert getattr(args, "profiler") == expected_parsed_arg
     trainer = Trainer.from_argparse_args(args)
     assert isinstance(trainer.profiler, expected_profiler)
-
-
-def test_trainer_enable_pl_optimizer(tmpdir):
-    with pytest.deprecated_call(match='will be removed in v1.3'):
-        Trainer(enable_pl_optimizer=True)
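The deprecation test above is deleted with nothing put in its place. If a removal-stage counterpart were wanted (hypothetical, not part of this commit), it could assert that the keyword is now rejected outright:

import pytest

from pytorch_lightning import Trainer


def test_trainer_enable_pl_optimizer_removed():
    # the argument was pruned, so construction should fail instead of warning
    with pytest.raises(TypeError):
        Trainer(enable_pl_optimizer=True)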
