
Commit f0ad06a

SeanNaren authored and lexierule committed
Expose DeepSpeed FP16 parameters due to loss instability (#6115)
* Expose deepspeed config parameters to init function due to instability in parameters

* See if tests can run on normal CI, without special tests

* Add changelog

* Update pytorch_lightning/plugins/training_type/deepspeed.py

Co-authored-by: Carlos Mocholí <[email protected]>

(cherry picked from commit 432e563)

Add missing config
1 parent 953c873 commit f0ad06a

File tree (4 files changed: 98 additions, 16 deletions)

CHANGELOG.md
pytorch_lightning/plugins/training_type/deepspeed.py
tests/plugins/test_deepspeed_plugin.py
tests/special_tests.sh

CHANGELOG.md

Lines changed: 3 additions & 0 deletions
@@ -192,6 +192,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Disabled batch transfer in DP mode ([#6093](https://github.com/PyTorchLightning/pytorch-lightning/pull/6093))


+- Expose DeepSpeed loss parameters to allow users to fix loss instability ([#6115](https://github.com/PyTorchLightning/pytorch-lightning/pull/6115))
+
+
 ## [1.2.0] - 2021-02-18

 ### Added

pytorch_lightning/plugins/training_type/deepspeed.py

Lines changed: 37 additions & 3 deletions
@@ -79,6 +79,11 @@ def __init__(
         num_nodes: int = 1,
         parallel_devices: Optional[List[torch.device]] = None,
         cluster_environment: Optional[ClusterEnvironment] = None,
+        loss_scale: float = 0,
+        initial_scale_power: int = 32,
+        loss_scale_window: int = 1000,
+        hysteresis: int = 2,
+        min_loss_scale: int = 1
     ) -> None:
         """
@@ -127,6 +132,18 @@ def __init__(

             logging_level: Set logging level for deepspeed. (Default: ``logging.WARN``)

+            loss_scale: Loss scaling value for FP16 training.
+                0.0 results in dynamic loss scaling, otherwise static (Default: 0)
+
+            initial_scale_power: Power of the initial dynamic loss scale value. Loss scale is computed
+                by ``2^initial_scale_power`` (Default: 32)
+
+            loss_scale_window: Window in which to raise/lower the dynamic FP16 loss scaling value (Default: 1000)
+
+            hysteresis: FP16 Delay shift in Dynamic Loss scaling (Default: 2)
+
+            min_loss_scale: The minimum FP16 dynamic loss scaling value (Default: 1)
+
         """
         if not _DEEPSPEED_AVAILABLE:
             raise MisconfigurationException(
@@ -154,6 +171,13 @@ def __init__(
         self._config_initialized = False
         deepspeed.utils.logging.logger.setLevel(logging_level)

+        # default FP16 parameters.
+        self.loss_scale = loss_scale
+        self.initial_scale_power = initial_scale_power
+        self.loss_scale_window = loss_scale_window
+        self.hysteresis = hysteresis
+        self.min_loss_scale = min_loss_scale
+
     def _load_config(self, config):
         if config is None and self.DEEPSPEED_ENV_VAR in os.environ:
             rank_zero_info(f"Loading DeepSpeed config from set {self.DEEPSPEED_ENV_VAR} environment variable")
@@ -299,9 +323,19 @@ def _format_precision_config(self):
         amp_level = self.lightning_module.trainer.accelerator_connector.amp_level
         precision = self.lightning_module.trainer.accelerator_connector.precision
         if precision == 16:
-            if "amp" not in self.config and amp_type == AMPType.NATIVE:
-                self.config["fp16"] = {"enabled": True}
-            elif "apex" not in self.config and amp_type == AMPType.APEX:
+            if "fp16" not in self.config and amp_type == AMPType.NATIVE:
+                # FP16 is a DeepSpeed standalone AMP implementation
+                rank_zero_info("Enabling DeepSpeed FP16.")
+                self.config["fp16"] = {
+                    "enabled": True,
+                    "loss_scale": self.loss_scale,
+                    "initial_scale_power": self.initial_scale_power,
+                    "loss_scale_window": self.loss_scale_window,
+                    "hysteresis": self.hysteresis,
+                    "min_loss_scale": self.min_loss_scale
+                }
+            elif "amp" not in self.config and amp_type == AMPType.APEX:
+                rank_zero_only("Enabling DeepSpeed APEX Implementation.")
             self.config["amp"] = {
                 "enabled": True,
                 "opt_level": amp_level,

tests/plugins/test_deepspeed_plugin.py

Lines changed: 58 additions & 10 deletions
@@ -34,6 +34,11 @@ def deepspeed_config():
     }


+@pytest.fixture
+def deepspeed_zero_config(deepspeed_config):
+    return {**deepspeed_config, 'zero_allow_untested_optimizer': True, 'zero_optimization': {'stage': 2}}
+
+
 @pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
 def test_deepspeed_plugin_string(tmpdir):
     """
@@ -165,9 +170,6 @@ def test_invalid_deepspeed_defaults_no_precision(tmpdir):

 @pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU machine")
 @pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
-@pytest.mark.skipif(
-    not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
-)
 def test_warn_deepspeed_override_backward(tmpdir):
     """
     Test to ensure that if the backward hook in the LightningModule is overridden, we throw a warning.
@@ -191,9 +193,6 @@ def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args

 @pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU machine")
 @pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
-@pytest.mark.skipif(
-    not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
-)
 def test_deepspeed_run_configure_optimizers(tmpdir):
     """
     Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation),
@@ -223,10 +222,7 @@ def on_train_start(self) -> None:

 @pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU machine")
 @pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
-@pytest.mark.skipif(
-    not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
-)
-def test_deepspeed_config(tmpdir, deepspeed_config):
+def test_deepspeed_config(tmpdir, deepspeed_zero_config):
     """
     Test to ensure deepspeed works correctly when passed a DeepSpeed config object including optimizers/schedulers
     and saves the model weights to load correctly.
@@ -255,6 +251,58 @@ def on_train_start(self) -> None:
     _assert_save_model_is_equal(model, tmpdir, trainer)


+@pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU machine")
+def test_deepspeed_custom_precision_params(tmpdir):
+    """
+    Ensure if we modify the FP16 parameters via the DeepSpeedPlugin, the deepspeed config contains these changes.
+    """
+
+    class TestModel(BoringModel):
+
+        def on_train_start(self) -> None:
+            assert self.trainer.training_type_plugin.config['fp16']['loss_scale'] == 10
+            assert self.trainer.training_type_plugin.config['fp16']['initial_scale_power'] == 10
+            assert self.trainer.training_type_plugin.config['fp16']['loss_scale_window'] == 10
+            assert self.trainer.training_type_plugin.config['fp16']['hysteresis'] == 10
+            assert self.trainer.training_type_plugin.config['fp16']['min_loss_scale'] == 10
+            raise SystemExit()
+
+    model = TestModel()
+    trainer = Trainer(
+        plugins=[
+            DeepSpeedPlugin(
+                loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10
+            )
+        ],
+        precision=16,
+        gpus=1
+    )
+    with pytest.raises(SystemExit):
+        trainer.fit(model)
+
+
+@pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU machine")
+def test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config):
+    """
+    Ensure if we use a config and turn off cpu_offload, that this is set to False within the config.
+    """
+
+    deepspeed_zero_config['zero_optimization']['cpu_offload'] = False
+
+    class TestModel(BoringModel):
+
+        def on_train_start(self) -> None:
+            assert self.trainer.training_type_plugin.config['zero_optimization']['cpu_offload'] is False
+            raise SystemExit()
+
+    model = TestModel()
+    trainer = Trainer(plugins=[DeepSpeedPlugin(config=deepspeed_zero_config)], precision=16, gpus=1)
+    with pytest.raises(SystemExit):
+        trainer.fit(model)
+
+
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="requires GPU machine")
 @pytest.mark.skipif(not _DEEPSPEED_AVAILABLE, reason="DeepSpeed not available.")
 @pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
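For reference, test_deepspeed_custom_precision_params above checks the fp16 section that _format_precision_config builds from these plugin attributes. Below is a sketch of the generated section using the constructor defaults, reconstructed from the diff rather than captured from a run; users who supply their own DeepSpeed config containing an fp16 key keep full control, since the plugin only fills this section in when it is absent.

# Shape of the fp16 section produced with the default constructor arguments.
fp16_section = {
    "enabled": True,
    "loss_scale": 0,  # 0 -> dynamic loss scaling
    "initial_scale_power": 32,  # dynamic scale starts at 2 ** 32
    "loss_scale_window": 1000,
    "hysteresis": 2,
    "min_loss_scale": 1,
}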

tests/special_tests.sh

Lines changed: 0 additions & 3 deletions
@@ -17,9 +17,6 @@ export PL_RUNNING_SPECIAL_TESTS=1
 DEFAULTS="-m coverage run --source pytorch_lightning -a -m pytest --verbose --capture=no"
 python ${DEFAULTS} tests/trainer/optimization/test_manual_optimization.py::test_step_with_optimizer_closure_with_different_frequencies_ddp
 python ${DEFAULTS} tests/models/test_sync_batchnorm.py::test_sync_batchnorm_ddp
-python ${DEFAULTS} tests/plugins/test_deepspeed_plugin.py::test_warn_deepspeed_override_backward
-python ${DEFAULTS} tests/plugins/test_deepspeed_plugin.py::test_deepspeed_run_configure_optimizers
-python ${DEFAULTS} tests/plugins/test_deepspeed_plugin.py::test_deepspeed_config
 python ${DEFAULTS} tests/plugins/test_deepspeed_plugin.py::test_deepspeed_multigpu
 python ${DEFAULTS} tests/plugins/test_rpc_plugin.py::test_rpc_function_calls_ddp
 python ${DEFAULTS} tests/plugins/test_rpc_sequential_plugin.py::test_rpc_sequential_plugin_manual
