Skip to content

Commit 3371d32

Browse files
authored
docstring changes in tuner (#6264)
* docstring changes in tuner * added full stop
1 parent 6788dba commit 3371d32

File tree

3 files changed

+21
-0
lines changed

3 files changed

+21
-0
lines changed

pytorch_lightning/tuner/auto_gpu_select.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,11 @@
1717

1818

1919
def pick_multiple_gpus(nb):
20+
'''
21+
Raises:
22+
MisconfigurationException:
23+
If ``gpus`` is set to 0, when ``auto_select_gpus=True``.
24+
'''
2025
if nb == 0:
2126
raise MisconfigurationException(
2227
r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
@@ -33,6 +38,11 @@ def pick_multiple_gpus(nb):
3338

3439

3540
def pick_single_gpu(exclude_gpus: list):
41+
'''
42+
Raises:
43+
RuntimeError:
44+
If you try to allocate a GPU, when no GPUs are available.
45+
'''
3646
for i in range(torch.cuda.device_count()):
3747
if i in exclude_gpus:
3848
continue

pytorch_lightning/tuner/batch_size_scaling.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,13 @@ def scale_batch_size(
7070
7171
**fit_kwargs: remaining arguments to be passed to .fit(), e.g., dataloader
7272
or datamodule.
73+
74+
Raises:
75+
MisconfigurationException:
76+
If field ``batch_arg_name`` is not found in ``model`` and ``model.hparams``, or
77+
if batch scaling feature is used with dataloaders passed directly to ``.fit()``.
78+
ValueError:
79+
If mode in method ``scale_batch_size`` is neither ``power`` nor ``binsearch``.
7380
"""
7481
if trainer.fast_dev_run:
7582
rank_zero_warn('Skipping batch size scaler since fast_dev_run is enabled.', UserWarning)

pytorch_lightning/tuner/lr_finder.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,10 @@ def lr_find(
106106
107107
update_attr: Whether to update the learning rate attribute or not.
108108
109+
Raises:
110+
MisconfigurationException:
111+
If learning rate/lr in ``model`` or ``model.hparams`` isn't overridden when ``auto_lr_find=True``, or
112+
if you are using ``more than one optimizer`` with learning rate finder.
109113
110114
Example::
111115

0 commit comments

Comments
 (0)