docstring changes in tuner (#6264)
* docstring changes in tuner
* added full stop
This commit is contained in:
parent 6788dbabff
commit 3371d32664
@@ -17,6 +17,11 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException


 def pick_multiple_gpus(nb):
+    '''
+    Raises:
+        MisconfigurationException:
+            If ``gpus`` is set to 0, when ``auto_select_gpus=True``.
+    '''
     if nb == 0:
         raise MisconfigurationException(
             r"auto_select_gpus=True, gpus=0 is not a valid configuration.\
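The new ``Raises:`` section for ``pick_multiple_gpus`` documents an early guard. A minimal self-contained sketch of that behaviour, using a local stand-in for Lightning's ``MisconfigurationException`` (the rest of the function is elided):

    class MisconfigurationException(Exception):
        """Local stand-in for pytorch_lightning.utilities.exceptions.MisconfigurationException."""


    def pick_multiple_gpus(nb):
        # Asking auto-selection to pick zero devices is contradictory, so fail fast.
        if nb == 0:
            raise MisconfigurationException(
                "auto_select_gpus=True, gpus=0 is not a valid configuration. "
                "Please select a valid number of GPU resources."
            )
        ...  # the real function goes on to pick `nb` free devices


    try:
        pick_multiple_gpus(0)
    except MisconfigurationException as err:
        print("rejected:", err)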
@@ -33,6 +38,11 @@ def pick_multiple_gpus(nb):


 def pick_single_gpu(exclude_gpus: list):
+    '''
+    Raises:
+        RuntimeError:
+            If you try to allocate a GPU, when no GPUs are available.
+    '''
     for i in range(torch.cuda.device_count()):
         if i in exclude_gpus:
             continue
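``pick_single_gpu`` now documents its ``RuntimeError``. A hedged sketch of the allocation loop it describes; the probe-by-allocating strategy is an assumption about the implementation, not a verbatim copy of it:

    import torch


    def pick_single_gpu(exclude_gpus: list) -> int:
        for i in range(torch.cuda.device_count()):
            if i in exclude_gpus:
                continue
            try:
                # Probe the device with a tiny allocation to confirm it is usable.
                torch.zeros(1, device=f"cuda:{i}")
            except RuntimeError:
                continue
            return i
        # Either no CUDA devices exist or all of them were excluded/unusable.
        raise RuntimeError("No GPUs available.")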
@@ -70,6 +70,13 @@ def scale_batch_size(

         **fit_kwargs: remaining arguments to be passed to .fit(), e.g., dataloader
             or datamodule.
+
+    Raises:
+        MisconfigurationException:
+            If field ``batch_arg_name`` is not found in ``model`` and ``model.hparams``, or
+            if batch scaling feature is used with dataloaders passed directly to ``.fit()``.
+        ValueError:
+            If mode in method ``scale_batch_size`` is neither ``power`` nor ``binsearch``.
     """
     if trainer.fast_dev_run:
         rank_zero_warn('Skipping batch size scaler since fast_dev_run is enabled.', UserWarning)
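For context, a hedged usage sketch of the batch-size scaler whose docstring is extended here, assuming the Lightning 1.x ``Trainer(auto_scale_batch_size=...)`` / ``trainer.tune(...)`` entry point; ``MyModel`` and ``my_datamodule`` are placeholders, not part of this commit:

    from pytorch_lightning import Trainer

    model = MyModel(batch_size=2)                 # must expose `batch_size` (or `hparams.batch_size`)
    trainer = Trainer(auto_scale_batch_size="binsearch")  # only "power" or "binsearch" are valid modes
    trainer.tune(model, datamodule=my_datamodule)  # dataloaders passed directly would hit the documented MisconfigurationException
    print(model.batch_size)                       # updated in place to the largest size that fit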
@@ -106,6 +106,10 @@ def lr_find(

         update_attr: Whether to update the learning rate attribute or not.

+    Raises:
+        MisconfigurationException:
+            If learning rate/lr in ``model`` or ``model.hparams`` isn't overriden when ``auto_lr_find=True``, or
+            if you are using `more than one optimizer` with learning rate finder.

     Example::
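Similarly, a hedged usage sketch for the learning-rate finder documented above, assuming the Lightning 1.x ``auto_lr_find`` / ``trainer.tune(...)`` workflow; ``MyModel`` is a placeholder, not part of this commit:

    from pytorch_lightning import Trainer

    model = MyModel(lr=1e-3)             # defines `lr`; otherwise the documented MisconfigurationException is raised
    trainer = Trainer(auto_lr_find=True)
    trainer.tune(model)                  # runs lr_find (single optimizer only) and writes the suggestion back to model.lr
    print(model.lr)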