diff --git a/pytorch_lightning/tuner/auto_gpu_select.py b/pytorch_lightning/tuner/auto_gpu_select.py index fd2ba4a1f3..3bd1ce52b5 100644 --- a/pytorch_lightning/tuner/auto_gpu_select.py +++ b/pytorch_lightning/tuner/auto_gpu_select.py @@ -17,6 +17,11 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException def pick_multiple_gpus(nb): + """ + Raises: + MisconfigurationException: + If ``gpus`` is set to 0 when ``auto_select_gpus=True``. + """ if nb == 0: raise MisconfigurationException( r"auto_select_gpus=True, gpus=0 is not a valid configuration.\ @@ -33,6 +38,11 @@ def pick_multiple_gpus(nb): def pick_single_gpu(exclude_gpus: list): + """ + Raises: + RuntimeError: + If you try to allocate a GPU when no GPUs are available. + """ for i in range(torch.cuda.device_count()): if i in exclude_gpus: continue diff --git a/pytorch_lightning/tuner/batch_size_scaling.py b/pytorch_lightning/tuner/batch_size_scaling.py index c29cffc426..d6090a8e43 100644 --- a/pytorch_lightning/tuner/batch_size_scaling.py +++ b/pytorch_lightning/tuner/batch_size_scaling.py @@ -70,6 +70,13 @@ def scale_batch_size( **fit_kwargs: remaining arguments to be passed to .fit(), e.g., dataloader or datamodule. + + Raises: + MisconfigurationException: + If field ``batch_arg_name`` is not found in ``model`` or ``model.hparams``, or + if the batch scaling feature is used with dataloaders passed directly to ``.fit()``. + ValueError: + If ``mode`` in method ``scale_batch_size`` is neither ``power`` nor ``binsearch``. """ if trainer.fast_dev_run: rank_zero_warn('Skipping batch size scaler since fast_dev_run is enabled.', UserWarning) diff --git a/pytorch_lightning/tuner/lr_finder.py b/pytorch_lightning/tuner/lr_finder.py index cf29799a05..1be9f5a616 100644 --- a/pytorch_lightning/tuner/lr_finder.py +++ b/pytorch_lightning/tuner/lr_finder.py @@ -106,6 +106,10 @@ def lr_find( update_attr: Whether to update the learning rate attribute or not. 
+ Raises: + MisconfigurationException: + If learning rate/lr in ``model`` or ``model.hparams`` isn't overridden when ``auto_lr_find=True``, or + if you are using more than one optimizer with the learning rate finder. Example::