docs: Add empty lines in docstring [ci skip] (#4232)

* Add empty lines in docstring for proper docs

* Remove Returns:

* Remove unnecessary Returns:

* Update pytorch_lightning/accelerators/ddp2_accelerator.py

Co-authored-by: Rohit Gupta <rohitgr1998@gmail.com>

* fix returns

Co-authored-by: William Falcon <waf2107@columbia.edu>
Co-authored-by: Rohit Gupta <rohitgr1998@gmail.com>
Authored by Akihiro Nitta on 2020-10-21 22:00:39 +09:00; committed by GitHub
parent e0f9799dbf
commit d27ee8b5bf
10 changed files with 20 additions and 12 deletions
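
For context on why these edits matter (illustration only, not part of the commit): with Google-style docstrings, Sphinx's napoleon extension expects a blank line between the summary and the Args:/Returns: sections; without it, the section headers tend to end up as plain text inside the summary paragraph of the generated docs. A minimal sketch with a hypothetical function:

# Hypothetical example, not from the PyTorch Lightning codebase.
# Without a blank line before "Args:", napoleon may fold the section
# header into the summary paragraph when the docs are built.
def scale(value, factor):
    """
    Multiplies a value by a factor.
    Args:
        value: number to scale
        factor: multiplier
    """
    return value * factor

The hunks below add exactly those missing blank lines (and drop empty Returns: sections) in the accelerator, metrics and XLA utility docstrings.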


@@ -122,11 +122,12 @@ class DDP2Accelerator(Accelerator):
        Entry point for ddp
        Args:
-            process_idx:
+            process_idx: current process rank
            mp_queue: multiprocessing queue
-            model:
+            model: pointer to current :class:`LightningModule`
        Returns:
            Dict with evaluation results
        """
        # show progressbar only on progress_rank 0
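
Assembled from the lines added in this hunk (blank-line placement assumed from the commit's intent), the corrected ddp_train docstring for DDP2Accelerator reads roughly:

def ddp_train(self, process_idx, mp_queue, model):
    """
    Entry point for ddp

    Args:
        process_idx: current process rank
        mp_queue: multiprocessing queue
        model: pointer to current :class:`LightningModule`

    Returns:
        Dict with evaluation results

    """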


@@ -212,6 +212,7 @@ class DDPAccelerator(Accelerator):
            model:
        Returns:
            Dict with evaluation results
        """
        seed = os.environ.get("PL_GLOBAL_SEED")


@@ -111,6 +111,7 @@ class DDPCPUSLURMAccelerator(Accelerator):
            model:
        Returns:
            Dict with evaluation results
        """
        # determine which process we are and world size


@@ -73,11 +73,11 @@ class DDPCPUSpawnAccelerator(Accelerator):
    def ddp_train(self, process_idx, mp_queue, model):
        """
        Entry point for ddp
        Args:
            process_idx:
            mp_queue: multiprocessing queue
            model:
-        Returns:
        """
        # show progressbar only on progress_rank 0
        if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:


@@ -110,6 +110,7 @@ class DDPCPUTorchElasticAccelerator(Accelerator):
            model:
        Returns:
            Dict with evaluation results
        """
        # determine which process we are and world size


@@ -113,6 +113,7 @@ class DDPSLURMAccelerator(Accelerator):
            model:
        Returns:
            Dict with evaluation results
        """
        seed = os.environ.get("PL_GLOBAL_SEED")


@@ -81,9 +81,6 @@ class DDPSpawnAccelerator(Accelerator):
            process_idx:
            mp_queue: multiprocessing queue
            model:
-        Returns:
        """
        seed = os.environ.get("PL_GLOBAL_SEED")
        if seed is not None:


@@ -113,6 +113,7 @@ class DDPTorchElasticAccelerator(Accelerator):
            model:
        Returns:
            Dict with evaluation results
        """
        # determine which process we are and world size


@@ -85,13 +85,13 @@ def get_num_classes(
    """
    Calculates the number of classes for a given prediction and target tensor.
-    Args:
-        pred: predicted values
-        target: true labels
-        num_classes: number of classes if known
+    Args:
+        pred: predicted values
+        target: true labels
+        num_classes: number of classes if known
-    Return:
-        An integer that represents the number of classes.
+    Return:
+        An integer that represents the number of classes.
    """
    num_target_classes = int(target.max().detach().item() + 1)
    num_pred_classes = int(pred.max().detach().item() + 1)
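
For reference, a small usage sketch of get_num_classes (the import path is assumed for this release): when num_classes is not given, the count is inferred from the largest class index seen in either tensor.

import torch
from pytorch_lightning.metrics.functional.classification import get_num_classes  # assumed path

pred = torch.tensor([0, 2, 1, 3])
target = torch.tensor([0, 1, 3, 2])
n = get_num_classes(pred, target)  # 4, since the largest index is 3 (classes 0..3)
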
@@ -272,6 +272,7 @@ def accuracy(
            - ``'none'``: returns calculated metric per class
        return_state: returns a internal state that can be ddp reduced
            before doing the final calculation
+
    Return:
        A Tensor with the accuracy score.
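
And a usage sketch for the functional accuracy metric documented above (import path and the default micro class reduction are assumed for this release):

import torch
from pytorch_lightning.metrics.functional import accuracy  # assumed path

pred = torch.tensor([0, 1, 2, 2])
target = torch.tensor([0, 1, 2, 1])
acc = accuracy(pred, target)  # expected ~0.75, since 3 of 4 predictions match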


@@ -55,8 +55,10 @@ class XLADeviceUtils:
    def _fetch_xla_device_type(device: torch.device) -> str:
        """
        Returns XLA device type
+
        Args:
            device: (:class:`~torch.device`): Accepts a torch.device type with a XLA device format i.e xla:0
+
        Return:
            Returns a str of the device hardware type. i.e TPU
        """
@@ -67,6 +69,7 @@ class XLADeviceUtils:
    def _is_device_tpu() -> bool:
        """
        Check if device is TPU
+
        Return:
            A boolean value indicating if the xla device is a TPU device or not
        """
@@ -79,6 +82,7 @@ class XLADeviceUtils:
    def tpu_device_exists() -> bool:
        """
        Public method to check if TPU is available
+
        Return:
            A boolean value indicating if a TPU device exists on the system