From d27ee8b5bfa2d29f110970d5b8ba90cf78ec3fff Mon Sep 17 00:00:00 2001
From: Akihiro Nitta
Date: Wed, 21 Oct 2020 22:00:39 +0900
Subject: [PATCH] docs: Add empty lines in docstring [ci skip] (#4232)

* Add empty lines in docstring for proper docs

* Remove Returns:

* Remove unnecessary Returns:

* Update pytorch_lightning/accelerators/ddp2_accelerator.py

Co-authored-by: Rohit Gupta

* fix returns

Co-authored-by: William Falcon
Co-authored-by: Rohit Gupta
---
 pytorch_lightning/accelerators/ddp2_accelerator.py |  5 +++--
 pytorch_lightning/accelerators/ddp_accelerator.py  |  1 +
 .../accelerators/ddp_cpu_slurm_accelerator.py      |  1 +
 .../accelerators/ddp_cpu_spawn_accelerator.py      |  2 +-
 .../ddp_cpu_torchelastic_accelerator.py            |  1 +
 .../accelerators/ddp_slurm_accelerator.py          |  1 +
 .../accelerators/ddp_spawn_accelerator.py          |  3 ---
 .../accelerators/ddp_torchelastic_accelerator.py   |  1 +
 .../metrics/functional/classification.py           | 13 +++++++------
 pytorch_lightning/utilities/xla_device_utils.py    |  4 ++++
 10 files changed, 20 insertions(+), 12 deletions(-)

diff --git a/pytorch_lightning/accelerators/ddp2_accelerator.py b/pytorch_lightning/accelerators/ddp2_accelerator.py
index f4d3eb4023..9fa52fedfd 100644
--- a/pytorch_lightning/accelerators/ddp2_accelerator.py
+++ b/pytorch_lightning/accelerators/ddp2_accelerator.py
@@ -122,11 +122,12 @@ class DDP2Accelerator(Accelerator):
         Entry point for ddp
 
         Args:
-            process_idx:
+            process_idx: current process rank
             mp_queue: multiprocessing queue
-            model:
+            model: pointer to current :class:`LightningModule`
 
         Returns:
+            Dict with evaluation results
 
         """
         # show progressbar only on progress_rank 0
diff --git a/pytorch_lightning/accelerators/ddp_accelerator.py b/pytorch_lightning/accelerators/ddp_accelerator.py
index 9439c7656a..60dcc93d59 100644
--- a/pytorch_lightning/accelerators/ddp_accelerator.py
+++ b/pytorch_lightning/accelerators/ddp_accelerator.py
@@ -212,6 +212,7 @@ class DDPAccelerator(Accelerator):
             model:
 
         Returns:
+            Dict with evaluation results
 
         """
         seed = os.environ.get("PL_GLOBAL_SEED")
diff --git a/pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py b/pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py
index f194c2c1e6..808a22aaf8 100644
--- a/pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py
+++ b/pytorch_lightning/accelerators/ddp_cpu_slurm_accelerator.py
@@ -111,6 +111,7 @@ class DDPCPUSLURMAccelerator(Accelerator):
             model:
 
         Returns:
+            Dict with evaluation results
 
         """
         # determine which process we are and world size
diff --git a/pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py b/pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py
index 8f897b01a8..a5beebb337 100644
--- a/pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py
+++ b/pytorch_lightning/accelerators/ddp_cpu_spawn_accelerator.py
@@ -73,11 +73,11 @@ class DDPCPUSpawnAccelerator(Accelerator):
     def ddp_train(self, process_idx, mp_queue, model):
         """
         Entry point for ddp
+
         Args:
             process_idx:
             mp_queue: multiprocessing queue
             model:
-        Returns:
         """
         # show progressbar only on progress_rank 0
         if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
diff --git a/pytorch_lightning/accelerators/ddp_cpu_torchelastic_accelerator.py b/pytorch_lightning/accelerators/ddp_cpu_torchelastic_accelerator.py
index 300512e3fc..e5da095bd5 100644
--- a/pytorch_lightning/accelerators/ddp_cpu_torchelastic_accelerator.py
+++ b/pytorch_lightning/accelerators/ddp_cpu_torchelastic_accelerator.py
@@ -110,6 +110,7 @@ class DDPCPUTorchElasticAccelerator(Accelerator):
             model:
 
         Returns:
+            Dict with evaluation results
 
         """
         # determine which process we are and world size
diff --git a/pytorch_lightning/accelerators/ddp_slurm_accelerator.py b/pytorch_lightning/accelerators/ddp_slurm_accelerator.py
index bc7920c364..7fa84f0c92 100644
--- a/pytorch_lightning/accelerators/ddp_slurm_accelerator.py
+++ b/pytorch_lightning/accelerators/ddp_slurm_accelerator.py
@@ -113,6 +113,7 @@ class DDPSLURMAccelerator(Accelerator):
             model:
 
         Returns:
+            Dict with evaluation results
 
         """
         seed = os.environ.get("PL_GLOBAL_SEED")
diff --git a/pytorch_lightning/accelerators/ddp_spawn_accelerator.py b/pytorch_lightning/accelerators/ddp_spawn_accelerator.py
index 272fcda932..f257b56dd4 100644
--- a/pytorch_lightning/accelerators/ddp_spawn_accelerator.py
+++ b/pytorch_lightning/accelerators/ddp_spawn_accelerator.py
@@ -81,9 +81,6 @@ class DDPSpawnAccelerator(Accelerator):
             process_idx:
             mp_queue: multiprocessing queue
             model:
-
-        Returns:
-
         """
         seed = os.environ.get("PL_GLOBAL_SEED")
         if seed is not None:
diff --git a/pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py b/pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py
index df7693f521..afe33e6a35 100644
--- a/pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py
+++ b/pytorch_lightning/accelerators/ddp_torchelastic_accelerator.py
@@ -113,6 +113,7 @@ class DDPTorchElasticAccelerator(Accelerator):
             model:
 
         Returns:
+            Dict with evaluation results
 
         """
         # determine which process we are and world size
diff --git a/pytorch_lightning/metrics/functional/classification.py b/pytorch_lightning/metrics/functional/classification.py
index 43dda00fc0..e87bfb7a00 100644
--- a/pytorch_lightning/metrics/functional/classification.py
+++ b/pytorch_lightning/metrics/functional/classification.py
@@ -85,13 +85,13 @@ def get_num_classes(
     """
     Calculates the number of classes for a given prediction and target tensor.
 
-        Args:
-            pred: predicted values
-            target: true labels
-            num_classes: number of classes if known
+    Args:
+        pred: predicted values
+        target: true labels
+        num_classes: number of classes if known
 
-        Return:
-            An integer that represents the number of classes.
+    Return:
+        An integer that represents the number of classes.
     """
     num_target_classes = int(target.max().detach().item() + 1)
     num_pred_classes = int(pred.max().detach().item() + 1)
@@ -272,6 +272,7 @@ def accuracy(
             - ``'none'``: returns calculated metric per class
         return_state: returns a internal state that can be ddp reduced
             before doing the final calculation
+
     Return:
         A Tensor with the accuracy score.
 
diff --git a/pytorch_lightning/utilities/xla_device_utils.py b/pytorch_lightning/utilities/xla_device_utils.py
index b4a479185d..5687992981 100644
--- a/pytorch_lightning/utilities/xla_device_utils.py
+++ b/pytorch_lightning/utilities/xla_device_utils.py
@@ -55,8 +55,10 @@ class XLADeviceUtils:
     def _fetch_xla_device_type(device: torch.device) -> str:
         """
         Returns XLA device type
+
         Args:
             device: (:class:`~torch.device`): Accepts a torch.device type with a XLA device format i.e xla:0
+
         Return:
             Returns a str of the device hardware type. i.e TPU
         """
@@ -67,6 +69,7 @@ class XLADeviceUtils:
     def _is_device_tpu() -> bool:
         """
         Check if device is TPU
+
         Return:
             A boolean value indicating if the xla device is a TPU device or not
         """
@@ -79,6 +82,7 @@ class XLADeviceUtils:
     def tpu_device_exists() -> bool:
         """
         Public method to check if TPU is available
+
         Return:
             A boolean value indicating if a TPU device exists on the system
         """
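For context on what the added blank lines buy ("for proper docs" in the commit message): these docstrings are rendered by Sphinx, and without a blank line separating the summary from the `Args:`/`Returns:` sections, the section headers tend to be folded into the preceding paragraph instead of rendering as distinct blocks. Below is a minimal sketch of the layout the patch enforces; the function and its body are hypothetical and only the docstring structure matters:

```python
def ddp_train(process_idx, mp_queue, model):
    """
    Entry point for ddp

    Args:
        process_idx: current process rank
        mp_queue: multiprocessing queue
        model: pointer to current :class:`LightningModule`

    Returns:
        Dict with evaluation results
    """
    # Hypothetical body. The blank lines above — after the summary and
    # before each section header — are what let Sphinx render ``Args:``
    # and ``Returns:`` as separate sections in the generated docs.
    results = {"val_loss": 0.0}
    return results
```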