From 774d9be3577ce1f05d895134d5ce70c16877d08b Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Mon, 16 Mar 2020 00:46:39 +0100
Subject: [PATCH] Fix docs - missing Trainer (#1159)

* drop pandas

* formatting
---
 docs/source/early_stopping.rst       |  4 ++--
 docs/source/experiment_logging.rst   | 12 ++++++------
 docs/source/experiment_reporting.rst |  2 +-
 docs/source/fast_training.rst        |  2 +-
 docs/source/introduction_guide.rst   |  2 +-
 docs/source/training_tricks.rst      |  4 ++--
 pytorch_lightning/core/lightning.py  |  4 ++--
 pytorch_lightning/loggers/trains.py  |  7 +++----
 8 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/docs/source/early_stopping.rst b/docs/source/early_stopping.rst
index ce288d33bd..f729cfe12c 100644
--- a/docs/source/early_stopping.rst
+++ b/docs/source/early_stopping.rst
@@ -11,7 +11,7 @@ Enable Early Stopping
 ---------------------
 There are two ways to enable early stopping.
 
-.. note:: See: :ref:`trainer`
+.. seealso:: :ref:`trainer`
 
 .. code-block:: python
 
@@ -35,4 +35,4 @@ To disable early stopping pass ``False`` to the `early_stop_callback`.
 Note that ``None`` will not disable early stopping but will lead to the
 default behaviour.
 
-.. note:: See: :ref:`trainer`
+.. seealso:: :ref:`trainer`
diff --git a/docs/source/experiment_logging.rst b/docs/source/experiment_logging.rst
index b3fe825aed..a05329124c 100644
--- a/docs/source/experiment_logging.rst
+++ b/docs/source/experiment_logging.rst
@@ -7,7 +7,7 @@ Comet.ml
 `Comet.ml `_ is a third-party logger.
 To use CometLogger as your logger do the following.
 
-.. note:: See: :ref:`comet` docs.
+.. seealso:: :ref:`comet` docs.
 
 .. code-block:: python
 
@@ -38,7 +38,7 @@ Neptune.ai
 `Neptune.ai `_ is a third-party logger.
 To use Neptune.ai as your logger do the following.
 
-.. note:: See: :ref:`neptune` docs.
+.. seealso:: :ref:`neptune` docs.
 
 .. code-block:: python
 
@@ -68,7 +68,7 @@ allegro.ai TRAINS
 `allegro.ai `_ is a third-party logger.
 To use TRAINS as your logger do the following.
 
-.. note:: See: :ref:`trains` docs.
+.. seealso:: :ref:`trains` docs.
 
 .. code-block:: python
 
@@ -95,7 +95,7 @@ Tensorboard
 
 To use `Tensorboard `_ as your logger do the following.
 
-.. note:: See: TensorBoardLogger :ref:`tf-logger`
+.. seealso:: TensorBoardLogger :ref:`tf-logger`
 
 .. code-block:: python
 
@@ -121,7 +121,7 @@ Test Tube
 `Test Tube `_ is a tensorboard logger but with nicer file structure.
 To use TestTube as your logger do the following.
 
-.. note:: See: TestTube :ref:`testTube`
+.. seealso:: TestTube :ref:`testTube`
 
 .. code-block:: python
 
@@ -146,7 +146,7 @@ Wandb
 `Wandb `_ is a third-party logger.
 To use Wandb as your logger do the following.
 
-.. note:: See: :ref:`wandb` docs
+.. seealso:: :ref:`wandb` docs
 
 .. code-block:: python
 
diff --git a/docs/source/experiment_reporting.rst b/docs/source/experiment_reporting.rst
index aa5642ab20..0063a92694 100644
--- a/docs/source/experiment_reporting.rst
+++ b/docs/source/experiment_reporting.rst
@@ -22,7 +22,7 @@ Control log writing frequency
 Writing to a logger can be expensive. In Lightning you can set the interval at which you
 want to log using this trainer flag.
 
-.. note:: See: :ref:`trainer`
+.. seealso:: :ref:`trainer`
 
 .. code-block:: python
 
diff --git a/docs/source/fast_training.rst b/docs/source/fast_training.rst
index 5e7d1c599d..4e5c189d3a 100644
--- a/docs/source/fast_training.rst
+++ b/docs/source/fast_training.rst
@@ -16,7 +16,7 @@ Force training for min or max epochs
 -------------------------------------
 It can be useful to force training for a minimum number of epochs or limit to a max number.
 
-.. note:: See: :ref:`trainer`
+.. seealso:: :ref:`trainer`
 
 .. code-block:: python
 
diff --git a/docs/source/introduction_guide.rst b/docs/source/introduction_guide.rst
index b048c07175..2071f67337 100644
--- a/docs/source/introduction_guide.rst
+++ b/docs/source/introduction_guide.rst
@@ -472,7 +472,7 @@ First, change the runtime to TPU (and reinstall lightning).
 
 Next, install the required xla library (adds support for PyTorch on TPUs)
 
-.. code-block:: python
+.. code-block::
 
     import collections
     from datetime import datetime, timedelta
diff --git a/docs/source/training_tricks.rst b/docs/source/training_tricks.rst
index 416fcdb8a8..2904c11ad1 100644
--- a/docs/source/training_tricks.rst
+++ b/docs/source/training_tricks.rst
@@ -7,7 +7,7 @@ Accumulate gradients
 Accumulated gradients runs K small batches of size N before doing a backwards pass.
 The effect is a large effective batch size of size KxN.
 
-.. note:: See: :ref:`trainer`
+.. seealso:: :ref:`trainer`
 
 .. code-block:: python
 
@@ -20,7 +20,7 @@ Gradient Clipping
 Gradient clipping may be enabled to avoid exploding gradients. Specifically, this will `clip
 the gradient norm `_ computed over all model parameters together.
 
-.. note:: See: :ref:`trainer`
+.. seealso:: :ref:`trainer`
 
 .. code-block:: python
 
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index b531c1542e..fe8a1f5c39 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -8,8 +8,8 @@ from argparse import Namespace
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
 import torch
-import torch.distributed as dist
 from torch import Tensor
+from torch.distributed import init_process_group
 from torch.nn.parallel import DistributedDataParallel
 from torch.optim import Adam
 from torch.optim.optimizer import Optimizer
@@ -859,7 +859,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):
         root_node = self.trainer.resolve_root_node_address(root_node)
         os.environ['MASTER_ADDR'] = root_node
 
-        dist.init_process_group('nccl', rank=proc_rank, world_size=world_size)
+        init_process_group('nccl', rank=proc_rank, world_size=world_size)
 
     def configure_apex(
         self,
diff --git a/pytorch_lightning/loggers/trains.py b/pytorch_lightning/loggers/trains.py
index a56c6c1267..7d2bd01e35 100644
--- a/pytorch_lightning/loggers/trains.py
+++ b/pytorch_lightning/loggers/trains.py
@@ -29,9 +29,7 @@ from argparse import Namespace
 from pathlib import Path
 from typing import Any, Dict, Optional, Union
 
-import PIL
 import numpy as np
-import pandas as pd
 import torch
 
 try:
@@ -79,6 +77,7 @@ class TrainsLogger(LightningLoggerBase):
 
     Example:
         .. code-block:: python
+
             self.logger.experiment.some_trains_function()
 
     """
@@ -180,7 +179,7 @@ class TrainsLogger(LightningLoggerBase):
     @rank_zero_only
     def log_image(
             self, title: str, series: str,
-            image: Union[str, np.ndarray, PIL.Image.Image, torch.Tensor],
+            image: Union[str, np.ndarray, 'PIL.Image', torch.Tensor],
             step: Optional[int] = None) -> None:
         """Log Debug image in TRAINS experiment
 
@@ -217,7 +216,7 @@ class TrainsLogger(LightningLoggerBase):
     @rank_zero_only
     def log_artifact(
            self, name: str,
-            artifact: Union[str, Path, Dict[str, Any], pd.DataFrame, np.ndarray, PIL.Image.Image],
+            artifact: Union[str, Path, Dict[str, Any], 'pandas.DataFrame', 'numpy.ndarray', 'PIL.Image.Image'],
             metadata: Optional[Dict[str, Any]] = None,
             delete_after_upload: bool = False) -> None:
         """Save an artifact (file/object) in TRAINS experiment storage.
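
The trains.py hunks above swap hard imports of optional packages (PIL, pandas) for quoted type annotations, so importing the logger no longer requires those packages. The sketch below is not part of the patch; it only illustrates that pattern under assumed names (``log_artifact_sketch`` is a made-up function), with the optional import deferred until it is actually needed.

    from pathlib import Path
    from typing import Any, Dict, Union

    import numpy as np


    def log_artifact_sketch(
            artifact: Union[str, Path, Dict[str, Any], 'pandas.DataFrame', np.ndarray]) -> None:
        """Accept a pandas DataFrame without importing pandas at module load.

        The quoted annotation is stored as a plain string and never evaluated,
        so this module imports even when pandas is not installed.
        """
        # Import lazily, only when the optional type could actually be used.
        try:
            import pandas as pd
        except ImportError:
            pd = None

        if pd is not None and isinstance(artifact, pd.DataFrame):
            print(f"DataFrame artifact with {len(artifact)} rows")
        else:
            print(f"artifact of type {type(artifact).__name__}")

Called with a plain dict or ndarray, such a function still works in an environment where pandas was never installed; only users who actually pass a DataFrame need the extra dependency.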