diff --git a/docs/source/bolts.rst b/docs/source/bolts.rst
new file mode 100644
index 0000000000..02e7e39bc9
--- /dev/null
+++ b/docs/source/bolts.rst
@@ -0,0 +1,89 @@
+Bolts
+=====
+`PyTorch Lightning Bolts <https://pytorch-lightning-bolts.readthedocs.io/en/latest/>`_ is our official collection
+of prebuilt models across many research domains.
+
+.. code-block:: bash
+
+ pip install pytorch-lightning-bolts
+
+In bolts we have:
+
+- A collection of pretrained state-of-the-art models.
+- A collection of models designed to bootstrap your research.
+- A collection of callbacks, transforms and full datasets (see the datamodule sketch below).
+- All models work on CPUs, TPUs, GPUs and with 16-bit precision.
+
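+For example, the bundled datamodules ship with ready-made dataloaders. A minimal sketch (the constructor arguments here are assumptions, check the bolts documentation for the exact signatures):
+
+.. code-block:: python
+
+    from pl_bolts.datamodules import CIFAR10DataModule
+
+    # sketch only: `data_dir` is an assumed argument name
+    dm = CIFAR10DataModule(data_dir='.')
+    dm.prepare_data()                      # downloads CIFAR10 if it is not already on disk
+    train_loader = dm.train_dataloader()   # a regular PyTorch DataLoader
+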
+-----------------
+
+Quality control
+---------------
+Bolts are built and contributed by the Lightning community.
+The Lightning team guarantees that contributions are:
+
+- Rigorously Tested (CPUs, GPUs, TPUs)
+- Rigorously Documented
+- Standardized via PyTorch Lightning
+- Optimized for speed
+- Checked for correctness
+
+---------
+
+Example 1: Pretrained, prebuilt models
+--------------------------------------
+
+.. code-block:: python
+
+ from pl_bolts.models import VAE, GPT2, ImageGPT, PixelCNN
+ from pl_bolts.models.self_supervised import AMDIM, CPCV2, SimCLR, MocoV2
+ from pl_bolts.models import LinearRegression, LogisticRegression
+ from pl_bolts.models.gans import GAN
+ from pl_bolts.callbacks import PrintTableMetricsCallback
+ from pl_bolts.datamodules import FashionMNISTDataModule, CIFAR10DataModule, ImagenetDataModule
+
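+Since every bolt is a LightningModule, the standard Lightning checkpoint API applies. For instance, a pretrained model could be restored and frozen for feature extraction (a sketch; the checkpoint path below is a placeholder):
+
+.. code-block:: python
+
+    from pl_bolts.models.self_supervised import SimCLR
+
+    # placeholder path: bolts publishes pretrained weights for several of these models
+    weight_path = 'path/to/simclr.ckpt'
+    simclr = SimCLR.load_from_checkpoint(weight_path)
+    simclr.freeze()  # use the pretrained encoder as a frozen feature extractor
+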
+------------
+
+Example 2: Extend for faster research
+-------------------------------------
+Bolts are contributed with benchmarks and continuous-integration tests. This means
+you can trust the implementations and use them to bootstrap your research much faster.
+
+.. code-block:: python
+
+ from pl_bolts.models import ImageGPT
+    from pl_bolts.models.self_supervised import SimCLR
+
+ class VideoGPT(ImageGPT):
+
+ def training_step(self, batch, batch_idx):
+ x, y = batch
+ x = _shape_input(x)
+
+ logits = self.gpt(x)
+ simclr_features = self.simclr(x)
+
+ # -----------------
+ # do something new with GPT logits + simclr_features
+ # -----------------
+
+ loss = self.criterion(logits.view(-1, logits.size(-1)), x.view(-1).long())
+
+ logs = {"loss": loss}
+ return {"loss": loss, "log": logs}
+
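+The subclass then trains like any other LightningModule (a sketch; VideoGPT inherits ImageGPT's constructor, so any required init arguments are assumed to have defaults here):
+
+.. code-block:: python
+
+    import pytorch_lightning as pl
+
+    model = VideoGPT()  # constructor arguments are inherited from ImageGPT; defaults assumed
+    trainer = pl.Trainer(gpus=1, max_epochs=5)
+    trainer.fit(model)
+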
+----------
+
+Example 3: Callbacks
+--------------------
+We also have a collection of callbacks.
+
+.. code-block:: python
+
+ from pl_bolts.callbacks import PrintTableMetricsCallback
+ import pytorch_lightning as pl
+
+ trainer = pl.Trainer(callbacks=[PrintTableMetricsCallback()])
+
+ # loss│train_loss│val_loss│epoch
+ # ──────────────────────────────
+ # 2.2541470527648926│2.2541470527648926│2.2158432006835938│0
diff --git a/docs/source/callbacks.rst b/docs/source/callbacks.rst
index f9fcecf880..57f7b8a9a5 100644
--- a/docs/source/callbacks.rst
+++ b/docs/source/callbacks.rst
@@ -49,14 +49,14 @@ We successfully extended functionality without polluting our super clean
----------------
Best Practices
-==============
+--------------
+The following are best practices when using or designing callbacks (a short sketch follows the list).
-1. Callbacks should be isolated in their functionality. Your callback should not rely on the
-behavior of other callbacks in order to work properly.
-2. Do not manually call methods from the callback. The callbacks are designed to be
-invoked at specific times during training. Directly calling methods (eg. `on_validation_end`)
-is strongly discouraged.
-3. Whenever possible, your callbacks should not depend on the order in which they are executed.
+1. Callbacks should be isolated in their functionality.
+2. Your callback should not rely on the behavior of other callbacks in order to work properly.
+3. Do not manually call methods from the callback.
+4. Directly calling methods (e.g. `on_validation_end`) is strongly discouraged.
+5. Whenever possible, your callbacks should not depend on the order in which they are executed.
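+
+For example, a callback that follows these guidelines keeps its own state and never calls into other callbacks. A minimal sketch (the `EpochTimer` name is hypothetical):
+
+.. code-block:: python
+
+    import time
+
+    from pytorch_lightning.callbacks import Callback
+
+
+    class EpochTimer(Callback):
+        """Isolated: owns its own state and does not depend on other callbacks."""
+
+        def on_epoch_start(self, trainer, pl_module):
+            self._epoch_start = time.time()
+
+        def on_epoch_end(self, trainer, pl_module):
+            duration = time.time() - self._epoch_start
+            print(f'epoch {trainer.current_epoch} took {duration:.1f}s')
+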
---------
diff --git a/docs/source/conf.py b/docs/source/conf.py
index c6a0638281..9c901a1c4a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -139,6 +139,7 @@ exclude_patterns = [
'api/pytorch_lightning.rst',
'api/pl_examples.*',
'api/modules.rst',
+ 'PULL_REQUEST_TEMPLATE.md',
# deprecated/renamed:
'api/pytorch_lightning.logging.*', # TODO: remove in v0.9.0
diff --git a/docs/source/hooks.rst b/docs/source/hooks.rst
index 066e1c153b..91dc275229 100644
--- a/docs/source/hooks.rst
+++ b/docs/source/hooks.rst
@@ -20,6 +20,8 @@ Hooks lifecycle
Training set-up
^^^^^^^^^^^^^^^
+- :meth:`~pytorch_lightning.core.lightning.LightningModule.prepare_data`
+- :meth:`~pytorch_lightning.core.lightning.LightningModule.setup`
- :meth:`~pytorch_lightning.core.lightning.LightningModule.init_ddp_connection`
- :meth:`~pytorch_lightning.trainer.optimizers.TrainerOptimizersMixin.init_optimizers`
- :meth:`~pytorch_lightning.core.lightning.LightningModule.configure_apex`
@@ -30,6 +32,8 @@ Training set-up
- :meth:`~pytorch_lightning.core.lightning.LightningModule.summarize`
- :meth:`~pytorch_lightning.trainer.training_io.TrainerIOMixin.restore_weights`
+.. warning:: `prepare_data` is only called from global_rank=0. Don't assign state in it (e.g. `self.something = ...`); use `setup` for that.
+
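+A minimal sketch of the intended `prepare_data` / `setup` split, assuming torchvision's MNIST as the dataset:
+
+.. code-block:: python
+
+    import os
+
+    from torch.utils.data import random_split
+    from torchvision.datasets import MNIST
+
+    from pytorch_lightning import LightningModule
+
+
+    class MyModule(LightningModule):
+
+        def prepare_data(self):
+            # called on global_rank=0 only: download to disk, do not assign state here
+            MNIST(os.getcwd(), train=True, download=True)
+
+        def setup(self, stage):
+            # called on every process: safe to assign state (splits, datasets, ...)
+            full = MNIST(os.getcwd(), train=True, download=False)
+            self.train_set, self.val_set = random_split(full, [55000, 5000])
+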
----------
Training loop
diff --git a/docs/source/index.rst b/docs/source/index.rst
index baa5d3180a..9c927c5b28 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -27,6 +27,13 @@ PyTorch Lightning Documentation
hooks
trainer
+.. toctree::
+ :maxdepth: 1
+ :name: Bolts
+ :caption: Bolts
+
+ bolts
+
.. toctree::
:maxdepth: 1
:name: Community Examples
@@ -35,7 +42,6 @@ PyTorch Lightning Documentation
Contextual Emotion Detection (DoubleDistilBert)
Cotatron: Transcription-Guided Speech Encoder
FasterRCNN object detection + Hydra
- Generative Adversarial Network
Hyperparameter optimization with Optuna
Image Inpainting using Partial Convolutions
MNIST on TPU
@@ -100,7 +106,6 @@ PyTorch Lightning Documentation
CODE_OF_CONDUCT.md
CONTRIBUTING.md
BECOMING_A_CORE_CONTRIBUTOR.md
- PULL_REQUEST_TEMPLATE.md
governance.md
Indices and tables
diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py
index 8382c76203..7ee0576188 100644
--- a/pytorch_lightning/core/lightning.py
+++ b/pytorch_lightning/core/lightning.py
@@ -1337,11 +1337,19 @@ class LightningModule(ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, Mod
The dataloader you return will not be called every epoch unless you set
:paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.
- It's recommended that all data downloads and preparation happen in :meth:`prepare_data`.
+ For data processing use the following pattern:
+
+ - download in :meth:`prepare_data`
+ - process and split in :meth:`setup`
+
+ However, the above are only necessary for distributed processing.
+
+ .. warning:: do not assign state in prepare_data
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
+ - :meth:`setup`
- :meth:`train_dataloader`
Note:
@@ -1383,11 +1391,20 @@ class LightningModule(ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, Mod
The dataloader you return will not be called every epoch unless you set
:paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.
- It's recommended that all data downloads and preparation happen in :meth:`prepare_data`.
+ For data processing use the following pattern:
+
+ - download in :meth:`prepare_data`
+ - process and split in :meth:`setup`
+
+ However, the above are only necessary for distributed processing.
+
+ .. warning:: do not assign state in prepare_data
+
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
+ - :meth:`setup`
- :meth:`train_dataloader`
- :meth:`val_dataloader`
- :meth:`test_dataloader`
diff --git a/pytorch_lightning/trainer/evaluation_loop.py b/pytorch_lightning/trainer/evaluation_loop.py
index 7f37edc04b..02d6540af2 100644
--- a/pytorch_lightning/trainer/evaluation_loop.py
+++ b/pytorch_lightning/trainer/evaluation_loop.py
@@ -341,9 +341,10 @@ class TrainerEvaluationLoopMixin(ABC):
elif self.is_overridden('validation_epoch_end', model=model):
eval_results = model.validation_epoch_end(outputs)
- # aggregate ddp stats across
- if self.use_ddp or self.use_ddp2:
- self.reduce_eval_ddp(eval_results)
+ # aggregate ddp stats across
+ has_content = eval_results is not None and len(eval_results) > 0
+ if has_content and (self.use_ddp or self.use_ddp2):
+ self.reduce_eval_ddp(eval_results)
# enable train mode again
model.train()
@@ -406,23 +407,26 @@ class TrainerEvaluationLoopMixin(ABC):
# run evaluation
eval_results = self._evaluate(self.model, dataloaders, max_batches, test_mode)
- _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(eval_results)
- # add metrics to prog bar
- self.add_progress_bar_metrics(prog_bar_metrics)
+        # allow the evaluation loop to return nothing
+ if eval_results is not None and len(eval_results) > 0:
+ _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(eval_results)
- # log results of test
- if test_mode and self.is_global_zero:
- print('-' * 80)
- print('TEST RESULTS')
- pprint(callback_metrics)
- print('-' * 80)
+ # add metrics to prog bar
+ self.add_progress_bar_metrics(prog_bar_metrics)
- # log metrics
- self.log_metrics(log_metrics, {})
+ # log results of test
+ if test_mode and self.is_global_zero:
+ print('-' * 80)
+ print('TEST RESULTS')
+ pprint(callback_metrics)
+ print('-' * 80)
- # track metrics for callbacks
- self.callback_metrics.update(callback_metrics)
+ # log metrics
+ self.log_metrics(log_metrics, {})
+
+ # track metrics for callbacks
+ self.callback_metrics.update(callback_metrics)
# hook
model.on_post_performance_check()
diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py
index 0bf2b2867c..13260d0809 100644
--- a/pytorch_lightning/trainer/trainer.py
+++ b/pytorch_lightning/trainer/trainer.py
@@ -129,11 +129,6 @@ class Trainer(
>>> trainer.fit(model, train_loader)
1
>>> trainer.test(model, train_loader) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
- --------------------------------------------------------------------------------
- TEST RESULTS
- ...
- --------------------------------------------------------------------------------
-
"""
DEPRECATED_IN_0_9 = ('use_amp', 'show_progress_bar', 'training_tqdm_dict', 'num_tpu_cores')
@@ -1142,8 +1137,11 @@ class Trainer(
self.val_dataloaders,
max_batches,
False)
- _, _, _, callback_metrics, _ = self.process_output(eval_results)
- self.callback_metrics = callback_metrics
+
+ # allow no returns from eval
+ if eval_results is not None and len(eval_results) > 0:
+ _, _, _, callback_metrics, _ = self.process_output(eval_results)
+ self.callback_metrics = callback_metrics
self.on_sanity_check_end()