2020-08-20 02:03:22 +00:00
|
|
|
# Copyright The PyTorch Lightning team.
|
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2019-10-31 10:45:28 +00:00
|
|
|
import collections
|
2020-02-25 15:36:44 +00:00
|
|
|
import inspect
|
2020-01-20 19:50:31 +00:00
|
|
|
import os
|
2020-06-19 03:08:25 +00:00
|
|
|
import re
|
2020-07-31 10:27:57 +00:00
|
|
|
import tempfile
|
2020-09-04 10:04:45 +00:00
|
|
|
from abc import ABC
|
2019-10-23 08:48:24 +00:00
|
|
|
from argparse import Namespace
|
2020-07-24 15:42:15 +00:00
|
|
|
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
|
2019-10-22 08:32:40 +00:00
|
|
|
|
2019-03-31 01:45:16 +00:00
|
|
|
import torch
|
2020-03-17 00:50:36 +00:00
|
|
|
import torch.distributed as torch_distrib
|
2020-03-17 22:44:00 +00:00
|
|
|
from pytorch_lightning import _logger as log
|
2019-11-27 03:39:18 +00:00
|
|
|
from pytorch_lightning.core.grads import GradInformation
|
2020-09-21 02:59:21 +00:00
|
|
|
from pytorch_lightning.core.hooks import DataHooks, ModelHooks
|
2020-01-21 20:18:32 +00:00
|
|
|
from pytorch_lightning.core.memory import ModelSummary
|
2020-07-24 15:42:15 +00:00
|
|
|
from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, PRIMITIVE_TYPES, ModelIO
|
2020-09-21 02:59:21 +00:00
|
|
|
from pytorch_lightning.core.step_result import EvalResult, TrainResult
|
2019-11-27 03:39:18 +00:00
|
|
|
from pytorch_lightning.overrides.data_parallel import LightningDistributedDataParallel
|
2020-04-09 18:05:46 +00:00
|
|
|
from pytorch_lightning.utilities import rank_zero_warn
|
2020-07-24 15:42:15 +00:00
|
|
|
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
|
2020-09-21 02:59:21 +00:00
|
|
|
from pytorch_lightning.utilities.parsing import (
|
|
|
|
AttributeDict,
|
|
|
|
collect_init_args,
|
|
|
|
get_init_args,
|
|
|
|
)
|
|
|
|
from torch import ScriptModule, Tensor
|
|
|
|
from torch.nn import Module
|
|
|
|
from torch.nn.parallel import DistributedDataParallel
|
|
|
|
from torch.optim.optimizer import Optimizer
|
|
|
|
|
2019-10-21 06:16:55 +00:00
|
|
|
|
2020-02-25 03:23:25 +00:00
|
|
|
try:
|
|
|
|
import torch_xla.core.xla_model as xm
|
|
|
|
except ImportError:
|
|
|
|
XLA_AVAILABLE = False
|
2020-03-17 00:50:36 +00:00
|
|
|
else:
|
|
|
|
XLA_AVAILABLE = True
|
2020-02-25 03:23:25 +00:00
|
|
|
|
2019-03-31 01:45:16 +00:00
|
|
|
|
2020-09-21 02:59:21 +00:00
|
|
|
class LightningModule(
|
|
|
|
ABC, DeviceDtypeModuleMixin, GradInformation, ModelIO, ModelHooks, DataHooks, Module
|
|
|
|
):
|
2019-07-25 16:08:00 +00:00
|
|
|
def __init__(self, *args, **kwargs):
|
2020-03-27 12:36:50 +00:00
|
|
|
super().__init__(*args, **kwargs)
|
2019-03-31 20:29:50 +00:00
|
|
|
|
2019-03-31 01:45:16 +00:00
|
|
|
self.exp_save_path = None
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2019-11-28 17:48:55 +00:00
|
|
|
#: The current epoch
|
2019-03-31 01:45:16 +00:00
|
|
|
self.current_epoch = 0
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2019-11-28 17:48:55 +00:00
|
|
|
#: Total training batches seen across all epochs
|
2019-03-31 01:45:16 +00:00
|
|
|
self.global_step = 0
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2019-03-31 01:45:16 +00:00
|
|
|
self.loaded_optimizer_states_dict = {}
|
2020-01-17 11:03:31 +00:00
|
|
|
|
|
|
|
#: Pointer to the trainer object
|
2019-04-23 11:25:09 +00:00
|
|
|
self.trainer = None
|
2020-01-17 11:03:31 +00:00
|
|
|
|
|
|
|
#: Pointer to the logger object
|
2019-10-04 22:53:38 +00:00
|
|
|
self.logger = None
|
2019-03-31 01:45:16 +00:00
|
|
|
|
2020-01-17 11:03:31 +00:00
|
|
|
#: True if using dp
|
2019-08-24 01:23:27 +00:00
|
|
|
self.use_dp = False
|
2020-01-17 11:03:31 +00:00
|
|
|
|
|
|
|
#: True if using ddp
|
2019-08-24 01:23:27 +00:00
|
|
|
self.use_ddp = False
|
2020-01-17 11:03:31 +00:00
|
|
|
|
|
|
|
#: True if using ddp2
|
2019-10-04 22:53:38 +00:00
|
|
|
self.use_ddp2 = False
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-05-24 22:59:08 +00:00
|
|
|
# True if on tpu
|
|
|
|
self.use_tpu = False
|
|
|
|
|
2020-01-17 11:03:31 +00:00
|
|
|
#: True if using amp
|
2019-08-24 01:23:27 +00:00
|
|
|
self.use_amp = False
|
2019-03-31 20:29:50 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
#: The precision used
|
|
|
|
self.precision = 32
|
|
|
|
|
2020-06-15 21:05:58 +00:00
|
|
|
# optionally can be set by user
|
|
|
|
self._example_input_array = None
|
2020-07-24 15:42:15 +00:00
|
|
|
self._datamodule = None
|
2020-06-15 21:05:58 +00:00
|
|
|
|
|
|
|
@property
|
|
|
|
def example_input_array(self) -> Any:
|
|
|
|
return self._example_input_array
|
|
|
|
|
|
|
|
@example_input_array.setter
|
|
|
|
def example_input_array(self, example: Any) -> None:
|
|
|
|
self._example_input_array = example
|
|
|
|
|
2020-07-24 15:42:15 +00:00
|
|
|
@property
|
|
|
|
def datamodule(self) -> Any:
|
|
|
|
return self._datamodule
|
|
|
|
|
|
|
|
@datamodule.setter
|
|
|
|
def datamodule(self, datamodule: Any) -> None:
|
|
|
|
self._datamodule = datamodule
|
|
|
|
|
2020-05-17 12:20:51 +00:00
|
|
|
@property
|
|
|
|
def on_gpu(self):
|
|
|
|
"""
|
|
|
|
True if your model is currently running on GPUs.
|
|
|
|
Useful to set flags around the LightningModule for different CPU vs GPU behavior.
|
|
|
|
"""
|
2020-09-21 02:59:21 +00:00
|
|
|
return self.device.type == "cuda"
|
2020-05-17 12:20:51 +00:00
|
|
|
|
2020-03-12 16:47:23 +00:00
|
|
|
def print(self, *args, **kwargs) -> None:
|
2020-02-25 03:30:53 +00:00
|
|
|
r"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Prints only from process 0. Use this in any distributed mode to log only once.
|
2020-02-25 03:30:53 +00:00
|
|
|
|
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
*args: The thing to print. Will be passed to Python's built-in print function.
|
|
|
|
**kwargs: Will be passed to Python's built-in print function.
|
|
|
|
|
|
|
|
Example:
|
2020-02-25 03:30:53 +00:00
|
|
|
|
2020-03-20 19:49:01 +00:00
|
|
|
.. code-block:: python
|
2020-02-25 03:30:53 +00:00
|
|
|
|
2020-03-20 19:49:01 +00:00
|
|
|
def forward(self, x):
|
2020-04-06 12:12:44 +00:00
|
|
|
self.print(x, 'in forward')
|
2020-02-25 03:30:53 +00:00
|
|
|
|
|
|
|
"""
|
2020-06-13 16:00:14 +00:00
|
|
|
if self.trainer.is_global_zero:
|
2020-03-06 23:14:03 +00:00
|
|
|
print(*args, **kwargs)
|
2020-02-25 03:30:53 +00:00
|
|
|
|
2019-03-31 01:45:16 +00:00
|
|
|
def forward(self, *args, **kwargs):
|
2020-01-17 11:03:31 +00:00
|
|
|
r"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define
|
|
|
|
the operations you want to use for prediction (i.e.: on a server or as a feature extractor).
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Normally you'd call ``self()`` from your :meth:`training_step` method.
|
2020-03-05 23:52:17 +00:00
|
|
|
This makes it easy to write a complex system for training with the outputs
|
|
|
|
you'd want in a prediction setting.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-06-15 21:04:32 +00:00
|
|
|
You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful
|
|
|
|
when using the module outside Lightning in a production setting.
|
|
|
|
|
2020-01-17 11:03:31 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
*args: Whatever you decide to pass into the forward method.
|
|
|
|
**kwargs: Keyword arguments are also possible.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
|
|
|
Return:
|
|
|
|
Predicted output
|
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Examples:
|
|
|
|
.. code-block:: python
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# example if we were using this model as a feature extractor
|
|
|
|
def forward(self, x):
|
|
|
|
feature_maps = self.convnet(x)
|
|
|
|
return feature_maps
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
def training_step(self, batch, batch_idx):
|
|
|
|
x, y = batch
|
2020-03-27 07:17:56 +00:00
|
|
|
feature_maps = self(x)
|
2020-03-05 23:52:17 +00:00
|
|
|
logits = self.classifier(feature_maps)
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# ...
|
|
|
|
return loss
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# splitting it this way allows model to be used a feature extractor
|
|
|
|
model = MyModelAbove()
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
inputs = server.get_request()
|
|
|
|
results = model(inputs)
|
|
|
|
server.write_results(results)
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# -------------
|
|
|
|
# This is in stark contrast to torch.nn.Module where normally you would have this:
|
|
|
|
def forward(self, batch):
|
|
|
|
x, y = batch
|
|
|
|
feature_maps = self.convnet(x)
|
|
|
|
logits = self.classifier(feature_maps)
|
|
|
|
return logits
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2019-03-31 01:45:16 +00:00
|
|
|
"""
|
2020-09-03 18:24:44 +00:00
|
|
|
return super().forward(*args, **kwargs)
|
2019-03-31 01:45:16 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def training_step(self, *args, **kwargs):
|
2020-04-06 12:12:44 +00:00
|
|
|
r"""
|
|
|
|
Here you compute and return the training loss and some additional metrics for e.g.
|
|
|
|
the progress bar or logger.
|
2020-02-11 04:55:22 +00:00
|
|
|
|
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
|
|
|
|
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
|
2020-02-11 04:55:22 +00:00
|
|
|
batch_idx (int): Integer displaying index of this batch
|
2020-04-06 12:12:44 +00:00
|
|
|
optimizer_idx (int): When using multiple optimizers, this argument will also be present.
|
|
|
|
hiddens(:class:`~torch.Tensor`): Passed in if
|
|
|
|
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Return:
|
2020-08-11 23:39:43 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.TrainResult`
|
2020-02-09 22:39:10 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient
|
|
|
|
functions for logging, distributed sync and error checking.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
|
|
|
In this step you'd normally do the forward pass and calculate the loss for a batch.
|
2020-03-05 23:52:17 +00:00
|
|
|
You can also do fancier things like multiple forward passes or something model specific.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
Example::
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def training_step(self, batch, batch_idx):
|
|
|
|
x, y, z = batch
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# implement your own
|
|
|
|
out = self(x)
|
|
|
|
loss = self.loss(out, x)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# TrainResult auto-detaches the loss after the optimization steps are complete
|
|
|
|
result = pl.TrainResult(minimize=loss)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
The return object :class:`~pytorch_lightning.core.step_result.TrainResult` controls where to log,
|
|
|
|
when to log (step or epoch) and syncing with multiple GPUs.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# log to progress bar and logger
|
|
|
|
result.log('train_loss', loss, prog_bar=True, logger=True)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# sync metric value across GPUs in distributed training
|
|
|
|
result.log('train_loss_2', loss, sync_dist=True)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# log to progress bar as well
|
|
|
|
result.log('train_loss_2', loss, prog_bar=True)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# assign arbitrary values
|
|
|
|
result.predictions = predictions
|
|
|
|
result.some_value = 'some_value'
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
If you define multiple optimizers, this step will be called with an additional
|
|
|
|
``optimizer_idx`` parameter.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# Multiple optimizers (e.g.: GANs)
|
|
|
|
def training_step(self, batch, batch_idx, optimizer_idx):
|
|
|
|
if optimizer_idx == 0:
|
|
|
|
# do training_step with encoder
|
|
|
|
if optimizer_idx == 1:
|
|
|
|
# do training_step with decoder
|
|
|
|
|
|
|
|
|
|
|
|
If you add truncated back propagation through time you will also get an additional
|
|
|
|
argument with the hidden states of the previous step.
|
|
|
|
|
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# Truncated back-propagation through time
|
|
|
|
def training_step(self, batch, batch_idx, hiddens):
|
|
|
|
# hiddens are the hidden states from the previous truncated backprop step
|
|
|
|
...
|
|
|
|
out, hiddens = self.lstm(data, hiddens)
|
|
|
|
...
|
2020-02-01 20:51:42 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# TrainResult auto-detaches hiddens
|
|
|
|
result = pl.TrainResult(minimize=loss, hiddens=hiddens)
|
|
|
|
return result
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-27 12:41:07 +00:00
|
|
|
Notes:
|
2020-04-06 12:12:44 +00:00
|
|
|
The loss value shown in the progress bar is smoothed (averaged) over the last values,
|
|
|
|
so it differs from the actual loss returned in train/validation step.
|
2019-08-13 15:37:37 +00:00
|
|
|
"""
|
2020-09-21 02:59:21 +00:00
|
|
|
rank_zero_warn(
|
|
|
|
"`training_step` must be implemented to be used with the Lightning Trainer"
|
|
|
|
)
|
2019-08-13 15:37:37 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def training_step_end(self, *args, **kwargs):
|
|
|
|
"""
|
|
|
|
Use this when training with dp or ddp2 because :meth:`training_step`
|
|
|
|
will operate on only part of the batch. However, this is still optional
|
|
|
|
and only needed for things like softmax or NCE loss.
|
|
|
|
|
|
|
|
Note:
|
|
|
|
If you later switch to ddp or some other mode, this will still be called
|
|
|
|
so that you don't have to change your code
|
2020-04-03 12:43:26 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# pseudocode
|
|
|
|
sub_batches = split_batches_for_dp(batch)
|
|
|
|
batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]
|
|
|
|
training_step_end(batch_parts_outputs)
|
2020-04-03 12:43:26 +00:00
|
|
|
|
|
|
|
Args:
|
2020-08-11 23:39:43 +00:00
|
|
|
batch_parts_outputs: What you return in `training_step` for each batch part.
|
2020-04-03 12:43:26 +00:00
|
|
|
|
|
|
|
Return:
|
2020-08-11 23:39:43 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.TrainResult`
|
2020-04-03 19:02:20 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient
|
|
|
|
functions for logging, distributed sync and error checking.
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. code-block:: python
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def training_step(self, batch, batch_idx):
|
|
|
|
# batch is 1/num_gpus big
|
|
|
|
x, y = batch
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
out = self(x)
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# softmax uses only a portion of the batch in the denomintaor
|
|
|
|
loss = self.softmax(out)
|
|
|
|
loss = nce_loss(loss)
|
|
|
|
return pl.TrainResult(loss)
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
If you wish to do something with all the parts of the batch, then use this method to do it:
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. code-block:: python
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def training_step(self, batch, batch_idx):
|
|
|
|
# batch is 1/num_gpus big
|
|
|
|
x, y = batch
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
out = self(x)
|
|
|
|
result = pl.TrainResult()
|
|
|
|
result.out = out
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def training_step_end(self, training_step_outputs):
|
|
|
|
# this out is now the full size of the batch
|
|
|
|
all_outs = training_step_outputs.out
|
|
|
|
|
|
|
|
# this softmax now uses the full batch
|
|
|
|
loss = nce_loss(all_outs)
|
|
|
|
result = pl.TrainResult(loss)
|
|
|
|
return result
|
2020-04-03 12:43:26 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
See Also:
|
2020-09-14 01:04:21 +00:00
|
|
|
See the :ref:`multi_gpu` guide for more details.
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
|
|
|
|
2020-09-21 02:59:21 +00:00
|
|
|
def training_epoch_end(self, outputs: Union[TrainResult, List[TrainResult]]):
|
2020-08-11 23:39:43 +00:00
|
|
|
"""
|
|
|
|
Called at the end of the training epoch with the outputs of all training steps.
|
|
|
|
Use this in case you need to do something with all the outputs for every training_step.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# the pseudocode for these calls
|
|
|
|
train_outs = []
|
|
|
|
for train_batch in train_data:
|
|
|
|
out = training_step(train_batch)
|
|
|
|
train_outs.append(out)
|
|
|
|
training_epoch_end(train_outs)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Args:
|
2020-08-11 23:39:43 +00:00
|
|
|
outputs: List of outputs you defined in :meth:`training_step`, or if there are
|
|
|
|
multiple dataloaders, a list containing a list of outputs for each dataloader.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Return:
|
2020-08-11 23:39:43 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.TrainResult`
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. note:: :class:`~pytorch_lightning.core.step_result.TrainResult` is simply a Dict with convenient
|
|
|
|
functions for logging, distributed sync and error checking.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
Note:
|
|
|
|
If this method is not overridden, this won't be called.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
Example::
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def training_epoch_end(self, training_step_outputs):
|
|
|
|
# do something with all training_step outputs
|
|
|
|
return result
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains
|
|
|
|
one entry per dataloader, while the inner list contains the individual outputs of
|
|
|
|
each training step for that dataloader.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def training_epoch_end(self, outputs):
|
|
|
|
epoch_result = pl.TrainResult()
|
|
|
|
for train_result in outputs:
|
|
|
|
all_losses = train_result.minimize
|
|
|
|
# do something with all losses
|
|
|
|
return results
|
2019-11-05 15:01:52 +00:00
|
|
|
"""
|
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def validation_step(self, *args, **kwargs) -> EvalResult:
|
2020-01-17 11:03:31 +00:00
|
|
|
r"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Operates on a single batch of data from the validation set.
|
2020-03-05 23:52:17 +00:00
|
|
|
In this step you'd might generate examples or calculate anything of interest like accuracy.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# the pseudocode for these calls
|
|
|
|
val_outs = []
|
|
|
|
for val_batch in val_data:
|
|
|
|
out = validation_step(train_batch)
|
2020-03-27 12:43:12 +00:00
|
|
|
val_outs.append(out)
|
2020-03-05 23:52:17 +00:00
|
|
|
validation_epoch_end(val_outs)
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-01-17 11:03:31 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
|
|
|
|
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
|
2020-01-17 11:03:31 +00:00
|
|
|
batch_idx (int): The index of this batch
|
2020-03-05 23:52:17 +00:00
|
|
|
dataloader_idx (int): The index of the dataloader that produced this batch
|
|
|
|
(only if multiple val datasets used)
|
2020-01-17 11:03:31 +00:00
|
|
|
|
|
|
|
Return:
|
2020-09-10 13:21:11 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.EvalResult`
|
2020-03-06 15:33:17 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# pseudocode of order
|
|
|
|
out = validation_step()
|
|
|
|
if defined('validation_step_end'):
|
|
|
|
out = validation_step_end(out)
|
|
|
|
out = validation_epoch_end(out)
|
|
|
|
|
2019-11-28 17:48:55 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# if you have one val dataloader:
|
2019-12-04 11:57:10 +00:00
|
|
|
def validation_step(self, batch, batch_idx)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
|
|
|
# if you have multiple val dataloaders:
|
2020-03-05 17:32:45 +00:00
|
|
|
def validation_step(self, batch, batch_idx, dataloader_idx)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-06 15:33:17 +00:00
|
|
|
Examples:
|
2020-03-05 23:52:17 +00:00
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# CASE 1: A single validation dataset
|
|
|
|
def validation_step(self, batch, batch_idx):
|
|
|
|
x, y = batch
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# implement your own
|
2020-03-27 07:17:56 +00:00
|
|
|
out = self(x)
|
2020-03-05 23:52:17 +00:00
|
|
|
loss = self.loss(out, y)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# log 6 example images
|
|
|
|
# or generated text... or whatever
|
|
|
|
sample_imgs = x[:6]
|
|
|
|
grid = torchvision.utils.make_grid(sample_imgs)
|
|
|
|
self.logger.experiment.add_image('example_images', grid, 0)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# calculate acc
|
|
|
|
labels_hat = torch.argmax(out, dim=1)
|
|
|
|
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# log the outputs!
|
|
|
|
result = pl.EvalResult(checkpoint_on=loss)
|
|
|
|
result.log_dict({'val_loss': loss, 'val_acc': val_acc})
|
|
|
|
return result
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
If you pass in multiple val datasets, validation_step will have an additional argument.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# CASE 2: multiple validation datasets
|
2020-07-22 13:54:14 +00:00
|
|
|
def validation_step(self, batch, batch_idx, dataloader_idx):
|
|
|
|
# dataloader_idx tells you which dataset this is.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Note:
|
|
|
|
If you don't need to validate you don't need to implement this method.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Note:
|
|
|
|
When the :meth:`validation_step` is called, the model has been put in eval mode
|
2020-03-05 23:52:17 +00:00
|
|
|
and PyTorch gradients have been disabled. At the end of validation,
|
2020-03-06 15:33:17 +00:00
|
|
|
the model goes back to training mode and gradients are enabled.
|
2019-03-31 01:45:16 +00:00
|
|
|
"""
|
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def validation_step_end(self, *args, **kwargs) -> EvalResult:
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Use this when validating with dp or ddp2 because :meth:`validation_step`
|
|
|
|
will operate on only part of the batch. However, this is still optional
|
2020-03-05 17:32:45 +00:00
|
|
|
and only needed for things like softmax or NCE loss.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Note:
|
|
|
|
If you later switch to ddp or some other mode, this will still be called
|
|
|
|
so that you don't have to change your code.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
# pseudocode
|
|
|
|
sub_batches = split_batches_for_dp(batch)
|
2020-03-06 15:33:17 +00:00
|
|
|
batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]
|
2020-03-05 17:32:45 +00:00
|
|
|
validation_step_end(batch_parts_outputs)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
batch_parts_outputs: What you return in :meth:`validation_step`
|
|
|
|
for each batch part.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Return:
|
2020-09-10 13:21:11 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.EvalResult`
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. code-block:: python
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# WITHOUT validation_step_end
|
|
|
|
# if used in DP or DDP2, this batch is 1/num_gpus large
|
|
|
|
def validation_step(self, batch, batch_idx):
|
|
|
|
# batch is 1/num_gpus big
|
|
|
|
x, y = batch
|
|
|
|
|
|
|
|
out = self(x)
|
|
|
|
loss = self.softmax(out)
|
|
|
|
loss = nce_loss(loss)
|
|
|
|
result = pl.EvalResult()
|
|
|
|
result.log('val_loss', loss)
|
|
|
|
return result
|
|
|
|
|
|
|
|
# --------------
|
|
|
|
# with validation_step_end to do softmax over the full batch
|
|
|
|
def validation_step(self, batch, batch_idx):
|
|
|
|
# batch is 1/num_gpus big
|
|
|
|
x, y = batch
|
|
|
|
|
|
|
|
out = self(x)
|
|
|
|
result = pl.EvalResult()
|
|
|
|
result.out = out
|
|
|
|
return result
|
|
|
|
|
|
|
|
def validation_epoch_end(self, output_results):
|
|
|
|
# this out is now the full size of the batch
|
|
|
|
all_val_step_outs = output_results.out
|
|
|
|
loss = nce_loss(all_val_step_outs)
|
|
|
|
|
|
|
|
result = pl.EvalResult(checkpoint_on=loss)
|
|
|
|
result.log('val_loss', loss)
|
|
|
|
return result
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
See Also:
|
2020-09-14 01:04:21 +00:00
|
|
|
See the :ref:`multi_gpu` guide for more details.
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
def validation_end(self, outputs):
|
|
|
|
"""
|
2020-03-05 23:52:17 +00:00
|
|
|
Warnings:
|
2020-04-06 12:12:44 +00:00
|
|
|
Deprecated in v0.7.0. Use :meth:`validation_epoch_end` instead.
|
|
|
|
Will be removed in 1.0.0.
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-12 16:47:23 +00:00
|
|
|
def validation_epoch_end(
|
2020-09-21 02:59:21 +00:00
|
|
|
self, outputs: Union[EvalResult, List[EvalResult]]
|
2020-08-11 23:39:43 +00:00
|
|
|
) -> EvalResult:
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Called at the end of the validation epoch with the outputs of all validation steps.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
# the pseudocode for these calls
|
|
|
|
val_outs = []
|
|
|
|
for val_batch in val_data:
|
2020-04-13 16:16:54 +00:00
|
|
|
out = validation_step(val_batch)
|
2020-04-03 12:43:26 +00:00
|
|
|
val_outs.append(out)
|
2020-03-05 17:32:45 +00:00
|
|
|
validation_epoch_end(val_outs)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
outputs: List of outputs you defined in :meth:`validation_step`, or if there
|
|
|
|
are multiple dataloaders, a list containing a list of outputs for each dataloader.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
Return:
|
2020-09-10 13:21:11 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.EvalResult`
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Note:
|
|
|
|
If you didn't define a :meth:`validation_step`, this won't be called.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
- The outputs here are strictly for logging or progress bar.
|
|
|
|
- If you don't need to display anything, don't return anything.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Examples:
|
2020-04-06 12:12:44 +00:00
|
|
|
With a single dataloader:
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def validation_epoch_end(self, val_step_outputs):
|
|
|
|
# do something with the outputs of all val batches
|
|
|
|
all_val_preds = val_step_outputs.predictions
|
|
|
|
|
|
|
|
val_step_outputs.some_result = calc_all_results(all_val_preds)
|
|
|
|
return val_step_outputs
|
2020-03-05 23:52:17 +00:00
|
|
|
|
|
|
|
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
|
|
|
|
one entry per dataloader, while the inner list contains the individual outputs of
|
|
|
|
each validation step for that dataloader.
|
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
def validation_epoch_end(self, outputs):
|
2020-08-11 23:39:43 +00:00
|
|
|
for dataloader_output_result in outputs:
|
|
|
|
dataloader_outs = dataloader_output_result.dataloader_i_outputs
|
|
|
|
|
|
|
|
result = pl.EvalResult()
|
|
|
|
result.log('final_metric', final_value)
|
|
|
|
return result
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def test_step(self, *args, **kwargs) -> EvalResult:
|
2020-03-05 17:32:45 +00:00
|
|
|
r"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Operates on a single batch of data from the test set.
|
2020-03-05 23:52:17 +00:00
|
|
|
In this step you'd normally generate examples or calculate anything of interest
|
|
|
|
such as accuracy.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# the pseudocode for these calls
|
|
|
|
test_outs = []
|
|
|
|
for test_batch in test_data:
|
2020-04-06 12:12:44 +00:00
|
|
|
out = test_step(test_batch)
|
2020-03-06 15:33:17 +00:00
|
|
|
test_outs.append(out)
|
2020-03-05 17:32:45 +00:00
|
|
|
test_epoch_end(test_outs)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
|
|
|
|
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
|
|
|
|
batch_idx (int): The index of this batch.
|
2020-03-05 23:52:17 +00:00
|
|
|
dataloader_idx (int): The index of the dataloader that produced this batch
|
2020-04-06 12:12:44 +00:00
|
|
|
(only if multiple test datasets used).
|
2020-03-05 17:32:45 +00:00
|
|
|
|
|
|
|
Return:
|
2020-09-10 13:21:11 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.EvalResult`
|
2020-03-05 17:32:45 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# if you have one test dataloader:
|
|
|
|
def test_step(self, batch, batch_idx)
|
|
|
|
|
|
|
|
# if you have multiple test dataloaders:
|
|
|
|
def test_step(self, batch, batch_idx, dataloader_idx)
|
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Examples:
|
|
|
|
.. code-block:: python
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# CASE 1: A single test dataset
|
|
|
|
def test_step(self, batch, batch_idx):
|
|
|
|
x, y = batch
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# implement your own
|
2020-03-27 07:17:56 +00:00
|
|
|
out = self(x)
|
2020-03-05 23:52:17 +00:00
|
|
|
loss = self.loss(out, y)
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# log 6 example images
|
|
|
|
# or generated text... or whatever
|
|
|
|
sample_imgs = x[:6]
|
|
|
|
grid = torchvision.utils.make_grid(sample_imgs)
|
|
|
|
self.logger.experiment.add_image('example_images', grid, 0)
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# calculate acc
|
|
|
|
labels_hat = torch.argmax(out, dim=1)
|
2020-08-11 23:39:43 +00:00
|
|
|
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# log the outputs!
|
|
|
|
result = pl.EvalResult(checkpoint_on=loss)
|
|
|
|
result.log_dict({'test_loss': loss, 'test_acc': test_acc})
|
|
|
|
return resultt
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
If you pass in multiple validation datasets, :meth:`test_step` will have an additional
|
2020-03-05 23:52:17 +00:00
|
|
|
argument.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
.. code-block:: python
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
# CASE 2: multiple test datasets
|
2020-07-22 13:54:14 +00:00
|
|
|
def test_step(self, batch, batch_idx, dataloader_idx):
|
|
|
|
# dataloader_idx tells you which dataset this is.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Note:
|
|
|
|
If you don't need to validate you don't need to implement this method.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Note:
|
|
|
|
When the :meth:`test_step` is called, the model has been put in eval mode and
|
2020-03-06 15:33:17 +00:00
|
|
|
PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
|
2020-03-05 23:52:17 +00:00
|
|
|
to training mode and gradients are enabled.
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
def test_step_end(self, *args, **kwargs) -> EvalResult:
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Use this when testing with dp or ddp2 because :meth:`test_step` will operate
|
2020-03-05 17:32:45 +00:00
|
|
|
on only part of the batch. However, this is still optional
|
|
|
|
and only needed for things like softmax or NCE loss.
|
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Note:
|
|
|
|
If you later switch to ddp or some other mode, this will still be called
|
|
|
|
so that you don't have to change your code.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# pseudocode
|
|
|
|
sub_batches = split_batches_for_dp(batch)
|
2020-03-06 15:33:17 +00:00
|
|
|
batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]
|
2020-03-05 17:32:45 +00:00
|
|
|
test_step_end(batch_parts_outputs)
|
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
batch_parts_outputs: What you return in :meth:`test_step` for each batch part.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Return:
|
2020-09-10 13:21:11 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.EvalResult`
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
.. code-block:: python
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-08-11 23:39:43 +00:00
|
|
|
# WITHOUT test_step_end
|
|
|
|
# if used in DP or DDP2, this batch is 1/num_gpus large
|
|
|
|
def test_step(self, batch, batch_idx):
|
|
|
|
# batch is 1/num_gpus big
|
|
|
|
x, y = batch
|
|
|
|
|
|
|
|
out = self(x)
|
|
|
|
loss = self.softmax(out)
|
|
|
|
loss = nce_loss(loss)
|
|
|
|
result = pl.EvalResult()
|
|
|
|
result.log('test_loss', loss)
|
|
|
|
return result
|
|
|
|
|
|
|
|
# --------------
|
|
|
|
# with test_step_end to do softmax over the full batch
|
|
|
|
def test_step(self, batch, batch_idx):
|
|
|
|
# batch is 1/num_gpus big
|
|
|
|
x, y = batch
|
|
|
|
|
|
|
|
out = self(x)
|
|
|
|
result = pl.EvalResult()
|
|
|
|
result.out = out
|
|
|
|
return result
|
|
|
|
|
|
|
|
def test_epoch_end(self, output_results):
|
|
|
|
# this out is now the full size of the batch
|
|
|
|
all_test_step_outs = output_results.out
|
|
|
|
loss = nce_loss(all_test_step_outs)
|
|
|
|
|
|
|
|
result = pl.EvalResult(checkpoint_on=loss)
|
|
|
|
result.log('test_loss', loss)
|
|
|
|
return result
|
2020-03-05 17:32:45 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
See Also:
|
2020-09-14 01:04:21 +00:00
|
|
|
See the :ref:`multi_gpu` guide for more details.
|
2019-03-31 01:45:16 +00:00
|
|
|
"""
|
|
|
|
|
2019-08-30 22:56:09 +00:00
|
|
|
def test_end(self, outputs):
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
2020-03-05 23:52:17 +00:00
|
|
|
Warnings:
|
2020-04-06 12:12:44 +00:00
|
|
|
Deprecated in v0.7.0. Use :meth:`test_epoch_end` instead.
|
|
|
|
Will be removed in 1.0.0.
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
|
|
|
|
2020-03-12 16:47:23 +00:00
|
|
|
def test_epoch_end(
|
2020-09-21 02:59:21 +00:00
|
|
|
self, outputs: Union[EvalResult, List[EvalResult]]
|
2020-08-11 23:39:43 +00:00
|
|
|
) -> EvalResult:
|
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Called at the end of a test epoch with the output of all test steps.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
# the pseudocode for these calls
|
|
|
|
test_outs = []
|
|
|
|
for test_batch in test_data:
|
|
|
|
out = test_step(test_batch)
|
|
|
|
test_outs.append(out)
|
|
|
|
test_epoch_end(test_outs)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 17:32:45 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
outputs: List of outputs you defined in :meth:`test_step_end`, or if there
|
|
|
|
are multiple dataloaders, a list containing a list of outputs for each dataloader
|
2020-03-05 17:32:45 +00:00
|
|
|
|
|
|
|
Return:
|
2020-09-10 13:21:11 +00:00
|
|
|
:class:`~pytorch_lightning.core.step_result.EvalResult`
|
2020-04-06 12:12:44 +00:00
|
|
|
|
|
|
|
Note:
|
|
|
|
If you didn't define a :meth:`test_step`, this won't be called.
|
2020-03-05 17:32:45 +00:00
|
|
|
|
|
|
|
- The outputs here are strictly for logging or progress bar.
|
|
|
|
- If you don't need to display anything, don't return anything.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Examples:
|
2020-04-06 12:12:44 +00:00
|
|
|
With a single dataloader:
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
def test_epoch_end(self, outputs):
|
2020-08-11 23:39:43 +00:00
|
|
|
# do something with the outputs of all test batches
|
|
|
|
all_test_preds = test_step_outputs.predictions
|
|
|
|
|
|
|
|
test_step_outputs.some_result = calc_all_results(all_test_preds)
|
|
|
|
return test_step_outputs
|
2020-03-05 23:52:17 +00:00
|
|
|
|
|
|
|
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
|
|
|
|
one entry per dataloader, while the inner list contains the individual outputs of
|
|
|
|
each test step for that dataloader.
|
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
def test_epoch_end(self, outputs):
|
2020-08-11 23:39:43 +00:00
|
|
|
for dataloader_output_result in outputs:
|
|
|
|
dataloader_outs = dataloader_output_result.dataloader_i_outputs
|
|
|
|
|
|
|
|
result = pl.EvalResult()
|
|
|
|
result.log('final_metric', final_value)
|
2020-03-05 23:52:17 +00:00
|
|
|
return results
|
2019-08-30 22:56:09 +00:00
|
|
|
"""
|
|
|
|
|
2020-09-21 02:59:21 +00:00
|
|
|
def configure_ddp(
|
|
|
|
self, model: "LightningModule", device_ids: List[int]
|
|
|
|
) -> DistributedDataParallel:
|
2020-01-17 11:03:31 +00:00
|
|
|
r"""
|
|
|
|
Override to init DDP in your own way or with your own wrapper.
|
|
|
|
The only requirements are that:
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-08-03 12:55:17 +00:00
|
|
|
1. On a validation batch, the call goes to ``model.validation_step``.
|
|
|
|
2. On a training batch, the call goes to ``model.training_step``.
|
|
|
|
3. On a testing batch, the call goes to ``model.test_step``.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-01-17 11:03:31 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
model: the :class:`LightningModule` currently being optimized.
|
|
|
|
device_ids: the list of GPU ids.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
|
|
|
Return:
|
|
|
|
DDP wrapped model
|
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Examples:
|
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# default implementation used in Trainer
|
|
|
|
def configure_ddp(self, model, device_ids):
|
|
|
|
# Lightning DDP simply routes to test_step, val_step, etc...
|
|
|
|
model = LightningDistributedDataParallel(
|
|
|
|
model,
|
|
|
|
device_ids=device_ids,
|
|
|
|
find_unused_parameters=True
|
|
|
|
)
|
|
|
|
return model
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2019-11-05 15:01:52 +00:00
|
|
|
"""
|
2020-09-21 02:59:21 +00:00
|
|
|
model = LightningDistributedDataParallel(
|
|
|
|
model, device_ids=device_ids, find_unused_parameters=True
|
|
|
|
)
|
2019-11-05 15:01:52 +00:00
|
|
|
return model
|
|
|
|
|
2020-04-19 21:08:19 +00:00
|
|
|
def _init_slurm_connection(self) -> None:
|
2020-08-11 23:39:43 +00:00
|
|
|
""""""
|
2020-04-19 21:08:19 +00:00
|
|
|
"""
|
2020-05-07 13:25:54 +00:00
|
|
|
Sets up environment variables necessary for pytorch distributed communications
|
2020-04-19 21:08:19 +00:00
|
|
|
based on slurm environment.
|
2019-11-28 17:48:55 +00:00
|
|
|
"""
|
2019-11-05 15:01:52 +00:00
|
|
|
# use slurm job id for the port number
|
|
|
|
# guarantees unique ports across jobs from same grid search
|
|
|
|
try:
|
|
|
|
# use the last 4 numbers in the job id as the id
|
2020-09-21 02:59:21 +00:00
|
|
|
default_port = os.environ["SLURM_JOB_ID"]
|
2019-11-05 15:01:52 +00:00
|
|
|
default_port = default_port[-4:]
|
|
|
|
|
|
|
|
# all ports should be in the 10k+ range
|
|
|
|
default_port = int(default_port) + 15000
|
|
|
|
|
2020-01-14 03:20:38 +00:00
|
|
|
except Exception:
|
2019-11-05 15:01:52 +00:00
|
|
|
default_port = 12910
|
|
|
|
|
|
|
|
# if user gave a port number, use that one instead
|
|
|
|
try:
|
2020-09-21 02:59:21 +00:00
|
|
|
default_port = os.environ["MASTER_PORT"]
|
2019-11-05 15:01:52 +00:00
|
|
|
except Exception:
|
2020-09-21 02:59:21 +00:00
|
|
|
os.environ["MASTER_PORT"] = str(default_port)
|
2019-11-05 15:01:52 +00:00
|
|
|
|
|
|
|
# figure out the root node addr
|
|
|
|
try:
|
2020-09-21 02:59:21 +00:00
|
|
|
root_node = os.environ["SLURM_NODELIST"].split(" ")[0]
|
2019-11-05 15:01:52 +00:00
|
|
|
except Exception:
|
2020-09-21 02:59:21 +00:00
|
|
|
root_node = "127.0.0.1"
|
2019-11-05 15:01:52 +00:00
|
|
|
|
2020-09-12 15:07:15 +00:00
|
|
|
root_node = self.trainer.slurm_connector.resolve_root_node_address(root_node)
|
2020-09-21 02:59:21 +00:00
|
|
|
os.environ["MASTER_ADDR"] = root_node
|
2020-04-19 21:08:19 +00:00
|
|
|
|
2020-09-21 02:59:21 +00:00
|
|
|
def init_ddp_connection(
|
|
|
|
self, global_rank: int, world_size: int, is_slurm_managing_tasks: bool = True
|
|
|
|
) -> None:
|
2020-04-19 21:08:19 +00:00
|
|
|
"""
|
|
|
|
Override to define your custom way of setting up a distributed environment.
|
|
|
|
|
|
|
|
Lightning's implementation uses env:// init by default and sets the first node as root
|
|
|
|
for SLURM managed cluster.
|
|
|
|
|
|
|
|
Args:
|
2020-06-13 16:00:14 +00:00
|
|
|
global_rank: The global process idx.
|
2020-04-19 21:08:19 +00:00
|
|
|
world_size: Number of GPUs being use across all nodes. (num_nodes * num_gpus).
|
|
|
|
is_slurm_managing_tasks: is cluster managed by SLURM.
|
|
|
|
"""
|
|
|
|
if is_slurm_managing_tasks:
|
|
|
|
self._init_slurm_connection()
|
|
|
|
|
2020-09-21 02:59:21 +00:00
|
|
|
if "MASTER_ADDR" not in os.environ:
|
|
|
|
rank_zero_warn(
|
|
|
|
"MASTER_ADDR environment variable is not defined. Set as localhost"
|
|
|
|
)
|
|
|
|
os.environ["MASTER_ADDR"] = "127.0.0.1"
|
2020-04-30 11:58:03 +00:00
|
|
|
log.debug(f"MASTER_ADDR: {os.environ['MASTER_ADDR']}")
|
2020-04-19 21:08:19 +00:00
|
|
|
|
2020-09-21 02:59:21 +00:00
|
|
|
if "MASTER_PORT" not in os.environ:
|
|
|
|
rank_zero_warn(
|
|
|
|
"MASTER_PORT environment variable is not defined. Set as 12910"
|
|
|
|
)
|
|
|
|
os.environ["MASTER_PORT"] = "12910"
|
2020-04-30 11:58:03 +00:00
|
|
|
log.debug(f"MASTER_PORT: {os.environ['MASTER_PORT']}")
|
2020-04-19 21:08:19 +00:00
|
|
|
|
2020-09-21 02:59:21 +00:00
|
|
|
if "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) != world_size:
|
2020-07-24 15:42:15 +00:00
|
|
|
rank_zero_warn(
|
|
|
|
f"WORLD_SIZE environment variable ({os.environ['WORLD_SIZE']}) "
|
|
|
|
f"is not equal to the computed world size ({world_size}). Ignored."
|
|
|
|
)
|
2020-04-19 21:08:19 +00:00
|
|
|
|
2020-04-16 03:17:31 +00:00
|
|
|
torch_backend = "nccl" if self.trainer.on_gpu else "gloo"
|
2020-09-21 02:59:21 +00:00
|
|
|
log.info(
|
|
|
|
f"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank+1}/{world_size}"
|
|
|
|
)
|
|
|
|
torch_distrib.init_process_group(
|
|
|
|
torch_backend, rank=global_rank, world_size=world_size
|
|
|
|
)
|
2019-11-05 15:01:52 +00:00
|
|
|
|
2020-09-21 02:59:21 +00:00
|
|
|
def configure_sync_batchnorm(self, model: "LightningModule") -> "LightningModule":
|
2020-08-05 17:29:05 +00:00
|
|
|
"""
|
|
|
|
Add global batchnorm for a model spread across multiple GPUs and nodes.
|
|
|
|
|
|
|
|
Override to synchronize batchnorm between specific process groups instead
|
|
|
|
of the whole world or use a different sync_bn like `apex`'s version.
|
|
|
|
|
|
|
|
Args:
|
|
|
|
model: pointer to current :class:`LightningModule`.
|
|
|
|
|
|
|
|
Return:
|
|
|
|
LightningModule with batchnorm layers synchronized between process groups
|
|
|
|
"""
|
|
|
|
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model, process_group=None)
|
|
|
|
|
|
|
|
return model
|
|
|
|
|
2020-03-12 16:47:23 +00:00
|
|
|
def configure_apex(
|
2020-09-21 02:59:21 +00:00
|
|
|
self,
|
|
|
|
amp: object,
|
|
|
|
model: "LightningModule",
|
|
|
|
optimizers: List[Optimizer],
|
|
|
|
amp_level: str,
|
|
|
|
) -> Tuple["LightningModule", List[Optimizer]]:
|
2020-01-17 11:03:31 +00:00
|
|
|
r"""
|
2020-04-06 12:12:44 +00:00
|
|
|
Override to init AMP your own way.
|
|
|
|
Must return a model and list of optimizers.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-01-17 11:03:31 +00:00
|
|
|
Args:
|
2020-04-06 12:12:44 +00:00
|
|
|
amp: pointer to amp library object.
|
|
|
|
model: pointer to current :class:`LightningModule`.
|
|
|
|
optimizers: list of optimizers passed in :meth:`configure_optimizers`.
|
2020-03-12 16:47:23 +00:00
|
|
|
amp_level: AMP mode chosen ('O1', 'O2', etc...)
|
2020-01-17 11:03:31 +00:00
|
|
|
|
|
|
|
Return:
|
|
|
|
Apex wrapped model and optimizers
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Examples:
|
|
|
|
.. code-block:: python
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
# Default implementation used by Trainer.
|
|
|
|
def configure_apex(self, amp, model, optimizers, amp_level):
|
|
|
|
model, optimizers = amp.initialize(
|
|
|
|
model, optimizers, opt_level=amp_level,
|
|
|
|
)
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
return model, optimizers
|
2019-11-05 15:01:52 +00:00
|
|
|
"""
|
2020-06-12 18:37:52 +00:00
|
|
|
model, optimizers = amp.initialize(model, optimizers, opt_level=amp_level)
|
2019-11-05 15:01:52 +00:00
|
|
|
|
|
|
|
return model, optimizers
|
|
|
|
|
2020-07-24 15:42:15 +00:00
|
|
|
def configure_optimizers(
|
|
|
|
self,
|
2020-09-21 02:59:21 +00:00
|
|
|
) -> Optional[
|
|
|
|
Union[Optimizer, Sequence[Optimizer], Dict, Sequence[Dict], Tuple[List, List]]
|
|
|
|
]:
|
2020-01-17 11:03:31 +00:00
|
|
|
r"""
|
2020-03-05 23:52:17 +00:00
|
|
|
Choose what optimizers and learning-rate schedulers to use in your optimization.
|
|
|
|
Normally you'd need one. But in the case of GANs or similar you might have multiple.
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
Return:
|
|
|
|
Any of these 6 options.
|
|
|
|
|
2020-03-31 16:41:24 +00:00
|
|
|
- Single optimizer.
|
|
|
|
- List or Tuple - List of optimizers.
|
2020-06-04 15:23:44 +00:00
|
|
|
- Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).
|
|
|
|
- Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler' key which value is a single LR scheduler or lr_dict.
|
2020-04-06 12:12:44 +00:00
|
|
|
- Tuple of dictionaries as described, with an optional 'frequency' key.
|
2020-04-02 15:48:53 +00:00
|
|
|
- None - Fit will run without any optimizer.
|
2020-03-31 16:41:24 +00:00
|
|
|
|
|
|
|
Note:
|
2020-04-06 12:12:44 +00:00
|
|
|
The 'frequency' value is an int corresponding to the number of sequential batches
|
2020-03-31 16:41:24 +00:00
|
|
|
optimized with the specific optimizer. It should be given to none or to all of the optimizers.
|
2020-04-06 12:12:44 +00:00
|
|
|
There is a difference between passing multiple optimizers in a list,
|
2020-03-31 16:41:24 +00:00
|
|
|
and passing multiple optimizers in dictionaries with a frequency of 1:
|
|
|
|
In the former case, all optimizers will operate on the given batch in each optimization step.
|
|
|
|
In the latter, only one optimizer will operate on the given batch at every step.
|
2020-03-05 23:52:17 +00:00
|
|
|
|
2020-06-04 15:23:44 +00:00
|
|
|
The lr_dict is a dictionary which contains scheduler and its associated configuration.
|
|
|
|
It has five keys. The default configuration is shown below.
|
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
{
|
|
|
|
'scheduler': lr_scheduler, # The LR schduler
|
|
|
|
'interval': 'epoch', # The unit of the scheduler's step size
|
|
|
|
'frequency': 1, # The frequency of the scheduler
|
|
|
|
'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler
|
|
|
|
'monitor': 'val_loss' # Metric to monitor
|
|
|
|
}
|
|
|
|
|
|
|
|
If user only provides LR schedulers, then their configuration will set to default as shown above.
|
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Examples:
|
|
|
|
.. code-block:: python
|
|
|
|
|
2020-04-02 15:48:53 +00:00
|
|
|
# most cases
|
2020-03-05 23:52:17 +00:00
|
|
|
def configure_optimizers(self):
|
|
|
|
opt = Adam(self.parameters(), lr=1e-3)
|
|
|
|
return opt
|
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
# multiple optimizer case (e.g.: GAN)
|
2020-03-05 23:52:17 +00:00
|
|
|
def configure_optimizers(self):
|
|
|
|
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
|
|
|
|
disriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
|
|
|
|
return generator_opt, disriminator_opt
|
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
# example with learning rate schedulers
|
2020-03-05 23:52:17 +00:00
|
|
|
def configure_optimizers(self):
|
|
|
|
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
|
|
|
|
disriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
|
|
|
|
discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)
|
|
|
|
return [generator_opt, disriminator_opt], [discriminator_sched]
|
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
# example with step-based learning rate schedulers
|
2020-03-05 23:52:17 +00:00
|
|
|
def configure_optimizers(self):
|
|
|
|
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
|
|
|
|
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
|
|
|
|
gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),
|
|
|
|
'interval': 'step'} # called after each training step
|
|
|
|
dis_sched = CosineAnnealing(discriminator_opt, T_max=10) # called every epoch
|
|
|
|
return [gen_opt, dis_opt], [gen_sched, dis_sched]
|
|
|
|
|
2020-03-31 16:41:24 +00:00
|
|
|
# example with optimizer frequencies
|
|
|
|
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
|
|
|
|
# https://arxiv.org/abs/1704.00028
|
|
|
|
def configure_optimizers(self):
|
|
|
|
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
|
|
|
|
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
|
|
|
|
n_critic = 5
|
|
|
|
return (
|
|
|
|
{'optimizer': dis_opt, 'frequency': n_critic},
|
|
|
|
{'optimizer': gen_opt, 'frequency': 1}
|
|
|
|
)
|
|
|
|
|
2020-03-20 19:49:01 +00:00
|
|
|
Note:
|
|
|
|
|
|
|
|
Some things to know:
|
|
|
|
|
2020-03-06 11:25:24 +00:00
|
|
|
- Lightning calls ``.backward()`` and ``.step()`` on each optimizer
|
2020-03-20 19:49:01 +00:00
|
|
|
and learning rate scheduler as needed.
|
|
|
|
|
2020-03-06 11:25:24 +00:00
|
|
|
- If you use 16-bit precision (``precision=16``), Lightning will automatically
|
2020-03-20 19:49:01 +00:00
|
|
|
handle the optimizers for you.
|
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
- If you use multiple optimizers, :meth:`training_step` will have an additional
|
2020-03-20 19:49:01 +00:00
|
|
|
``optimizer_idx`` parameter.
|
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
- If you use LBFGS Lightning handles the closure function automatically for you.
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-03-06 11:25:24 +00:00
|
|
|
- If you use multiple optimizers, gradients will be calculated only
|
2020-03-20 19:49:01 +00:00
|
|
|
for the parameters of current optimizer at each training step.
|
|
|
|
|
2020-03-06 11:25:24 +00:00
|
|
|
- If you need to control how often those optimizers step or override the
|
2020-04-06 12:12:44 +00:00
|
|
|
default ``.step()`` schedule, override the :meth:`optimizer_step` hook.
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-04-06 12:12:44 +00:00
|
|
|
- If you only want to call a learning rate scheduler every ``x`` step or epoch,
|
2020-06-04 15:23:44 +00:00
|
|
|
or want to monitor a custom metric, you can specify these in a lr_dict:
|
2020-03-20 19:49:01 +00:00
|
|
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
{
|
|
|
|
'scheduler': lr_scheduler,
|
2020-06-04 15:23:44 +00:00
|
|
|
'interval': 'step', # or 'epoch'
|
2020-03-20 19:49:01 +00:00
|
|
|
'monitor': 'val_f1',
|
2020-06-04 15:23:44 +00:00
|
|
|
'frequency': x,
|
2020-03-20 19:49:01 +00:00
|
|
|
}
|
2019-11-28 17:48:55 +00:00
|
|
|
|
2019-03-31 01:45:16 +00:00
|
|
|
"""
|
2020-09-21 02:59:21 +00:00
|
|
|
rank_zero_warn(
|
|
|
|
"`configure_optimizers` must be implemented to be used with the Lightning Trainer"
|
|
|
|
)
|
2019-03-31 01:45:16 +00:00
|
|
|
|
2020-03-12 16:47:23 +00:00
|
|
|
def optimizer_step(
|
2020-07-24 15:42:15 +00:00
|
|
|
self,
|
|
|
|
epoch: int,
|
|
|
|
batch_idx: int,
|
|
|
|
optimizer: Optimizer,
|
|
|
|
optimizer_idx: int,
|
|
|
|
second_order_closure: Optional[Callable] = None,
|
|
|
|
on_tpu: bool = False,
|
|
|
|
using_native_amp: bool = False,
|
|
|
|
using_lbfgs: bool = False,
|
2020-03-12 16:47:23 +00:00
|
|
|
) -> None:
        r"""
        Override this method to adjust the default way the
        :class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.
        By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example
        once per optimizer.

        Args:
            epoch: Current epoch
            batch_idx: Index of current batch
            optimizer: A PyTorch optimizer
            optimizer_idx: If you used multiple optimizers, this indexes into that list.
            second_order_closure: Closure for second-order optimization methods
            on_tpu: ``True`` if a TPU backward is required
            using_native_amp: ``True`` if using native AMP
            using_lbfgs: ``True`` if the matching optimizer is LBFGS

        Examples:
            .. code-block:: python

                # DEFAULT
                def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
                                   second_order_closure, on_tpu, using_native_amp, using_lbfgs):
                    optimizer.step()

                # Alternating schedule for optimizer steps (e.g. GANs)
                def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
                                   second_order_closure, on_tpu, using_native_amp, using_lbfgs):
                    # update generator opt every 2 steps
                    if optimizer_idx == 0:
                        if batch_idx % 2 == 0:
                            optimizer.step()
                            optimizer.zero_grad()

                    # update discriminator opt every 4 steps
                    if optimizer_idx == 1:
                        if batch_idx % 4 == 0:
                            optimizer.step()
                            optimizer.zero_grad()

                    # ...
                    # add as many optimizers as you want

            Here's another example showing how to use this for more advanced things such as
            learning rate warm-up:

            .. code-block:: python

                # learning rate warm-up
                def optimizer_step(self, current_epoch, batch_idx, optimizer, optimizer_idx,
                                   second_order_closure, on_tpu, using_native_amp, using_lbfgs):
                    # warm up lr
                    if self.trainer.global_step < 500:
                        lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)
                        for pg in optimizer.param_groups:
                            pg['lr'] = lr_scale * self.learning_rate

                    # update params
                    optimizer.step()
                    optimizer.zero_grad()

        Note:
            If you also override the :meth:`~pytorch_lightning.core.hooks.ModelHooks.on_before_zero_grad`
            model hook, don't forget to call it before ``optimizer.zero_grad()`` yourself.

        """
        if on_tpu:
            xm.optimizer_step(optimizer)
        elif using_native_amp:
            self.trainer.scaler.step(optimizer)
        elif using_lbfgs:
            optimizer.step(second_order_closure)
        else:
            optimizer.step()

    def optimizer_zero_grad(
        self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int
    ):
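        r"""
        Called by the Trainer to clear the gradients of ``optimizer`` during training.

        Override this hook to change how gradients are cleared. A minimal sketch that
        sets gradients to ``None`` instead of zeroing them (this is not the Lightning
        default, just an illustration):

        .. code-block:: python

            def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
                for group in optimizer.param_groups:
                    for p in group['params']:
                        p.grad = None
        """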
        optimizer.zero_grad()

    def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:
        r"""
        When using truncated backpropagation through time, each batch must be split along the
        time dimension. Lightning handles this by default, but for custom behavior override
        this function.

        Args:
            batch: Current batch
            split_size: The size of the split

        Return:
            List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated
            backpropagation through time. The default implementation splits root level Tensors and
            Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.

        Examples:
            .. code-block:: python

                def tbptt_split_batch(self, batch, split_size):
                    # determine the length of the time dimension from the batch elements
                    time_dims = [len(x[0]) for x in batch
                                 if isinstance(x, (torch.Tensor, collections.Sequence))]
                    splits = []
                    for t in range(0, time_dims[0], split_size):
                        batch_split = []
                        for i, x in enumerate(batch):
                            if isinstance(x, torch.Tensor):
                                split_x = x[:, t:t + split_size]
                            elif isinstance(x, collections.Sequence):
                                split_x = [None] * len(x)
                                for batch_idx in range(len(x)):
                                    split_x[batch_idx] = x[batch_idx][t:t + split_size]

                            batch_split.append(split_x)

                        splits.append(batch_split)

                    return splits

        Note:
            Called in the training loop after
            :meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`
            if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.
            Each returned batch split is passed separately to :meth:`training_step`.

        """
        time_dims = [
            len(x[0])
            for x in batch
            if isinstance(x, (torch.Tensor, collections.Sequence))
        ]
        assert len(time_dims) >= 1, "Unable to determine batch time dimension"
        assert all(
            x == time_dims[0] for x in time_dims
        ), "Batch time dimension length is ambiguous"

        splits = []
        for t in range(0, time_dims[0], split_size):
            batch_split = []
            for i, x in enumerate(batch):
                if isinstance(x, torch.Tensor):
                    split_x = x[:, t : t + split_size]
                elif isinstance(x, collections.Sequence):
                    split_x = [None] * len(x)
                    for batch_idx in range(len(x)):
                        split_x[batch_idx] = x[batch_idx][t : t + split_size]

                batch_split.append(split_x)

            splits.append(batch_split)

        return splits

    def summarize(self, mode: str = ModelSummary.MODE_DEFAULT) -> ModelSummary:
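        r"""
        Logs a summary of the model layers and their parameters and returns it.

        Args:
            mode: The granularity of the summary, e.g. ``'top'`` (only direct submodules)
                or ``'full'`` (all modules).

        A minimal usage sketch (``MyLightningModule`` is a placeholder)::

            model = MyLightningModule(...)
            summary = model.summarize(mode='full')
        """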
        model_summary = ModelSummary(self, mode=mode)
        log.info("\n" + str(model_summary))
        return model_summary

    def freeze(self) -> None:
        r"""
        Freeze all params for inference.

        Example:
            .. code-block:: python

                model = MyLightningModule(...)
                model.freeze()

        """
        for param in self.parameters():
            param.requires_grad = False

        self.eval()

    def unfreeze(self) -> None:
        """
        Unfreeze all parameters for training.

        Example:
            .. code-block:: python

                model = MyLightningModule(...)
                model.unfreeze()

        """
        for param in self.parameters():
            param.requires_grad = True

        self.train()

    def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        r"""
        Called by Lightning to restore your model.
        If you saved something with :meth:`on_save_checkpoint` this is your chance to restore this.

        Args:
            checkpoint: Loaded checkpoint

        Example:
            .. code-block:: python

                def on_load_checkpoint(self, checkpoint):
                    # 99% of the time you don't need to implement this method
                    self.something_cool_i_want_to_save = checkpoint['something_cool_i_want_to_save']

        Note:
            Lightning auto-restores global step, epoch, and train state including amp scaling.
            There is no need for you to restore anything regarding training.
        """

    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        r"""
        Called by Lightning when saving a checkpoint to give you a chance to store anything
        else you might want to save.

        Args:
            checkpoint: Checkpoint to be saved

        Example:
            .. code-block:: python

                def on_save_checkpoint(self, checkpoint):
                    # 99% of use cases you don't need to implement this method
                    checkpoint['something_cool_i_want_to_save'] = my_cool_picklable_object

        Note:
            Lightning saves all aspects of training (epoch, global step, etc.)
            including amp scaling.
            There is no need for you to store anything about training.

        """

    def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
        r"""
        Implement this to override the default items displayed in the progress bar.
        By default it includes the average loss value, split index of BPTT (if used)
        and the version of the experiment when using a logger.

        .. code-block::

            Epoch 1:   4%|▎         | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]

        Here is an example of how to override the defaults:

        .. code-block:: python

            def get_progress_bar_dict(self):
                # don't show the version number
                items = super().get_progress_bar_dict()
                items.pop("v_num", None)
                return items
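
        You can also add your own entries. A small sketch (the ``lr`` key and the way the
        learning rate is looked up here are only illustrative, not part of the defaults):

        .. code-block:: python

            def get_progress_bar_dict(self):
                items = super().get_progress_bar_dict()
                # assumes a single optimizer; shows its current learning rate
                items["lr"] = "{:.3g}".format(self.trainer.optimizers[0].param_groups[0]["lr"])
                return items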

        Return:
            Dictionary with the items to be displayed in the progress bar.
        """
        # call .item() only once but store elements without graphs
        running_train_loss = self.trainer.train_loop.running_loss.mean()
        avg_training_loss = (
            running_train_loss.cpu().item()
            if running_train_loss is not None
            else float("NaN")
        )
        tqdm_dict = {"loss": "{:.3f}".format(avg_training_loss)}

        if self.trainer.truncated_bptt_steps is not None:
            tqdm_dict["split_idx"] = self.trainer.split_idx

        if self.trainer.logger is not None and self.trainer.logger.version is not None:
            version = self.trainer.logger.version
            # show last 4 places of long version strings
            version = version[-4:] if isinstance(version, str) else version
            tqdm_dict["v_num"] = version

        return tqdm_dict

    def get_tqdm_dict(self) -> Dict[str, Union[int, str]]:
        """
        Additional items to be displayed in the progress bar.

        Return:
            Dictionary with the items to be displayed in the progress bar.

        Warning:
            Deprecated since v0.7.3.
            Use :meth:`get_progress_bar_dict` instead.
        """
        rank_zero_warn(
            "`get_tqdm_dict` was renamed to `get_progress_bar_dict` in v0.7.3"
            " and this method will be removed in v1.0.0",
            DeprecationWarning,
        )
        return self.get_progress_bar_dict()

    @classmethod
    def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
        """
        Collect all module arguments in the current constructor and all child constructors.
        The child constructors are all the ``__init__`` methods that reach the current class
        through (chained) ``super().__init__()`` calls.

        Args:
            frame: instance frame

        Returns:
            self_arguments: arguments dictionary of the first instance
            parents_arguments: arguments dictionary of the parent's instances
        """
        if not frame:
            frame = inspect.currentframe()

        frame_args = collect_init_args(frame.f_back, [])
        self_arguments = frame_args[-1]
        parents_arguments = {}

        # add all arguments from parents
        for args in frame_args[:-1]:
            parents_arguments.update(args)
        return self_arguments, parents_arguments

    def save_hyperparameters(self, *args, frame=None) -> None:
        """Save all model arguments.

        Args:
            args: a single object of ``dict``, ``Namespace`` or ``OmegaConf`` type,
                or names (strings) of arguments from the class ``__init__``

        >>> from collections import OrderedDict
        >>> class ManuallyArgsModel(LightningModule):
        ...     def __init__(self, arg1, arg2, arg3):
        ...         super().__init__()
        ...         # manually assign arguments
        ...         self.save_hyperparameters('arg1', 'arg3')
        ...     def forward(self, *args, **kwargs):
        ...         ...
        >>> model = ManuallyArgsModel(1, 'abc', 3.14)
        >>> model.hparams
        "arg1": 1
        "arg3": 3.14

        >>> class AutomaticArgsModel(LightningModule):
        ...     def __init__(self, arg1, arg2, arg3):
        ...         super().__init__()
        ...         # equivalent automatic
        ...         self.save_hyperparameters()
        ...     def forward(self, *args, **kwargs):
        ...         ...
        >>> model = AutomaticArgsModel(1, 'abc', 3.14)
        >>> model.hparams
        "arg1": 1
        "arg2": abc
        "arg3": 3.14

        >>> class SingleArgModel(LightningModule):
        ...     def __init__(self, params):
        ...         super().__init__()
        ...         # manually assign single argument
        ...         self.save_hyperparameters(params)
        ...     def forward(self, *args, **kwargs):
        ...         ...
        >>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))
        >>> model.hparams
        "p1": 1
        "p2": abc
        "p3": 3.14
        """
        if not frame:
            frame = inspect.currentframe().f_back
        init_args = get_init_args(frame)
        assert init_args, "failed to inspect the self init"
        if not args:
            hp = init_args
            self._hparams_name = "kwargs" if hp else None
        else:
            isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]
            if len(isx_non_str) == 1:
                hp = args[isx_non_str[0]]
                cand_names = [k for k, v in init_args.items() if v == hp]
                self._hparams_name = cand_names[0] if cand_names else None
            else:
                hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}
                self._hparams_name = "kwargs"

        # `hparams` are expected here
        if hp:
            self._set_hparams(hp)

    def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:
        if isinstance(hp, Namespace):
            hp = vars(hp)
        if isinstance(hp, dict):
            hp = AttributeDict(hp)
        elif isinstance(hp, PRIMITIVE_TYPES):
            raise ValueError(f"Primitives {PRIMITIVE_TYPES} are not allowed.")
        elif not isinstance(hp, ALLOWED_CONFIG_TYPES):
            raise ValueError(f"Unsupported config type of {type(hp)}.")

        if isinstance(hp, dict) and isinstance(self.hparams, dict):
            self.hparams.update(hp)
        else:
            self._hparams = hp

    def to_onnx(self, file_path: str, input_sample: Optional[Tensor] = None, **kwargs):
        """Saves the model in ONNX format.

        Args:
            file_path: The path of the file the model should be saved to.
            input_sample: A sample of an input tensor for tracing.
            **kwargs: Will be passed to the ``torch.onnx.export`` function.

        Example:
            >>> class SimpleModel(LightningModule):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.l1 = torch.nn.Linear(in_features=64, out_features=4)
            ...
            ...     def forward(self, x):
            ...         return torch.relu(self.l1(x.view(x.size(0), -1)))

            >>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:
            ...     model = SimpleModel()
            ...     input_sample = torch.randn((1, 64))
            ...     model.to_onnx(tmpfile.name, input_sample, export_params=True)
            ...     os.path.isfile(tmpfile.name)
            True
        """

        if isinstance(input_sample, Tensor):
            input_data = input_sample
        elif self.example_input_array is not None:
            input_data = self.example_input_array
        else:
            if input_sample is not None:
                raise ValueError(
                    f"Received `input_sample` of type {type(input_sample)}. Expected type is `Tensor`"
                )
            else:
                raise ValueError(
                    "Could not export to ONNX since neither `input_sample` nor"
                    " `model.example_input_array` attribute is set."
                )
        input_data = input_data.to(self.device)
        if "example_outputs" not in kwargs:
            self.eval()
            with torch.no_grad():
                kwargs["example_outputs"] = self(input_data)

        torch.onnx.export(self, input_data, file_path, **kwargs)

    def to_torchscript(
        self, file_path: Optional[str] = None, **kwargs
    ) -> Union[ScriptModule, Dict[str, ScriptModule]]:
        """
        By default compiles the whole model to a :class:`~torch.jit.ScriptModule`.
        If you would like to customize the modules that are scripted or you want to use tracing,
        you should override this method. In case you want to return multiple modules, we
        recommend using a dictionary.

        Args:
            file_path: Path where to save the torchscript. Default: None (no file saved).
            **kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` function.

        Note:
            - Requires the implementation of the
              :meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.
            - The exported script will be set to evaluation mode.
            - It is recommended that you install the latest supported version of PyTorch
              to use this feature without limitations. See also the :mod:`torch.jit`
              documentation for supported features.

        Example:
            >>> class SimpleModel(LightningModule):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.l1 = torch.nn.Linear(in_features=64, out_features=4)
            ...
            ...     def forward(self, x):
            ...         return torch.relu(self.l1(x.view(x.size(0), -1)))
            ...
            >>> model = SimpleModel()
            >>> torch.jit.save(model.to_torchscript(), "model.pt")  # doctest: +SKIP
            >>> os.path.isfile("model.pt")  # doctest: +SKIP
            True
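
            A tracing-based override might look like the following sketch (it assumes
            ``self.example_input_array`` has been set; tracing is not the default behavior):

            .. code-block:: python

                def to_torchscript(self, file_path=None, **kwargs):
                    mode = self.training
                    traced_module = torch.jit.trace(self.eval(), self.example_input_array)
                    self.train(mode)
                    if file_path is not None:
                        torch.jit.save(traced_module, file_path)
                    return traced_module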

        Return:
            This LightningModule as a torchscript, regardless of whether file_path is
            defined or not.
        """

        mode = self.training
        with torch.no_grad():
            scripted_module = torch.jit.script(self.eval(), **kwargs)
        self.train(mode)

        if file_path is not None:
            torch.jit.save(scripted_module, file_path)

        return scripted_module

    @property
    def hparams(self) -> Union[AttributeDict, str]:
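        """The collection of hyperparameters saved with :meth:`save_hyperparameters` or
        assigned directly to ``self.hparams``. A minimal usage sketch (assumes a
        ``learning_rate`` argument was saved in ``__init__``)::

            model = MyLightningModule(learning_rate=1e-3)
            model.hparams.learning_rate  # 0.001
        """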
        if not hasattr(self, "_hparams"):
            self._hparams = AttributeDict()
        return self._hparams

    @hparams.setter
    def hparams(self, hp: Union[dict, Namespace, Any]):
        hparams_assignment_name = self.__get_hparams_assignment_variable()
        self._hparams_name = hparams_assignment_name
        self._set_hparams(hp)

    def __get_hparams_assignment_variable(self):
        """
        Looks at the code of the class to figure out what the user named ``self.hparams``.
        This only happens when the user explicitly sets ``self.hparams``.
        """
        try:
            class_code = inspect.getsource(self.__class__)
            lines = class_code.split("\n")
            for line in lines:
                line = re.sub(r"\s+", "", line, flags=re.UNICODE)
                if ".hparams=" in line:
                    return line.split("=")[1]
        except Exception:
            return "hparams"

        return None