"""
The lightning training loop handles everything except the actual computations of your model.
To decide what will happen in your training loop, define the `training_step` function.

Below are all the things lightning automates for you in the training loop.

Accumulated gradients
---------------------

Gradient accumulation runs K small batches of size N before doing a backward pass.
The effect is a larger effective batch size of K x N.

.. code-block:: python

    # DEFAULT (ie: no accumulated grads)
    trainer = Trainer(accumulate_grad_batches=1)
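
For example, accumulating over 4 batches makes the optimizer see an effective batch that
is 4x larger; any positive integer works, 4 is only illustrative:

.. code-block:: python

    # accumulate gradients over 4 batches before each optimizer step
    trainer = Trainer(accumulate_grad_batches=4)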

Force training for min or max epochs
------------------------------------

It can be useful to force training for a minimum number of epochs or to limit it to a maximum number.

.. code-block:: python

    # DEFAULT
    trainer = Trainer(min_epochs=1, max_epochs=1000)

Early stopping
--------------

The trainer already sets up default early stopping for you.
To modify this behavior, pass in your own EarlyStopping callback.

.. code-block:: python

    from pytorch_lightning.callbacks import EarlyStopping

    # DEFAULTS used by Trainer
    early_stop_callback = EarlyStopping(
        monitor='val_loss',
        min_delta=0.00,
        patience=3,
        verbose=False,
        mode='min'
    )

    # without passing anything in, uses the default callback above
    trainer = Trainer()

    # pass in your own to override the default callback
    trainer = Trainer(early_stop_callback=early_stop_callback)

    # pass in min_epochs to enable the callback after min_epochs have run
    trainer = Trainer(early_stop_callback=early_stop_callback, min_epochs=5)

    # pass in None to disable it
    trainer = Trainer(early_stop_callback=None)

Force disable early stop
------------------------

To disable early stopping, pass None to the early_stop_callback.

.. code-block:: python

    # DEFAULT
    trainer = Trainer(early_stop_callback=None)

Gradient Clipping
-----------------

Gradient clipping may be enabled to avoid exploding gradients. Specifically, this will
`clip the gradient norm computed over all model parameters together
<https://pytorch.org/docs/stable/nn.html#torch.nn.utils.clip_grad_norm_>`_.

.. code-block:: python

    # DEFAULT (ie: don't clip)
    trainer = Trainer(gradient_clip_val=0)

    # clip gradients with norm above 0.5
    trainer = Trainer(gradient_clip_val=0.5)

Inspect gradient norms
----------------------

Looking at grad norms can help you figure out where training might be going wrong.

.. code-block:: python

    # DEFAULT (-1 doesn't track norms)
    trainer = Trainer(track_grad_norm=-1)

    # track the LP norm (P=2 here)
    trainer = Trainer(track_grad_norm=2)

Set how much of the training set to check
-----------------------------------------

If you don't want to check 100% of the training set (for debugging or if it's huge), set this flag.

train_percent_check will be overwritten by overfit_pct if `overfit_pct > 0`.

.. code-block:: python

    # DEFAULT
    trainer = Trainer(train_percent_check=1.0)

    # check 10% only
    trainer = Trainer(train_percent_check=0.1)
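
Since `overfit_pct` takes precedence over `train_percent_check`, it gives a quick way to
overfit on a tiny fraction of the data as a sanity check. The value below is only illustrative:

.. code-block:: python

    # overfit_pct > 0 overrides train_percent_check; here only 1% of the data is used
    trainer = Trainer(overfit_pct=0.01)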

Packed sequences as inputs
--------------------------

When using PackedSequence, do two things:

1. Return either a padded tensor in the dataset or a list of variable-length tensors
   in the dataloader `collate_fn` (the example below shows the list implementation).
2. Pack the sequence in the forward or training and validation steps, depending on the use case.

.. code-block:: python

    # For use in the dataloader
    def collate_fn(batch):
        x = [item[0] for item in batch]
        y = [item[1] for item in batch]
        return x, y

    # In the LightningModule
    def training_step(self, batch, batch_idx):
        x = rnn.pack_sequence(batch[0], enforce_sorted=False)
        y = rnn.pack_sequence(batch[1], enforce_sorted=False)

Truncated Backpropagation Through Time
--------------------------------------

There are times when multiple backward passes are needed for each batch.
For example, it may save memory to use Truncated Backpropagation Through Time when training RNNs.

When this flag is enabled, each batch is split into sequences of size truncated_bptt_steps
and passed to training_step(...) separately. A default splitting function is provided;
however, you can override it for more flexibility. See `tbptt_split_batch` and the sketch below.

.. code-block:: python

    # DEFAULT (single backwards pass per batch)
    trainer = Trainer(truncated_bptt_steps=None)

    # split batch into sequences of size 2
    trainer = Trainer(truncated_bptt_steps=2)
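
If the default split does not fit your data, override `tbptt_split_batch` in your
LightningModule. The snippet below is only a sketch: it assumes the batch is a tuple
(x, y) whose time dimension is along axis 1.

.. code-block:: python

    def tbptt_split_batch(self, batch, split_size):
        x, y = batch
        splits = []
        for t in range(0, x.size(1), split_size):
            # each chunk is passed to training_step as its own sub-batch
            splits.append((x[:, t:t + split_size], y[:, t:t + split_size]))
        return splits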
"""

import copy
import logging as log
import warnings
from abc import ABC, abstractmethod
from typing import Callable, List, Union

import numpy as np
from torch.utils.data import DataLoader

from pytorch_lightning import LightningModule
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.utilities.debugging import MisconfigurationException

try:
    from apex import amp
except ImportError:
    APEX_AVAILABLE = False
else:
    APEX_AVAILABLE = True

try:
    import torch_xla.core.xla_model as xm
    import torch_xla.distributed.parallel_loader as xla_pl
except ImportError:
    XLA_AVAILABLE = False
else:
    XLA_AVAILABLE = True

class TrainerTrainLoopMixin(ABC):

    # this is just a summary of variables used in this abstract class,
    # the proper values/initialisation should be done in the child class
    max_epochs: int
    min_epochs: int
    use_ddp: bool
    use_dp: bool
    use_ddp2: bool
    single_gpu: bool
    use_tpu: bool
    data_parallel_device_ids: ...
    check_val_every_n_epoch: ...
    num_training_batches: int
    val_check_batch: ...
    num_val_batches: int
    disable_validation: bool
    fast_dev_run: ...
    main_progress_bar: ...
    accumulation_scheduler: ...
    lr_schedulers: ...
    enable_early_stop: ...
    early_stop_callback: ...
    callback_metrics: ...
    logger: Union[LightningLoggerBase, bool]
    global_step: int
    testing: bool
    log_save_interval: float
    proc_rank: int
    row_log_interval: float
    total_batches: int
    truncated_bptt_steps: ...
    optimizers: ...
    accumulate_grad_batches: int
    use_amp: bool
    print_nan_grads: ...
    track_grad_norm: ...
    model: LightningModule
    running_loss: ...
    training_tqdm_dict: ...
    reduce_lr_on_plateau_scheduler: ...
    profiler: ...
    batch_idx: int
    precision: ...
    train_dataloader: DataLoader
    reload_dataloaders_every_epoch: bool
    progress_bar_refresh_rate: ...
    max_steps: int
    min_steps: int
    total_batch_idx: int

    # Callback system
    callbacks: List[Callback]
    on_train_start: Callable
    on_train_end: Callable
    on_batch_start: Callable
    on_batch_end: Callable
    on_epoch_start: Callable
    on_epoch_end: Callable

    @property
    def max_nb_epochs(self):
        """
        .. warning:: `max_nb_epochs` is deprecated and will be removed in v0.8.0, use `max_epochs` instead.
        """
        warnings.warn("`max_nb_epochs` is deprecated and will be removed in "
                      "v0.8.0, use `max_epochs` instead.", DeprecationWarning)
        return self.max_epochs

    @property
    def min_nb_epochs(self):
        """
        .. warning:: `min_nb_epochs` is deprecated and will be removed in v0.8.0, use `min_epochs` instead.
        """
        warnings.warn("`min_nb_epochs` is deprecated and will be removed in "
                      "v0.8.0, use `min_epochs` instead.", DeprecationWarning)
        return self.min_epochs

    @abstractmethod
    def get_model(self):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def is_function_implemented(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def is_infinite_dataloader(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def run_evaluation(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def transfer_batch_to_gpu(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def transfer_batch_to_tpu(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def clip_gradients(self):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def print_nan_gradients(self):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def is_overriden(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def add_tqdm_metrics(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def log_metrics(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def process_output(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def reset_train_dataloader(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def reset_val_dataloader(self, model):
        """Warning: this is just an empty shell for code implemented in another class."""

    @abstractmethod
    def has_arg(self, *args):
        """Warning: this is just an empty shell for code implemented in another class."""

    def train(self):
        warnings.warn('Displayed epoch numbers in the progress bar start from "1" until v0.6.x,'
                      ' but will start from "0" in v0.8.0.', DeprecationWarning)

        # get model
        model = self.get_model()

        # load data
        self.reset_train_dataloader(model)
        self.reset_val_dataloader(model)

        # Train begin callbacks
        model.on_train_start()
        self.on_train_start()

        try:
            # run all epochs
            for epoch in range(self.current_epoch, self.max_epochs):
                # set seed for distributed sampler (enables shuffling for each epoch)
                if self.use_ddp \
                        and hasattr(self.train_dataloader.sampler, 'set_epoch'):
                    self.train_dataloader.sampler.set_epoch(epoch)

                # update training progress in trainer and model
                model.current_epoch = epoch
                self.current_epoch = epoch

                total_val_batches = 0
                is_val_epoch = False
                if not self.disable_validation:
                    # val can be checked multiple times within one epoch
                    is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
                    val_checks_per_epoch = self.num_training_batches // self.val_check_batch
                    val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0
                    total_val_batches = self.num_val_batches * val_checks_per_epoch
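                    # e.g. if val_check_batch is half of num_training_batches,
                    # validation runs twice per epoch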

                # total batches includes multiple val checks
                self.total_batches = self.num_training_batches + total_val_batches
                self.batch_loss_value = 0  # accumulated grads

                if self.fast_dev_run:
                    # limit the number of batches to 2 (1 train and 1 val) in fast_dev_run
                    num_iterations = 2
                elif self.is_infinite_dataloader(self.train_dataloader):
                    # for an infinite train loader, the progress bar never ends
                    num_iterations = None
                else:
                    num_iterations = self.total_batches

                # reset progress bar
                # .reset() doesn't work on a disabled progress bar so we should check
                if not self.main_progress_bar.disable:
                    self.main_progress_bar.reset(num_iterations)
                desc = f'Epoch {epoch + 1}' if not self.is_infinite_dataloader(self.train_dataloader) else ''
                self.main_progress_bar.set_description(desc)

                # change gradient accumulation according to the accumulation_scheduler
                self.accumulation_scheduler.on_epoch_start(self, self.get_model())

                # -----------------
                # RUN TNG EPOCH
                # -----------------
                self.run_training_epoch()

                # update LR schedulers
                if self.lr_schedulers is not None:
                    for lr_scheduler in self.lr_schedulers:
                        lr_scheduler.step()
                if self.reduce_lr_on_plateau_scheduler is not None:
                    val_loss = self.callback_metrics.get('val_loss')
                    if val_loss is None:
                        avail_metrics = ','.join(list(self.callback_metrics.keys()))
                        m = f'ReduceLROnPlateau conditioned on metric val_loss ' \
                            f'which is not available. Available metrics are: {avail_metrics}'
                        raise MisconfigurationException(m)
                    self.reduce_lr_on_plateau_scheduler.step(val_loss)

                if self.max_steps and self.max_steps == self.global_step:
                    self.main_progress_bar.close()
                    model.on_train_end()
                    self.on_train_end()
                    return

                # early stopping
                met_min_epochs = epoch >= self.min_epochs - 1
                met_min_steps = self.global_step >= self.min_steps if self.min_steps else True

                if self.enable_early_stop and not self.disable_validation and is_val_epoch:
                    if (met_min_epochs and met_min_steps) or self.fast_dev_run:
                        should_stop = self.early_stop_callback.on_epoch_end(self, self.get_model())
                        # stop training
                        stop = should_stop and met_min_epochs
                        if stop:
                            self.run_training_teardown()
                            self.on_train_end()
                            return

            self.run_training_teardown()

        except KeyboardInterrupt:
            log.info('Detected KeyboardInterrupt, attempting graceful shutdown...')
            self.run_training_teardown()

        # Train end callbacks
        self.on_train_end()

    def run_training_epoch(self):

        # Epoch begin callbacks
        self.on_epoch_start()

        # before epoch hook
        if self.is_function_implemented('on_epoch_start'):
            model = self.get_model()
            with self.profiler.profile('on_epoch_start'):
                model.on_epoch_start()

        # reset train dataloader
        if self.reload_dataloaders_every_epoch:
            self.reset_train_dataloader(self.get_model())

        # track local dataloader so TPU can wrap each epoch
        train_dataloader = self.train_dataloader

        # on TPU we have to wrap it under the ParallelLoader
        if self.use_tpu:
            device = xm.xla_device()
            train_dataloader = xla_pl.ParallelLoader(train_dataloader, [device])
            train_dataloader = train_dataloader.per_device_loader(device)

        # run epoch
        for batch_idx, batch in self.profiler.profile_iterable(
                enumerate(train_dataloader), "get_train_batch"
        ):
            # stop epoch if we limited the number of training batches
            if batch_idx >= self.num_training_batches:
                break

            self.batch_idx = batch_idx

            model = self.get_model()
            model.global_step = self.global_step

            # ---------------
            # RUN TRAIN STEP
            # ---------------
            output = self.run_training_batch(batch, batch_idx)
            batch_result, grad_norm_dic, batch_step_metrics = output

            # when training_step returns -1, we end the epoch early
            early_stop_epoch = batch_result == -1

            # ---------------
            # RUN VAL STEP
            # ---------------
            is_val_check_batch = (batch_idx + 1) % self.val_check_batch == 0
            can_check_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
            should_check_val = not self.disable_validation and can_check_epoch
            should_check_val = should_check_val and (is_val_check_batch or early_stop_epoch)

            # fast_dev_run always forces val checking after the train batch
            if self.fast_dev_run or should_check_val:
                self.run_evaluation(test_mode=self.testing)

            if self.enable_early_stop:
                self.early_stop_callback.check_metrics(self.callback_metrics)

            # when logs should be saved
            should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch
            if should_save_log or self.fast_dev_run:
                if self.proc_rank == 0 and self.logger is not None:
                    self.logger.save()

            # when metrics should be logged
            should_log_metrics = batch_idx % self.row_log_interval == 0 or early_stop_epoch
            if should_log_metrics or self.fast_dev_run:
                # log user-requested information to the logger
                self.log_metrics(batch_step_metrics, grad_norm_dic)

            # advance the global step according to gradient accumulation progress
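            # e.g. with accumulate_grad_batches=4 the global step increases once every 4 batches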
            if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:
                self.global_step += 1
            self.total_batch_idx += 1

            # max steps reached, end training
            if self.max_steps is not None and self.max_steps == self.global_step:
                break

            # end epoch early
            # stop when the flag is changed or we've gone past the amount
            # requested in the batches
            if early_stop_epoch or self.fast_dev_run:
                break

        # epoch end hook
        if self.is_function_implemented('on_epoch_end'):
            model = self.get_model()
            with self.profiler.profile('on_epoch_end'):
                model.on_epoch_end()

        # Epoch end callbacks
        self.on_epoch_end()

    def run_training_batch(self, batch, batch_idx):
        # track grad norms
        grad_norm_dic = {}

        # track all metrics for callbacks
        all_callback_metrics = []

        # track metrics to log
        all_log_metrics = []

        if batch is None:
            return 0, grad_norm_dic, {}

        # Batch begin callbacks
        self.on_batch_start()

        # hook
        if self.is_function_implemented('on_batch_start'):
            model_ref = self.get_model()
            with self.profiler.profile('on_batch_start'):
                response = model_ref.on_batch_start(batch)

            if response == -1:
                return -1, grad_norm_dic, {}

        splits = [batch]
        if self.truncated_bptt_steps is not None:
            model_ref = self.get_model()
            with self.profiler.profile('tbptt_split_batch'):
                splits = model_ref.tbptt_split_batch(batch, self.truncated_bptt_steps)

        self.hiddens = None
        for split_idx, split_batch in enumerate(splits):
            self.split_idx = split_idx

            # call training_step once per optimizer
            for opt_idx, optimizer in enumerate(self.optimizers):
                # make sure only the gradients of the current optimizer's parameters are calculated
                # in the training step to prevent dangling gradients in a multiple-optimizer setup
                if len(self.optimizers) > 1:
                    for param in self.get_model().parameters():
                        param.requires_grad = False
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.requires_grad = True

                # wrap the forward step in a closure so second order methods work
                def optimizer_closure():
                    # forward pass
                    with self.profiler.profile('model_forward'):
                        output = self.training_forward(
                            split_batch, batch_idx, opt_idx, self.hiddens)
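
                    # training_forward returns (loss, progress bar metrics, log metrics,
                    # callback metrics, hiddens) after process_output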
                    closure_loss = output[0]
                    progress_bar_metrics = output[1]
                    log_metrics = output[2]
                    callback_metrics = output[3]
                    self.hiddens = output[4]

                    # accumulate loss
                    # (if accumulate_grad_batches = 1 no effect)
                    closure_loss = closure_loss / self.accumulate_grad_batches

                    # backward pass
                    model_ref = self.get_model()
                    with self.profiler.profile('model_backward'):
                        model_ref.backward(self, closure_loss, optimizer, opt_idx)

                    # track metrics for callbacks
                    all_callback_metrics.append(callback_metrics)

                    # track progress bar metrics
                    self.add_tqdm_metrics(progress_bar_metrics)
                    all_log_metrics.append(log_metrics)

                    # insert after-backward hook
                    if self.is_function_implemented('on_after_backward'):
                        model_ref = self.get_model()
                        with self.profiler.profile('on_after_backward'):
                            model_ref.on_after_backward()

                    return closure_loss

                # calculate loss
                loss = optimizer_closure()

                # nan grads
                if self.print_nan_grads:
                    self.print_nan_gradients()

                # track total loss for logging (avoid mem leaks)
                self.batch_loss_value += loss.item()

                # gradient update with accumulated gradients
                if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:

                    # track gradient norms when requested
                    if batch_idx % self.row_log_interval == 0:
                        if self.track_grad_norm > 0:
                            model = self.get_model()
                            grad_norm_dic = model.grad_norm(
                                self.track_grad_norm)

                    # clip gradients
                    self.clip_gradients()

                    # calls .step(), .zero_grad()
                    # override optimizer_step to modify this behavior
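                    # the closure is passed so optimizers like LBFGS, which re-evaluate
                    # the loss inside .step(), can call the forward/backward again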
                    model = self.get_model()
                    with self.profiler.profile('optimizer_step'):
                        model.optimizer_step(self.current_epoch, batch_idx,
                                             optimizer, opt_idx, optimizer_closure)

                    # calculate running loss for display
                    self.running_loss.append(self.batch_loss_value)
                    self.batch_loss_value = 0
                    self.avg_loss = np.mean(self.running_loss[-100:])

        # activate batch end hook
        if self.is_function_implemented('on_batch_end'):
            model = self.get_model()
            with self.profiler.profile('on_batch_end'):
                model.on_batch_end()

        # Batch end callbacks
        self.on_batch_end()

        # update progress bar
        if batch_idx % self.progress_bar_refresh_rate == 0:
            self.main_progress_bar.update(self.progress_bar_refresh_rate)
            self.main_progress_bar.set_postfix(**self.training_tqdm_dict)

        # collapse all metrics into one dict
        all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}

        # track all metrics for callbacks
        self.callback_metrics.update({k: v for d in all_callback_metrics for k, v in d.items()})

        return 0, grad_norm_dic, all_log_metrics

    def run_training_teardown(self):
        model = self.get_model()

        self.main_progress_bar.close()

        with self.profiler.profile('on_train_end'):
            model.on_train_end()

        if self.logger is not None:
            self.logger.finalize("success")

        # summarize profile results
        self.profiler.describe()

    def training_forward(self, batch, batch_idx, opt_idx, hiddens):
        """
        Handle the forward pass for each training case (distributed, single gpu, etc...)
        :param batch: the current batch of data
        :param batch_idx: the index of the current batch
        :param opt_idx: the index of the current optimizer
        :param hiddens: hidden state carried over when using truncated BPTT
        :return: the processed output of training_step
        """
        # ---------------
        # FORWARD
        # ---------------
        # enable not needing to add opt_idx to training_step
        args = [batch, batch_idx]

        if len(self.optimizers) > 1:
            if self.has_arg('training_step', 'optimizer_idx'):
                args.append(opt_idx)
            else:
                raise ValueError(
                    f'Your LightningModule defines {len(self.optimizers)} optimizers but '
                    f'training_step is missing the "optimizer_idx" argument.'
                )

        # pass hiddens if using tbptt
        if self.truncated_bptt_steps is not None:
            args.append(hiddens)

        # distributed forward
        if self.use_ddp or self.use_ddp2 or self.use_dp:
            output = self.model(*args)

        # single GPU forward
        elif self.single_gpu:
            gpu_id = 0
            if isinstance(self.data_parallel_device_ids, list):
                gpu_id = self.data_parallel_device_ids[0]
            batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id)
            args[0] = batch
            output = self.model.training_step(*args)

        # TPU support
        elif self.use_tpu:
            batch = self.transfer_batch_to_tpu(copy.copy(batch))
            args[0] = batch
            output = self.model.training_step(*args)

        # CPU forward
        else:
            output = self.model.training_step(*args)

        # allow any mode to define training_end
        if self.is_overriden('training_end'):
            model_ref = self.get_model()
            with self.profiler.profile('training_end'):
                output = model_ref.training_end(output)

        # format and reduce outputs accordingly
        output = self.process_output(output, train=True)

        return output