import distutils.util
import inspect
import os
import sys
import warnings
from argparse import ArgumentParser
from typing import Union, Optional, List, Dict, Tuple, Iterable, Any

import torch
import torch.distributed as torch_distrib
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, Callback
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.profiler import SimpleProfiler, PassThroughProfiler, BaseProfiler
from pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin
from pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin
from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_8, TrainerDeprecatedAPITillVer0_9
from pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin
from pytorch_lightning.trainer.distrib_parts import TrainerDPMixin, parse_gpu_ids, determine_root_gpu_device
from pytorch_lightning.trainer.evaluation_loop import TrainerEvaluationLoopMixin
from pytorch_lightning.trainer.logging import TrainerLoggingMixin
from pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin
from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.trainer.training_io import TrainerIOMixin
from pytorch_lightning.trainer.training_loop import TrainerTrainLoopMixin
from pytorch_lightning.trainer.training_tricks import TrainerTrainingTricksMixin
from pytorch_lightning.utilities.exceptions import MisconfigurationException

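# optional dependency: NVIDIA apex provides the `amp` module used for 16-bit (mixed-precision) training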
try:
    from apex import amp
except ImportError:
    APEX_AVAILABLE = False
else:
    APEX_AVAILABLE = True

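# optional dependency: torch_xla enables training on TPUs via the XLA runtime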
try:
    import torch_xla
    import torch_xla.core.xla_model as xm
    import torch_xla.distributed.xla_multiprocessing as xmp
except ImportError:
    XLA_AVAILABLE = False
else:
    XLA_AVAILABLE = True


class Trainer(
    TrainerIOMixin,
    TrainerOptimizersMixin,
    TrainerAMPMixin,
    TrainerDPMixin,
    TrainerDDPMixin,
    TrainerLoggingMixin,
    TrainerModelHooksMixin,
    TrainerTrainingTricksMixin,
    TrainerDataLoadingMixin,
    TrainerEvaluationLoopMixin,
    TrainerTrainLoopMixin,
    TrainerCallbackConfigMixin,
    TrainerCallbackHookMixin,
    TrainerDeprecatedAPITillVer0_8,
    TrainerDeprecatedAPITillVer0_9,
):
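    # argument names deprecated in the given versions; `get_deprecated_arg_names()` collects these tuples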
    DEPRECATED_IN_0_8 = (
        'gradient_clip', 'nb_gpu_nodes', 'max_nb_epochs', 'min_nb_epochs',
        'add_row_log_interval', 'nb_sanity_val_steps'
    )
    DEPRECATED_IN_0_9 = ('use_amp', 'show_progress_bar')

    def __init__(
            self,
            logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,
            checkpoint_callback: Union[ModelCheckpoint, bool] = True,
            early_stop_callback: Optional[Union[EarlyStopping, bool]] = False,
            callbacks: List[Callback] = [],
            default_save_path: Optional[str] = None,
            gradient_clip_val: float = 0,
            process_position: int = 0,
            num_nodes: int = 1,
            gpus: Optional[Union[List[int], str, int]] = None,
            num_tpu_cores: Optional[int] = None,
            log_gpu_memory: Optional[str] = None,
            progress_bar_refresh_rate: int = 1,
            overfit_pct: float = 0.0,
            track_grad_norm: int = -1,
            check_val_every_n_epoch: int = 1,
            fast_dev_run: bool = False,
            accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = 1,
            max_epochs: int = 1000,
            min_epochs: int = 1,
            max_steps: Optional[int] = None,
            min_steps: Optional[int] = None,
            train_percent_check: float = 1.0,
            val_percent_check: float = 1.0,
            test_percent_check: float = 1.0,
            val_check_interval: float = 1.0,
            log_save_interval: int = 100,
            row_log_interval: int = 10,
            add_row_log_interval=None,  # backward compatible, todo: remove in v0.8.0
            distributed_backend: Optional[str] = None,
            precision: int = 32,
            print_nan_grads: bool = False,  # backward compatible, todo: remove in v0.9.0
            weights_summary: Optional[str] = 'full',
            weights_save_path: Optional[str] = None,
            amp_level: str = 'O1',
            num_sanity_val_steps: int = 5,
            truncated_bptt_steps: Optional[int] = None,
            resume_from_checkpoint: Optional[str] = None,
            profiler: Optional[BaseProfiler] = None,
            benchmark: bool = False,
            reload_dataloaders_every_epoch: bool = False,
            gradient_clip=None,  # backward compatible, todo: remove in v0.8.0
            nb_gpu_nodes=None,  # backward compatible, todo: remove in v0.8.0
            max_nb_epochs=None,  # backward compatible, todo: remove in v0.8.0
            min_nb_epochs=None,  # backward compatible, todo: remove in v0.8.0
            use_amp=None,  # backward compatible, todo: remove in v0.9.0
            show_progress_bar=None,  # backward compatible, todo: remove in v0.9.0
            nb_sanity_val_steps=None,  # backward compatible, todo: remove in v0.8.0
            **kwargs
    ):
r"""
|
|
|
|
|
|
|
|
Customize every aspect of training via flags
|
|
|
|
|
|
|
|
Args:
|
2020-02-25 19:52:39 +00:00
|
|
|
logger: Logger (or iterable collection of loggers) for experiment tracking.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
checkpoint_callback: Callback for checkpointing.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-27 21:07:51 +00:00
|
|
|
early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`):
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-26 04:17:27 +00:00
|
|
|
callbacks: Add a list of callbacks.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
default_save_path: Default path for logs and weights when no logger/ckpt_callback passed
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
gradient_clip_val: 0 means don't clip.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
gradient_clip:
|
2020-03-20 19:49:01 +00:00
|
|
|
.. warning:: .. deprecated:: 0.7.0
|
|
|
|
|
|
|
|
Use `gradient_clip_val` instead. Will remove 0.9.0.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
process_position: orders the tqdm bar when running multiple models on same machine.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
num_nodes: number of GPU nodes for distributed training.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
nb_gpu_nodes:
|
2020-03-06 11:45:59 +00:00
|
|
|
.. warning:: .. deprecated:: 0.7.0
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-03-06 11:45:59 +00:00
|
|
|
Use `num_nodes` instead. Will remove 0.9.0.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
gpus: Which GPUs to train on.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
num_tpu_cores: How many TPU cores to train on (1 or 8).
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
log_gpu_memory: None, 'min_max', 'all'. Might slow performance
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-04-02 22:53:00 +00:00
|
|
|
show_progress_bar:
|
|
|
|
.. warning:: .. deprecated:: 0.7.2
|
|
|
|
|
|
|
|
Set `progress_bar_refresh_rate` to postive integer to enable. Will remove 0.9.0.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-04-02 22:53:00 +00:00
|
|
|
progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-03-24 18:49:11 +00:00
|
|
|
overfit_pct: How much of training-, validation-, and test dataset to check.
|
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
track_grad_norm: -1 no tracking. Otherwise tracks that norm
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
check_val_every_n_epoch: Check val every n train epochs.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
fast_dev_run: runs 1 batch of train, test and val to find any bugs (ie: a sort of unit test).
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
max_epochs: Stop training once this number of epochs is reached.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
max_nb_epochs:
|
2020-03-06 11:45:59 +00:00
|
|
|
.. warning:: .. deprecated:: 0.7.0
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-03-06 11:45:59 +00:00
|
|
|
Use `max_epochs` instead. Will remove 0.9.0.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
min_epochs: Force training for at least these many epochs
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
min_nb_epochs:
|
2020-03-06 11:45:59 +00:00
|
|
|
.. warning:: .. deprecated:: 0.7.0
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-03-06 11:45:59 +00:00
|
|
|
Use `min_epochs` instead. Will remove 0.9.0.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
max_steps: Stop training after this number of steps. Disabled by default (None).
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
min_steps: Force training for at least these number of steps. Disabled by default (None).
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
train_percent_check: How much of training dataset to check.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
val_percent_check: How much of validation dataset to check.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
test_percent_check: How much of test dataset to check.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
val_check_interval: How often within one training epoch to check the validation set
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
log_save_interval: Writes logs to disk this often
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
row_log_interval: How often to add logging rows (does not write to disk)
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
add_row_log_interval:
|
2020-03-06 11:45:59 +00:00
|
|
|
.. warning:: .. deprecated:: 0.7.0
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-03-06 11:45:59 +00:00
|
|
|
Use `row_log_interval` instead. Will remove 0.9.0.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
distributed_backend: The distributed backend to use.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
use_amp:
|
2020-03-05 23:52:17 +00:00
|
|
|
.. warning:: .. deprecated:: 0.7.0
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-03-06 17:00:05 +00:00
|
|
|
Use `precision` instead. Will remove 0.9.0.
|
2020-02-17 21:01:20 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
precision: Full precision (32), half precision (16).
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-03-19 13:24:45 +00:00
|
|
|
print_nan_grads:
|
|
|
|
.. warning:: .. deprecated:: 0.7.2
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-03-19 13:24:45 +00:00
|
|
|
Has no effect. When detected, NaN grads will be printed automatically.
|
|
|
|
Will remove 0.9.0.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
weights_summary: Prints a summary of the weights when training begins.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
weights_save_path: Where to save weights if specified.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
amp_level: The optimization level to use (O1, O2, etc...).
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
num_sanity_val_steps: Sanity check runs n batches of val before starting the training routine.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
nb_sanity_val_steps:
|
2020-03-05 23:52:17 +00:00
|
|
|
.. warning:: .. deprecated:: 0.7.0
|
2020-03-20 19:49:01 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
Use `num_sanity_val_steps` instead. Will remove 0.8.0.
|
2020-01-17 11:03:31 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
truncated_bptt_steps: Truncated back prop breaks performs backprop every k steps of
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-04-03 05:35:09 +00:00
|
|
|
resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here.
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-23 02:23:30 +00:00
|
|
|
profiler: To profile individual steps during training and assist in
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-02-25 03:23:25 +00:00
|
|
|
reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch
|
2020-03-05 18:11:06 +00:00
|
|
|
|
2020-03-05 23:52:17 +00:00
|
|
|
benchmark: If true enables cudnn.benchmark.
|
2019-07-18 16:04:19 +00:00
|
|
|
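
        Example:

            A minimal sketch of constructing a ``Trainer`` with a few of the flags
            documented above (the particular values are illustrative, not recommended
            defaults)::

                from pytorch_lightning import Trainer

                trainer = Trainer(
                    max_epochs=10,
                    gpus=1,
                    gradient_clip_val=0.5,
                    accumulate_grad_batches=2,
                )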
"""
|
|
|
|
|
2020-02-26 04:17:27 +00:00
|
|
|
        # Init callbacks
        self.callbacks = callbacks
        self.on_init_start()

        # benchmarking
        self.benchmark = benchmark
        if benchmark:
            torch.backends.cudnn.benchmark = True

        # Transfer params
        self.num_nodes = num_nodes
        # Backward compatibility, TODO: remove in v0.8.0
        if nb_gpu_nodes is not None:
            warnings.warn("Argument `nb_gpu_nodes` has been renamed to `num_nodes` since v0.5.0"
                          " and this argument will be removed in v0.8.0", DeprecationWarning)
            self.num_gpu_nodes = nb_gpu_nodes
        self.log_gpu_memory = log_gpu_memory

        self.gradient_clip_val = gradient_clip_val
        # Backward compatibility, TODO: remove in v0.8.0
        if gradient_clip is not None:
            warnings.warn("Argument `gradient_clip` has been renamed to `gradient_clip_val` since v0.5.0"
                          " and this argument will be removed in v0.8.0", DeprecationWarning)
            self.gradient_clip = gradient_clip

        self.progress_bar_refresh_rate = progress_bar_refresh_rate
        self.check_val_every_n_epoch = check_val_every_n_epoch
        self.track_grad_norm = track_grad_norm
        self.on_gpu = True if (gpus and torch.cuda.is_available()) else False

        # tpu config
        self.on_tpu = num_tpu_cores is not None
        self.num_tpu_cores = num_tpu_cores
        assert num_tpu_cores in [1, 8, None], 'num_tpu_cores can only be 1 or 8'

        self.process_position = process_position
        self.weights_summary = weights_summary

        self.max_epochs = max_epochs
        # Backward compatibility, TODO: remove in v0.8.0
        if max_nb_epochs is not None:
            warnings.warn("Argument `max_nb_epochs` has been renamed to `max_epochs` since v0.5.0"
                          " and this argument will be removed in v0.8.0", DeprecationWarning)
            self.max_nb_epochs = max_nb_epochs

        self.min_epochs = min_epochs
        # Backward compatibility, TODO: remove in v0.8.0
        if min_nb_epochs is not None:
            warnings.warn("Argument `min_nb_epochs` has been renamed to `min_epochs` since v0.5.0"
                          " and this argument will be removed in v0.8.0", DeprecationWarning)
            self.min_nb_epochs = min_nb_epochs

        self.max_steps = max_steps
        self.min_steps = min_steps

        self.num_sanity_val_steps = num_sanity_val_steps
        # Backward compatibility, TODO: remove in v0.8.0
        if nb_sanity_val_steps is not None:
            warnings.warn("Argument `nb_sanity_val_steps` has been renamed to "
                          "`num_sanity_val_steps` since v0.5.0"
                          " and this argument will be removed in v0.8.0", DeprecationWarning)
            self.nb_sanity_val_steps = nb_sanity_val_steps

        # Backward compatibility, TODO: remove in v0.9.0
        if print_nan_grads:
            warnings.warn("Argument `print_nan_grads` has no effect and will be removed in v0.9.0."
                          " NaN grads will be printed automatically when detected.",
                          DeprecationWarning)

        self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch

        self.truncated_bptt_steps = truncated_bptt_steps
        self.resume_from_checkpoint = resume_from_checkpoint
        self.shown_warnings = set()

        self.fast_dev_run = fast_dev_run
        if self.fast_dev_run:
            self.num_sanity_val_steps = 0
            self.max_epochs = 1
            log.info('Running in fast_dev_run mode: will run a full train,'
                     ' val and test loop using a single batch')

        # set default save path if user didn't provide one
        self.default_save_path = default_save_path
        if self.default_save_path is None:
            self.default_save_path = os.getcwd()

        # training bookkeeping
        self.total_batch_idx = 0
        self.running_loss = TensorRunningAccum(window_length=20)
        self.batch_idx = 0
        self.tqdm_metrics = {}
        self.callback_metrics = {}
        self.num_val_batches = 0
        self.num_training_batches = 0
        self.num_test_batches = 0
        self.train_dataloader = None
        self.test_dataloaders = None
        self.val_dataloaders = None

        # training state
        self.model = None
        self.testing = False
        self.disable_validation = False
        self.lr_schedulers = []
        self.optimizers = None
        self.optimizer_frequencies = []
        self.global_step = 0
        self.current_epoch = 0
        self.total_batches = 0
        self.interrupted = False

        # configure logger
        self.configure_logger(logger)

        # configure profiler
        if profiler is True:
            profiler = SimpleProfiler()
        self.profiler = profiler or PassThroughProfiler()

        # configure early stop callback
        # creates a default one if none passed in
        self.configure_early_stopping(early_stop_callback)

        # configure checkpoint callback
        self.checkpoint_callback = checkpoint_callback
        self.weights_save_path = weights_save_path

        # accumulated grads
        self.accumulate_grad_batches = accumulate_grad_batches
        self.configure_accumulated_gradients(accumulate_grad_batches)

        # allow int, string and gpu list
        self.gpus = gpus
        self.data_parallel_device_ids = parse_gpu_ids(self.gpus)
        self.root_gpu = determine_root_gpu_device(self.data_parallel_device_ids)
        self.root_device = torch.device("cpu")

        # tpu state flags
        self.use_tpu = False
        self.tpu_local_core_rank = None
        self.tpu_global_core_rank = None

        # distributed backend choice
        self.use_ddp = False
        self.use_ddp2 = False
        self.use_dp = False
        self.single_gpu = False
        self.distributed_backend = distributed_backend
        self.set_distributed_mode(distributed_backend, self.num_nodes)

        # override dist backend when using tpus
        if self.on_tpu:
            self.init_tpu()
            self.current_tpu_idx = None

        # init flags for SLURM+ddp to work
        self.proc_rank = 0
        self.world_size = 1
        self.node_rank = 0
        self.configure_slurm_ddp(self.num_nodes)

        # nvidia setup
        self.set_nvidia_flags(self.is_slurm_managing_tasks, self.data_parallel_device_ids)

        # can't init progress bar here because starting a new process
        # means the progress_bar won't survive pickling
        # backward compatibility
        if show_progress_bar is not None:
            self.show_progress_bar = show_progress_bar

        # logging
        self.log_save_interval = log_save_interval
        self.val_check_interval = val_check_interval

        # backward compatibility
        if add_row_log_interval is not None:
            warnings.warn("`add_row_log_interval` has been renamed to `row_log_interval` since v0.5.0"
                          " and this argument will be removed in v0.8.0", DeprecationWarning)
            if not row_log_interval:  # in case you did not set the proper value
                row_log_interval = add_row_log_interval
        self.row_log_interval = row_log_interval

        # how much of the data to use
        self.overfit_pct = overfit_pct
        self.determine_data_use_amount(train_percent_check, val_percent_check,
                                       test_percent_check, overfit_pct)

        # 16 bit mixed precision training using apex
        self.amp_level = amp_level
        self.precision = precision

        # Backward compatibility, TODO: remove in v0.9.0
        if use_amp is not None:
            warnings.warn("`use_amp` has been replaced by `precision` since v0.7.0"
                          " and this argument will be removed in v0.9.0", DeprecationWarning)
            self.precision = 16 if use_amp else 32

        assert self.precision in (16, 32), 'only 32 or 16 bit precision supported'

        if self.precision == 16 and self.num_tpu_cores is None:
            use_amp = True
        self.init_amp(use_amp)

        # Callback system
        self.on_init_end()

    @property
    def slurm_job_id(self) -> int:
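        """Return the SLURM job id of the current process, or ``None`` if not running under SLURM."""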
        try:
            job_id = os.environ['SLURM_JOB_ID']
            job_id = int(job_id)
        except Exception:
            job_id = None
        return job_id

    @classmethod
    def default_attributes(cls):
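        """Return a dict mapping every ``Trainer.__init__`` argument name to its default value."""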
        init_signature = inspect.signature(Trainer)

        args = {}
        for param_name in init_signature.parameters:
            value = init_signature.parameters[param_name].default
            args[param_name] = value

        return args

    @classmethod
    def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:
        r"""Scans the Trainer signature and returns argument names, types and default values.

        Returns:
            List with tuples of 3 values:
            (argument name, set with argument types, argument default value).

        Examples:
            >>> args = Trainer.get_init_arguments_and_types()
            >>> import pprint
            >>> pprint.pprint(sorted(args))  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
            [('accumulate_grad_batches',
              (<class 'int'>, typing.Dict[int, int], typing.List[list]),
              1),
             ...
             ('callbacks', (<class 'pytorch_lightning.callbacks.base.Callback'>,), []),
             ('check_val_every_n_epoch', (<class 'int'>,), 1),
             ...
             ('max_epochs', (<class 'int'>,), 1000),
             ...
             ('precision', (<class 'int'>,), 32),
             ('print_nan_grads', (<class 'bool'>,), False),
             ('process_position', (<class 'int'>,), 0),
             ('profiler',
              (<class 'pytorch_lightning.profiler.profilers.BaseProfiler'>,
               <class 'NoneType'>),
              None),
             ...
        """
        trainer_default_params = inspect.signature(cls).parameters
        name_type_default = []
        for arg in trainer_default_params:
            arg_type = trainer_default_params[arg].annotation
            arg_default = trainer_default_params[arg].default
            try:
                arg_types = tuple(arg_type.__args__)
            except AttributeError:
                arg_types = (arg_type,)

            name_type_default.append((arg, arg_types, arg_default))

        return name_type_default

    @classmethod
    def get_deprecated_arg_names(cls) -> List:
        """Returns a list with deprecated Trainer arguments."""
        depr_arg_names = []
        for name, val in cls.__dict__.items():
            if name.startswith('DEPRECATED') and isinstance(val, (tuple, list)):
                depr_arg_names.extend(val)
        return depr_arg_names

    @classmethod
    def add_argparse_args(cls, parent_parser: ArgumentParser) -> ArgumentParser:
        r"""Extends existing argparse by default `Trainer` attributes.

        Args:
            parent_parser:
                The custom cli arguments parser, which will be extended by
                the Trainer default arguments.

        Only arguments of the allowed types (str, float, int, bool) will
        extend the `parent_parser`.
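
        Example:

            A sketch of typical usage together with :meth:`from_argparse_args`
            (the parser description below is illustrative)::

                parser = ArgumentParser(description='hypothetical training script')
                parser = Trainer.add_argparse_args(parser)
                args = parser.parse_args()
                trainer = Trainer.from_argparse_args(args)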
"""
|
|
|
|
parser = ArgumentParser(parents=[parent_parser], add_help=False, )
|
2020-03-03 14:32:15 +00:00
|
|
|
|
2020-03-24 18:55:27 +00:00
|
|
|
depr_arg_names = cls.get_deprecated_arg_names()
|
|
|
|
|
|
|
|
allowed_types = (str, float, int, bool)
|
2020-03-06 19:43:17 +00:00
|
|
|
# TODO: get "help" from docstring :)
|
2020-04-08 12:35:47 +00:00
|
|
|
for arg, arg_types, arg_default in (at for at in cls.get_init_arguments_and_types()
|
|
|
|
if at[0] not in depr_arg_names):
|
|
|
|
for allowed_type in (at for at in allowed_types if at in arg_types):
|
|
|
|
if isinstance(allowed_type, bool):
|
|
|
|
allowed_type = lambda x: bool(distutils.util.strtobool(x))
|
|
|
|
parser.add_argument(
|
|
|
|
f'--{arg}',
|
|
|
|
default=arg_default,
|
|
|
|
type=allowed_type,
|
|
|
|
dest=arg,
|
|
|
|
help='autogenerated by pl.Trainer'
|
|
|
|
)
|
|
|
|
break
|
2020-03-03 14:32:15 +00:00
|
|
|
|
|
|
|
return parser
|
|
|
|
|
|
|
|
    @classmethod
    def from_argparse_args(cls, args):
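        """Instantiate a :class:`Trainer` from an :class:`argparse.Namespace`, typically one
        produced by a parser that was extended with :meth:`add_argparse_args`.
        """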
        params = vars(args)
        return cls(**params)

    @property
    def num_gpus(self) -> int:
        gpus = self.data_parallel_device_ids
        if gpus is None:
            return 0
        return len(gpus)

    @property
    def data_parallel(self) -> bool:
        return self.use_dp or self.use_ddp or self.use_ddp2

    @property
    def training_tqdm_dict(self) -> dict:
        """Read-only dictionary of the metrics shown in the tqdm progress bar."""
        ref_model = self.model if not self.data_parallel else self.model.module

        return dict(**ref_model.get_tqdm_dict(), **self.tqdm_metrics)

    @property
    def tng_tqdm_dic(self):
        """Read-only for tqdm metrics.

        .. warning:: .. deprecated:: 0.5.0

            Use `training_tqdm_dict` instead. Will be removed in 0.8.0.

        """
        warnings.warn("`tng_tqdm_dic` has been renamed to `training_tqdm_dict` since v0.5.0"
                      " and this method will be removed in v0.8.0", DeprecationWarning)
        return self.training_tqdm_dict

    # -----------------------------
    # MODEL TRAINING
    # -----------------------------
    def fit(
            self,
            model: LightningModule,
            train_dataloader: Optional[DataLoader] = None,
            val_dataloaders: Optional[DataLoader] = None,
            test_dataloaders: Optional[DataLoader] = None
    ):
        r"""
        Runs the full optimization routine.

        Args:
            model: Model to fit.

            train_dataloader: A PyTorch
                DataLoader with training samples. If the model has
                a predefined train_dataloader method this will be skipped.

            val_dataloaders: Either a single
                PyTorch DataLoader or a list of them, specifying validation samples.
                If the model has a predefined val_dataloaders method this will be skipped.

            test_dataloaders: Either a single
                PyTorch DataLoader or a list of them, specifying test samples.
                If the model has a predefined test_dataloaders method this will be skipped.

        Example::

            # Option 1
            # Define the train_dataloader(), test_dataloader() and val_dataloader() fxs
            # in the LightningModule
            # RECOMMENDED FOR MOST RESEARCH AND APPLICATIONS TO MAINTAIN READABILITY
            trainer = Trainer()
            model = LightningModule()
            trainer.fit(model)

            # Option 2
            # in production cases we might want to pass different datasets to the same model
            # Recommended for PRODUCTION SYSTEMS
            train, val, test = DataLoader(...), DataLoader(...), DataLoader(...)
            trainer = Trainer()
            model = LightningModule()
            trainer.fit(model, train_dataloader=train,
                        val_dataloaders=val, test_dataloaders=test)

            # Option 1 & 2 can be mixed, for example the training set can be
            # defined as part of the model, and validation/test can then be
            # fed to .fit()

        """
        # bind logger and other properties
        model.logger = self.logger
        self.copy_trainer_model_properties(model)

        # set up the passed in dataloaders (if needed)
        self.__attach_dataloaders(model, train_dataloader, val_dataloaders, test_dataloaders)

        # check that model is configured correctly
        self.check_model_configuration(model)

        # download the data and do whatever transforms we need
        # do before any spawn calls so that the model can assign properties
        # only on proc 0 because no spawn has happened yet
        model.prepare_data()

        # route to appropriate start method
        # when using multi-node or DDP within a node start each module in a separate process
        if self.use_ddp2:
            task = int(os.environ['SLURM_LOCALID'])
            self.ddp_train(task, model)

        elif self.use_ddp:
            if self.is_slurm_managing_tasks:
                task = int(os.environ['SLURM_LOCALID'])
                self.ddp_train(task, model)
            else:
                self.__set_random_port()

                # track for predict
                self.model = model

                # train
                mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))

                # load weights if not interrupted
                self.load_spawn_weights(model)
                self.model = model

        # 1 gpu or dp option triggers training using DP module
        # easier to avoid NCCL issues
        elif self.use_dp:
            self.dp_train(model)

        elif self.single_gpu:
            self.single_gpu_train(model)

        elif self.use_tpu:  # pragma: no-cover
            log.info(f'training on {self.num_tpu_cores} TPU cores')

            #  COLAB_GPU is an env var available by default in Colab environments.
            start_method = 'fork' if os.getenv('COLAB_GPU') else 'spawn'

            # track for predict
            self.model = model

            # train
            xmp.spawn(self.tpu_train, args=(model,), nprocs=self.num_tpu_cores, start_method=start_method)

            # load weights if not interrupted
            self.load_spawn_weights(model)
            self.model = model

        # ON CPU
        else:
            # run through amp wrapper
            if self.use_amp:
                raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')

            # CHOOSE OPTIMIZER
            # allow for lr schedulers as well
            self.optimizers, self.lr_schedulers, self.optimizer_frequencies = self.init_optimizers(model)

            self.run_pretrain_routine(model)

        # return 1 when finished
        # used for testing or when we need to know that training succeeded
        return 1

    def __set_random_port(self):
        """
        When running DDP that is not managed by SLURM, pick a random ``MASTER_PORT``
        (if one is not already set) so that concurrent jobs do not collide on ports.
        """
        try:
            default_port = os.environ['MASTER_PORT']
        except Exception:
            import random
            default_port = random.randint(10000, 19000)
            os.environ['MASTER_PORT'] = str(default_port)

    def __attach_dataloaders(self, model, train_dataloader, val_dataloaders, test_dataloaders):
        # when dataloader is passed via fit, patch the train_dataloader
        # functions to overwrite with these implementations
        if train_dataloader is not None:
            model.train_dataloader = _PatchDataLoader(train_dataloader)

        if val_dataloaders is not None:
            model.val_dataloader = _PatchDataLoader(val_dataloaders)

        if test_dataloaders is not None:
            model.test_dataloader = _PatchDataLoader(test_dataloaders)

    def run_pretrain_routine(self, model: LightningModule):
        """Sanity check a few things before starting actual training.

        Args:
            model: The model to run sanity test on.
        """
        ref_model = model
        if self.data_parallel:
            ref_model = model.module

        # give model convenience properties
        ref_model.trainer = self

        # set local properties on the model
        self.copy_trainer_model_properties(ref_model)

        # log hyper-parameters
        if self.logger is not None:
            # save exp to get started
            if hasattr(ref_model, "hparams"):
                self.logger.log_hyperparams(ref_model.hparams)

            self.logger.save()

        if self.use_ddp or self.use_ddp2:
            torch_distrib.barrier()

        # wait for all models to restore weights
        if self.on_tpu and XLA_AVAILABLE:
            # wait for all processes to catch up
            torch_xla.core.xla_model.rendezvous("pl.Trainer.run_pretrain_routine")

        # register auto-resubmit when on SLURM
        self.register_slurm_signal_handlers()

        # print model summary
        # TODO: remove self.testing condition because model.summarize() is wiping out the weights
        if self.proc_rank == 0 and self.weights_summary is not None and not self.testing:
            if self.weights_summary in ['full', 'top']:
                ref_model.summarize(mode=self.weights_summary)
            else:
                raise MisconfigurationException("weights_summary can be None, 'full' or 'top'")

        # track model now.
        # if cluster resets state, the model will update with the saved weights
        self.model = model

        # set up checkpoint callback
        self.configure_checkpoint_callback()

        # restore training and model before hpc call
        self.restore_weights(model)

        # when testing requested only run test and return
        if self.testing:
            # only load test dataloader for testing
            # self.reset_test_dataloader(ref_model)
            self.run_evaluation(test_mode=True)
            return

        # check if we should run validation during training
        self.disable_validation = not (self.is_overriden('validation_step') and self.val_percent_check > 0) \
            and not self.fast_dev_run

        # run tiny validation (if validation defined)
        # to make sure program won't crash during val
        ref_model.on_sanity_check_start()
        if not self.disable_validation and self.num_sanity_val_steps > 0:
            self.reset_val_dataloader(ref_model)
            # init progress bars for validation sanity check
            pbar = tqdm(desc='Validation sanity check',
                        total=self.num_sanity_val_steps * len(self.val_dataloaders),
                        leave=False, position=2 * self.process_position,
                        disable=not self.progress_bar_refresh_rate, dynamic_ncols=True)
            self.main_progress_bar = pbar
            # dummy validation progress bar
            self.val_progress_bar = tqdm(disable=True)

            eval_results = self._evaluate(model,
                                          self.val_dataloaders,
                                          self.num_sanity_val_steps,
                                          False)
            _, _, _, callback_metrics, _ = self.process_output(eval_results)

            # close progress bars
            self.main_progress_bar.close()
            self.val_progress_bar.close()

            if self.enable_early_stop:
                self.early_stop_callback.check_metrics(callback_metrics)

        # init progress bar
        pbar = tqdm(leave=True, position=2 * self.process_position,
                    disable=not self.show_progress_bar, dynamic_ncols=True,
                    file=sys.stdout, smoothing=0)
        self.main_progress_bar = pbar

        # clear cache before training
        if self.on_gpu:
            torch.cuda.empty_cache()

        # CORE TRAINING LOOP
        self.train()

    def test(self, model: Optional[LightningModule] = None):
        r"""
        Separates from fit to make sure you never run on your test set until you want to.

        Args:
            model: The model to test.

        Example::

            # Option 1
            # run test after fitting
            trainer = Trainer()
            model = LightningModule()

            trainer.fit(model)
            trainer.test()

            # Option 2
            # run test from a loaded model
            model = LightningModule.load_from_checkpoint('path/to/checkpoint.ckpt')
            trainer = Trainer()
            trainer.test(model)
        """

        self.testing = True
        if model is not None:
            self.model = model
            self.fit(model)
        elif self.use_ddp or self.use_tpu:  # pragma: no-cover
            # attempt to load weights from a spawn
            path = os.path.join(self.default_save_path, '__temp_weight_ddp_end.ckpt')
            test_model = self.model
            if os.path.exists(path):
                test_model = self.load_spawn_weights(self.model)

            self.fit(test_model)
        else:
            self.run_evaluation(test_mode=True)

        self.testing = False

    def check_model_configuration(self, model: LightningModule):
        r"""
        Checks that the model is configured correctly before training is started.

        Args:
            model: The model to check.
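
        Example:

            The minimal set of overrides that passes this check (a sketch; the method
            bodies are illustrative placeholders)::

                class MinimalModel(LightningModule):

                    def training_step(self, batch, batch_idx):
                        ...

                    def train_dataloader(self):
                        ...

                    def configure_optimizers(self):
                        ...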
"""
|
|
|
|
# Check training_step, train_dataloader, configure_optimizer methods
|
|
|
|
if not self.is_overriden('training_step', model):
|
|
|
|
raise MisconfigurationException(
|
|
|
|
'No `training_step()` method defined. Lightning `Trainer` expects as minimum a'
|
|
|
|
' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')
|
|
|
|
|
|
|
|
if not self.is_overriden('train_dataloader', model):
|
|
|
|
raise MisconfigurationException(
|
|
|
|
'No `train_dataloader()` method defined. Lightning `Trainer` expects as minimum a'
|
|
|
|
' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')
|
|
|
|
|
|
|
|
if not self.is_overriden('configure_optimizers', model):
|
|
|
|
raise MisconfigurationException(
|
|
|
|
'No `configure_optimizers()` method defined. Lightning `Trainer` expects as minimum a'
|
|
|
|
' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')
|
|
|
|
|
|
|
|
# Check val_dataloader, validation_step and validation_epoch_end
|
|
|
|
if self.is_overriden('val_dataloader', model):
|
|
|
|
if not self.is_overriden('validation_step', model):
|
|
|
|
raise MisconfigurationException('You have passed in a `val_dataloader()`'
|
|
|
|
' but have not defined `validation_step()`.')
|
|
|
|
else:
|
|
|
|
if not self.is_overriden('validation_epoch_end', model):
|
|
|
|
warnings.warn('You have defined a `val_dataloader()` and have'
|
|
|
|
' defined a `validation_step()`, you may also want to'
|
|
|
|
' define `validation_epoch_end()` for accumulating stats.',
|
|
|
|
RuntimeWarning)
|
|
|
|
else:
|
|
|
|
if self.is_overriden('validation_step', model):
|
|
|
|
raise MisconfigurationException('You have defined `validation_step()`,'
|
|
|
|
' but have not passed in a val_dataloader().')
|
|
|
|
|
|
|
|
# Check test_dataloader, test_step and test_epoch_end
|
|
|
|
if self.is_overriden('test_dataloader', model):
|
|
|
|
if not self.is_overriden('test_step', model):
|
|
|
|
raise MisconfigurationException('You have passed in a `test_dataloader()`'
|
|
|
|
' but have not defined `test_step()`.')
|
|
|
|
else:
|
|
|
|
if not self.is_overriden('test_epoch_end', model):
|
|
|
|
warnings.warn('You have defined a `test_dataloader()` and'
|
|
|
|
' have defined a `test_step()`, you may also want to'
|
|
|
|
' define `test_epoch_end()` for accumulating stats.',
|
|
|
|
RuntimeWarning)
|
|
|
|
else:
|
|
|
|
if self.is_overriden('test_step', model):
|
|
|
|
raise MisconfigurationException('You have defined `test_step()`,'
|
|
|
|
' but have not passed in a `test_dataloader()`.')
|
|
|
|
|
2020-02-26 04:17:27 +00:00
|
|
|
|
class _PatchDataLoader(object):
    r"""
    Callable object for patching dataloaders passed into trainer.fit().
    Use this class to override model.*_dataloader() and be pickle-compatible.

    Args:
        dataloader: Dataloader object to return when called.
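
    Example:

        A rough sketch of how ``Trainer.fit`` patches a model internally (the loader
        construction is illustrative only)::

            loader = DataLoader(some_dataset)  # hypothetical DataLoader
            model.train_dataloader = _PatchDataLoader(loader)
            assert model.train_dataloader() is loader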
"""
|
2020-03-31 16:41:24 +00:00
|
|
|
|
2020-03-02 03:50:49 +00:00
|
|
|
def __init__(self, dataloader: Union[List[DataLoader], DataLoader]):
|
|
|
|
self.dataloader = dataloader
|
|
|
|
|
2020-04-03 12:40:02 +00:00
|
|
|
# cannot pickle __code__ so cannot verify if PatchDataloader
|
|
|
|
# exists which shows dataloader methods have been overwritten.
|
|
|
|
# so, we hack it by using the string representation
|
|
|
|
self.patch_loader_code = str(self.__call__.__code__)
|
2020-04-02 15:53:37 +00:00
|
|
|
|
2020-03-02 03:50:49 +00:00
|
|
|
def __call__(self) -> Union[List[DataLoader], DataLoader]:
|
|
|
|
return self.dataloader
|