2020-03-12 16:41:37 +00:00
|
|
|
from abc import ABC
|
2020-06-13 16:00:14 +00:00
|
|
|
from typing import Union, Iterable
|
2019-12-04 15:57:32 +00:00
|
|
|
|
2019-10-22 01:16:51 +00:00
|
|
|
import torch
|
2019-10-22 08:32:40 +00:00
|
|
|
|
2019-11-27 03:39:18 +00:00
|
|
|
from pytorch_lightning.core import memory
|
2020-02-27 21:21:14 +00:00
|
|
|
from pytorch_lightning.loggers import TensorBoardLogger, LightningLoggerBase, LoggerCollection
|
2020-04-26 13:20:06 +00:00
|
|
|
from pytorch_lightning.utilities.memory import recursive_detach
|
2019-10-22 01:16:51 +00:00
|
|
|
|
|
|
|
|
2019-12-04 15:57:32 +00:00
|
|
|
class TrainerLoggingMixin(ABC):
    """Mixin that gives the Trainer its metric-logging behaviour.

    Provides logger configuration, metric logging/aggregation, progress-bar
    metric bookkeeping, output post-processing and DP/DDP2 output reduction.
    """

    # this is just a summary on variables used in this abstract class,
    # the proper values/initialisation should be done in child class
    current_epoch: int                          # epoch counter, logged as 'epoch' metric
    on_gpu: bool                                # whether training runs on GPU(s)
    log_gpu_memory: ...                         # mode flag passed to memory.get_memory_profile
    logger: Union[LightningLoggerBase, bool]    # resolved logger (None when disabled)
    progress_bar_metrics: ...                   # dict of scalars shown in the progress bar
    global_step: int                            # default step used when logging metrics
    global_rank: int                            # process rank in distributed training
    use_dp: bool                                # DataParallel mode flag
    use_ddp2: bool                              # DDP2 mode flag
    default_root_dir: str                       # save_dir for the default TensorBoardLogger
    slurm_job_id: int                           # used as logger version on SLURM clusters
    num_gpus: int                               # number of GPUs, used when reducing DP outputs
    logged_metrics: ...                         # last dict of scalars sent to the logger
|
2019-10-22 01:16:51 +00:00
|
|
|
|
2020-01-26 14:42:57 +00:00
|
|
|
def configure_logger(self, logger):
|
|
|
|
if logger is True:
|
|
|
|
# default logger
|
|
|
|
self.logger = TensorBoardLogger(
|
2020-04-10 16:02:59 +00:00
|
|
|
save_dir=self.default_root_dir,
|
2020-01-26 14:42:57 +00:00
|
|
|
version=self.slurm_job_id,
|
|
|
|
name='lightning_logs'
|
|
|
|
)
|
|
|
|
elif logger is False:
|
|
|
|
self.logger = None
|
|
|
|
else:
|
2020-02-25 19:52:39 +00:00
|
|
|
if isinstance(logger, Iterable):
|
|
|
|
self.logger = LoggerCollection(logger)
|
|
|
|
else:
|
|
|
|
self.logger = logger
|
2020-01-26 14:42:57 +00:00
|
|
|
|
2019-12-08 15:59:25 +00:00
|
|
|
    def log_metrics(self, metrics, grad_norm_dic, step=None):
        """Logs the metric dict passed in.

        If the `step` parameter is None and a `step` key is present in
        metrics, uses metrics["step"] as the step; otherwise falls back to
        `self.global_step` and additionally injects the current epoch.

        NOTE: `metrics` is mutated in place (GPU memory stats and gradient
        norms are merged into it before conversion to scalars).

        Args:
            metrics (dict): Metric values
            grad_norm_dic (dict): Gradient norms
            step (int): Step for which metrics should be logged. Default value corresponds to `self.global_step`
        """
        # add gpu memory stats when requested (mutates the caller's dict)
        if self.on_gpu and self.log_gpu_memory:
            mem_map = memory.get_memory_profile(self.log_gpu_memory)
            metrics.update(mem_map)

        # add norms (also merged into the caller's dict)
        metrics.update(grad_norm_dic)

        # turn all tensors to scalars so every logger backend can handle them
        scalar_metrics = self.metrics_to_scalars(metrics)

        # a user-supplied "step" metric overrides the default step counter;
        # it is popped so it is not logged as a metric itself
        if "step" in scalar_metrics and step is None:
            step = scalar_metrics.pop("step")

        elif step is None:
            # added metrics by Lightning for convenience
            # NOTE: 'epoch' is only injected when no explicit step was given
            scalar_metrics['epoch'] = self.current_epoch
            step = step if step is not None else self.global_step

        # log actual metrics — only rank zero talks to the logger
        if self.is_global_zero and self.logger is not None:
            self.logger.agg_and_log_metrics(scalar_metrics, step=step)
            self.logger.save()

        # track the logged metrics for later inspection
        self.logged_metrics = scalar_metrics

        # record history for the dev debugging utilities
        self.dev_debugger.track_logged_metrics_history(scalar_metrics)
|
|
|
|
|
2020-04-24 00:46:18 +00:00
|
|
|
def add_progress_bar_metrics(self, metrics):
|
2019-10-22 01:16:51 +00:00
|
|
|
for k, v in metrics.items():
|
2020-02-01 23:44:05 +00:00
|
|
|
if isinstance(v, torch.Tensor):
|
2019-10-22 01:16:51 +00:00
|
|
|
v = v.item()
|
|
|
|
|
2020-04-24 00:46:18 +00:00
|
|
|
self.progress_bar_metrics[k] = v
|
2019-10-22 01:16:51 +00:00
|
|
|
|
2020-07-20 23:00:20 +00:00
|
|
|
self.dev_debugger.track_pbar_metrics_history(metrics)
|
|
|
|
|
2019-10-22 01:16:51 +00:00
|
|
|
def metrics_to_scalars(self, metrics):
|
|
|
|
new_metrics = {}
|
|
|
|
for k, v in metrics.items():
|
|
|
|
if isinstance(v, torch.Tensor):
|
|
|
|
v = v.item()
|
|
|
|
|
2020-02-01 23:44:05 +00:00
|
|
|
if isinstance(v, dict):
|
2019-10-22 01:16:51 +00:00
|
|
|
v = self.metrics_to_scalars(v)
|
|
|
|
|
|
|
|
new_metrics[k] = v
|
|
|
|
|
|
|
|
return new_metrics
|
|
|
|
|
|
|
|
    def process_output(self, output, train=False):
        """Reduces output according to the training mode.

        Separates loss from logging and progress bar metrics.

        Returns a 5-tuple:
        ``(loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens)``.
        """
        # --------------------------
        # handle single scalar only
        # --------------------------
        # single scalar returned from a xx_step: the scalar IS the loss and
        # there are no metrics to extract
        if isinstance(output, torch.Tensor):
            progress_bar_metrics = {}
            log_metrics = {}
            callback_metrics = {}
            hiddens = None
            return output, progress_bar_metrics, log_metrics, callback_metrics, hiddens

        # ---------------
        # EXTRACT CALLBACK KEYS
        # ---------------
        # all keys not progress_bar or log are candidates for callbacks
        callback_metrics = {}
        for k, v in output.items():
            if k not in ['progress_bar', 'log', 'hiddens']:
                callback_metrics[k] = v

        # DP/DDP2 produce one entry per GPU -> average them
        if train and (self.use_dp or self.use_ddp2):
            num_gpus = self.num_gpus
            callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)

        # ---------------
        # EXTRACT PROGRESS BAR KEYS
        # ---------------
        # EAFP: a missing 'progress_bar' key (or any other failure here)
        # simply yields empty progress-bar metrics
        try:
            progress_output = output['progress_bar']

            # reduce progress metrics for progress bar when using dp
            if train and (self.use_dp or self.use_ddp2):
                num_gpus = self.num_gpus
                progress_output = self.reduce_distributed_output(progress_output, num_gpus)

            progress_bar_metrics = progress_output
        except Exception:
            progress_bar_metrics = {}

        # ---------------
        # EXTRACT LOGGING KEYS
        # ---------------
        # extract metrics to log to experiment; same best-effort pattern
        try:
            log_output = output['log']

            # reduce progress metrics for progress bar when using dp
            if train and (self.use_dp or self.use_ddp2):
                num_gpus = self.num_gpus
                log_output = self.reduce_distributed_output(log_output, num_gpus)

            log_metrics = log_output
        except Exception:
            log_metrics = {}

        # ---------------
        # EXTRACT LOSS
        # ---------------
        # if output dict doesn't have the keyword loss
        # then assume the output=loss if scalar
        loss = None
        if train:
            try:
                loss = output['loss']
            except Exception:
                # NOTE(review): this tensor branch looks unreachable — a bare
                # tensor output already returned at the top of the method
                if isinstance(output, torch.Tensor):
                    loss = output
                else:
                    raise RuntimeError(
                        'No `loss` value in the dictionary returned from `model.training_step()`.'
                    )

            # when using dp need to reduce the loss
            if self.use_dp or self.use_ddp2:
                loss = self.reduce_distributed_output(loss, self.num_gpus)

        # ---------------
        # EXTRACT HIDDEN
        # ---------------
        # hidden state (e.g. for truncated BPTT); None when absent
        hiddens = output.get('hiddens')

        # use every metric passed in as a candidate for callback
        callback_metrics.update(progress_bar_metrics)
        callback_metrics.update(log_metrics)

        # detach all metrics for callbacks to prevent memory leaks
        # no .item() because it will slow things down
        callback_metrics = recursive_detach(callback_metrics)

        return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens
|
2019-10-22 01:16:51 +00:00
|
|
|
|
2019-12-04 11:57:10 +00:00
|
|
|
def reduce_distributed_output(self, output, num_gpus):
|
|
|
|
if num_gpus <= 1:
|
2019-10-22 01:16:51 +00:00
|
|
|
return output
|
|
|
|
|
|
|
|
# when using DP, we get one output per gpu
|
|
|
|
# average outputs and return
|
2020-02-01 23:44:05 +00:00
|
|
|
if isinstance(output, torch.Tensor):
|
2019-10-22 01:16:51 +00:00
|
|
|
return output.mean()
|
|
|
|
|
|
|
|
for k, v in output.items():
|
|
|
|
# recurse on nested dics
|
|
|
|
if isinstance(output[k], dict):
|
2019-12-04 11:57:10 +00:00
|
|
|
output[k] = self.reduce_distributed_output(output[k], num_gpus)
|
2019-10-22 01:16:51 +00:00
|
|
|
|
2020-08-07 07:18:29 +00:00
|
|
|
# compute the average of scalars
|
|
|
|
elif isinstance(output[k], list):
|
|
|
|
output[k] = sum(output[k]) / len(output[k])
|
|
|
|
|
2019-11-05 15:01:52 +00:00
|
|
|
# do nothing when there's a scalar
|
|
|
|
elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
|
|
|
|
pass
|
|
|
|
|
2020-05-02 15:01:44 +00:00
|
|
|
# do not reduce metrics that have batch size > num gpus
|
|
|
|
elif output[k].size(0) <= num_gpus:
|
|
|
|
output[k] = torch.mean(output[k])
|
|
|
|
|
2019-10-22 01:16:51 +00:00
|
|
|
return output
|