import torch
from pytorch_lightning.root_module.memory import ModelSummary
from pytorch_lightning.root_module.grads import GradInformation
from pytorch_lightning.root_module.model_saving import ModelIO, load_hparams_from_tags_csv
from pytorch_lightning.root_module.hooks import ModelHooks
from pytorch_lightning.root_module.decorators import data_loader


class LightningModule(GradInformation, ModelIO, ModelHooks):

    def __init__(self, *args, **kwargs):
        super(LightningModule, self).__init__(*args, **kwargs)

        self.dtype = torch.FloatTensor
        self.exp_save_path = None
        self.current_epoch = 0
        self.global_step = 0
        self.loaded_optimizer_states_dict = {}
        self.trainer = None
        self.experiment = None
        self.example_input_array = None

        # track if gpu was requested for checkpointing
        self.on_gpu = False
        self.use_dp = False
        self.use_ddp = False
        self.use_amp = False

    def forward(self, *args, **kwargs):
        """
        Expand the model into whatever computation you need and return the
        model output.
        :param x: whatever your model consumes
        :return: model output
        """
        raise NotImplementedError
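
    # An illustrative override -- a minimal sketch only, assuming the subclass
    # defined a layer such as `self.l1 = torch.nn.Linear(28 * 28, 10)` in its
    # own __init__:
    #
    #     def forward(self, x):
    #         return torch.relu(self.l1(x.view(x.size(0), -1)))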

    def training_step(self, *args, **kwargs):
        """
        Return the loss and a dict with metrics for the tqdm progress bar.
        Called with (batch, batch_nb); an additional optimizer_i argument is
        passed when multiple optimizers are used.
        :return:
        """
        raise NotImplementedError
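
    # An illustrative override -- a minimal sketch assuming the hypothetical
    # classification model above and a single optimizer; the 'prog' key for
    # tqdm metrics is an assumption about this Trainer version's output format:
    #
    #     def training_step(self, batch, batch_nb):
    #         x, y = batch
    #         y_hat = self.forward(x)
    #         loss = torch.nn.functional.cross_entropy(y_hat, y)
    #         return {'loss': loss, 'prog': {'train_loss': loss.item()}}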

    def validation_step(self, *args, **kwargs):
        """
        OPTIONAL.
        Return whatever outputs need to be aggregated in validation_end.
        Called with (batch, batch_nb); an additional dataset_i argument is
        passed when multiple val datasets are used.
        :return:
        """
        pass
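
    # An illustrative override -- same hypothetical classification model as
    # above:
    #
    #     def validation_step(self, batch, batch_nb):
    #         x, y = batch
    #         y_hat = self.forward(x)
    #         return {'val_loss': torch.nn.functional.cross_entropy(y_hat, y)}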

    def validation_end(self, outputs):
        """
        OPTIONAL.
        Receives the list of outputs appended by each validation_step call.
        :param outputs: list of validation_step outputs
        :return: dict with metrics for tqdm
        """
        pass
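
    # An illustrative override that averages the per-batch outputs produced by
    # the validation_step sketch above:
    #
    #     def validation_end(self, outputs):
    #         avg_loss = torch.stack([o['val_loss'] for o in outputs]).mean()
    #         return {'avg_val_loss': avg_loss}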

    def configure_optimizers(self):
        """
        Return a list of optimizers and a list of schedulers (could be empty).
        :return:
        """
        raise NotImplementedError
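
    # An illustrative override returning one optimizer and one scheduler (the
    # learning rate and step size are made-up example values):
    #
    #     def configure_optimizers(self):
    #         optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
    #         scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    #         return [optimizer], [scheduler]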

    def optimizer_step(self, epoch_nb, batch_nb, optimizer, optimizer_i):
        """
        Do something instead of the standard optimizer behavior.
        :param epoch_nb:
        :param batch_nb:
        :param optimizer:
        :param optimizer_i:
        :return:
        """
        optimizer.step()

        # clear gradients
        optimizer.zero_grad()
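
    # An illustrative override that adds a linear learning-rate warm-up over
    # the first 500 steps; the warm-up length and the target lr of 0.001 are
    # made-up example values, and self.global_step is assumed to be kept in
    # sync by the Trainer:
    #
    #     def optimizer_step(self, epoch_nb, batch_nb, optimizer, optimizer_i):
    #         if self.global_step < 500:
    #             scale = float(self.global_step + 1) / 500.0
    #             for pg in optimizer.param_groups:
    #                 pg['lr'] = scale * 0.001
    #         optimizer.step()
    #         optimizer.zero_grad()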

    @data_loader
    def tng_dataloader(self):
        """
        Implement a PyTorch DataLoader for the training set.
        :return:
        """
        raise NotImplementedError
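
    # An illustrative override -- MNIST is just an example dataset; any
    # torch.utils.data.DataLoader can be returned (val_dataloader and
    # test_dataloader below follow the same pattern):
    #
    #     @data_loader
    #     def tng_dataloader(self):
    #         from torch.utils.data import DataLoader
    #         from torchvision import datasets, transforms
    #         dataset = datasets.MNIST('.', train=True, download=True,
    #                                  transform=transforms.ToTensor())
    #         return DataLoader(dataset, batch_size=32, shuffle=True)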

    @data_loader
    def test_dataloader(self):
        """
        Implement a PyTorch DataLoader for the test set.
        :return:
        """
        return None

    @data_loader
    def val_dataloader(self):
        """
        Implement a PyTorch DataLoader for the validation set.
        :return:
        """
        return None

    @classmethod
    def load_from_metrics(cls, weights_path, tags_csv, on_gpu, map_location=None):
        """
        Primary way of loading a model from a saved weights (checkpoint) path
        and a tags csv holding the hyperparameters.
        :param weights_path: path to the checkpoint file
        :param tags_csv: path to the csv with the saved hparams
        :param on_gpu: whether to load the weights for GPU use
        :param map_location: dict for mapping storage, e.g. {'cuda:1': 'cuda:0'}
        :return: the loaded model
        """
        hparams = load_hparams_from_tags_csv(tags_csv)
        hparams.__setattr__('on_gpu', on_gpu)

        if on_gpu:
            if map_location is not None:
                checkpoint = torch.load(weights_path, map_location=map_location)
            else:
                checkpoint = torch.load(weights_path)
        else:
            # force the checkpoint onto CPU storage when no GPU is requested
            checkpoint = torch.load(weights_path, map_location=lambda storage, loc: storage)

        # load the state_dict on the model automatically
        model = cls(hparams)
        model.load_state_dict(checkpoint['state_dict'])

        # give the model a chance to load anything else saved in the checkpoint
        model.on_load_checkpoint(checkpoint)

        return model
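
    # Illustrative usage -- the paths and the MyLightningModule subclass are
    # placeholders:
    #
    #     pretrained = MyLightningModule.load_from_metrics(
    #         weights_path='/path/to/checkpoint.ckpt',
    #         tags_csv='/path/to/meta_tags.csv',
    #         on_gpu=False,
    #     )
    #     pretrained.freeze()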

    def summarize(self):
        model_summary = ModelSummary(self)
        print(model_summary)

    def freeze(self):
        for param in self.parameters():
            param.requires_grad = False

    def unfreeze(self):
        for param in self.parameters():
            param.requires_grad = True