lightning/examples/new_project_templates/lightning_module_template.py

import os
from collections import OrderedDict
import torch.nn as nn
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
import torch
import torch.nn.functional as F
from test_tube import HyperOptArgumentParser
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.root_module.root_module import LightningModule
class LightningTemplateModel(LightningModule):
"""
Sample model to show how to define a template
"""
def __init__(self, hparams):
"""
Pass in parsed HyperOptArgumentParser to the model
:param hparams:
"""
# init superclass
super(LightningTemplateModel, self).__init__(hparams)
self.batch_size = hparams.batch_size
# build model
self.__build_model()
# ---------------------
# MODEL SETUP
# ---------------------
def __build_model(self):
"""
Layout model
:return:
"""
self.c_d1 = nn.Linear(in_features=self.hparams.in_features, out_features=self.hparams.hidden_dim)
self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)
self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)
self.c_d2 = nn.Linear(in_features=self.hparams.hidden_dim, out_features=self.hparams.out_features)
# ---------------------
# TRAINING
# ---------------------
def forward(self, x):
"""
No special modification required for lightning, define as you normally would
:param x:
:return:
"""
x = self.c_d1(x)
x = torch.tanh(x)
x = self.c_d1_bn(x)
x = self.c_d1_drop(x)
x = self.c_d2(x)
logits = F.log_softmax(x, dim=1)
return logits
def loss(self, labels, logits):
nll = F.nll_loss(logits, labels)
return nll
def training_step(self, data_batch, batch_i):
"""
Lightning calls this inside the training loop
:param data_batch:
:return:
"""
# forward pass
x, y = data_batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
# calculate loss
loss_val = self.loss(y, y_hat)
        output = OrderedDict({
            'loss': loss_val
        })

        return output
def validation_step(self, data_batch, batch_i):
"""
Lightning calls this inside the validation loop
:param data_batch:
:return:
"""
x, y = data_batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_val = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
        output = OrderedDict({
            'val_loss': loss_val,
            # keep val_acc on the same device as the loss (works on CPU as well as GPU)
            'val_acc': torch.tensor(val_acc, device=loss_val.device),
        })

        return output
def validation_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
val_loss_mean = 0
val_acc_mean = 0
for output in outputs:
val_loss_mean += output['val_loss']
val_acc_mean += output['val_acc']
val_loss_mean /= len(outputs)
val_acc_mean /= len(outputs)
tqdm_dic = {'val_loss': val_loss_mean.item(), 'val_acc': val_acc_mean.item()}
return tqdm_dic
def update_tng_log_metrics(self, logs):
return logs
# ---------------------
# MODEL SAVING
# ---------------------
def get_save_dict(self):
checkpoint = {'state_dict': self.state_dict()}
return checkpoint
    def load_model_specific(self, checkpoint):
        self.load_state_dict(checkpoint['state_dict'])
# ---------------------
# TRAINING SETUP
# ---------------------
def configure_optimizers(self):
"""
return whatever optimizers we want here
:return: list of optimizers
"""
2019-06-28 17:53:00 +00:00
optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
2019-06-27 15:04:02 +00:00
return [optimizer]
def __dataloader(self, train):
# init data generators
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
dataset = MNIST(root=self.hparams.data_root, train=train, transform=transform, download=True)
        # when using multi-node we need to add the datasampler
        train_sampler = None
        batch_size = self.hparams.batch_size
        try:
            if self.on_gpu:
                train_sampler = DistributedSampler(dataset, rank=self.trainer.proc_rank)
                batch_size = batch_size // self.trainer.world_size  # scale batch size
        except Exception:
            # not attached to a trainer yet or not running distributed; keep the full batch size
            pass

        should_shuffle = train_sampler is None
        loader = DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=should_shuffle,
            sampler=train_sampler
        )
return loader
@property
def tng_dataloader(self):
if self._tng_dataloader is None:
try:
self._tng_dataloader = self.__dataloader(train=True)
except Exception as e:
print(e)
raise e
return self._tng_dataloader
@property
def val_dataloader(self):
if self._val_dataloader is None:
try:
self._val_dataloader = self.__dataloader(train=False)
except Exception as e:
print(e)
raise e
return self._val_dataloader
@property
def test_dataloader(self):
if self._test_dataloader is None:
try:
self._test_dataloader = self.__dataloader(train=False)
except Exception as e:
print(e)
raise e
return self._test_dataloader
@staticmethod
def add_model_specific_args(parent_parser, root_dir):
"""
Parameters you define here will be available to your model through self.hparams
:param parent_parser:
:param root_dir:
:return:
"""
parser = HyperOptArgumentParser(strategy=parent_parser.strategy, parents=[parent_parser])
# param overwrites
# parser.set_defaults(gradient_clip=5.0)
# network params
parser.opt_list('--drop_prob', default=0.2, options=[0.2, 0.5], type=float, tunable=False)
parser.add_argument('--in_features', default=28*28, type=int)
parser.add_argument('--out_features', default=10, type=int)
parser.add_argument('--hidden_dim', default=50000, type=int) # use 500 for CPU, 50000 for GPU to see speed difference
# data
parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)
# training params (opt)
        parser.opt_list('--learning_rate', default=0.001*8, type=float, options=[0.0001, 0.0005, 0.001, 0.005],
                        tunable=False)
parser.opt_list('--optimizer_name', default='adam', type=str, options=['adam'], tunable=False)
        # if using 2 nodes with 4 gpus each, a batch size of 256 becomes 256 / (2*4) = 32 per gpu
        parser.opt_list('--batch_size', default=256*8, type=int, options=[32, 64, 128, 256], tunable=False,
                        help='batch size will be divided over all the gpus being used across all nodes')
return parser
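

# ---------------------
# EXAMPLE USAGE
# ---------------------
# Minimal sketch (not part of the original template): build the hparams
# namespace with the same HyperOptArgumentParser used in add_model_specific_args,
# instantiate the module, and run a forward pass on dummy data. The
# 'grid_search' strategy and the --hidden_dim override are assumptions chosen
# to keep the example small and CPU-friendly.
if __name__ == '__main__':
    root_dir = os.path.dirname(os.path.realpath(__file__))
    parent_parser = HyperOptArgumentParser(strategy='grid_search', add_help=False)
    parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
    hparams = parser.parse_args(['--hidden_dim', '500'])

    model = LightningTemplateModel(hparams)
    dummy_images = torch.rand(4, hparams.in_features)
    log_probs = model.forward(dummy_images)
    print(log_probs.shape)  # torch.Size([4, 10]) with the default --out_features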