"""
Example template for defining a system.
"""
import os
from argparse import ArgumentParser

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch import optim
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST

from pytorch_lightning import _logger as log
from pytorch_lightning.core import LightningModule


class LightningTemplateModel(LightningModule):
    """
    Sample model to show how to define a template.

    Example:

        >>> # define simple Net for MNIST dataset
        >>> params = dict(
        ...     drop_prob=0.2,
        ...     batch_size=2,
        ...     in_features=28 * 28,
        ...     learning_rate=0.001 * 8,
        ...     optimizer_name='adam',
        ...     data_root='./datasets',
        ...     out_features=10,
        ...     hidden_dim=1000,
        ... )
        >>> model = LightningTemplateModel(**params)
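        >>> # quick sanity check: a dummy batch of two flattened images yields 10 logits each
        >>> model(torch.rand(2, 28 * 28)).shape
        torch.Size([2, 10])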
    """

    def __init__(self,
                 drop_prob: float = 0.2,
                 batch_size: int = 2,
                 in_features: int = 28 * 28,
                 learning_rate: float = 0.001 * 8,
                 optimizer_name: str = 'adam',
                 data_root: str = './datasets',
                 out_features: int = 10,
                 hidden_dim: int = 1000,
                 **kwargs
                 ) -> None:
        # init superclass
        super().__init__()
        self.drop_prob = drop_prob
        self.batch_size = batch_size
        self.in_features = in_features
        self.learning_rate = learning_rate
        self.optimizer_name = optimizer_name
        self.data_root = data_root
        self.out_features = out_features
        self.hidden_dim = hidden_dim
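
        # The network is a small two-layer MLP:
        # in_features -> hidden_dim (tanh, batch norm, dropout) -> out_features logits.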
        self.c_d1 = nn.Linear(in_features=self.in_features,
                              out_features=self.hidden_dim)
        self.c_d1_bn = nn.BatchNorm1d(self.hidden_dim)
        self.c_d1_drop = nn.Dropout(self.drop_prob)

        self.c_d2 = nn.Linear(in_features=self.hidden_dim,
                              out_features=self.out_features)

    def forward(self, x):
        """
        No special modification required for Lightning; define it as you normally
        would for an `nn.Module` in vanilla PyTorch.
        """
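        # Flatten e.g. (B, 1, 28, 28) MNIST images to (B, 784) before the first linear layer.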
        x = self.c_d1(x.view(x.size(0), -1))
        x = torch.tanh(x)
        x = self.c_d1_bn(x)
        x = self.c_d1_drop(x)
        x = self.c_d2(x)
        return x

    def training_step(self, batch, batch_idx):
        """
        Lightning calls this inside the training loop with the data from the training
        dataloader passed in as `batch`.
        """
        # forward pass
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
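        # In this (pre-1.0) Lightning API, the 'loss' key is what gets backpropagated,
        # while the 'log' dict is forwarded to the attached logger (TensorBoard by default).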
        tensorboard_logs = {'train_loss': loss}
        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_idx):
        """
        Lightning calls this inside the validation loop with the data from the validation
        dataloader passed in as `batch`.
        """
        x, y = batch
        y_hat = self(x)
        val_loss = F.cross_entropy(y_hat, y)
        labels_hat = torch.argmax(y_hat, dim=1)
        n_correct_pred = torch.sum(y == labels_hat).item()
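        # Returning raw counts rather than a per-batch accuracy lets the epoch-end hook
        # compute an exact accuracy even when the final batch is smaller than the rest.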
        return {'val_loss': val_loss, 'n_correct_pred': n_correct_pred, 'n_pred': len(x)}

    def test_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        test_loss = F.cross_entropy(y_hat, y)
        labels_hat = torch.argmax(y_hat, dim=1)
        n_correct_pred = torch.sum(y == labels_hat).item()
        return {'test_loss': test_loss, 'n_correct_pred': n_correct_pred, 'n_pred': len(x)}

    def validation_epoch_end(self, outputs):
        """
        Called at the end of validation to aggregate outputs.

        :param outputs: list of individual outputs of each validation step.
        """
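        # `outputs` holds one dict per validation_step call, e.g.
        # [{'val_loss': tensor, 'n_correct_pred': int, 'n_pred': int}, ...].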
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        val_acc = sum(x['n_correct_pred'] for x in outputs) / sum(x['n_pred'] for x in outputs)
        tensorboard_logs = {'val_loss': avg_loss, 'val_acc': val_acc}
        return {'val_loss': avg_loss, 'log': tensorboard_logs}

    def test_epoch_end(self, outputs):
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
        test_acc = sum(x['n_correct_pred'] for x in outputs) / sum(x['n_pred'] for x in outputs)
        tensorboard_logs = {'test_loss': avg_loss, 'test_acc': test_acc}
        return {'test_loss': avg_loss, 'log': tensorboard_logs}

    # ---------------------
    # TRAINING SETUP
    # ---------------------
    def configure_optimizers(self):
        """
        Return whatever optimizers and learning rate schedulers you want here.
        At least one optimizer is required.
        """
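        # Although `optimizer_name` is accepted in __init__, this template always uses Adam;
        # the ([optimizers], [schedulers]) pair below is the format Lightning expects.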
        optimizer = optim.Adam(self.parameters(), lr=self.learning_rate)
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
        return [optimizer], [scheduler]

    def prepare_data(self):
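        # Download once and cache; Normalize((0.5,), (1.0,)) shifts pixel values
        # from [0, 1] to [-0.5, 0.5].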
        transform = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize((0.5,), (1.0,))])
        self.mnist_train = MNIST(self.data_root, train=True, download=True, transform=transform)
        self.mnist_test = MNIST(self.data_root, train=False, download=True, transform=transform)

    def train_dataloader(self):
        log.info('Training data loader called.')
        return DataLoader(self.mnist_train, batch_size=self.batch_size, num_workers=4)
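
    # NOTE: to keep the template short, the MNIST test split below doubles as the
    # validation set; a real project would hold out a separate validation split.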

    def val_dataloader(self):
        log.info('Validation data loader called.')
        return DataLoader(self.mnist_test, batch_size=self.batch_size, num_workers=4)

    def test_dataloader(self):
        log.info('Test data loader called.')
        return DataLoader(self.mnist_test, batch_size=self.batch_size, num_workers=4)

    @staticmethod
    def add_model_specific_args(parent_parser, root_dir):  # pragma: no cover
        """
        Define parameters that only apply to this model.
        """
        parser = ArgumentParser(parents=[parent_parser])

        # param overwrites
        # parser.set_defaults(gradient_clip_val=5.0)

        # network params
        parser.add_argument('--in_features', default=28 * 28, type=int)
        parser.add_argument('--out_features', default=10, type=int)
        # use 500 for CPU, 50000 for GPU to see speed difference
        parser.add_argument('--hidden_dim', default=50000, type=int)
        parser.add_argument('--drop_prob', default=0.2, type=float)
        parser.add_argument('--learning_rate', default=0.001, type=float)

        # data
        parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)

        # training params (opt)
        parser.add_argument('--epochs', default=20, type=int)
        parser.add_argument('--optimizer_name', default='adam', type=str)
        parser.add_argument('--batch_size', default=64, type=int)
        return parser
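

if __name__ == '__main__':
    # A minimal, hypothetical usage sketch (an assumption, kept deliberately simple):
    # assuming the standard `pytorch_lightning.Trainer` API, a run script could wire
    # things up roughly like this. The `add_help=False` parent parser avoids an
    # argparse conflict when it is passed via `parents=[...]` above.
    from pytorch_lightning import Trainer

    root_dir = os.path.dirname(os.path.realpath(__file__))
    parent_parser = ArgumentParser(add_help=False)
    parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
    hparams = parser.parse_args()

    model = LightningTemplateModel(**vars(hparams))
    trainer = Trainer(max_epochs=hparams.epochs)
    trainer.fit(model)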