from collections import OrderedDict

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader

from tests.base.datasets import TrialMNIST, AverageDataset, MNIST

try:
    from test_tube import HyperOptArgumentParser
except ImportError:
    # TODO: this should be discussed and moved out of this package
    raise ImportError('Missing test-tube package.')

from pytorch_lightning.core.lightning import LightningModule


class Generator(nn.Module):
    def __init__(self, latent_dim: int, img_shape: tuple):
        super().__init__()
        self.img_shape = img_shape

        def block(in_feat, out_feat, normalize=True):
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                # NB: 0.8 is passed positionally as BatchNorm1d's `eps`; the
                # upstream GAN example this mirrors likely meant `momentum=0.8`
                layers.append(nn.BatchNorm1d(out_feat, 0.8))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *block(latent_dim, 128, normalize=False),
            *block(128, 256),
            *block(256, 512),
            *block(512, 1024),
            nn.Linear(1024, int(np.prod(img_shape))),
            nn.Tanh()
        )

    def forward(self, z):
        img = self.model(z)
        img = img.view(img.size(0), *self.img_shape)
        return img

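
# A minimal shape sketch (illustrative, not part of the original tests): with
# latent_dim=100 and img_shape=(1, 28, 28), the generator maps noise of shape
# (N, latent_dim) to images of shape (N, 1, 28, 28):
#
#     g = Generator(latent_dim=100, img_shape=(1, 28, 28))
#     fake = g(torch.randn(4, 100))
#     assert fake.shape == (4, 1, 28, 28)
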
class Discriminator(nn.Module):
    def __init__(self, img_shape: tuple):
        super().__init__()

        self.model = nn.Sequential(
            nn.Linear(int(np.prod(img_shape)), 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )

    def forward(self, img):
        img_flat = img.view(img.size(0), -1)
        validity = self.model(img_flat)
        return validity

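
# Design note: the trailing `nn.Sigmoid()` keeps the output in (0, 1) so it
# pairs with `F.binary_cross_entropy` in TestGAN below; a more numerically
# stable variant (not used here) would emit raw logits and use
# `F.binary_cross_entropy_with_logits` instead.
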
class TestGAN(LightningModule):
    """Implements a basic GAN for the purpose of illustrating multiple optimizers."""

    def __init__(self, hidden_dim, learning_rate, b1, b2, **kwargs):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.learning_rate = learning_rate
        self.b1 = b1
        self.b2 = b2

        # networks
        mnist_shape = (1, 28, 28)
        self.generator = Generator(latent_dim=self.hidden_dim, img_shape=mnist_shape)
        self.discriminator = Discriminator(img_shape=mnist_shape)

        # cache for generated images
        self.generated_imgs = None
        self.last_imgs = None

        self.example_input_array = torch.rand(2, self.hidden_dim)

    def forward(self, z):
        return self.generator(z)

    def adversarial_loss(self, y_hat, y):
        return F.binary_cross_entropy(y_hat, y)

    def training_step(self, batch, batch_idx, optimizer_idx=None):
        imgs, _ = batch
        self.last_imgs = imgs

        # train generator
        if optimizer_idx == 0:
            # sample noise
            z = torch.randn(imgs.shape[0], self.hidden_dim)
            z = z.type_as(imgs)

            # generate images
            self.generated_imgs = self(z)

            # ground truth: the generated images are fake, but the generator is
            # trained to make the discriminator label them as valid (all ones);
            # `type_as` moves the tensor to the right device, since it was
            # created inside the training loop
            valid = torch.ones(imgs.size(0), 1)
            valid = valid.type_as(imgs)

            # adversarial loss is binary cross-entropy
            g_loss = self.adversarial_loss(self.discriminator(self.generated_imgs), valid)
            tqdm_dict = {'g_loss': g_loss}
            output = OrderedDict({
                'loss': g_loss,
                'progress_bar': tqdm_dict,
                'log': tqdm_dict
            })
            return output

        # train discriminator
        if optimizer_idx == 1:
            # measure the discriminator's ability to classify real from generated samples

            # how well can it label real images as real?
            valid = torch.ones(imgs.size(0), 1)
            valid = valid.type_as(imgs)

            real_loss = self.adversarial_loss(self.discriminator(imgs), valid)

            # how well can it label generated images as fake?
            fake = torch.zeros(imgs.size(0), 1)
            fake = fake.type_as(imgs)

            fake_loss = self.adversarial_loss(self.discriminator(self.generated_imgs.detach()), fake)

            # discriminator loss is the average of these
            d_loss = (real_loss + fake_loss) / 2
            tqdm_dict = {'d_loss': d_loss}
            output = OrderedDict({
                'loss': d_loss,
                'progress_bar': tqdm_dict,
                'log': tqdm_dict
            })
            return output

    def configure_optimizers(self):
        lr = self.learning_rate
        b1 = self.b1
        b2 = self.b2

        opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
        return [opt_g, opt_d], []

    def train_dataloader(self):
        return DataLoader(TrialMNIST(train=True, download=True), batch_size=16)

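
# Usage sketch (hyperparameter values are illustrative, not taken from the
# tests): because `configure_optimizers` returns two optimizers, Lightning
# calls `training_step` once per optimizer for each batch, passing
# `optimizer_idx=0` for the generator and `optimizer_idx=1` for the
# discriminator:
#
#     from pytorch_lightning import Trainer
#     model = TestGAN(hidden_dim=100, learning_rate=0.0002, b1=0.5, b2=0.999)
#     Trainer(fast_dev_run=True).fit(model)
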
class ParityModuleRNN(LightningModule):
    def __init__(self):
        super().__init__()
        self.rnn = nn.LSTM(10, 20, batch_first=True)
        self.linear_out = nn.Linear(in_features=20, out_features=5)

    def forward(self, x):
        seq, last = self.rnn(x)
        return self.linear_out(seq)

    def training_step(self, batch, batch_nb):
        x, y = batch
        y_hat = self(x)
        loss = F.mse_loss(y_hat, y)
        return {'loss': loss}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)

    def train_dataloader(self):
        return DataLoader(AverageDataset(), batch_size=30)

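
# Shape sketch (illustrative; assumes `AverageDataset` yields batch-first
# sequences with 10 features): `nn.LSTM(10, 20, batch_first=True)` maps
# (batch, seq_len, 10) to (batch, seq_len, 20), and the linear head maps that
# to (batch, seq_len, 5):
#
#     m = ParityModuleRNN()
#     out = m(torch.randn(3, 7, 10))
#     assert out.shape == (3, 7, 5)
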
class ParityModuleMNIST(LightningModule):
    def __init__(self):
        super().__init__()
        self.c_d1 = nn.Linear(in_features=28 * 28, out_features=128)
        self.c_d1_bn = nn.BatchNorm1d(128)
        self.c_d1_drop = nn.Dropout(0.3)
        self.c_d2 = nn.Linear(in_features=128, out_features=10)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        x = self.c_d1(x)
        x = torch.tanh(x)
        x = self.c_d1_bn(x)
        x = self.c_d1_drop(x)
        x = self.c_d2(x)
        return x

    def training_step(self, batch, batch_nb):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        return {'loss': loss}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)

    def train_dataloader(self):
        return DataLoader(MNIST(train=True, download=True), batch_size=128)
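
# A quick smoke-run sketch for the parity modules (illustrative only):
#
#     from pytorch_lightning import Trainer
#     Trainer(fast_dev_run=True).fit(ParityModuleMNIST())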