.. testsetup:: *

    import torch
    from torch.nn import functional as F

    import pytorch_lightning as pl
    from pytorch_lightning.trainer.trainer import Trainer
    from pytorch_lightning.callbacks.base import Callback
    from pytorch_lightning.core.lightning import LightningModule

    class LitMNIST(LightningModule):

        def __init__(self):
            super().__init__()

        def train_dataloader(self):
            pass

        def val_dataloader(self):
            pass


Child Modules
-------------

Research projects tend to test different approaches to the same dataset.
This is very easy to do in Lightning with inheritance.

For example, imagine we now want to train an autoencoder to use as a feature extractor for MNIST images.
Recall that `LitMNIST` already defines all the dataloading, etc. The only things
that change in the `AutoEncoder` model are the init, forward, training, validation and test steps.

.. testcode::

    class Encoder(torch.nn.Module):
        pass

    class Decoder(torch.nn.Module):
        pass

    class AutoEncoder(LitMNIST):

        def __init__(self):
            super().__init__()
            self.encoder = Encoder()
            self.decoder = Decoder()

        def forward(self, x):
            # in production, the AutoEncoder is used as a feature extractor
            representation = self.encoder(x)
            return representation

        def training_step(self, batch, batch_idx):
            x, _ = batch

            representation = self(x)
            x_hat = self.decoder(representation)

            loss = F.mse_loss(x_hat, x)
            return loss

        def validation_step(self, batch, batch_idx):
            return self._shared_eval(batch, batch_idx, 'val')

        def test_step(self, batch, batch_idx):
            return self._shared_eval(batch, batch_idx, 'test')

        def _shared_eval(self, batch, batch_idx, prefix):
            x, _ = batch

            representation = self(x)
            x_hat = self.decoder(representation)

            loss = F.mse_loss(x_hat, x)
            result = pl.EvalResult()
            result.log(f'{prefix}_loss', loss)
            return result

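
The `Encoder` and `Decoder` above are left as placeholders. As a rough sketch (not part of the original example), a simple fully-connected pair for 1x28x28 MNIST images with a hypothetical `latent_dim` might look like this:

.. code-block:: python

    import torch.nn as nn

    class Encoder(nn.Module):
        def __init__(self, latent_dim=64):
            super().__init__()
            # flatten 1x28x28 images and compress them into a latent vector
            self.net = nn.Sequential(
                nn.Flatten(),
                nn.Linear(28 * 28, 256),
                nn.ReLU(),
                nn.Linear(256, latent_dim),
            )

        def forward(self, x):
            return self.net(x)

    class Decoder(nn.Module):
        def __init__(self, latent_dim=64):
            super().__init__()
            # expand the latent vector back into a flattened 28x28 image
            self.net = nn.Sequential(
                nn.Linear(latent_dim, 256),
                nn.ReLU(),
                nn.Linear(256, 28 * 28),
            )

        def forward(self, z):
            # reshape so the reconstruction loss can be computed against the input batch
            return self.net(z).view(-1, 1, 28, 28)
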

We can train this using the same Trainer:

.. code-block:: python

    autoencoder = AutoEncoder()
    trainer = Trainer()
    trainer.fit(autoencoder)

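
The validation loop runs automatically during `trainer.fit`. Assuming `LitMNIST` also defines a `test_dataloader`, the test loop defined above can be run the same way:

.. code-block:: python

    trainer.test(autoencoder)
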

Remember that the `forward` method should define the practical use of a LightningModule.
In this case, we want to use the `AutoEncoder` to extract image representations:

.. code-block:: python

    some_images = torch.rand(32, 1, 28, 28)
    representations = autoencoder(some_images)

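
When using the trained model as a feature extractor, you would typically switch it to evaluation mode and disable gradient tracking (a usage sketch, not part of the original example):

.. code-block:: python

    autoencoder.eval()
    with torch.no_grad():
        representations = autoencoder(some_images)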