Fix typo in Quick Start/Step-by-step walk-through (#3007)

* Fix typo in Quick Start/Step-by-step walk-through

* Fix typo in Quick Start/Step-by-step walk-through

* Fix snippets in lightning module

* Remove testblock 

doctest does not have torch with CUDA, so x.cuda() will fail

* Remove test code

"..." is not python, so doctests fail

* Fix #3005

* Fix indentation, stage in docs

Co-authored-by: Adrian Wälchli <aedu.waelchli@gmail.com>
Co-authored-by: Teddy Koker <teddy.koker@gmail.com>
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
Davian Yang 2020-08-19 04:21:38 +06:30 committed by GitHub
parent 6939f6fc11
commit 9f6be96f84
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 12 additions and 10 deletions

View File

@@ -1,6 +1,7 @@
 .. testsetup:: *

     from pytorch_lightning.core.lightning import LightningModule
+    from pytorch_lightning.core.datamodule import LightningDataModule
     from pytorch_lightning.trainer.trainer import Trainer

 .. _introduction-guide:
@@ -259,9 +260,9 @@ In this case, it's better to group the full definition of a dataset into a `Data
 - Val dataloader(s)
 - Test dataloader(s)

-.. code-block:: python
+.. testcode:: python

-    class MyDataModule(pl.DataModule):
+    class MyDataModule(LightningDataModule):

         def __init__(self):
             super().__init__()
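Since the snippet now runs under the docs' doctest build (that is what the move from code-block to testcode implies), it has to use names that the testsetup block imports, hence the new LightningDataModule import and the dropped pl. prefix. For orientation, a minimal runnable sketch of the kind of DataModule the snippet describes; the in-memory tensors and batch size are illustrative assumptions, not part of this diff:

import torch
from torch.utils.data import DataLoader, TensorDataset
from pytorch_lightning.core.datamodule import LightningDataModule


class MyDataModule(LightningDataModule):
    def __init__(self):
        super().__init__()
        # illustrative in-memory splits; a real module would point at actual data
        self.train_set = TensorDataset(torch.randn(64, 10))
        self.val_set = TensorDataset(torch.randn(16, 10))
        self.test_set = TensorDataset(torch.randn(16, 10))

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=8)

    def val_dataloader(self):
        return DataLoader(self.val_set, batch_size=8)

    def test_dataloader(self):
        return DataLoader(self.test_set, batch_size=8)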

View File

@@ -51,7 +51,7 @@ Notice a few things.
     # or to init a new tensor
     new_x = torch.Tensor(2, 3)
-    new_x = new_x.type_as(x.type())
+    new_x = new_x.type_as(x)

 5. There are no samplers for distributed, Lightning also does this for you.
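For context, a small sketch of why the corrected call matters: Tensor.type_as expects another tensor and casts the receiver to that tensor's type (dtype, and the CUDA variant when the reference lives on GPU), whereas x.type() returns a type string rather than a tensor. The tensors below are made up for illustration:

import torch

x = torch.randn(2, 3)        # reference tensor; in Lightning this is typically a batch already on the right device
new_x = torch.Tensor(2, 3)   # freshly created tensor, default type, on CPU
new_x = new_x.type_as(x)     # cast new_x to x's tensor type

# The old form, new_x.type_as(x.type()), passed a type string instead of a tensor,
# which is not what type_as expects.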

View File

@@ -1,6 +1,7 @@
 .. testsetup:: *

     from pytorch_lightning.core.lightning import LightningModule
+    from pytorch_lightning.core.datamodule import LightningDataModule
     from pytorch_lightning.trainer.trainer import Trainer
     import os
     import torch
@@ -357,9 +358,9 @@ And the matching code:
 |

-.. code-block::
+.. testcode:: python

-    class MNISTDataModule(pl.LightningDataModule):
+    class MNISTDataModule(LightningDataModule):

         def __init__(self, batch_size=32):
             super().__init__()
@@ -407,7 +408,7 @@ over download/prepare/splitting data
 .. code-block:: python

-    class MyDataModule(pl.DataModule):
+    class MyDataModule(LightningDataModule):

         def prepare_data(self):
             # called only on 1 GPU
@@ -415,7 +416,7 @@ over download/prepare/splitting data
             tokenize()
             etc()

-        def setup(self):
+        def setup(self, stage=None):
             # called on every GPU (assigning state is OK)
             self.train = ...
             self.val = ...
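A minimal runnable sketch of the split this hunk documents, including the stage argument the diff adds; download/tokenize/etc are placeholders in the guide, so the data handling here is stubbed with stand-in lists:

from pytorch_lightning.core.datamodule import LightningDataModule


class MyDataModule(LightningDataModule):
    def prepare_data(self):
        # called only on one process: safe place to download, tokenize, write to disk
        pass

    def setup(self, stage=None):
        # called on every GPU/process (assigning state is OK)
        # stage is "fit" or "test", or None when not specified
        if stage == "fit" or stage is None:
            self.train = list(range(80))   # stand-in splits
            self.val = list(range(20))
        if stage == "test" or stage is None:
            self.test = list(range(20))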
@@ -432,7 +433,7 @@ First, define the information that you might need.
 .. code-block:: python

-    class MyDataModule(pl.DataModule):
+    class MyDataModule(LightningDataModule):

         def __init__(self):
             super().__init__()
@@ -444,7 +445,7 @@ First, define the information that you might need.
             tokenize()
             build_vocab()

-        def setup(self):
+        def setup(self, stage=None):
             vocab = load_vocab
             self.vocab_size = len(vocab)
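And a sketch of the pattern in the last hunk: dataset-dependent attributes such as vocab_size are assigned in setup() so a model can read them before training. load_vocab is a placeholder in the guide, so a stand-in vocabulary is used below:

from pytorch_lightning.core.datamodule import LightningDataModule


class MyDataModule(LightningDataModule):
    def __init__(self):
        super().__init__()
        self.vocab_size = None

    def prepare_data(self):
        # download(), tokenize(), build_vocab() would run here, on a single process
        pass

    def setup(self, stage=None):
        vocab = ["<pad>", "<unk>", "hello", "world"]   # stand-in for load_vocab()
        self.vocab_size = len(vocab)


dm = MyDataModule()
dm.setup()
print(dm.vocab_size)  # a model could size its embedding layer from this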