commit 25bb33ad45 (parent b183782e83)

@@ -74,12 +74,46 @@ A lot of good work has already been done in project mechanics (requirements.txt,

```bash
autopep8 -v -r --max-line-length 120 --in-place .
```

3. Documentation uses [Napoleon formatting with Google style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)

### Documentation

We are using Sphinx with the Napoleon extension.
Moreover, we follow the Google docstring style with its type conventions.

- [Napoleon formatting with Google style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
- [reStructuredText (reST)](https://docs.pylonsproject.org/projects/docs-style-guide/)
- [Paragraph-level markup](https://www.sphinx-doc.org/en/1.5/markup/para.html)

See the following short example of a sample function that takes one positional integer and one optional float argument:

```python
from typing import Optional


def my_func(param_a: int, param_b: Optional[float] = None) -> str:
    """Sample function.

    Args:
        param_a: first parameter
        param_b: second parameter

    Returns:
        sum of both numbers

    Example:
        Sample doctest example...
        >>> my_func(1, 2)
        '3'

    .. note:: If you want to add something.
    """
    p = param_b if param_b is not None else 0
    return str(param_a + p)
```
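
Doctest examples like the one above can be executed to keep them honest; a minimal sketch using the standard `doctest` module (`my_module` is a hypothetical module containing `my_func`):

```python
# run all doctests in a module; ``my_module`` is a hypothetical module
# containing the ``my_func`` example above
import doctest

import my_module

results = doctest.testmod(my_module, verbose=True)
print(f"{results.attempted} tried, {results.failed} failed")
```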

### Testing

Test your work locally to speed up your iterations, since you can focus on the particular (failing) test cases.
To set up a local development environment, install both local and test dependencies:

```bash
pip install -r requirements.txt
pip install -r tests/requirements.txt
```
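
To focus on one failing case, you can target a single test rather than the whole suite; a minimal sketch using pytest's Python entry point (the test file path and test name below are hypothetical):

```python
# run a single (failing) test case instead of the whole suite;
# the file path and test name below are placeholders
import sys

import pytest

exit_code = pytest.main(["tests/test_models.py::test_example", "-v"])
sys.exit(exit_code)
```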

@@ -354,6 +354,8 @@ autoclass_content = 'both'

# see https://github.com/sphinx-doc/sphinx/issues/5459
autodoc_default_options = {
    'members': None,
    'methods': None,
    # 'attributes': None,
    'special-members': '__call__',
    # 'exclude-members': '__weakref__',
    'show-inheritance': True,

@@ -182,7 +182,8 @@ Here's the PyTorch code for loading MNIST

# prepare transforms standard to MNIST
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])

# data
mnist_train = MNIST(os.getcwd(), train=True, download=True)
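
As a side note, the constants 0.1307 and 0.3081 are the MNIST training-set mean and standard deviation; a quick illustrative sketch (not part of the tutorial) of how such values can be recomputed:

```python
# recompute the global mean/std that transforms.Normalize uses above;
# the result should land near the 0.1307 / 0.3081 constants
import os

import torch
from torchvision import transforms
from torchvision.datasets import MNIST

dataset = MNIST(os.getcwd(), train=True, download=True,
                transform=transforms.ToTensor())
data = torch.stack([img for img, _ in dataset])  # shape (60000, 1, 28, 28)
print(data.mean().item(), data.std().item())
```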

@@ -201,8 +202,10 @@ the LightningModule

class LitMNIST(pl.LightningModule):

    def train_dataloader(self):
        transform = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize((0.1307,), (0.3081,))])
        mnist_train = MNIST(os.getcwd(), train=True, download=False,
                            transform=transform)
        return DataLoader(mnist_train, batch_size=64)

Notice the code is exactly the same, except that the training dataloading is now organized by the LightningModule.
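
With the dataloading organized this way, training is driven entirely by the Trainer; a minimal sketch of running the module, assuming `LitMNIST` also defines the remaining required hooks (`forward`, `training_step`, `configure_optimizers`):

```python
# a sketch of running the module above; Lightning calls
# train_dataloader() itself to fetch the training data
import pytorch_lightning as pl

model = LitMNIST()
trainer = pl.Trainer(max_epochs=1)
trainer.fit(model)
```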

@@ -314,7 +317,8 @@ For clarity, we'll recall that the full LightningModule now looks like this.

        return x

    def train_dataloader(self):
        transform = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize((0.1307,), (0.3081,))])
        mnist_train = MNIST(os.getcwd(), train=True, download=False, transform=transform)
        return DataLoader(mnist_train, batch_size=64)

@@ -424,7 +428,7 @@ First, change the runtime to TPU (and reinstall lightning).

Next, install the required xla library (which adds support for PyTorch on TPUs)

.. code-block:: python

    import collections
    from datetime import datetime, timedelta

@@ -627,8 +631,10 @@ sample split in the `train_dataloader` method.

        return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}

    def val_dataloader(self):
        transform = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize((0.1307,), (0.3081,))])
        mnist_train = MNIST(os.getcwd(), train=True, download=False,
                            transform=transform)
        _, mnist_val = random_split(mnist_train, [55000, 5000])
        mnist_val = DataLoader(mnist_val, batch_size=64)
        return mnist_val
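
For completeness, the dataloader snippets above rely on the standard torch/torchvision import locations; a short reference sketch of the imports they assume:

```python
# imports assumed by the dataloader snippets above
import os

from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets import MNIST
```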

@@ -4,6 +4,10 @@

Trainer
=======


:hidden:`Trainer`
~~~~~~~~~~~~~~~~~

.. automodule:: pytorch_lightning.trainer
   :members: fit, test
   :noindex:
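
The two members documented here are the Trainer's main entry points; a minimal sketch of how they pair up (`MyModel` is a hypothetical LightningModule subclass):

```python
# the two documented entry points in action; MyModel is a placeholder
# LightningModule subclass
import pytorch_lightning as pl

model = MyModel()
trainer = pl.Trainer()
trainer.fit(model)   # run the training loop
trainer.test(model)  # run the test loop and report test metrics
```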