From 25bb33ad45314888379db4d73f4fbce9ca24c482 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Fri, 6 Mar 2020 13:07:04 +0100
Subject: [PATCH] extend docs notes (#1004)

* wip

* notes

* WIP

* example

* notes
---
 .github/CONTRIBUTING.md            | 38 ++++++++++++++++++++++++++++--
 docs/source/conf.py                |  2 ++
 docs/source/introduction_guide.rst | 20 ++++++++++------
 docs/source/trainer.rst            |  4 ++++
 4 files changed, 55 insertions(+), 9 deletions(-)

diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 86b800ef4e..6069d720a7 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -74,12 +74,46 @@ A lot of good work has already been done in project mechanics (requirements.txt,
 ```
 autopep8 -v -r --max-line-length 120 --in-place .
 ```
-3. Documentation is using [Napoleon formatting with Google style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
+
+### Documentation
+
+We are using Sphinx with the Napoleon extension.
+Moreover, we follow the Google docstring style together with Python type annotations.
+
+- [Napoleon formatting with Google style](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
+- [ReStructured Text (reST)](https://docs.pylonsproject.org/projects/docs-style-guide/)
+- [Paragraph-level markup](https://www.sphinx-doc.org/en/1.5/markup/para.html)
+
+See the following short example of a sample function taking one positional argument and one optional argument with a default value:
+
+```python
+from typing import Optional
+
+def my_func(param_a: int, param_b: Optional[float] = None) -> str:
+    """Sample function.
+
+    Args:
+        param_a: first parameter
+        param_b: second parameter
+
+    Return:
+        sum of both numbers
+
+    Example:
+        Sample doctest example...
+        >>> my_func(1, 2)
+        '3'
+
+    .. note:: If you want to add something.
+    """
+    p = param_b if param_b else 0
+    return str(param_a + p)
+```
 
 ### Testing
 
 Test your work locally to speed up your work since so you can focus only in particular (failing) test-cases.
- To setup a local development environment, install both local and test dependecies:
+ To setup a local development environment, install both local and test dependencies:
 ```bash
 pip install -r requirements.txt
 pip install -r tests/requirements.txt
diff --git a/docs/source/conf.py b/docs/source/conf.py
index b77a67da6d..d1c0b54b0a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -354,6 +354,8 @@ autoclass_content = 'both'
 # see https://github.com/sphinx-doc/sphinx/issues/5459
 autodoc_default_options = {
     'members': None,
+    'methods': None,
+    # 'attributes': None,
     'special-members': '__call__',
     # 'exclude-members': '__weakref__',
     'show-inheritance': True,
diff --git a/docs/source/introduction_guide.rst b/docs/source/introduction_guide.rst
index 765ba8fdde..5d90539cc1 100644
--- a/docs/source/introduction_guide.rst
+++ b/docs/source/introduction_guide.rst
@@ -182,7 +182,8 @@ Here's the PyTorch code for loading MNIST
 
     # transforms
     # prepare transforms standard to MNIST
-    transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
+    transform=transforms.Compose([transforms.ToTensor(),
+                                  transforms.Normalize((0.1307,), (0.3081,))])
 
     # data
     mnist_train = MNIST(os.getcwd(), train=True, download=True)
@@ -201,8 +201,10 @@ the LightningModule
 class LitMNIST(pl.LightningModule):
 
     def train_dataloader(self):
-        transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
-        mnist_train = MNIST(os.getcwd(), train=True, download=False, transform=transform)
+        transform=transforms.Compose([transforms.ToTensor(),
+                                      transforms.Normalize((0.1307,), (0.3081,))])
+        mnist_train = MNIST(os.getcwd(), train=True, download=False,
+                            transform=transform)
         return DataLoader(mnist_train, batch_size=64)
 
 Notice the code is exactly the same, except now the training dataloading has been organized by the LightningModule
@@ -314,7 +317,8 @@ For clarity, we'll recall that the full LightningModule now looks like this.
         return x
 
     def train_dataloader(self):
-        transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
+        transform=transforms.Compose([transforms.ToTensor(),
+                                      transforms.Normalize((0.1307,), (0.3081,))])
         mnist_train = MNIST(os.getcwd(), train=True, download=False, transform=transform)
         return DataLoader(mnist_train, batch_size=64)
 
@@ -424,7 +428,7 @@ First, change the runtime to TPU (and reinstall lightning).
 
 Next, install the required xla library (adds support for PyTorch on TPUs)
 
-.. code-block:: default
+.. code-block:: python
 
     import collections
     from datetime import datetime, timedelta
@@ -627,8 +631,10 @@ sample split in the `train_dataloader` method.
         return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
 
     def val_dataloader(self):
-        transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
-        mnist_train = MNIST(os.getcwd(), train=True, download=False, transform=transform)
+        transform=transforms.Compose([transforms.ToTensor(),
+                                      transforms.Normalize((0.1307,), (0.3081,))])
+        mnist_train = MNIST(os.getcwd(), train=True, download=False,
+                            transform=transform)
         _, mnist_val = random_split(mnist_train, [55000, 5000])
         mnist_val = DataLoader(mnist_val, batch_size=64)
         return mnist_val
diff --git a/docs/source/trainer.rst b/docs/source/trainer.rst
index e83bd94e0d..c649e22fce 100644
--- a/docs/source/trainer.rst
+++ b/docs/source/trainer.rst
@@ -4,6 +4,10 @@
 Trainer
 =======
 
+
+:hidden:`Trainer`
+~~~~~~~~~~~~~~~~~
+
 .. automodule:: pytorch_lightning.trainer
    :members: fit, test
    :noindex:
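
The docstring in the CONTRIBUTING.md example above doubles as a doctest, so it can be checked locally before opening a PR. Below is a minimal sketch of such a check, assuming the snippet has been saved as a standalone module; the file name `my_module.py` is purely illustrative and not part of this patch.

```python
import doctest

# Hypothetical module holding the `my_func` example from the
# CONTRIBUTING.md snippet above; the name is illustrative only.
import my_module

# Run every doctest found in the module's docstrings and report failures;
# verbose=True echoes each example as it is executed.
results = doctest.testmod(my_module, verbose=True)
print(f"attempted={results.attempted}, failed={results.failed}")
```

The same doctests can also be collected package-wide with pytest's `--doctest-modules` option.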