resolving documentation warnings (#833)
* add more underline
* fix LightningMudule import error
* remove unneeded blank line
* escape asterisk to fix inline emphasis warning
* add PULL_REQUEST_TEMPLATE.md
* add __init__.py and import imagenet_example
* fix duplicate label
* add noindex option to fix duplicate object warnings
* remove unexpected indent
* refer explicit LightningModule
* fix minor bug
* refer EarlyStopping explicitly
* restore exclude patterns
* change the way how to refer class
* remove unused import
* update badges & drop Travis/Appveyor (#826)
* drop Travis
* drop Appveyor
* update badges
* fix missing PyPI images & CI badges (#853)
* docs - anchor links (#848)
* docs - add links
* add desc.
* add Greeting action (#843)
* add Greeting action
* Update greetings.yml
Co-authored-by: William Falcon <waf2107@columbia.edu>
* add pep8speaks (#842)
* advanced profiler describe + cleaned up tests (#837)
* add py36 compatibility
* add test case to capture previous bug
* clean up tests
* clean up tests
* Update lightning_module_template.py
* Update lightning.py
* respond lint issues
* break long line
* break more lines
* checkout conflicting files from master
* shorten url
* checkout from upstream/master
* remove trailing whitespaces
* remove unused import LightningModule
* fix sphinx bot warnings
* Apply suggestions from code review just to trigger CI
* Update .github/workflows/greetings.yml
Co-authored-by: Jirka Borovec <Borda@users.noreply.github.com>
Co-authored-by: William Falcon <waf2107@columbia.edu>
Co-authored-by: Jeremy Jordan <13970565+jeremyjordan@users.noreply.github.com>
parent f5e0df390c
commit 563e2ba2c6
@@ -10,5 +10,5 @@ jobs:
- uses: actions/first-interaction@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
issue-message: 'Hey, thanks for your contribution! Great first issue!'
pr-message: 'Hey, thanks for the input! Please give us a bit of time to review it!'
issue-message: 'Hi! thanks for your contribution!, great first issue!'
pr-message: 'Hey thanks for the input! Please give us a bit of time to review it!'
@@ -5,7 +5,7 @@ scanner:
linter: pycodestyle # Other option is flake8

pycodestyle: # Same as scanner.linter value. Other option is flake8
max-line-length: 120 # Default is 79 in PEP 8
max-line-length: 100 # Default is 79 in PEP 8
ignore: # Errors and warnings to ignore
- W504 # line break after binary operator
- E402 # module level import not at top of file
@@ -4,6 +4,7 @@
Callbacks
=========
.. automodule:: pytorch_lightning.callbacks
:noindex:
:exclude-members:
_del_model,
_save_model,

@@ -11,4 +12,4 @@ Callbacks
on_train_end,
on_epoch_start,
check_monitor_top_k,
on_train_start,
on_train_start,
@@ -71,7 +71,7 @@ If you want to pick up training from where you left off, you have a few options.
trainer = Trainer(logger=logger)
trainer.fit(model)

2. A second option is to pass in a path to a checkpoint (see: :ref:`pytorch_lightning.trainer`).
2. A second option is to pass in a path to a checkpoint (see: :ref:`pytorch_lightning.trainer.trainer.Trainer`).

.. code-block:: python
@@ -355,10 +355,10 @@ autoclass_content = 'both'
autodoc_default_options = {
'members': None,
'special-members': '__call__',
'undoc-members': True,
# 'exclude-members': '__weakref__',
'show-inheritance': True,
'private-members': True,
'noindex': True,
}

# Sphinx will add “permalinks” for each heading and description environment as paragraph signs that
@@ -71,6 +71,7 @@ PyTorch-Lightning Documentation
CODE_OF_CONDUCT.md
CONTRIBUTING.md
BECOMING_A_CORE_CONTRIBUTOR.md
PULL_REQUEST_TEMPLATE.md
governance.md

Indices and tables
@@ -5,6 +5,7 @@ LightningModule
===============

.. automodule:: pytorch_lightning.core
:noindex:
:exclude-members:
_abc_impl,
summarize,
@@ -4,6 +4,7 @@
Loggers
===========
.. automodule:: pytorch_lightning.loggers
:noindex:
:exclude-members:
_abc_impl,
_save_model,
@@ -53,9 +53,9 @@ Lightning will call each optimizer sequentially:


Step optimizers at arbitrary intervals
-------------------------------------
----------------------------------------
To do more interesting things with your optimizers such as learning rate warm-up or odd scheduling,
override the :meth:`optimizer_step' function.
override the :meth:`optimizer_step` function.

For example, here step optimizer A every 2 batches and optimizer B every 4 batches

@@ -96,4 +96,4 @@ Here we add a learning-rate warm up

# update params
optimizer.step()
optimizer.zero_grad()
optimizer.zero_grad()
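For context, the pattern this optimizers doc describes (stepping optimizer A every 2 batches and optimizer B every 4) can be sketched roughly as below. This is an illustrative sketch against the LightningModule hook of that era, not a line taken from the docs; the exact optimizer_step signature may differ between releases.

    class MyModel(LightningModule):

        def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                           second_order_closure=None):
            # optimizer A (index 0): step every 2 batches
            if optimizer_idx == 0 and batch_idx % 2 == 0:
                optimizer.step()
                optimizer.zero_grad()

            # optimizer B (index 1): step every 4 batches
            if optimizer_idx == 1 and batch_idx % 4 == 0:
                optimizer.step()
                optimizer.zero_grad()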
@@ -3,8 +3,9 @@


Performance and Bottleneck Profiler
===========
===================================
.. automodule:: pytorch_lightning.profiler
:noindex:
:exclude-members:
_abc_impl,
summarize,
@@ -6,6 +6,7 @@ Trainer

.. automodule:: pytorch_lightning.trainer
:members: fit, test
:noindex:
:exclude-members:
run_pretrain_routine,
_abc_impl,
@@ -3,8 +3,8 @@ Template model definition
-------------------------

In 99% of cases you want to just copy `one of the examples
<https://github.com/PyTorchLightning/pytorch-lightning/tree/master/pl_examples>`_
to start a new lightningModule and change the core of what your model is actually trying to do.
<https://github.com/PyTorchLightning/pytorch-lightning/tree/master/pl_examples>`_
to start a new lightningModule and change the core of what your model is actually trying to do.

.. code-block:: bash
@@ -15,10 +15,11 @@ from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision.datasets import MNIST

import pytorch_lightning as pl
from pytorch_lightning.core import LightningModule
from pytorch_lightning.core import data_loader


class LightningTemplateModel(pl.LightningModule):
class LightningTemplateModel(LightningModule):
"""
Sample model to show how to define a template
"""
@@ -19,7 +19,9 @@ import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST

import pytorch_lightning as pl
from pytorch_lightning.core import LightningModule
from pytorch_lightning.core import data_loader
from pytorch_lightning.trainer import Trainer


class Generator(nn.Module):

@@ -69,7 +71,7 @@ class Discriminator(nn.Module):
return validity


class GAN(pl.LightningModule):
class GAN(LightningModule):

def __init__(self, hparams):
super(GAN, self).__init__()

@@ -165,7 +167,7 @@ class GAN(pl.LightningModule):
opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
return [opt_g, opt_d], []

@pl.data_loader
@data_loader
def train_dataloader(self):
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])])

@@ -193,7 +195,7 @@ def main(hparams):
# ------------------------
# 2 INIT TRAINER
# ------------------------
trainer = pl.Trainer()
trainer = Trainer()

# ------------------------
# 3 START TRAINING
@@ -19,6 +19,8 @@ import torchvision.models as models
import torchvision.transforms as transforms

import pytorch_lightning as pl
from pytorch_lightning.core import LightningModule
from pytorch_lightning.core import data_loader

# pull out resnet names from torchvision models
MODEL_NAMES = sorted(

@@ -27,9 +29,11 @@ MODEL_NAMES = sorted(
)


class ImageNetLightningModel(pl.LightningModule):

class ImageNetLightningModel(LightningModule):
def __init__(self, hparams):
"""
TODO: add docstring here
"""
super(ImageNetLightningModel, self).__init__()
self.hparams = hparams
self.model = models.__dict__[self.hparams.arch](pretrained=self.hparams.pretrained)

@@ -128,7 +132,7 @@ class ImageNetLightningModel(pl.LightningModule):
scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.1)
return [optimizer], [scheduler]

@pl.data_loader
@data_loader
def train_dataloader(self):
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],

@@ -159,7 +163,7 @@ class ImageNetLightningModel(pl.LightningModule):
)
return train_loader

@pl.data_loader
@data_loader
def val_dataloader(self):
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
@@ -365,7 +365,6 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):

def test_step(self, *args, **kwargs):
"""return whatever outputs will need to be aggregated in test_end

:param batch: The output of your dataloader. A tensor, tuple or list
:param int batch_idx: Integer displaying which batch this is
:param int dataloader_idx: Integer displaying which dataloader this is (only if multiple test datasets used)

@@ -381,11 +380,13 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):


**OPTIONAL**
If you don't need to test you don't need to implement this method. In this step you'd normally
generate examples or calculate anything of interest such as accuracy.
If you don't need to test you don't need to implement this method.
In this step you'd normally generate examples or
calculate anything of interest such as accuracy.

When the validation_step is called, the model has been put in eval mode and PyTorch gradients
have been disabled. At the end of validation, model goes back to training mode and gradients are enabled.
When the validation_step is called, the model has been put in eval mode
and PyTorch gradients have been disabled.
At the end of validation, model goes back to training mode and gradients are enabled.

The dict you return here will be available in the `test_end` method.

@@ -578,7 +579,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):
3. On a testing batch, the call goes to model.test_step

Args:
model (LightningModule): the LightningModule currently being optimized
model (:class:`.LightningModule`): the LightningModule currently being optimized
device_ids (list): the list of GPU ids

Return:

@@ -692,7 +693,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):

Args:
amp (object): pointer to amp library object
model (LightningModule): pointer to current lightningModule
model (:class:`.LightningModule`): pointer to current lightningModule
optimizers (list): list of optimizers passed in configure_optimizers()
amp_level (str): AMP mode chosen ('O1', 'O2', etc...)

@@ -1087,7 +1088,6 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):
@classmethod
def load_from_metrics(cls, weights_path, tags_csv, map_location=None):
r"""

You should use `load_from_checkpoint` instead!
However, if your .ckpt weights don't have the hyperparameters saved, use this method to pass
in a .csv with the hparams you'd like to use. These will be converted into a argparse.Namespace

@@ -1097,10 +1097,11 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):

weights_path (str): Path to a PyTorch checkpoint
tags_csv (str): Path to a .csv with two columns (key, value) as in this
Example::
key,value
drop_prob,0.2
batch_size,32

Example::
key,value
drop_prob,0.2
batch_size,32

map_location (dict | str | torch.device | function):
If your checkpoint saved a GPU model and you now load on CPUs
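A minimal usage sketch of the load_from_metrics signature shown above; MyLightningModule and both paths are placeholders for illustration only.

    # Hypothetical subclass and file paths, shown only to illustrate the call.
    model = MyLightningModule.load_from_metrics(
        weights_path='/path/to/checkpoint.ckpt',
        tags_csv='/path/to/hparams.csv',
        map_location='cpu',
    )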
@@ -1163,7 +1164,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):

model = MyModel(hparams)

class MyModel(pl.LightningModule):
class MyModel(LightningModule):
def __init__(self, hparams):
self.learning_rate = hparams.learning_rate

@@ -1172,7 +1173,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks):
# when using a dict
model = MyModel({'learning_rate': 0.1})

class MyModel(pl.LightningModule):
class MyModel(LightningModule):
def __init__(self, hparams):
self.learning_rate = hparams['learning_rate']
@@ -277,6 +277,7 @@ def get_human_readable_count(number):
"""
Abbreviates an integer number with K, M, B, T for thousands, millions,
billions and trillions, respectively.

Examples:
123 -> 123
1234 -> 1 K (one thousand)

@@ -284,8 +285,9 @@ def get_human_readable_count(number):
3e9 -> 3 B (three billion)
4e12 -> 4 T (four trillion)
5e15 -> 5,000 T

:param number: a positive integer number
:returns a string formatted according to the pattern described above.
:return: a string formatted according to the pattern described above.
"""
assert number >= 0
labels = [' ', 'K', 'M', 'B', 'T']
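The behaviour that docstring describes can be sketched as follows; this is an illustrative re-implementation, not the function's actual body.

    def human_readable_count(number):
        # Abbreviate with K/M/B/T, capping at trillions, e.g. 5e15 -> '5,000 T'.
        assert number >= 0
        labels = [' ', 'K', 'M', 'B', 'T']
        group = 0
        while number >= 1000 and group < len(labels) - 1:
            number /= 1000
            group += 1
        return f"{int(number):,d} {labels[group]}".strip()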
@@ -1,5 +1,5 @@
"""
Log using `mlflow <https://mlflow.org>'_
Log using `mlflow <https://mlflow.org>`_

.. code-block:: python
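For reference, a minimal usage sketch of the MLflow logger, assuming a constructor that takes experiment_name and tracking_uri; the import path has shifted across releases, so treat it as illustrative rather than the docstring's own example.

    from pytorch_lightning import Trainer
    from pytorch_lightning.loggers import MLFlowLogger  # pytorch_lightning.logging in older releases

    mlf_logger = MLFlowLogger(experiment_name='default', tracking_uri='file:./mlruns')
    trainer = Trainer(logger=mlf_logger)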
@@ -103,8 +103,10 @@ class NeptuneLogger(LightningLoggerBase):
Must be list of str or single str. Uploaded sources are displayed in the experiment’s Source code tab.
If None is passed, Python file from which experiment was created will be uploaded.
Pass empty list ([]) to upload no files. Unix style pathname pattern expansion is supported.
For example, you can pass '*.py' to upload all python source files from the current directory.
For recursion lookup use '**/*.py' (for Python 3.5 and later). For more information see glob library.
For example, you can pass '\*.py'
to upload all python source files from the current directory.
For recursion lookup use '\**/\*.py' (for Python 3.5 and later).
For more information see glob library.
params (dict|None): Optional. Parameters of the experiment. After experiment creation params are read-only.
Parameters are displayed in the experiment’s Parameters section and each key-value pair can be
viewed in experiments view as a column.
@@ -269,14 +269,14 @@ Auto-slurm-job-submission
-------------------------

Instead of manually building SLURM scripts, you can use the
`SlurmCluster object <https://williamfalcon.github.io/test-tube/hpc/SlurmCluster>`_
to do this for you. The SlurmCluster can also run a grid search if you pass
in a `HyperOptArgumentParser
<https://williamfalcon.github.io/test-tube/hyperparameter_optimization/HyperOptArgumentParser>`_.
`SlurmCluster object <https://williamfalcon.github.io/test-tube/hpc/SlurmCluster>`_
to do this for you. The SlurmCluster can also run a grid search if you pass
in a `HyperOptArgumentParser
<https://williamfalcon.github.io/test-tube/hyperparameter_optimization/HyperOptArgumentParser>`_.

Here is an example where you run a grid search of 9 combinations of hyperparams.
The full examples are `here
<https://github.com/PyTorchLightning/pytorch-lightning/tree/master/pl_examples/new_project_templates/multi_node_examples>`_.
The full examples are
`here <https://git.io/Jv87p>`_.

.. code-block:: python
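A rough sketch of the 9-combination grid search this doc mentions, using test-tube's HyperOptArgumentParser; the option names and values here are illustrative and not taken from the full example.

    from test_tube import HyperOptArgumentParser

    parser = HyperOptArgumentParser(strategy='grid_search')
    # 3 learning rates x 3 batch sizes = 9 combinations
    parser.opt_list('--learning_rate', default=0.001, type=float,
                    options=[1e-4, 1e-3, 1e-2], tunable=True)
    parser.opt_list('--batch_size', default=32, type=int,
                    options=[32, 64, 128], tunable=True)
    hparams = parser.parse_args()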
@@ -153,8 +153,9 @@ class Trainer(TrainerIOMixin,

trainer = Trainer(checkpoint_callback=checkpoint_callback)

early_stop_callback: Callback for early stopping. If
set to ``True``, then the default callback monitoring ``'val_loss'`` is created.
early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`):
Callback for early stopping.
If set to ``True``, then the default callback monitoring ``'val_loss'`` is created.
Will raise an error if ``'val_loss'`` is not found.
If set to ``False``, then early stopping will be disabled.
If set to ``None``, then the default callback monitoring ``'val_loss'`` is created.
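For context, passing an explicit EarlyStopping callback as this docstring now references might look like the sketch below; the argument values are illustrative.

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import EarlyStopping

    early_stop = EarlyStopping(monitor='val_loss', patience=3, mode='min')
    trainer = Trainer(early_stop_callback=early_stop)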
@@ -1164,7 +1165,7 @@ class Trainer(TrainerIOMixin,
Separates from fit to make sure you never run on your test set until you want to.

Args:
model: The model to test.
model (:class:`.LightningModule`): The model to test.

Example::
@@ -114,7 +114,7 @@ Packed sequences as inputs

When using PackedSequence, do 2 things:
1. return either a padded tensor in dataset or a list of variable length tensors
in the dataloader collate_fn (example above shows the list implementation).
in the dataloader collate_fn (example above shows the list implementation).
2. Pack the sequence in forward or training and validation steps depending on use case.

.. code-block:: python
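Step 2 above (packing inside the training step) can be sketched as follows, assuming the collate_fn returns a padded batch together with the original sequence lengths; self.loss_fn is a placeholder for whatever loss the model defines.

    from torch.nn.utils.rnn import pack_padded_sequence

    def training_step(self, batch, batch_idx):
        padded, lengths, y = batch
        # pack the padded batch before feeding it to the recurrent model
        packed = pack_padded_sequence(padded, lengths, batch_first=True,
                                      enforce_sorted=False)
        y_hat = self(packed)
        loss = self.loss_fn(y_hat, y)
        return {'loss': loss}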
@@ -4,19 +4,17 @@ import pytest

from pytorch_lightning.profiler import Profiler, AdvancedProfiler

PROFILER_OVERHEAD_MAX_TOLERANCE = 0.001
PROFILER_OVERHEAD_MAX_TOLERANCE = 0.0001


@pytest.fixture
def simple_profiler():
"""Creates a new profiler for every test with `simple_profiler` as an arg."""
profiler = Profiler()
return profiler


@pytest.fixture
def advanced_profiler():
"""Creates a new profiler for every test with `advanced_profiler` as an arg."""
profiler = AdvancedProfiler()
return profiler

@@ -51,13 +49,10 @@ def test_simple_profiler_describe(simple_profiler):
simple_profiler.describe()


def _get_total_cprofile_duration(profile):
return sum([x.totaltime for x in profile.getstats()])


@pytest.mark.parametrize("action,expected", [("a", [3, 1]), ("b", [2]), ("c", [1])])
def test_advanced_profiler_durations(advanced_profiler, action, expected):
"""Ensure the reported durations are reasonably accurate."""
def _get_total_duration(profile):
return sum([x.totaltime for x in profile.getstats()])

for duration in expected:
with advanced_profiler.profile(action):

@@ -65,7 +60,7 @@ def test_advanced_profiler_durations(advanced_profiler, action, expected):

# different environments have different precision when it comes to time.sleep()
# see: https://github.com/PyTorchLightning/pytorch-lightning/issues/796
recored_total_duration = _get_total_cprofile_duration(
recored_total_duration = _get_total_duration(
advanced_profiler.profiled_actions[action]
)
expected_total_duration = np.sum(expected)

@@ -75,17 +70,21 @@ def test_advanced_profiler_durations(advanced_profiler, action, expected):


def test_advanced_profiler_overhead(advanced_profiler, n_iter=5):
"""Ensure that the profiler doesn't introduce too much overhead during training."""
"""
ensure that the profiler doesn't introduce too much overhead during training
"""
for _ in range(n_iter):
with advanced_profiler.profile("no-op"):
pass

action_profile = advanced_profiler.profiled_actions["no-op"]
total_duration = _get_total_cprofile_duration(action_profile)
total_duration = sum([x.totaltime for x in action_profile.getstats()])
average_duration = total_duration / n_iter
assert average_duration < PROFILER_OVERHEAD_MAX_TOLERANCE


def test_advanced_profiler_describe(advanced_profiler):
"""Ensure the profiler won't fail when reporting the summary."""
"""
ensure the profiler won't fail when reporting the summary
"""
advanced_profiler.describe()
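Beyond the tests above, a minimal usage sketch of the AdvancedProfiler attached to a Trainer, assuming the Trainer of this release accepts a profiler argument as the profiler docs describe.

    from pytorch_lightning import Trainer
    from pytorch_lightning.profiler import AdvancedProfiler

    profiler = AdvancedProfiler()
    trainer = Trainer(profiler=profiler, max_epochs=1)
    # trainer.fit(model)
    profiler.describe()  # prints a per-action cProfile summary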