From 563e2ba2c6eabd7705ed4e2851e2c484d5934f94 Mon Sep 17 00:00:00 2001 From: Hanbyul Kim Date: Fri, 28 Feb 2020 06:07:51 +0900 Subject: [PATCH] resolving documentation warnings (#833) * add more underline * fix LightningMudule import error * remove unneeded blank line * escape asterisk to fix inline emphasis warning * add PULL_REQUEST_TEMPLATE.md * add __init__.py and import imagenet_example * fix duplicate label * add noindex option to fix duplicate object warnings * remove unexpected indent * refer explicit LightningModule * fix minor bug * refer EarlyStopping explicitly * restore exclude patterns * change the way how to refer class * remove unused import * update badges & drop Travis/Appveyor (#826) * drop Travis * drop Appveyor * update badges * fix missing PyPI images & CI badges (#853) * docs - anchor links (#848) * docs - add links * add desc. * add Greeting action (#843) * add Greeting action * Update greetings.yml Co-authored-by: William Falcon * add pep8speaks (#842) * advanced profiler describe + cleaned up tests (#837) * add py36 compatibility * add test case to capture previous bug * clean up tests * clean up tests * Update lightning_module_template.py * Update lightning.py * respond lint issues * break long line * break more lines * checkout conflicting files from master * shorten url * checkout from upstream/master * remove trailing whitespaces * remove unused import LightningModule * fix sphinx bot warnings * Apply suggestions from code review just to trigger CI * Update .github/workflows/greetings.yml Co-authored-by: Jirka Borovec Co-authored-by: William Falcon Co-authored-by: Jeremy Jordan <13970565+jeremyjordan@users.noreply.github.com> --- .github/workflows/greetings.yml | 4 +-- .pep8speaks.yml | 2 +- docs/source/callbacks.rst | 3 +- docs/source/checkpointing.rst | 2 +- docs/source/conf.py | 2 +- docs/source/index.rst | 1 + docs/source/lightning-module.rst | 1 + docs/source/loggers.rst | 1 + docs/source/optimizers.rst | 6 ++-- docs/source/profiler.rst | 3 +- docs/source/trainer.rst | 1 + pl_examples/__init__.py | 4 +-- .../lightning_module_template.py | 5 ++-- pl_examples/domain_templates/gan.py | 10 ++++--- pl_examples/full_examples/__init__.py | 0 .../full_examples/imagenet/__init__.py | 0 .../imagenet/imagenet_example.py | 12 +++++--- pytorch_lightning/core/lightning.py | 29 ++++++++++--------- pytorch_lightning/core/memory.py | 4 ++- pytorch_lightning/loggers/mlflow.py | 2 +- pytorch_lightning/loggers/neptune.py | 6 ++-- pytorch_lightning/trainer/distrib_parts.py | 12 ++++---- pytorch_lightning/trainer/trainer.py | 7 +++-- pytorch_lightning/trainer/training_loop.py | 2 +- tests/test_profiler.py | 23 +++++++-------- 25 files changed, 80 insertions(+), 62 deletions(-) create mode 100644 pl_examples/full_examples/__init__.py create mode 100644 pl_examples/full_examples/imagenet/__init__.py diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml index 8c6d4546bc..e932768947 100644 --- a/.github/workflows/greetings.yml +++ b/.github/workflows/greetings.yml @@ -10,5 +10,5 @@ jobs: - uses: actions/first-interaction@v1 with: repo-token: ${{ secrets.GITHUB_TOKEN }} - issue-message: 'Hey, thanks for your contribution! Great first issue!' - pr-message: 'Hey, thanks for the input! Please give us a bit of time to review it!' + issue-message: 'Hi! thanks for your contribution!, great first issue!' + pr-message: 'Hey thanks for the input! Please give us a bit of time to review it!' 
diff --git a/.pep8speaks.yml b/.pep8speaks.yml index 154d5258ba..e33d46b159 100644 --- a/.pep8speaks.yml +++ b/.pep8speaks.yml @@ -5,7 +5,7 @@ scanner: linter: pycodestyle # Other option is flake8 pycodestyle: # Same as scanner.linter value. Other option is flake8 - max-line-length: 120 # Default is 79 in PEP 8 + max-line-length: 100 # Default is 79 in PEP 8 ignore: # Errors and warnings to ignore - W504 # line break after binary operator - E402 # module level import not at top of file diff --git a/docs/source/callbacks.rst b/docs/source/callbacks.rst index 433fce6de0..88ebceedbd 100644 --- a/docs/source/callbacks.rst +++ b/docs/source/callbacks.rst @@ -4,6 +4,7 @@ Callbacks ========= .. automodule:: pytorch_lightning.callbacks + :noindex: :exclude-members: _del_model, _save_model, @@ -11,4 +12,4 @@ Callbacks on_train_end, on_epoch_start, check_monitor_top_k, - on_train_start, \ No newline at end of file + on_train_start, diff --git a/docs/source/checkpointing.rst b/docs/source/checkpointing.rst index 318d95dd08..6ec85e8a71 100644 --- a/docs/source/checkpointing.rst +++ b/docs/source/checkpointing.rst @@ -71,7 +71,7 @@ If you want to pick up training from where you left off, you have a few options. trainer = Trainer(logger=logger) trainer.fit(model) -2. A second option is to pass in a path to a checkpoint (see: :ref:`pytorch_lightning.trainer`). +2. A second option is to pass in a path to a checkpoint (see: :ref:`pytorch_lightning.trainer.trainer.Trainer`). .. code-block:: python diff --git a/docs/source/conf.py b/docs/source/conf.py index ddccd79f6b..b77a67da6d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -355,10 +355,10 @@ autoclass_content = 'both' autodoc_default_options = { 'members': None, 'special-members': '__call__', - 'undoc-members': True, # 'exclude-members': '__weakref__', 'show-inheritance': True, 'private-members': True, + 'noindex': True, } # Sphinx will add “permalinks” for each heading and description environment as paragraph signs that diff --git a/docs/source/index.rst b/docs/source/index.rst index 7a04bdff59..541430d90c 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -71,6 +71,7 @@ PyTorch-Lightning Documentation CODE_OF_CONDUCT.md CONTRIBUTING.md BECOMING_A_CORE_CONTRIBUTOR.md + PULL_REQUEST_TEMPLATE.md governance.md Indices and tables diff --git a/docs/source/lightning-module.rst b/docs/source/lightning-module.rst index a05c1de444..3e329bec3a 100644 --- a/docs/source/lightning-module.rst +++ b/docs/source/lightning-module.rst @@ -5,6 +5,7 @@ LightningModule =============== .. automodule:: pytorch_lightning.core + :noindex: :exclude-members: _abc_impl, summarize, diff --git a/docs/source/loggers.rst b/docs/source/loggers.rst index cd9314093d..67cfdcf8b4 100644 --- a/docs/source/loggers.rst +++ b/docs/source/loggers.rst @@ -4,6 +4,7 @@ Loggers =========== .. automodule:: pytorch_lightning.loggers + :noindex: :exclude-members: _abc_impl, _save_model, diff --git a/docs/source/optimizers.rst b/docs/source/optimizers.rst index 2dd4c631c8..1d9e77a7de 100644 --- a/docs/source/optimizers.rst +++ b/docs/source/optimizers.rst @@ -53,9 +53,9 @@ Lightning will call each optimizer sequentially: Step optimizers at arbitrary intervals -------------------------------------- +---------------------------------------- To do more interesting things with your optimizers such as learning rate warm-up or odd scheduling, -override the :meth:`optimizer_step' function. +override the :meth:`optimizer_step` function. 
For example, here step optimizer A every 2 batches and optimizer B every 4 batches @@ -96,4 +96,4 @@ Here we add a learning-rate warm up # update params optimizer.step() - optimizer.zero_grad() \ No newline at end of file + optimizer.zero_grad() diff --git a/docs/source/profiler.rst b/docs/source/profiler.rst index 605a472fd8..115aaf2759 100644 --- a/docs/source/profiler.rst +++ b/docs/source/profiler.rst @@ -3,8 +3,9 @@ Performance and Bottleneck Profiler -=========== +=================================== .. automodule:: pytorch_lightning.profiler + :noindex: :exclude-members: _abc_impl, summarize, diff --git a/docs/source/trainer.rst b/docs/source/trainer.rst index fc4f7ee896..a9f65ba999 100644 --- a/docs/source/trainer.rst +++ b/docs/source/trainer.rst @@ -6,6 +6,7 @@ Trainer .. automodule:: pytorch_lightning.trainer :members: fit, test + :noindex: :exclude-members: run_pretrain_routine, _abc_impl, diff --git a/pl_examples/__init__.py b/pl_examples/__init__.py index 65522ace05..c75a843f6c 100644 --- a/pl_examples/__init__.py +++ b/pl_examples/__init__.py @@ -3,8 +3,8 @@ Template model definition ------------------------- In 99% of cases you want to just copy `one of the examples - `_ - to start a new lightningModule and change the core of what your model is actually trying to do. +`_ +to start a new lightningModule and change the core of what your model is actually trying to do. .. code-block:: bash diff --git a/pl_examples/basic_examples/lightning_module_template.py b/pl_examples/basic_examples/lightning_module_template.py index 4caf251ade..7e59e1eee6 100644 --- a/pl_examples/basic_examples/lightning_module_template.py +++ b/pl_examples/basic_examples/lightning_module_template.py @@ -15,10 +15,11 @@ from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from torchvision.datasets import MNIST -import pytorch_lightning as pl +from pytorch_lightning.core import LightningModule +from pytorch_lightning.core import data_loader -class LightningTemplateModel(pl.LightningModule): +class LightningTemplateModel(LightningModule): """ Sample model to show how to define a template """ diff --git a/pl_examples/domain_templates/gan.py b/pl_examples/domain_templates/gan.py index 78a813e82f..864ace68b9 100644 --- a/pl_examples/domain_templates/gan.py +++ b/pl_examples/domain_templates/gan.py @@ -19,7 +19,9 @@ import torchvision.transforms as transforms from torch.utils.data import DataLoader from torchvision.datasets import MNIST -import pytorch_lightning as pl +from pytorch_lightning.core import LightningModule +from pytorch_lightning.core import data_loader +from pytorch_lightning.trainer import Trainer class Generator(nn.Module): @@ -69,7 +71,7 @@ class Discriminator(nn.Module): return validity -class GAN(pl.LightningModule): +class GAN(LightningModule): def __init__(self, hparams): super(GAN, self).__init__() @@ -165,7 +167,7 @@ class GAN(pl.LightningModule): opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2)) return [opt_g, opt_d], [] - @pl.data_loader + @data_loader def train_dataloader(self): transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]) @@ -193,7 +195,7 @@ def main(hparams): # ------------------------ # 2 INIT TRAINER # ------------------------ - trainer = pl.Trainer() + trainer = Trainer() # ------------------------ # 3 START TRAINING diff --git a/pl_examples/full_examples/__init__.py b/pl_examples/full_examples/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff 
--git a/pl_examples/full_examples/imagenet/__init__.py b/pl_examples/full_examples/imagenet/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pl_examples/full_examples/imagenet/imagenet_example.py b/pl_examples/full_examples/imagenet/imagenet_example.py index ce2fbf6a12..1104339173 100644 --- a/pl_examples/full_examples/imagenet/imagenet_example.py +++ b/pl_examples/full_examples/imagenet/imagenet_example.py @@ -19,6 +19,8 @@ import torchvision.models as models import torchvision.transforms as transforms import pytorch_lightning as pl +from pytorch_lightning.core import LightningModule +from pytorch_lightning.core import data_loader # pull out resnet names from torchvision models MODEL_NAMES = sorted( @@ -27,9 +29,11 @@ MODEL_NAMES = sorted( ) -class ImageNetLightningModel(pl.LightningModule): - +class ImageNetLightningModel(LightningModule): def __init__(self, hparams): + """ + TODO: add docstring here + """ super(ImageNetLightningModel, self).__init__() self.hparams = hparams self.model = models.__dict__[self.hparams.arch](pretrained=self.hparams.pretrained) @@ -128,7 +132,7 @@ class ImageNetLightningModel(pl.LightningModule): scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.1) return [optimizer], [scheduler] - @pl.data_loader + @data_loader def train_dataloader(self): normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], @@ -159,7 +163,7 @@ class ImageNetLightningModel(pl.LightningModule): ) return train_loader - @pl.data_loader + @data_loader def val_dataloader(self): normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], diff --git a/pytorch_lightning/core/lightning.py b/pytorch_lightning/core/lightning.py index fa70d72e7a..00f48d4967 100644 --- a/pytorch_lightning/core/lightning.py +++ b/pytorch_lightning/core/lightning.py @@ -365,7 +365,6 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks): def test_step(self, *args, **kwargs): """return whatever outputs will need to be aggregated in test_end - :param batch: The output of your dataloader. A tensor, tuple or list :param int batch_idx: Integer displaying which batch this is :param int dataloader_idx: Integer displaying which dataloader this is (only if multiple test datasets used) @@ -381,11 +380,13 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks): **OPTIONAL** - If you don't need to test you don't need to implement this method. In this step you'd normally - generate examples or calculate anything of interest such as accuracy. + If you don't need to test you don't need to implement this method. + In this step you'd normally generate examples or + calculate anything of interest such as accuracy. - When the validation_step is called, the model has been put in eval mode and PyTorch gradients - have been disabled. At the end of validation, model goes back to training mode and gradients are enabled. + When the validation_step is called, the model has been put in eval mode + and PyTorch gradients have been disabled. + At the end of validation, model goes back to training mode and gradients are enabled. The dict you return here will be available in the `test_end` method. @@ -578,7 +579,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks): 3. 
On a testing batch, the call goes to model.test_step Args: - model (LightningModule): the LightningModule currently being optimized + model (:class:`.LightningModule`): the LightningModule currently being optimized device_ids (list): the list of GPU ids Return: @@ -692,7 +693,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks): Args: amp (object): pointer to amp library object - model (LightningModule): pointer to current lightningModule + model (:class:`.LightningModule`): pointer to current lightningModule optimizers (list): list of optimizers passed in configure_optimizers() amp_level (str): AMP mode chosen ('O1', 'O2', etc...) @@ -1087,7 +1088,6 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks): @classmethod def load_from_metrics(cls, weights_path, tags_csv, map_location=None): r""" - You should use `load_from_checkpoint` instead! However, if your .ckpt weights don't have the hyperparameters saved, use this method to pass in a .csv with the hparams you'd like to use. These will be converted into a argparse.Namespace @@ -1097,10 +1097,11 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks): weights_path (str): Path to a PyTorch checkpoint tags_csv (str): Path to a .csv with two columns (key, value) as in this - Example:: - key,value - drop_prob,0.2 - batch_size,32 + + Example:: + key,value + drop_prob,0.2 + batch_size,32 map_location (dict | str | torch.device | function): If your checkpoint saved a GPU model and you now load on CPUs @@ -1163,7 +1164,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks): model = MyModel(hparams) - class MyModel(pl.LightningModule): + class MyModel(LightningModule): def __init__(self, hparams): self.learning_rate = hparams.learning_rate @@ -1172,7 +1173,7 @@ class LightningModule(ABC, GradInformation, ModelIO, ModelHooks): # when using a dict model = MyModel({'learning_rate': 0.1}) - class MyModel(pl.LightningModule): + class MyModel(LightningModule): def __init__(self, hparams): self.learning_rate = hparams['learning_rate'] diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py index fbfe79ffb3..967710d11d 100644 --- a/pytorch_lightning/core/memory.py +++ b/pytorch_lightning/core/memory.py @@ -277,6 +277,7 @@ def get_human_readable_count(number): """ Abbreviates an integer number with K, M, B, T for thousands, millions, billions and trillions, respectively. + Examples: 123 -> 123 1234 -> 1 K (one thousand) @@ -284,8 +285,9 @@ def get_human_readable_count(number): 3e9 -> 3 B (three billion) 4e12 -> 4 T (four trillion) 5e15 -> 5,000 T + :param number: a positive integer number - :returns a string formatted according to the pattern described above. + :return: a string formatted according to the pattern described above. """ assert number >= 0 labels = [' ', 'K', 'M', 'B', 'T'] diff --git a/pytorch_lightning/loggers/mlflow.py b/pytorch_lightning/loggers/mlflow.py index 63a5d08509..90888a0142 100644 --- a/pytorch_lightning/loggers/mlflow.py +++ b/pytorch_lightning/loggers/mlflow.py @@ -1,5 +1,5 @@ """ -Log using `mlflow '_ +Log using `mlflow `_ .. code-block:: python diff --git a/pytorch_lightning/loggers/neptune.py b/pytorch_lightning/loggers/neptune.py index e4ebba3f36..03916a1d16 100644 --- a/pytorch_lightning/loggers/neptune.py +++ b/pytorch_lightning/loggers/neptune.py @@ -103,8 +103,10 @@ class NeptuneLogger(LightningLoggerBase): Must be list of str or single str. Uploaded sources are displayed in the experiment’s Source code tab. 
If None is passed, Python file from which experiment was created will be uploaded. Pass empty list ([]) to upload no files. Unix style pathname pattern expansion is supported. - For example, you can pass '*.py' to upload all python source files from the current directory. - For recursion lookup use '**/*.py' (for Python 3.5 and later). For more information see glob library. + For example, you can pass '\*.py' + to upload all python source files from the current directory. + For recursion lookup use '\**/\*.py' (for Python 3.5 and later). + For more information see glob library. params (dict|None): Optional. Parameters of the experiment. After experiment creation params are read-only. Parameters are displayed in the experiment’s Parameters section and each key-value pair can be viewed in experiments view as a column. diff --git a/pytorch_lightning/trainer/distrib_parts.py b/pytorch_lightning/trainer/distrib_parts.py index 29bc8178b8..b994e3726d 100644 --- a/pytorch_lightning/trainer/distrib_parts.py +++ b/pytorch_lightning/trainer/distrib_parts.py @@ -269,14 +269,14 @@ Auto-slurm-job-submission ------------------------- Instead of manually building SLURM scripts, you can use the - `SlurmCluster object `_ - to do this for you. The SlurmCluster can also run a grid search if you pass - in a `HyperOptArgumentParser - `_. +`SlurmCluster object `_ +to do this for you. The SlurmCluster can also run a grid search if you pass +in a `HyperOptArgumentParser +`_. Here is an example where you run a grid search of 9 combinations of hyperparams. - The full examples are `here - `_. +The full examples are +`here `_. .. code-block:: python diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index cc22ebc56b..8b81c3de82 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -153,8 +153,9 @@ class Trainer(TrainerIOMixin, trainer = Trainer(checkpoint_callback=checkpoint_callback) - early_stop_callback: Callback for early stopping. If - set to ``True``, then the default callback monitoring ``'val_loss'`` is created. + early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`): + Callback for early stopping. + If set to ``True``, then the default callback monitoring ``'val_loss'`` is created. Will raise an error if ``'val_loss'`` is not found. If set to ``False``, then early stopping will be disabled. If set to ``None``, then the default callback monitoring ``'val_loss'`` is created. @@ -1164,7 +1165,7 @@ class Trainer(TrainerIOMixin, Separates from fit to make sure you never run on your test set until you want to. Args: - model: The model to test. + model (:class:`.LightningModule`): The model to test. Example:: diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py index ef056f88a6..14b6c89f3a 100644 --- a/pytorch_lightning/trainer/training_loop.py +++ b/pytorch_lightning/trainer/training_loop.py @@ -114,7 +114,7 @@ Packed sequences as inputs When using PackedSequence, do 2 things: 1. return either a padded tensor in dataset or a list of variable length tensors - in the dataloader collate_fn (example above shows the list implementation). +in the dataloader collate_fn (example above shows the list implementation). 2. Pack the sequence in forward or training and validation steps depending on use case. .. 
code-block:: python diff --git a/tests/test_profiler.py b/tests/test_profiler.py index 5fae874e92..a383d3ebe6 100644 --- a/tests/test_profiler.py +++ b/tests/test_profiler.py @@ -4,19 +4,17 @@ import pytest from pytorch_lightning.profiler import Profiler, AdvancedProfiler -PROFILER_OVERHEAD_MAX_TOLERANCE = 0.001 +PROFILER_OVERHEAD_MAX_TOLERANCE = 0.0001 @pytest.fixture def simple_profiler(): - """Creates a new profiler for every test with `simple_profiler` as an arg.""" profiler = Profiler() return profiler @pytest.fixture def advanced_profiler(): - """Creates a new profiler for every test with `advanced_profiler` as an arg.""" profiler = AdvancedProfiler() return profiler @@ -51,13 +49,10 @@ def test_simple_profiler_describe(simple_profiler): simple_profiler.describe() -def _get_total_cprofile_duration(profile): - return sum([x.totaltime for x in profile.getstats()]) - - @pytest.mark.parametrize("action,expected", [("a", [3, 1]), ("b", [2]), ("c", [1])]) def test_advanced_profiler_durations(advanced_profiler, action, expected): - """Ensure the reported durations are reasonably accurate.""" + def _get_total_duration(profile): + return sum([x.totaltime for x in profile.getstats()]) for duration in expected: with advanced_profiler.profile(action): @@ -65,7 +60,7 @@ def test_advanced_profiler_durations(advanced_profiler, action, expected): # different environments have different precision when it comes to time.sleep() # see: https://github.com/PyTorchLightning/pytorch-lightning/issues/796 - recored_total_duration = _get_total_cprofile_duration( + recored_total_duration = _get_total_duration( advanced_profiler.profiled_actions[action] ) expected_total_duration = np.sum(expected) @@ -75,17 +70,21 @@ def test_advanced_profiler_durations(advanced_profiler, action, expected): def test_advanced_profiler_overhead(advanced_profiler, n_iter=5): - """Ensure that the profiler doesn't introduce too much overhead during training.""" + """ + ensure that the profiler doesn't introduce too much overhead during training + """ for _ in range(n_iter): with advanced_profiler.profile("no-op"): pass action_profile = advanced_profiler.profiled_actions["no-op"] - total_duration = _get_total_cprofile_duration(action_profile) + total_duration = sum([x.totaltime for x in action_profile.getstats()]) average_duration = total_duration / n_iter assert average_duration < PROFILER_OVERHEAD_MAX_TOLERANCE def test_advanced_profiler_describe(advanced_profiler): - """Ensure the profiler won't fail when reporting the summary.""" + """ + ensure the profiler won't fail when reporting the summary + """ advanced_profiler.describe()
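
For context on the API these tests exercise, here is a minimal sketch of timing a named action with the ``AdvancedProfiler`` context manager and then reporting the summary, mirroring the fixtures and assertions above. This is an illustrative sketch only, not part of the patch; the ``"data_loading"`` action name and the ``time.sleep`` workload are placeholders.

.. code-block:: python

    import time

    from pytorch_lightning.profiler import AdvancedProfiler

    profiler = AdvancedProfiler()

    # time an arbitrary block of work under a named action;
    # repeated entries for the same action are accumulated
    with profiler.profile("data_loading"):
        time.sleep(0.1)  # stand-in for real work

    with profiler.profile("data_loading"):
        time.sleep(0.1)

    # report a summary of where time was spent for each profiled action
    profiler.describe()

As in ``test_advanced_profiler_durations`` above, the raw cProfile data for an action remains accessible via ``profiler.profiled_actions["data_loading"].getstats()`` if finer-grained inspection is needed.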