From f83cca61079dd505d522fd157cf12b8df791672f Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Sat, 6 Feb 2021 00:33:12 +0100
Subject: [PATCH] formatting flake8 & isort (#5824)

* formatting
* isort
* make
* yapf
* isort
---
 .yapfignore                                   | 55 ++++++++++++++++++-
 Makefile                                      | 31 +++++------
 pytorch_lightning/trainer/data_loading.py     |  8 +--
 setup.cfg                                     |  4 +-
 tests/callbacks/test_pruning.py               |  2 +-
 tests/conftest.py                             |  1 +
 tests/models/test_cpu.py                      |  6 +-
 tests/models/test_hooks.py                    |  3 +-
 tests/test_profiler.py                        | 32 ++++-------
 .../connectors/test_callback_connector.py     |  2 +-
 .../trainer/logging_/test_logger_connector.py |  3 +-
 .../optimization/test_manual_optimization.py  |  4 +-
 tests/trainer/test_data_loading.py            |  8 +--
 tests/utilities/test_xla_device_utils.py      |  3 +-
 14 files changed, 94 insertions(+), 68 deletions(-)

diff --git a/.yapfignore b/.yapfignore
index 221d7db8d0..391069aa26 100644
--- a/.yapfignore
+++ b/.yapfignore
@@ -20,6 +20,7 @@ pytorch_lightning/core/*

 # TODO
 pytorch_lightning/loggers/*
+
 # TODO
 pytorch_lightning/plugins/legacy/*

@@ -33,4 +34,56 @@ pytorch_lightning/tuner/*


 # TODO
-tests/*
+tests/accelerators/*
+
+
+# TODO
+tests/base/*
+
+
+# TODO
+tests/callbacks/*
+
+
+# TODO
+tests/checkpointing/*
+
+
+# TODO
+tests/core/*
+
+
+# TODO
+tests/deprecated_api/*
+
+
+# TODO
+tests/loggers/*
+
+
+# TODO
+tests/metrics/*
+
+
+# TODO
+tests/models/*
+
+
+# TODO
+tests/overrides/*
+
+
+# TODO
+tests/plugins/*
+
+
+# TODO
+tests/trainer/*
+
+
+# TODO
+tests/tuner/*
+
+
+# TODO
+tests/utilities/*
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 142d03fcfb..71c31454f5 100644
--- a/Makefile
+++ b/Makefile
@@ -5,29 +5,26 @@ export SLURM_LOCALID=0
 # assume you have installed need packages
 export SPHINX_MOCK_REQUIREMENTS=0

-test:
-	pip install -r requirements/devel.txt
-	# install APEX, see https://github.com/NVIDIA/apex#linux
-
-	# use this to run tests
-	rm -rf _ckpt_*
-	rm -rf ./lightning_logs
-	python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests pl_examples -v --flake8
-	python -m coverage report
-
-	# specific file
-	# python -m coverage run --source pytorch_lightning -m pytest --flake8 --durations=0 -v -k
-
-docs: clean
-	pip install --quiet -r requirements/docs.txt
-	python -m sphinx -b html -W docs/source docs/build
-
 clean:
 	# clean all temp runs
 	rm -rf $(shell find . -name "mlruns")
+	rm -rf $(shell find . -name "lightning_log")
+	rm -rf _ckpt_*
 	rm -rf .mypy_cache
 	rm -rf .pytest_cache
 	rm -rf ./docs/build
 	rm -rf ./docs/source/generated
 	rm -rf ./docs/source/*/generated
 	rm -rf ./docs/source/api
+
+test: clean
+	pip install -r requirements/devel.txt
+	# install APEX, see https://github.com/NVIDIA/apex#linux
+
+	# use this to run tests
+	python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests pl_examples -v --flake8
+	python -m coverage report
+
+docs: clean
+	pip install --quiet -r requirements/docs.txt
+	python -m sphinx -b html -W docs/source docs/build
diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py
index c77a494214..9586a18f59 100644
--- a/pytorch_lightning/trainer/data_loading.py
+++ b/pytorch_lightning/trainer/data_loading.py
@@ -18,10 +18,7 @@ from abc import ABC
 from copy import deepcopy
 from typing import Callable, Iterable, List, Optional, Tuple, Union

-from torch.utils.data import BatchSampler
-from torch.utils.data import DataLoader
-from torch.utils.data import RandomSampler
-from torch.utils.data import SequentialSampler
+from torch.utils.data import BatchSampler, DataLoader, RandomSampler, SequentialSampler
 from torch.utils.data.distributed import DistributedSampler

 from pytorch_lightning.accelerators.legacy.accelerator import Accelerator
@@ -29,8 +26,7 @@ from pytorch_lightning.core import LightningModule
 from pytorch_lightning.trainer.supporters import CombinedLoader
 from pytorch_lightning.utilities import rank_zero_warn
 from pytorch_lightning.utilities.apply_func import apply_to_collection
-from pytorch_lightning.utilities.data import has_iterable_dataset
-from pytorch_lightning.utilities.data import has_len
+from pytorch_lightning.utilities.data import has_iterable_dataset, has_len
 from pytorch_lightning.utilities.debugging import InternalDebugger
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.model_helpers import is_overridden
diff --git a/setup.cfg b/setup.cfg
index a9ba0bc2ac..8b02b462fa 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -81,9 +81,7 @@ format = pylint
 ignore =
     E731  # do not assign a lambda expression, use a def
     W503  # line break before binary operator
-    W504  # line break after binary operator
-    # TODO: delete these next 2 because we no longer use black
-    E203  # whitespace before ':'. Opposite convention enforced by black
+    # because of YAPF - till https://github.com/google/yapf/issues/897 is resolved
    E231  # missing whitespace after ',', ';', or ':'; for black


diff --git a/tests/callbacks/test_pruning.py b/tests/callbacks/test_pruning.py
index 37c8fb4647..24a5dc64d3 100644
--- a/tests/callbacks/test_pruning.py
+++ b/tests/callbacks/test_pruning.py
@@ -135,7 +135,7 @@ def train_with_pruning_callback(
         pruning_fn = pruning_list[rand_idx]

     model_pruning_args = {
-        "pruning_fn": custom_pruning_fn if use_custom_pruning_fn else pruning_fn ,
+        "pruning_fn": custom_pruning_fn if use_custom_pruning_fn else pruning_fn,
         "parameters_to_prune": parameters_to_prune,
         "amount": 0.3,
         "use_global_unstructured": use_global_unstructured,
diff --git a/tests/conftest.py b/tests/conftest.py
index 07188fed4d..8dd8fdd251 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -47,6 +47,7 @@ def tmpdir_server(tmpdir):
         import os

         class Handler(SimpleHTTPRequestHandler):
+
             def translate_path(self, path):
                 # get the path from cwd
                 path = super().translate_path(path)
diff --git a/tests/models/test_cpu.py b/tests/models/test_cpu.py
index 5f69c55a08..fe85673486 100644
--- a/tests/models/test_cpu.py
+++ b/tests/models/test_cpu.py
@@ -121,10 +121,8 @@ def test_early_stopping_cpu_model(tmpdir):
     model.unfreeze()


-@pytest.mark.skipif(platform.system() == "Windows",
-                    reason="Distributed training is not supported on Windows")
-@pytest.mark.skipif((platform.system() == "Darwin" and
-                     LooseVersion(torch.__version__) < LooseVersion("1.3.0")),
+@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@pytest.mark.skipif((platform.system() == "Darwin" and LooseVersion(torch.__version__) < LooseVersion("1.3.0")),
                     reason="Distributed training is not supported on MacOS before Torch 1.3.0")
 def test_multi_cpu_model_ddp(tmpdir):
     """Make sure DDP works."""
diff --git a/tests/models/test_hooks.py b/tests/models/test_hooks.py
index b0a69eaeda..793586bcbb 100644
--- a/tests/models/test_hooks.py
+++ b/tests/models/test_hooks.py
@@ -18,8 +18,7 @@ from unittest.mock import MagicMock

 import pytest
 import torch
-
-from pytorch_lightning import Trainer, Callback
+from pytorch_lightning import Callback, Trainer
 from pytorch_lightning.accelerators.legacy.gpu_accelerator import GPUAccelerator
 from pytorch_lightning.trainer.states import TrainerState
 from tests.base import BoringModel, EvalModelTemplate, RandomDataset
diff --git a/tests/test_profiler.py b/tests/test_profiler.py
index 4728b11582..701b2a4bfb 100644
--- a/tests/test_profiler.py
+++ b/tests/test_profiler.py
@@ -53,7 +53,7 @@ def advanced_profiler(tmpdir):
 @pytest.mark.parametrize(["action", "expected"], [
     pytest.param("a", [3, 1]),
     pytest.param("b", [2]),
-    pytest.param("c", [1])
+    pytest.param("c", [1]),
 ])
 def test_simple_profiler_durations(simple_profiler, action, expected):
     """Ensure the reported durations are reasonably accurate."""
@@ -64,15 +64,13 @@ def test_simple_profiler_durations(simple_profiler, action, expected):

     # different environments have different precision when it comes to time.sleep()
     # see: https://github.com/PyTorchLightning/pytorch-lightning/issues/796
-    np.testing.assert_allclose(
-        simple_profiler.recorded_durations[action], expected, rtol=0.2
-    )
+    np.testing.assert_allclose(simple_profiler.recorded_durations[action], expected, rtol=0.2)


 @pytest.mark.parametrize(["action", "expected"], [
     pytest.param("a", [3, 1]),
     pytest.param("b", [2]),
-    pytest.param("c", [1])
+    pytest.param("c", [1]),
 ])
 def test_simple_profiler_iterable_durations(simple_profiler, action, expected):
     """Ensure the reported durations are reasonably accurate."""
@@ -82,9 +80,7 @@ def test_simple_profiler_iterable_durations(simple_profiler, action, expected):
         pass

     # we exclude the last item in the recorded durations since that's when StopIteration is raised
-    np.testing.assert_allclose(
-        simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2
-    )
+    np.testing.assert_allclose(simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2)


 def test_simple_profiler_overhead(simple_profiler, n_iter=5):
@@ -122,7 +118,7 @@ def test_simple_profiler_value_errors(simple_profiler):
 @pytest.mark.parametrize(["action", "expected"], [
     pytest.param("a", [3, 1]),
     pytest.param("b", [2]),
-    pytest.param("c", [1])
+    pytest.param("c", [1]),
 ])
 def test_advanced_profiler_durations(advanced_profiler, action, expected):

@@ -132,19 +128,15 @@ def test_advanced_profiler_durations(advanced_profiler, action, expected):

     # different environments have different precision when it comes to time.sleep()
     # see: https://github.com/PyTorchLightning/pytorch-lightning/issues/796
-    recored_total_duration = _get_python_cprofile_total_duration(
-        advanced_profiler.profiled_actions[action]
-    )
+    recored_total_duration = _get_python_cprofile_total_duration(advanced_profiler.profiled_actions[action])
     expected_total_duration = np.sum(expected)
-    np.testing.assert_allclose(
-        recored_total_duration, expected_total_duration, rtol=0.2
-    )
+    np.testing.assert_allclose(recored_total_duration, expected_total_duration, rtol=0.2)


 @pytest.mark.parametrize(["action", "expected"], [
     pytest.param("a", [3, 1]),
     pytest.param("b", [2]),
-    pytest.param("c", [1])
+    pytest.param("c", [1]),
 ])
 def test_advanced_profiler_iterable_durations(advanced_profiler, action, expected):
     """Ensure the reported durations are reasonably accurate."""
@@ -153,13 +145,9 @@ def test_advanced_profiler_iterable_durations(advanced_profiler, action, expecte
     for _ in advanced_profiler.profile_iterable(iterable, action):
         pass

-    recored_total_duration = _get_python_cprofile_total_duration(
-        advanced_profiler.profiled_actions[action]
-    )
+    recored_total_duration = _get_python_cprofile_total_duration(advanced_profiler.profiled_actions[action])
     expected_total_duration = np.sum(expected)
-    np.testing.assert_allclose(
-        recored_total_duration, expected_total_duration, rtol=0.2
-    )
+    np.testing.assert_allclose(recored_total_duration, expected_total_duration, rtol=0.2)


 def test_advanced_profiler_overhead(advanced_profiler, n_iter=5):
diff --git a/tests/trainer/connectors/test_callback_connector.py b/tests/trainer/connectors/test_callback_connector.py
index 1ea7117b17..35fa8362aa 100644
--- a/tests/trainer/connectors/test_callback_connector.py
+++ b/tests/trainer/connectors/test_callback_connector.py
@@ -2,7 +2,7 @@ from unittest.mock import Mock

 import torch

-from pytorch_lightning import Trainer, Callback
+from pytorch_lightning import Callback, Trainer
 from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, ProgressBar
 from tests.base import BoringModel

diff --git a/tests/trainer/logging_/test_logger_connector.py b/tests/trainer/logging_/test_logger_connector.py
index ffdaea8c52..04512cf9db 100644
--- a/tests/trainer/logging_/test_logger_connector.py
+++ b/tests/trainer/logging_/test_logger_connector.py
@@ -28,8 +28,7 @@ from pytorch_lightning.trainer import Trainer
 from pytorch_lightning.trainer.connectors.logger_connector.callback_hook_validator import CallbackHookNameValidator
 from pytorch_lightning.trainer.connectors.logger_connector.metrics_holder import MetricsHolder
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from tests.base.boring_model import BoringModel
-from tests.base.boring_model import RandomDataset
+from tests.base.boring_model import BoringModel, RandomDataset


 def decorator_with_arguments(fx_name: str = '', hook_fx_name: str = None) -> Callable:
diff --git a/tests/trainer/optimization/test_manual_optimization.py b/tests/trainer/optimization/test_manual_optimization.py
index 1a7d99564b..011ec69ab7 100644
--- a/tests/trainer/optimization/test_manual_optimization.py
+++ b/tests/trainer/optimization/test_manual_optimization.py
@@ -899,7 +899,7 @@ def test_step_with_optimizer_closure_with_different_frequencies(mock_sgd_step, m

             # update discriminator every 4 baches
             # therefore, no gradient accumulation for discriminator
-            if batch_idx % 4 == 0 :
+            if batch_idx % 4 == 0:
                 # Note: Set make_optimizer_step to True or it will use by default
                 # Trainer(accumulate_grad_batches=x)
                 opt_dis.step(closure=dis_closure, make_optimizer_step=True)
@@ -1004,7 +1004,7 @@ def test_step_with_optimizer_closure_with_different_frequencies_ddp(mock_sgd_ste

             # update discriminator every 4 baches
             # therefore, no gradient accumulation for discriminator
-            if batch_idx % 4 == 0 :
+            if batch_idx % 4 == 0:
                 # Note: Set make_optimizer_step to True or it will use by default
                 # Trainer(accumulate_grad_batches=x)
                 opt_dis.step(closure=dis_closure, make_optimizer_step=True, optim='adam')
diff --git a/tests/trainer/test_data_loading.py b/tests/trainer/test_data_loading.py
index f59c84ff62..4a5f08e670 100644
--- a/tests/trainer/test_data_loading.py
+++ b/tests/trainer/test_data_loading.py
@@ -16,13 +16,11 @@ import os
 import pytest
 import torch
 from torch.utils.data import DataLoader
-from torch.utils.data.sampler import BatchSampler
-from torch.utils.data.sampler import SequentialSampler
+from torch.utils.data.sampler import BatchSampler, SequentialSampler

 from pytorch_lightning import Trainer
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from tests.base import BoringModel
-from tests.base import RandomDataset
+from tests.base import BoringModel, RandomDataset


 class IndexedRandomDataset(RandomDataset):
@@ -88,7 +86,7 @@ def check_replace_distrubuted_sampler(
     limit_test_batches = 2
     trainer_args = {
         "default_root_dir": tmpdir,
-        "limit_test_batches" : limit_test_batches,
+        "limit_test_batches": limit_test_batches,
         "accelerator": accelerator,
     }

diff --git a/tests/utilities/test_xla_device_utils.py b/tests/utilities/test_xla_device_utils.py
index 471792da9c..438360f991 100644
--- a/tests/utilities/test_xla_device_utils.py
+++ b/tests/utilities/test_xla_device_utils.py
@@ -17,8 +17,7 @@ from unittest.mock import patch
 import pytest

 import pytorch_lightning.utilities.xla_device_utils as xla_utils
-from pytorch_lightning.utilities import _TPU_AVAILABLE
-from pytorch_lightning.utilities import _XLA_AVAILABLE
+from pytorch_lightning.utilities import _TPU_AVAILABLE, _XLA_AVAILABLE
 from tests.base.develop_utils import pl_multi_process_test

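
Note: the patch above is purely mechanical formatting, driven by the tooling it
configures (.yapfignore, the [flake8] ignore list in setup.cfg, and the Makefile
targets). A minimal sketch of how a pass like it can be reproduced locally; the
exact invocations below are illustrative assumptions, not commands taken from
this patch or its Makefile:

    # merge duplicated "from x import ..." lines, as in data_loading.py above
    python -m isort pytorch_lightning tests

    # reflow line wrapping in place; yapf skips every path listed in .yapfignore
    python -m yapf --in-place --recursive pytorch_lightning tests

    # lint against the [flake8] ignore list kept in setup.cfg (E731, W503, E231)
    python -m flake8 .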