formatting flake8 & isort (#5824)

* formatting
* isort
* make
* yapf
* isort
parent 74fb87552a
commit f83cca6107
.yapfignore (55 lines changed)
@@ -20,6 +20,7 @@ pytorch_lightning/core/*
 # TODO
 pytorch_lightning/loggers/*

 # TODO
 pytorch_lightning/plugins/legacy/*

@@ -33,4 +34,56 @@ pytorch_lightning/tuner/*
+
+# TODO
+tests/*
+tests/accelerators/*
+
+# TODO
+tests/base/*
+
+# TODO
+tests/callbacks/*
+
+# TODO
+tests/checkpointing/*
+
+# TODO
+tests/core/*
+
+# TODO
+tests/deprecated_api/*
+
+# TODO
+tests/loggers/*
+
+# TODO
+tests/metrics/*
+
+# TODO
+tests/models/*
+
+# TODO
+tests/overrides/*
+
+# TODO
+tests/plugins/*
+
+# TODO
+tests/trainer/*
+
+# TODO
+tests/tuner/*
+
+# TODO
+tests/utilities/*
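The entries above are .yapfignore glob patterns: any file whose path matches a line is skipped by yapf, which is how the TODO-marked test directories are deferred to later formatting passes. As a rough illustration of what yapf does to the files it is allowed to touch (not part of this commit; assumes yapf is installed, and the repo's actual style knobs live in its own config):

    # Hedged sketch: invoking yapf through its Python API.
    from yapf.yapflib.yapf_api import FormatCode

    messy = "x = {  'a':37,'b':42,\n'c':927}\n"
    formatted, changed = FormatCode(messy)
    print(changed)    # True
    print(formatted)  # x = {'a': 37, 'b': 42, 'c': 927}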
Makefile (31 lines changed)
@@ -5,29 +5,26 @@ export SLURM_LOCALID=0
 # assume you have installed need packages
 export SPHINX_MOCK_REQUIREMENTS=0

-test:
-    pip install -r requirements/devel.txt
-    # install APEX, see https://github.com/NVIDIA/apex#linux
-
-    # use this to run tests
-    rm -rf _ckpt_*
-    rm -rf ./lightning_logs
-    python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests pl_examples -v --flake8
-    python -m coverage report
-    # specific file
-    # python -m coverage run --source pytorch_lightning -m pytest --flake8 --durations=0 -v -k
-
-docs: clean
-    pip install --quiet -r requirements/docs.txt
-    python -m sphinx -b html -W docs/source docs/build
-
 clean:
     # clean all temp runs
     rm -rf $(shell find . -name "mlruns")
     rm -rf $(shell find . -name "lightning_log")
     rm -rf _ckpt_*
     rm -rf .mypy_cache
     rm -rf .pytest_cache
     rm -rf ./docs/build
     rm -rf ./docs/source/generated
     rm -rf ./docs/source/*/generated
     rm -rf ./docs/source/api
+
+test: clean
+    pip install -r requirements/devel.txt
+    # install APEX, see https://github.com/NVIDIA/apex#linux
+
+    # use this to run tests
+    python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests pl_examples -v --flake8
+    python -m coverage report
+
+docs: clean
+    pip install --quiet -r requirements/docs.txt
+    python -m sphinx -b html -W docs/source docs/build
@@ -18,10 +18,7 @@ from abc import ABC
 from copy import deepcopy
 from typing import Callable, Iterable, List, Optional, Tuple, Union

-from torch.utils.data import BatchSampler
-from torch.utils.data import DataLoader
-from torch.utils.data import RandomSampler
-from torch.utils.data import SequentialSampler
+from torch.utils.data import BatchSampler, DataLoader, RandomSampler, SequentialSampler
 from torch.utils.data.distributed import DistributedSampler

 from pytorch_lightning.accelerators.legacy.accelerator import Accelerator

@@ -29,8 +26,7 @@ from pytorch_lightning.core import LightningModule
 from pytorch_lightning.trainer.supporters import CombinedLoader
 from pytorch_lightning.utilities import rank_zero_warn
 from pytorch_lightning.utilities.apply_func import apply_to_collection
-from pytorch_lightning.utilities.data import has_iterable_dataset
-from pytorch_lightning.utilities.data import has_len
+from pytorch_lightning.utilities.data import has_iterable_dataset, has_len
 from pytorch_lightning.utilities.debugging import InternalDebugger
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.model_helpers import is_overridden
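The merged from-imports in these hunks are isort's standard combining behavior. A minimal sketch of the same rewrite via the isort 5 API (illustrative, not from the commit; line_length=120 is an assumption about the repo's config):

    # Hedged sketch: isort merging repeated from-imports into one line.
    import isort

    messy = (
        "from torch.utils.data import BatchSampler\n"
        "from torch.utils.data import DataLoader\n"
        "from torch.utils.data import RandomSampler\n"
        "from torch.utils.data import SequentialSampler\n"
    )
    print(isort.code(messy, line_length=120))
    # from torch.utils.data import BatchSampler, DataLoader, RandomSampler, SequentialSampler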
@@ -81,9 +81,7 @@ format = pylint
 ignore =
     E731  # do not assign a lambda expression, use a def
     W503  # line break before binary operator
-    W504  # line break after binary operator
-    # TODO: delete these next 2 because we no longer use black
-    E203  # whitespace before ':'. Opposite convention enforced by black
+    # because of YAPF - till https://github.com/google/yapf/issues/897 is resolved
     E231  # missing whitespace after ',', ';', or ':'; for black

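For readers unfamiliar with the silenced codes, a small illustration (not from the commit) of what each would normally flag:

    # Illustrative only: snippets that trip the ignored flake8 rules.
    inc = lambda x: x + 1  # E731: prefer a def over an assigned lambda

    total = (1
             + 2)          # W503: line break before a binary operator
                           # (W504 is the break-after variant)

    pair = (1,2)           # E231: missing whitespace after ','; yapf can emit
                           # this in rare cases (google/yapf#897), hence the ignore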
@@ -135,7 +135,7 @@ def train_with_pruning_callback(
     pruning_fn = pruning_list[rand_idx]

     model_pruning_args = {
-        "pruning_fn": custom_pruning_fn if use_custom_pruning_fn else pruning_fn ,
+        "pruning_fn": custom_pruning_fn if use_custom_pruning_fn else pruning_fn,
         "parameters_to_prune": parameters_to_prune,
         "amount": 0.3,
         "use_global_unstructured": use_global_unstructured,
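These kwargs are unpacked into Lightning's pruning callback elsewhere in the test. A hedged sketch of that pattern (assumes the pytorch_lightning.callbacks.ModelPruning API of this era; the exact signature may differ):

    # Hedged sketch: feeding a kwargs dict like model_pruning_args to the callback.
    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import ModelPruning

    model_pruning_args = {
        "pruning_fn": "l1_unstructured",  # name of a torch.nn.utils.prune function
        "amount": 0.3,                    # prune 30% of the selected weights
        "use_global_unstructured": True,
    }
    trainer = Trainer(callbacks=[ModelPruning(**model_pruning_args)], fast_dev_run=True)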
@@ -47,6 +47,7 @@ def tmpdir_server(tmpdir):
     import os

     class Handler(SimpleHTTPRequestHandler):

         def translate_path(self, path):
             # get the path from cwd
             path = super().translate_path(path)
@@ -121,10 +121,8 @@ def test_early_stopping_cpu_model(tmpdir):
     model.unfreeze()


-@pytest.mark.skipif(platform.system() == "Windows",
-                    reason="Distributed training is not supported on Windows")
-@pytest.mark.skipif((platform.system() == "Darwin" and
-                     LooseVersion(torch.__version__) < LooseVersion("1.3.0")),
+@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@pytest.mark.skipif((platform.system() == "Darwin" and LooseVersion(torch.__version__) < LooseVersion("1.3.0")),
                     reason="Distributed training is not supported on MacOS before Torch 1.3.0")
 def test_multi_cpu_model_ddp(tmpdir):
     """Make sure DDP works."""
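Incidentally, the version gate in the second decorator depends on LooseVersion comparing versions component-wise rather than lexicographically (illustration, not from the commit):

    # Why the guard uses LooseVersion instead of comparing plain strings.
    from distutils.version import LooseVersion

    print(LooseVersion("1.2.0") < LooseVersion("1.3.0"))   # True  -> test skipped on old torch
    print("1.10.0" < "1.3.0")                              # True  (lexicographic, wrong)
    print(LooseVersion("1.10.0") < LooseVersion("1.3.0"))  # False (component-wise, right)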
@@ -18,8 +18,7 @@ from unittest.mock import MagicMock
 import pytest
 import torch

-
-from pytorch_lightning import Trainer, Callback
+from pytorch_lightning import Callback, Trainer
 from pytorch_lightning.accelerators.legacy.gpu_accelerator import GPUAccelerator
 from pytorch_lightning.trainer.states import TrainerState
 from tests.base import BoringModel, EvalModelTemplate, RandomDataset
@@ -53,7 +53,7 @@ def advanced_profiler(tmpdir):
 @pytest.mark.parametrize(["action", "expected"], [
     pytest.param("a", [3, 1]),
     pytest.param("b", [2]),
-    pytest.param("c", [1])
+    pytest.param("c", [1]),
 ])
 def test_simple_profiler_durations(simple_profiler, action, expected):
     """Ensure the reported durations are reasonably accurate."""

@@ -64,15 +64,13 @@ def test_simple_profiler_durations(simple_profiler, action, expected):

     # different environments have different precision when it comes to time.sleep()
     # see: https://github.com/PyTorchLightning/pytorch-lightning/issues/796
-    np.testing.assert_allclose(
-        simple_profiler.recorded_durations[action], expected, rtol=0.2
-    )
+    np.testing.assert_allclose(simple_profiler.recorded_durations[action], expected, rtol=0.2)


 @pytest.mark.parametrize(["action", "expected"], [
     pytest.param("a", [3, 1]),
     pytest.param("b", [2]),
-    pytest.param("c", [1])
+    pytest.param("c", [1]),
 ])
 def test_simple_profiler_iterable_durations(simple_profiler, action, expected):
     """Ensure the reported durations are reasonably accurate."""

@@ -82,9 +80,7 @@ def test_simple_profiler_iterable_durations(simple_profiler, action, expected):
         pass

     # we exclude the last item in the recorded durations since that's when StopIteration is raised
-    np.testing.assert_allclose(
-        simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2
-    )
+    np.testing.assert_allclose(simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2)


 def test_simple_profiler_overhead(simple_profiler, n_iter=5):

@@ -122,7 +118,7 @@ def test_simple_profiler_value_errors(simple_profiler):
 @pytest.mark.parametrize(["action", "expected"], [
     pytest.param("a", [3, 1]),
     pytest.param("b", [2]),
-    pytest.param("c", [1])
+    pytest.param("c", [1]),
 ])
 def test_advanced_profiler_durations(advanced_profiler, action, expected):

@@ -132,19 +128,15 @@ def test_advanced_profiler_durations(advanced_profiler, action, expected):

     # different environments have different precision when it comes to time.sleep()
     # see: https://github.com/PyTorchLightning/pytorch-lightning/issues/796
-    recored_total_duration = _get_python_cprofile_total_duration(
-        advanced_profiler.profiled_actions[action]
-    )
+    recored_total_duration = _get_python_cprofile_total_duration(advanced_profiler.profiled_actions[action])
     expected_total_duration = np.sum(expected)
-    np.testing.assert_allclose(
-        recored_total_duration, expected_total_duration, rtol=0.2
-    )
+    np.testing.assert_allclose(recored_total_duration, expected_total_duration, rtol=0.2)


 @pytest.mark.parametrize(["action", "expected"], [
     pytest.param("a", [3, 1]),
     pytest.param("b", [2]),
-    pytest.param("c", [1])
+    pytest.param("c", [1]),
 ])
 def test_advanced_profiler_iterable_durations(advanced_profiler, action, expected):
     """Ensure the reported durations are reasonably accurate."""

@@ -153,13 +145,9 @@ def test_advanced_profiler_iterable_durations(advanced_profiler, action, expected):
     for _ in advanced_profiler.profile_iterable(iterable, action):
         pass

-    recored_total_duration = _get_python_cprofile_total_duration(
-        advanced_profiler.profiled_actions[action]
-    )
+    recored_total_duration = _get_python_cprofile_total_duration(advanced_profiler.profiled_actions[action])
     expected_total_duration = np.sum(expected)
-    np.testing.assert_allclose(
-        recored_total_duration, expected_total_duration, rtol=0.2
-    )
+    np.testing.assert_allclose(recored_total_duration, expected_total_duration, rtol=0.2)


 def test_advanced_profiler_overhead(advanced_profiler, n_iter=5):
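The collapsed assert_allclose calls in this file are pure yapf reflow; the tolerance semantics are unchanged. For reference (illustration, not from the commit), rtol=0.2 accepts up to 20% relative error, which absorbs time.sleep() jitter across environments:

    # assert_allclose passes iff |actual - desired| <= atol + rtol * |desired|,
    # and atol defaults to 0 in numpy's assert_allclose.
    import numpy as np

    np.testing.assert_allclose([1.15], [1.0], rtol=0.2)      # ok: 15% relative error
    try:
        np.testing.assert_allclose([1.25], [1.0], rtol=0.2)  # 25% error -> raises
    except AssertionError:
        print("outside tolerance")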
@@ -2,7 +2,7 @@ from unittest.mock import Mock

 import torch

-from pytorch_lightning import Trainer, Callback
+from pytorch_lightning import Callback, Trainer
 from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint, ProgressBar
 from tests.base import BoringModel

@@ -28,8 +28,7 @@ from pytorch_lightning.trainer import Trainer
 from pytorch_lightning.trainer.connectors.logger_connector.callback_hook_validator import CallbackHookNameValidator
 from pytorch_lightning.trainer.connectors.logger_connector.metrics_holder import MetricsHolder
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from tests.base.boring_model import BoringModel
-from tests.base.boring_model import RandomDataset
+from tests.base.boring_model import BoringModel, RandomDataset


 def decorator_with_arguments(fx_name: str = '', hook_fx_name: str = None) -> Callable:
@@ -899,7 +899,7 @@ def test_step_with_optimizer_closure_with_different_frequencies(mock_sgd_step, m

         # update discriminator every 4 baches
         # therefore, no gradient accumulation for discriminator
-        if batch_idx % 4 == 0 :
+        if batch_idx % 4 == 0:
             # Note: Set make_optimizer_step to True or it will use by default
             # Trainer(accumulate_grad_batches=x)
             opt_dis.step(closure=dis_closure, make_optimizer_step=True)

@@ -1004,7 +1004,7 @@ def test_step_with_optimizer_closure_with_different_frequencies_ddp(mock_sgd_ste

         # update discriminator every 4 baches
         # therefore, no gradient accumulation for discriminator
-        if batch_idx % 4 == 0 :
+        if batch_idx % 4 == 0:
             # Note: Set make_optimizer_step to True or it will use by default
             # Trainer(accumulate_grad_batches=x)
             opt_dis.step(closure=dis_closure, make_optimizer_step=True, optim='adam')
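Only the space before the colon changed in these two hunks; the batch_idx % 4 gate itself is the usual update-frequency idiom (illustration, not from the commit):

    # The discriminator-style optimizer steps only on batches 0, 4, 8, ...
    stepped = [i for i in range(12) if i % 4 == 0]
    print(stepped)  # [0, 4, 8]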
@@ -16,13 +16,11 @@ import os
 import pytest
 import torch
 from torch.utils.data import DataLoader
-from torch.utils.data.sampler import BatchSampler
-from torch.utils.data.sampler import SequentialSampler
+from torch.utils.data.sampler import BatchSampler, SequentialSampler

 from pytorch_lightning import Trainer
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from tests.base import BoringModel
-from tests.base import RandomDataset
+from tests.base import BoringModel, RandomDataset


 class IndexedRandomDataset(RandomDataset):

@@ -88,7 +86,7 @@ def check_replace_distrubuted_sampler(
     limit_test_batches = 2
     trainer_args = {
         "default_root_dir": tmpdir,
-        "limit_test_batches" : limit_test_batches,
+        "limit_test_batches": limit_test_batches,
         "accelerator": accelerator,
     }
@@ -17,8 +17,7 @@ from unittest.mock import patch
 import pytest

 import pytorch_lightning.utilities.xla_device_utils as xla_utils
-from pytorch_lightning.utilities import _TPU_AVAILABLE
-from pytorch_lightning.utilities import _XLA_AVAILABLE
+from pytorch_lightning.utilities import _TPU_AVAILABLE, _XLA_AVAILABLE
 from tests.base.develop_utils import pl_multi_process_test