From f8c058215ff243716e908aed87f42281d9a45c58 Mon Sep 17 00:00:00 2001 From: Jirka Borovec Date: Fri, 7 Aug 2020 23:22:05 +0200 Subject: [PATCH] simplify tests & cleaning (#2588) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * simplify * tmpdir * revert * clean * accel * types * test * edit test acc Co-authored-by: Adrian Wälchli * Update test acc Co-authored-by: Adrian Wälchli --- .pyrightconfig.json | 2 +- .run_local_tests.sh | 5 ----- docs/source/conf.py | 2 +- .../accelerator_backends/__init__.py | 7 ------- pytorch_lightning/accelerators/__init__.py | 7 +++++++ .../cpu_backend.py | 0 .../ddp2_backend.py | 0 .../ddp_backend.py | 0 .../ddp_spawn_backend.py | 0 .../dp_backend.py | 0 .../gpu_backend.py | 0 .../tpu_backend.py | 0 pytorch_lightning/trainer/trainer.py | 2 +- tests/core/test_datamodules.py | 2 +- .../trainer/test_trainer_steps_dict_return.py | 18 ++++++++++++------ 15 files changed, 23 insertions(+), 22 deletions(-) delete mode 100644 pytorch_lightning/accelerator_backends/__init__.py create mode 100644 pytorch_lightning/accelerators/__init__.py rename pytorch_lightning/{accelerator_backends => accelerators}/cpu_backend.py (100%) rename pytorch_lightning/{accelerator_backends => accelerators}/ddp2_backend.py (100%) rename pytorch_lightning/{accelerator_backends => accelerators}/ddp_backend.py (100%) rename pytorch_lightning/{accelerator_backends => accelerators}/ddp_spawn_backend.py (100%) rename pytorch_lightning/{accelerator_backends => accelerators}/dp_backend.py (100%) rename pytorch_lightning/{accelerator_backends => accelerators}/gpu_backend.py (100%) rename pytorch_lightning/{accelerator_backends => accelerators}/tpu_backend.py (100%) diff --git a/.pyrightconfig.json b/.pyrightconfig.json index 5f5c753023..97000d69dd 100644 --- a/.pyrightconfig.json +++ b/.pyrightconfig.json @@ -7,7 +7,7 @@ "pytorch_lightning/__init__.py", "pytorch_lightning/callbacks", "pytorch_lightning/core", - 
"pytorch_lightning/accelerator_backends", + "pytorch_lightning/accelerators", "pytorch_lightning/loggers", "pytorch_lightning/logging", "pytorch_lightning/metrics", diff --git a/.run_local_tests.sh b/.run_local_tests.sh index c0c030a78e..2ff2c56b76 100644 --- a/.run_local_tests.sh +++ b/.run_local_tests.sh @@ -6,11 +6,6 @@ export SLURM_LOCALID=0 # use this to run tests rm -rf _ckpt_* -rm -rf ./tests/save_dir* -rm -rf ./tests/mlruns_* -rm -rf ./tests/cometruns* -rm -rf ./tests/wandb* -rm -rf ./tests/tests/* rm -rf ./lightning_logs python -m coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --flake8 python -m coverage report -m diff --git a/docs/source/conf.py b/docs/source/conf.py index d0670254b1..f62b540720 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -138,7 +138,7 @@ language = None exclude_patterns = [ 'api/pytorch_lightning.rst', 'api/pl_examples.*', - 'api/pytorch_lightning.accelerator_backends.*', + 'api/pytorch_lightning.accelerators.*', 'api/modules.rst', 'PULL_REQUEST_TEMPLATE.md', ] diff --git a/pytorch_lightning/accelerator_backends/__init__.py b/pytorch_lightning/accelerator_backends/__init__.py deleted file mode 100644 index d56ca53d06..0000000000 --- a/pytorch_lightning/accelerator_backends/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from pytorch_lightning.accelerator_backends.gpu_backend import GPUBackend -from pytorch_lightning.accelerator_backends.tpu_backend import TPUBackend -from pytorch_lightning.accelerator_backends.dp_backend import DataParallelBackend -from pytorch_lightning.accelerator_backends.ddp_spawn_backend import DDPSpawnBackend -from pytorch_lightning.accelerator_backends.cpu_backend import CPUBackend -from pytorch_lightning.accelerator_backends.ddp_backend import DDPBackend -from pytorch_lightning.accelerator_backends.ddp2_backend import DDP2Backend diff --git a/pytorch_lightning/accelerators/__init__.py b/pytorch_lightning/accelerators/__init__.py new file mode 100644 index 
0000000000..8230dc4f12 --- /dev/null +++ b/pytorch_lightning/accelerators/__init__.py @@ -0,0 +1,7 @@ +from pytorch_lightning.accelerators.gpu_backend import GPUBackend +from pytorch_lightning.accelerators.tpu_backend import TPUBackend +from pytorch_lightning.accelerators.dp_backend import DataParallelBackend +from pytorch_lightning.accelerators.ddp_spawn_backend import DDPSpawnBackend +from pytorch_lightning.accelerators.cpu_backend import CPUBackend +from pytorch_lightning.accelerators.ddp_backend import DDPBackend +from pytorch_lightning.accelerators.ddp2_backend import DDP2Backend diff --git a/pytorch_lightning/accelerator_backends/cpu_backend.py b/pytorch_lightning/accelerators/cpu_backend.py similarity index 100% rename from pytorch_lightning/accelerator_backends/cpu_backend.py rename to pytorch_lightning/accelerators/cpu_backend.py diff --git a/pytorch_lightning/accelerator_backends/ddp2_backend.py b/pytorch_lightning/accelerators/ddp2_backend.py similarity index 100% rename from pytorch_lightning/accelerator_backends/ddp2_backend.py rename to pytorch_lightning/accelerators/ddp2_backend.py diff --git a/pytorch_lightning/accelerator_backends/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py similarity index 100% rename from pytorch_lightning/accelerator_backends/ddp_backend.py rename to pytorch_lightning/accelerators/ddp_backend.py diff --git a/pytorch_lightning/accelerator_backends/ddp_spawn_backend.py b/pytorch_lightning/accelerators/ddp_spawn_backend.py similarity index 100% rename from pytorch_lightning/accelerator_backends/ddp_spawn_backend.py rename to pytorch_lightning/accelerators/ddp_spawn_backend.py diff --git a/pytorch_lightning/accelerator_backends/dp_backend.py b/pytorch_lightning/accelerators/dp_backend.py similarity index 100% rename from pytorch_lightning/accelerator_backends/dp_backend.py rename to pytorch_lightning/accelerators/dp_backend.py diff --git a/pytorch_lightning/accelerator_backends/gpu_backend.py 
b/pytorch_lightning/accelerators/gpu_backend.py similarity index 100% rename from pytorch_lightning/accelerator_backends/gpu_backend.py rename to pytorch_lightning/accelerators/gpu_backend.py diff --git a/pytorch_lightning/accelerator_backends/tpu_backend.py b/pytorch_lightning/accelerators/tpu_backend.py similarity index 100% rename from pytorch_lightning/accelerator_backends/tpu_backend.py rename to pytorch_lightning/accelerators/tpu_backend.py diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index b19481986e..02adf6eb67 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -51,7 +51,7 @@ from pytorch_lightning.utilities import parsing, rank_zero_info, rank_zero_only, from pytorch_lightning.utilities.debugging import InternalDebugger from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.trainer.configuration_validator import ConfigValidator -from pytorch_lightning.accelerator_backends import ( +from pytorch_lightning.accelerators import ( GPUBackend, TPUBackend, CPUBackend, DDPSpawnBackend, DataParallelBackend, DDPBackend, DDP2Backend) # warnings to ignore in trainer diff --git a/tests/core/test_datamodules.py b/tests/core/test_datamodules.py index c0c41be42f..0dc81c757a 100644 --- a/tests/core/test_datamodules.py +++ b/tests/core/test_datamodules.py @@ -299,7 +299,7 @@ def test_full_loop_ddp_spawn(tmpdir): trainer = Trainer( default_root_dir=tmpdir, - max_epochs=3, + max_epochs=5, weights_summary=None, distributed_backend='ddp_spawn', gpus=[0, 1] diff --git a/tests/trainer/test_trainer_steps_dict_return.py b/tests/trainer/test_trainer_steps_dict_return.py index 290983fbf6..7d6df7a207 100644 --- a/tests/trainer/test_trainer_steps_dict_return.py +++ b/tests/trainer/test_trainer_steps_dict_return.py @@ -56,7 +56,11 @@ def training_step_with_step_end(tmpdir): model.training_step_end = model.training_step_end_dict model.val_dataloader = None 
- trainer = Trainer(fast_dev_run=True, weights_summary=None) + trainer = Trainer( + default_root_dir=tmpdir, + fast_dev_run=True, + weights_summary=None, + ) trainer.fit(model) # make sure correct steps were called @@ -107,8 +111,7 @@ def test_full_training_loop_dict(tmpdir): assert trainer.progress_bar_metrics['epoch_end_pbar_1'] == 234 # make sure training outputs what is expected - for batch_idx, batch in enumerate(model.train_dataloader()): - break + batch_idx, batch = 0, next(iter(model.train_dataloader())) out = trainer.run_training_batch(batch, batch_idx) assert out.signal == 0 @@ -131,7 +134,11 @@ def test_train_step_epoch_end(tmpdir): model.training_epoch_end = model.training_epoch_end_dict model.val_dataloader = None - trainer = Trainer(max_epochs=1, weights_summary=None) + trainer = Trainer( + default_root_dir=tmpdir, + max_epochs=1, + weights_summary=None, + ) trainer.fit(model) # make sure correct steps were called @@ -144,8 +151,7 @@ def test_train_step_epoch_end(tmpdir): assert trainer.progress_bar_metrics['epoch_end_pbar_1'] == 234 # make sure training outputs what is expected - for batch_idx, batch in enumerate(model.train_dataloader()): - break + batch_idx, batch = 0, next(iter(model.train_dataloader())) out = trainer.run_training_batch(batch, batch_idx) assert out.signal == 0