simplify tests & cleaning (#2588)

* simplify

* tmpdir

* revert

* clean

* accel

* types

* test

* edit test acc

Co-authored-by: Adrian Wälchli <aedu.waelchli@gmail.com>

* Update test acc

Co-authored-by: Adrian Wälchli <aedu.waelchli@gmail.com>
Jirka Borovec 2020-08-07 23:22:05 +02:00 committed by GitHub
parent 78d6592464
commit f8c058215f
15 changed files with 23 additions and 22 deletions


@@ -7,7 +7,7 @@
 "pytorch_lightning/__init__.py",
 "pytorch_lightning/callbacks",
 "pytorch_lightning/core",
-"pytorch_lightning/accelerator_backends",
+"pytorch_lightning/accelerators",
 "pytorch_lightning/loggers",
 "pytorch_lightning/logging",
 "pytorch_lightning/metrics",


@@ -6,11 +6,6 @@ export SLURM_LOCALID=0
 # use this to run tests
 rm -rf _ckpt_*
 rm -rf ./tests/save_dir*
-rm -rf ./tests/mlruns_*
-rm -rf ./tests/cometruns*
-rm -rf ./tests/wandb*
-rm -rf ./tests/tests/*
-rm -rf ./lightning_logs
 python -m coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --flake8
 python -m coverage report -m
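
The five rm -rf lines deleted above were only needed because test runs used to dump logger and checkpoint artifacts into the repository tree. The test changes later in this diff pass default_root_dir=tmpdir instead, so each test writes into pytest's per-test temporary directory and no manual cleanup is required. A minimal sketch of the pattern, assuming the 2020-era pytorch_lightning API; the test name and TinyModel module are illustrative, not part of this commit:

import torch
from torch.utils.data import DataLoader, TensorDataset
from pytorch_lightning import LightningModule, Trainer


class TinyModel(LightningModule):
    """Tiny stand-in module so the test below is self-contained."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def forward(self, x):
        return self.layer(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return {'loss': torch.nn.functional.cross_entropy(self(x), y)}

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

    def train_dataloader(self):
        dataset = TensorDataset(torch.randn(64, 32), torch.randint(0, 2, (64,)))
        return DataLoader(dataset, batch_size=8)


def test_artifacts_go_to_tmpdir(tmpdir):
    # `tmpdir` is pytest's built-in per-test temporary directory fixture;
    # routing default_root_dir through it keeps checkpoints and logs out of
    # the repo, which is what makes the rm -rf cleanup above unnecessary.
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(TinyModel())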


@@ -138,7 +138,7 @@ language = None
 exclude_patterns = [
     'api/pytorch_lightning.rst',
     'api/pl_examples.*',
-    'api/pytorch_lightning.accelerator_backends.*',
+    'api/pytorch_lightning.accelerators.*',
     'api/modules.rst',
     'PULL_REQUEST_TEMPLATE.md',
 ]


@@ -1,7 +0,0 @@
-from pytorch_lightning.accelerator_backends.gpu_backend import GPUBackend
-from pytorch_lightning.accelerator_backends.tpu_backend import TPUBackend
-from pytorch_lightning.accelerator_backends.dp_backend import DataParallelBackend
-from pytorch_lightning.accelerator_backends.ddp_spawn_backend import DDPSpawnBackend
-from pytorch_lightning.accelerator_backends.cpu_backend import CPUBackend
-from pytorch_lightning.accelerator_backends.ddp_backend import DDPBackend
-from pytorch_lightning.accelerator_backends.ddp2_backend import DDP2Backend


@@ -0,0 +1,7 @@
+from pytorch_lightning.accelerators.gpu_backend import GPUBackend
+from pytorch_lightning.accelerators.tpu_backend import TPUBackend
+from pytorch_lightning.accelerators.dp_backend import DataParallelBackend
+from pytorch_lightning.accelerators.ddp_spawn_backend import DDPSpawnBackend
+from pytorch_lightning.accelerators.cpu_backend import CPUBackend
+from pytorch_lightning.accelerators.ddp_backend import DDPBackend
+from pytorch_lightning.accelerators.ddp2_backend import DDP2Backend


@@ -51,7 +51,7 @@ from pytorch_lightning.utilities import parsing, rank_zero_info, rank_zero_only,
 from pytorch_lightning.utilities.debugging import InternalDebugger
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.trainer.configuration_validator import ConfigValidator
-from pytorch_lightning.accelerator_backends import (
+from pytorch_lightning.accelerators import (
     GPUBackend, TPUBackend, CPUBackend, DDPSpawnBackend, DataParallelBackend, DDPBackend, DDP2Backend)
 # warnings to ignore in trainer
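
For any code that imported these classes directly, the rename above is a one-line import change; a before/after sketch of the moved path (GPUBackend chosen arbitrarily from the names in this diff):

# before this commit (package removed above)
from pytorch_lightning.accelerator_backends import GPUBackend

# after this commit
from pytorch_lightning.accelerators import GPUBackend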


@@ -299,7 +299,7 @@ def test_full_loop_ddp_spawn(tmpdir):
     trainer = Trainer(
         default_root_dir=tmpdir,
-        max_epochs=3,
+        max_epochs=5,
         weights_summary=None,
         distributed_backend='ddp_spawn',
         gpus=[0, 1]


@@ -56,7 +56,11 @@ def training_step_with_step_end(tmpdir):
     model.training_step_end = model.training_step_end_dict
     model.val_dataloader = None
-    trainer = Trainer(fast_dev_run=True, weights_summary=None)
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        fast_dev_run=True,
+        weights_summary=None,
+    )
     trainer.fit(model)
     # make sure correct steps were called
@@ -107,8 +111,7 @@ def test_full_training_loop_dict(tmpdir):
     assert trainer.progress_bar_metrics['epoch_end_pbar_1'] == 234
     # make sure training outputs what is expected
-    for batch_idx, batch in enumerate(model.train_dataloader()):
-        break
+    batch_idx, batch = 0, next(iter(model.train_dataloader()))
     out = trainer.run_training_batch(batch, batch_idx)
     assert out.signal == 0
@@ -131,7 +134,11 @@ def test_train_step_epoch_end(tmpdir):
     model.training_epoch_end = model.training_epoch_end_dict
     model.val_dataloader = None
-    trainer = Trainer(max_epochs=1, weights_summary=None)
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        weights_summary=None,
+    )
     trainer.fit(model)
     # make sure correct steps were called
@@ -144,8 +151,7 @@ def test_train_step_epoch_end(tmpdir):
     assert trainer.progress_bar_metrics['epoch_end_pbar_1'] == 234
     # make sure training outputs what is expected
-    for batch_idx, batch in enumerate(model.train_dataloader()):
-        break
+    batch_idx, batch = 0, next(iter(model.train_dataloader()))
     out = trainer.run_training_batch(batch, batch_idx)
     assert out.signal == 0
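
Both test hunks above replace a for/break construct that ran one loop iteration just to capture the first batch; next(iter(...)) states that intent directly. A self-contained sketch of the two equivalent forms, using a stand-in DataLoader rather than the test model's:

import torch
from torch.utils.data import DataLoader, TensorDataset

loader = DataLoader(TensorDataset(torch.randn(64, 32)), batch_size=8)

# old pattern: enter the loop and break immediately, keeping the first batch
for batch_idx, batch in enumerate(loader):
    break

# new pattern: fetch the first batch explicitly
batch_idx, batch = 0, next(iter(loader))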