Use pytest tmpdir fixture (#482)

* Use pytest tmpdir

* Switch to tmpdir fixtures

* Switch to tmpdir fixture

* tmpdir fixture

* Fix more conflicts
Nic Eggert 2019-12-03 07:01:04 -06:00 committed by William Falcon
parent a6d64ac013
commit 62f6f92fdf
7 changed files with 123 additions and 185 deletions
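
For readers unfamiliar with the fixture this commit adopts: pytest's built-in tmpdir hands each test function its own freshly created temporary directory as a py.path.local object, named after the test, and pytest prunes stale base directories across sessions. That is what makes the hand-rolled init_save_dir/clear_save_dir helpers (deleted at the bottom of this diff) redundant. A minimal sketch of the fixture, independent of Lightning:

# Editor's sketch, not part of the commit: pytest injects a unique
# py.path.local per test; no manual setup or cleanup is needed.

def test_writes_artifacts(tmpdir):
    log_file = tmpdir.join("events.log")   # path arithmetic via join()
    log_file.write("hello")                # creates the file

    assert log_file.check(file=1)          # the file now exists
    assert str(tmpdir)                     # converts to a plain string path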

View File

@ -12,7 +12,7 @@ from pytorch_lightning.utilities.debugging import MisconfigurationException
import tests.utils as tutils
def test_amp_single_gpu():
def test_amp_single_gpu(tmpdir):
"""
Make sure AMP works on a single GPU
:return:
@ -26,6 +26,7 @@ def test_amp_single_gpu():
model = LightningTestModel(hparams)
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=True,
max_nb_epochs=1,
gpus=1,
@ -36,7 +37,7 @@ def test_amp_single_gpu():
tutils.run_model_test(trainer_options, model, hparams)
def test_no_amp_single_gpu():
def test_no_amp_single_gpu(tmpdir):
"""
Make sure a single-GPU model runs without AMP
:return:
@ -50,6 +51,7 @@ def test_no_amp_single_gpu():
model = LightningTestModel(hparams)
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=True,
max_nb_epochs=1,
gpus=1,
@ -61,7 +63,7 @@ def test_no_amp_single_gpu():
tutils.run_model_test(trainer_options, model, hparams)
def test_amp_gpu_ddp():
def test_amp_gpu_ddp(tmpdir):
"""
Make sure DDP + AMP work
:return:
@ -76,6 +78,7 @@ def test_amp_gpu_ddp():
model = LightningTestModel(hparams)
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=True,
max_nb_epochs=1,
gpus=2,
@ -86,7 +89,7 @@ def test_amp_gpu_ddp():
tutils.run_model_test(trainer_options, model, hparams)
def test_amp_gpu_ddp_slurm_managed():
def test_amp_gpu_ddp_slurm_managed(tmpdir):
"""
Make sure DDP + AMP work
:return:
@ -111,10 +114,10 @@ def test_amp_gpu_ddp_slurm_managed():
use_amp=True
)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# exp file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
@ -157,10 +160,8 @@ def test_amp_gpu_ddp_slurm_managed():
model.freeze()
model.unfreeze()
tutils.clear_save_dir()
def test_cpu_model_with_amp():
def test_cpu_model_with_amp(tmpdir):
"""
Make sure the model trains on CPU with AMP
:return:
@ -168,8 +169,9 @@ def test_cpu_model_with_amp():
tutils.reset_seed()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
logger=tutils.get_test_tube_logger(),
logger=tutils.get_test_tube_logger(tmpdir),
max_nb_epochs=1,
train_percent_check=0.4,
val_percent_check=0.4,
@ -182,7 +184,7 @@ def test_cpu_model_with_amp():
tutils.run_model_test(trainer_options, model, hparams, on_gpu=False)
def test_amp_gpu_dp():
def test_amp_gpu_dp(tmpdir):
"""
Make sure DP + AMP work
:return:
@ -194,6 +196,7 @@ def test_amp_gpu_dp():
model, hparams = tutils.get_model()
trainer_options = dict(
default_save_path=tmpdir,
max_nb_epochs=1,
gpus='0, 1', # test init with gpu string
distributed_backend='dp',
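
The change repeated across the tests above is mechanical: add tmpdir to the test signature and route all Trainer output through it via default_save_path. A hedged sketch of that shape, with a stub standing in for tests.utils.run_model_test so the example runs on pytest alone:

def run_model_test(trainer_options):
    # stub for tests.utils.run_model_test: only checks the save path
    assert trainer_options['default_save_path'].check(dir=1)

def test_amp_single_gpu(tmpdir):        # fixture injected by argument name
    trainer_options = dict(
        default_save_path=tmpdir,       # all run output lands here
        show_progress_bar=True,
        max_nb_epochs=1,
    )
    run_model_test(trainer_options)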

View File

@ -15,7 +15,7 @@ from pytorch_lightning.testing import (
import tests.utils as tutils
def test_early_stopping_cpu_model():
def test_early_stopping_cpu_model(tmpdir):
"""
Test early stopping on a CPU model
:return:
@ -24,13 +24,14 @@ def test_early_stopping_cpu_model():
stopping = EarlyStopping(monitor='val_loss', min_delta=0.1)
trainer_options = dict(
default_save_path=tmpdir,
early_stop_callback=stopping,
gradient_clip_val=1.0,
overfit_pct=0.20,
track_grad_norm=2,
print_nan_grads=True,
show_progress_bar=True,
logger=tutils.get_test_tube_logger(),
logger=tutils.get_test_tube_logger(tmpdir),
train_percent_check=0.1,
val_percent_check=0.1
)
@ -43,7 +44,7 @@ def test_early_stopping_cpu_model():
model.unfreeze()
def test_lbfgs_cpu_model():
def test_lbfgs_cpu_model(tmpdir):
"""
Test the LBFGS optimizer on a CPU model
:return:
@ -51,6 +52,7 @@ def test_lbfgs_cpu_model():
tutils.reset_seed()
trainer_options = dict(
default_save_path=tmpdir,
max_nb_epochs=1,
print_nan_grads=True,
show_progress_bar=False,
@ -63,10 +65,8 @@ def test_lbfgs_cpu_model():
tutils.run_model_test_no_loggers(trainer_options, model, hparams,
on_gpu=False, min_acc=0.30)
tutils.clear_save_dir()
def test_default_logger_callbacks_cpu_model():
def test_default_logger_callbacks_cpu_model(tmpdir):
"""
Test the default logger and callbacks on a CPU model
:return:
@ -74,6 +74,7 @@ def test_default_logger_callbacks_cpu_model():
tutils.reset_seed()
trainer_options = dict(
default_save_path=tmpdir,
max_nb_epochs=1,
gradient_clip_val=1.0,
overfit_pct=0.20,
@ -90,25 +91,22 @@ def test_default_logger_callbacks_cpu_model():
model.freeze()
model.unfreeze()
tutils.clear_save_dir()
def test_running_test_after_fitting():
def test_running_test_after_fitting(tmpdir):
"""Verify test() on fitted model"""
tutils.reset_seed()
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
# logger file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(tmpdir, False)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
max_nb_epochs=1,
train_percent_check=0.4,
@ -129,10 +127,8 @@ def test_running_test_after_fitting():
# test we have good test accuracy
tutils.assert_ok_test_acc(trainer)
tutils.clear_save_dir()
def test_running_test_without_val():
def test_running_test_without_val(tmpdir):
"""Verify test() works on a model with no val_loader"""
tutils.reset_seed()
@ -143,10 +139,8 @@ def test_running_test_without_val():
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
save_dir = tutils.init_save_dir()
# logger file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(tmpdir, False)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
@ -172,8 +166,6 @@ def test_running_test_without_val():
# test we have good test accuracy
tutils.assert_ok_test_acc(trainer)
tutils.clear_save_dir()
def test_single_gpu_batch_parse():
tutils.reset_seed()
@ -219,7 +211,7 @@ def test_single_gpu_batch_parse():
assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'
def test_simple_cpu():
def test_simple_cpu(tmpdir):
"""
Verify a simple training session runs on CPU
:return:
@ -229,10 +221,9 @@ def test_simple_cpu():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_nb_epochs=1,
val_percent_check=0.1,
train_percent_check=0.1,
@ -245,10 +236,8 @@ def test_simple_cpu():
# training complete
assert result == 1, 'simple cpu model failed to complete'
tutils.clear_save_dir()
def test_cpu_model():
def test_cpu_model(tmpdir):
"""
Make sure model trains on CPU
:return:
@ -256,8 +245,9 @@ def test_cpu_model():
tutils.reset_seed()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
logger=tutils.get_test_tube_logger(),
logger=tutils.get_test_tube_logger(tmpdir),
max_nb_epochs=1,
train_percent_check=0.4,
val_percent_check=0.4
@ -268,7 +258,7 @@ def test_cpu_model():
tutils.run_model_test(trainer_options, model, hparams, on_gpu=False)
def test_all_features_cpu_model():
def test_all_features_cpu_model(tmpdir):
"""
Test each of the trainer options
:return:
@ -276,12 +266,13 @@ def test_all_features_cpu_model():
tutils.reset_seed()
trainer_options = dict(
default_save_path=tmpdir,
gradient_clip_val=1.0,
overfit_pct=0.20,
track_grad_norm=2,
print_nan_grads=True,
show_progress_bar=False,
logger=tutils.get_test_tube_logger(),
logger=tutils.get_test_tube_logger(tmpdir),
accumulate_grad_batches=2,
max_nb_epochs=1,
train_percent_check=0.4,
@ -292,15 +283,13 @@ def test_all_features_cpu_model():
tutils.run_model_test(trainer_options, model, hparams, on_gpu=False)
def test_tbptt_cpu_model():
def test_tbptt_cpu_model(tmpdir):
"""
Test that truncated backpropagation through time works.
:return:
"""
tutils.reset_seed()
save_dir = tutils.init_save_dir()
truncated_bptt_steps = 2
sequence_size = 30
batch_size = 30
@ -348,6 +337,7 @@ def test_tbptt_cpu_model():
)
trainer_options = dict(
default_save_path=tmpdir,
max_nb_epochs=1,
truncated_bptt_steps=truncated_bptt_steps,
val_percent_check=0,
@ -368,10 +358,8 @@ def test_tbptt_cpu_model():
assert result == 1, 'training failed to complete'
tutils.clear_save_dir()
def test_single_gpu_model():
def test_single_gpu_model(tmpdir):
"""
Make sure single GPU works (DP mode)
:return:
@ -385,6 +373,7 @@ def test_single_gpu_model():
model, hparams = tutils.get_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
max_nb_epochs=1,
train_percent_check=0.1,

View File

@ -20,7 +20,7 @@ import tests.utils as tutils
PRETEND_N_OF_GPUS = 16
def test_multi_gpu_model_ddp2():
def test_multi_gpu_model_ddp2(tmpdir):
"""
Make sure DDP2 works
:return:
@ -33,6 +33,7 @@ def test_multi_gpu_model_ddp2():
model, hparams = tutils.get_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=True,
max_nb_epochs=1,
train_percent_check=0.4,
@ -45,7 +46,7 @@ def test_multi_gpu_model_ddp2():
tutils.run_model_test(trainer_options, model, hparams)
def test_multi_gpu_model_ddp():
def test_multi_gpu_model_ddp(tmpdir):
"""
Make sure DDP works
:return:
@ -58,6 +59,7 @@ def test_multi_gpu_model_ddp():
model, hparams = tutils.get_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
max_nb_epochs=1,
train_percent_check=0.4,
@ -100,7 +102,7 @@ def test_optimizer_return_options():
assert optim[0] == opts[0][0] and lr_sched[0] == 'lr_scheduler'
def test_cpu_slurm_save_load():
def test_cpu_slurm_save_load(tmpdir):
"""
Verify model save/load/checkpoint on CPU
:return:
@ -110,10 +112,10 @@ def test_cpu_slurm_save_load():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# logger file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
version = logger.version
@ -149,7 +151,7 @@ def test_cpu_slurm_save_load():
assert os.path.exists(saved_filepath)
# new logger file to get meta
logger = tutils.get_test_tube_logger(False, version=version)
logger = tutils.get_test_tube_logger(save_dir, False, version=version)
trainer_options = dict(
max_nb_epochs=1,
@ -174,10 +176,8 @@ def test_cpu_slurm_save_load():
# and our hook to predict using current model before any more weight updates
trainer.fit(model)
tutils.clear_save_dir()
def test_multi_gpu_none_backend():
def test_multi_gpu_none_backend(tmpdir):
"""
Make sure when using multiple GPUs the user can't use
distributed_backend = None
@ -190,6 +190,7 @@ def test_multi_gpu_none_backend():
model, hparams = tutils.get_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
max_nb_epochs=1,
train_percent_check=0.1,
@ -201,7 +202,7 @@ def test_multi_gpu_none_backend():
tutils.run_model_test(trainer_options, model, hparams)
def test_multi_gpu_model_dp():
def test_multi_gpu_model_dp(tmpdir):
"""
Make sure DP works
:return:
@ -213,6 +214,7 @@ def test_multi_gpu_model_dp():
model, hparams = tutils.get_model()
trainer_options = dict(
default_save_path=tmpdir,
show_progress_bar=False,
distributed_backend='dp',
max_nb_epochs=1,
@ -227,7 +229,7 @@ def test_multi_gpu_model_dp():
memory.get_memory_profile('min_max')
def test_ddp_sampler_error():
def test_ddp_sampler_error(tmpdir):
"""
Make sure a warning is raised when the distributed sampler is removed with DDP
:return:
@ -241,7 +243,7 @@ def test_ddp_sampler_error():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams, force_remove_distributed_sampler=True)
logger = tutils.get_test_tube_logger(True)
logger = tutils.get_test_tube_logger(tmpdir, True)
trainer = Trainer(
logger=logger,
@ -255,8 +257,6 @@ def test_ddp_sampler_error():
with pytest.warns(UserWarning):
trainer.get_dataloaders(model)
tutils.clear_save_dir()
@pytest.fixture
def mocked_device_count(monkeypatch):
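
mocked_device_count above (its body is truncated in this view) shows that fixtures compose: it receives pytest's monkeypatch fixture as an argument, just as the tests receive tmpdir. A plausible sketch of the pattern; patching torch.cuda.device_count is an assumption inferred from the PRETEND_N_OF_GPUS constant, not taken from the truncated body:

import pytest
import torch

PRETEND_N_OF_GPUS = 16

@pytest.fixture
def mocked_device_count(monkeypatch):
    # hypothetical body: fake the GPU count so device-selection logic can
    # run on CPU-only machines; monkeypatch undoes the patch afterwards
    monkeypatch.setattr(torch.cuda, "device_count", lambda: PRETEND_N_OF_GPUS)
    return PRETEND_N_OF_GPUS

def test_sees_pretend_gpus(mocked_device_count):
    assert torch.cuda.device_count() == PRETEND_N_OF_GPUS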

View File

@ -10,7 +10,7 @@ from pytorch_lightning.logging import LightningLoggerBase, rank_zero_only
import tests.utils as tutils
def test_testtube_logger():
def test_testtube_logger(tmpdir):
"""
Verify that the basic functionality of the test tube logger works
"""
@ -18,9 +18,9 @@ def test_testtube_logger():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
trainer_options = dict(
max_nb_epochs=1,
@ -33,10 +33,8 @@ def test_testtube_logger():
assert result == 1, "Training failed"
tutils.clear_save_dir()
def test_testtube_pickle():
def test_testtube_pickle(tmpdir):
"""
Verify that pickling a trainer containing a test tube logger works
"""
@ -45,9 +43,9 @@ def test_testtube_pickle():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(tmpdir, False)
logger.log_hyperparams(hparams)
logger.save()
@ -62,10 +60,8 @@ def test_testtube_pickle():
trainer2 = pickle.loads(pkl_bytes)
trainer2.logger.log_metrics({"acc": 1.0})
tutils.clear_save_dir()
def test_mlflow_logger():
def test_mlflow_logger(tmpdir):
"""
Verify that the basic functionality of the MLflow logger works
"""
@ -79,8 +75,7 @@ def test_mlflow_logger():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
root_dir = os.path.dirname(os.path.realpath(__file__))
mlflow_dir = os.path.join(root_dir, "mlruns")
mlflow_dir = os.path.join(tmpdir, "mlruns")
logger = MLFlowLogger("test", f"file://{mlflow_dir}")
@ -96,10 +91,8 @@ def test_mlflow_logger():
print('result finished')
assert result == 1, "Training failed"
tutils.clear_save_dir()
def test_mlflow_pickle():
def test_mlflow_pickle(tmpdir):
"""
Verify that pickling a trainer with an MLflow logger works
"""
@ -113,8 +106,7 @@ def test_mlflow_pickle():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
root_dir = os.path.dirname(os.path.realpath(__file__))
mlflow_dir = os.path.join(root_dir, "mlruns")
mlflow_dir = os.path.join(tmpdir, "mlruns")
logger = MLFlowLogger("test", f"file://{mlflow_dir}")
@ -128,10 +120,8 @@ def test_mlflow_pickle():
trainer2 = pickle.loads(pkl_bytes)
trainer2.logger.log_metrics({"acc": 1.0})
tutils.clear_save_dir()
def test_comet_logger():
def test_comet_logger(tmpdir):
"""
Verify that the basic functionality of the Comet.ml logger works
"""
@ -145,8 +135,7 @@ def test_comet_logger():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
root_dir = os.path.dirname(os.path.realpath(__file__))
comet_dir = os.path.join(root_dir, "cometruns")
comet_dir = os.path.join(tmpdir, "cometruns")
# We test CometLogger in offline mode with local saves
logger = CometLogger(
@ -167,10 +156,8 @@ def test_comet_logger():
print('result finished')
assert result == 1, "Training failed"
tutils.clear_save_dir()
def test_comet_pickle():
def test_comet_pickle(tmpdir):
"""
Verify that pickling a trainer with a Comet logger works
"""
@ -184,8 +171,7 @@ def test_comet_pickle():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
root_dir = os.path.dirname(os.path.realpath(__file__))
comet_dir = os.path.join(root_dir, "cometruns")
comet_dir = os.path.join(tmpdir, "cometruns")
# We test CometLogger in offline mode with local saves
logger = CometLogger(
@ -204,8 +190,6 @@ def test_comet_pickle():
trainer2 = pickle.loads(pkl_bytes)
trainer2.logger.log_metrics({"acc": 1.0})
tutils.clear_save_dir()
def test_custom_logger(tmpdir):
class CustomLogger(LightningLoggerBase):
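
A side effect of the logger changes above: artifact roots such as mlruns and cometruns move from the test file's own directory into the per-test temp dir, so offline logger runs stop polluting the repository checkout. A small sketch of the join pattern, assuming only that py.path.local is accepted wherever a path string is (it is os.PathLike on Python 3.6+):

import os

def test_logger_dirs_land_in_tmpdir(tmpdir):
    # os.path.join accepts tmpdir because py.path.local is path-like;
    # str(tmpdir) is the equivalent explicit spelling
    mlflow_dir = os.path.join(tmpdir, "mlruns")
    comet_dir = os.path.join(str(tmpdir), "cometruns")

    assert mlflow_dir.startswith(str(tmpdir))
    assert comet_dir.startswith(str(tmpdir))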

View File

@ -10,7 +10,7 @@ from pytorch_lightning.testing import LightningTestModel
import tests.utils as tutils
def test_running_test_pretrained_model_ddp():
def test_running_test_pretrained_model_ddp(tmpdir):
"""Verify test() on pretrained model"""
if not tutils.can_run_gpu_test():
return
@ -21,10 +21,10 @@ def test_running_test_pretrained_model_ddp():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# exp file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
@ -60,20 +60,18 @@ def test_running_test_pretrained_model_ddp():
for dataloader in model.test_dataloader():
tutils.run_prediction(dataloader, pretrained_model)
tutils.clear_save_dir()
def test_running_test_pretrained_model():
def test_running_test_pretrained_model(tmpdir):
"""Verify test() on pretrained model"""
tutils.reset_seed()
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# logger file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
@ -102,17 +100,16 @@ def test_running_test_pretrained_model():
# test we have good test accuracy
tutils.assert_ok_test_acc(new_trainer)
tutils.clear_save_dir()
def test_load_model_from_checkpoint():
def test_load_model_from_checkpoint(tmpdir):
"""Verify loading model weights from a saved checkpoint"""
tutils.reset_seed()
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
trainer_options = dict(
show_progress_bar=False,
@ -143,10 +140,9 @@ def test_load_model_from_checkpoint():
# test we have good test accuracy
tutils.assert_ok_test_acc(new_trainer)
tutils.clear_save_dir()
def test_running_test_pretrained_model_dp():
def test_running_test_pretrained_model_dp(tmpdir):
"""Verify test() on a pretrained model with DP"""
tutils.reset_seed()
@ -156,10 +152,10 @@ def test_running_test_pretrained_model_dp():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# logger file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
@ -190,10 +186,9 @@ def test_running_test_pretrained_model_dp():
# test we have good test accuracy
tutils.assert_ok_test_acc(new_trainer)
tutils.clear_save_dir()
def test_dp_resume():
def test_dp_resume(tmpdir):
"""
Make sure DP continues training correctly
:return:
@ -213,10 +208,10 @@ def test_dp_resume():
distributed_backend='dp',
)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# get logger
logger = tutils.get_test_tube_logger(debug=False)
logger = tutils.get_test_tube_logger(save_dir, debug=False)
# exp file to get weights
# logger file to get weights
@ -244,7 +239,7 @@ def test_dp_resume():
trainer.hpc_save(save_dir, logger)
# init new trainer
new_logger = tutils.get_test_tube_logger(version=logger.version)
new_logger = tutils.get_test_tube_logger(save_dir, version=logger.version)
trainer_options['logger'] = new_logger
trainer_options['checkpoint_callback'] = ModelCheckpoint(save_dir)
trainer_options['train_percent_check'] = 0.2
@ -275,10 +270,8 @@ def test_dp_resume():
model.freeze()
model.unfreeze()
tutils.clear_save_dir()
def test_cpu_restore_training():
def test_cpu_restore_training(tmpdir):
"""
Verify resuming a training session on CPU
:return:
@ -288,11 +281,11 @@ def test_cpu_restore_training():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# logger file to get meta
test_logger_version = 10
logger = tutils.get_test_tube_logger(False, version=test_logger_version)
logger = tutils.get_test_tube_logger(save_dir, False, version=test_logger_version)
trainer_options = dict(
max_nb_epochs=2,
@ -314,7 +307,7 @@ def test_cpu_restore_training():
# wipe-out trainer and model
# retrain with not much data... this simulates picking training back up after slurm
# we want to see if the weights come back correctly
new_logger = tutils.get_test_tube_logger(False, version=test_logger_version)
new_logger = tutils.get_test_tube_logger(save_dir, False, version=test_logger_version)
trainer_options = dict(
max_nb_epochs=2,
val_check_interval=0.50,
@ -343,10 +336,8 @@ def test_cpu_restore_training():
# and our hook to predict using current model before any more weight updates
trainer.fit(model)
tutils.clear_save_dir()
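
test_cpu_restore_training above shows the resume pattern under tmpdir: both the original and the restarted run point their loggers at the same save_dir and pin the same version, so the second trainer can find the first one's checkpoints. A Lightning-free sketch of why the pinning matters, using only py.path.local calls; the version_10 layout mirrors test_logger_version = 10 above and is illustrative:

def test_resume_finds_prior_artifacts(tmpdir):
    # first "run": write a checkpoint under a pinned version directory
    # (ensure() creates intermediate directories and the file itself)
    tmpdir.join("version_10", "weights.ckpt").ensure(file=True)

    # second "run": pinning the same version locates the artifact again,
    # mirroring get_test_tube_logger(save_dir, False, version=10) twice
    assert tmpdir.join("version_10", "weights.ckpt").check(file=1)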
def test_model_saving_loading():
def test_model_saving_loading(tmpdir):
"""
Tests the use case where the trainer saves the model and the user loads it from tags independently
:return:
@ -356,10 +347,10 @@ def test_model_saving_loading():
hparams = tutils.get_hparams()
model = LightningTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# logger file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
trainer_options = dict(
max_nb_epochs=1,
@ -402,8 +393,6 @@ def test_model_saving_loading():
new_pred = model_2(x)
assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1
tutils.clear_save_dir()
if __name__ == '__main__':
pytest.main([__file__])

View File

@ -19,7 +19,7 @@ from pytorch_lightning.trainer.logging_mixin import TrainerLoggingMixin
import tests.utils as tutils
def test_no_val_module():
def test_no_val_module(tmpdir):
"""
Tests the use case where the trainer saves the model and the user loads it from tags independently
:return:
@ -33,10 +33,10 @@ def test_no_val_module():
model = CurrentTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# logger file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
trainer_options = dict(
max_nb_epochs=1,
@ -62,11 +62,8 @@ def test_no_val_module():
tags_csv=tags_path)
model_2.eval()
# make prediction
tutils.clear_save_dir()
def test_no_val_end_module():
def test_no_val_end_module(tmpdir):
"""
Tests the use case where the trainer saves the model and the user loads it from tags independently
:return:
@ -79,10 +76,10 @@ def test_no_val_end_module():
hparams = tutils.get_hparams()
model = CurrentTestModel(hparams)
save_dir = tutils.init_save_dir()
save_dir = tmpdir
# logger file to get meta
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(save_dir, False)
trainer_options = dict(
max_nb_epochs=1,
@ -108,11 +105,8 @@ def test_no_val_end_module():
tags_csv=tags_path)
model_2.eval()
# make prediction
tutils.clear_save_dir()
def test_gradient_accumulation_scheduling():
def test_gradient_accumulation_scheduling(tmpdir):
tutils.reset_seed()
"""
@ -177,7 +171,8 @@ def test_gradient_accumulation_scheduling():
trainer = Trainer(accumulate_grad_batches=schedule,
train_percent_check=0.1,
val_percent_check=0.1,
max_nb_epochs=4)
max_nb_epochs=4,
default_save_path=tmpdir)
# for the test
trainer.optimizer_step = optimizer_step
@ -186,14 +181,14 @@ def test_gradient_accumulation_scheduling():
trainer.fit(model)
def test_loading_meta_tags():
def test_loading_meta_tags(tmpdir):
tutils.reset_seed()
from argparse import Namespace
hparams = tutils.get_hparams()
# save tags
logger = tutils.get_test_tube_logger(False)
logger = tutils.get_test_tube_logger(tmpdir, False)
logger.log_hyperparams(Namespace(some_str='a_str', an_int=1, a_float=2.0))
logger.log_hyperparams(hparams)
logger.save()
@ -206,8 +201,6 @@ def test_loading_meta_tags():
assert tags.batch_size == 32 and tags.hidden_dim == 1000
tutils.clear_save_dir()
def test_dp_output_reduce():
mixin = TrainerLoggingMixin()
@ -232,11 +225,14 @@ def test_dp_output_reduce():
assert reduced['b']['c'] == out['b']['c']
def test_model_checkpoint_options():
def test_model_checkpoint_options(tmp_path):
"""
Test ModelCheckpoint options
:return:
"""
# TODO split this up into multiple tests
def mock_save_function(filepath):
open(filepath, 'a').close()
@ -244,7 +240,8 @@ def test_model_checkpoint_options():
model = LightningTestModel(hparams)
# simulated losses
save_dir = tutils.init_save_dir()
save_dir = tmp_path / "1"
save_dir.mkdir()
losses = [10, 9, 2.8, 5, 2.5]
# -----------------
@ -262,7 +259,8 @@ def test_model_checkpoint_options():
for i in range(0, len(losses)):
assert f'_ckpt_epoch_{i}.ckpt' in file_lists
tutils.clear_save_dir()
save_dir = tmp_path / "2"
save_dir.mkdir()
# -----------------
# CASE K=0 (none)
@ -275,7 +273,8 @@ def test_model_checkpoint_options():
assert len(file_lists) == 0, "Should save 0 models when save_top_k=0"
tutils.clear_save_dir()
save_dir = tmp_path / "3"
save_dir.mkdir()
# -----------------
# CASE K=1 (2.5, epoch 4)
@ -289,7 +288,8 @@ def test_model_checkpoint_options():
assert len(file_lists) == 1, "Should save 1 model when save_top_k=1"
assert 'test_prefix_ckpt_epoch_4.ckpt' in file_lists
tutils.clear_save_dir()
save_dir = tmp_path / "4"
save_dir.mkdir()
# -----------------
# CASE K=2 (2.5 epoch 4, 2.8 epoch 2)
@ -308,7 +308,8 @@ def test_model_checkpoint_options():
assert '_ckpt_epoch_2.ckpt' in file_lists
assert 'other_file.ckpt' in file_lists
tutils.clear_save_dir()
save_dir = tmp_path / "5"
save_dir.mkdir()
# -----------------
# CASE K=4 (save all 4 models)
@ -323,7 +324,8 @@ def test_model_checkpoint_options():
assert len(file_lists) == 4, 'Should save all 4 models when save_top_k=4 within same epoch'
tutils.clear_save_dir()
save_dir = tmp_path / "6"
save_dir.mkdir()
# -----------------
# CASE K=3 (save the 2nd, 3rd, 4th model)
@ -341,8 +343,6 @@ def test_model_checkpoint_options():
assert '_ckpt_epoch_0_v1.ckpt' in file_lists
assert '_ckpt_epoch_0.ckpt' in file_lists
tutils.clear_save_dir()
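
test_model_checkpoint_options is the one test moved to tmp_path, the newer pathlib.Path counterpart of tmpdir; numbered subdirectories ("1" through "6") replace the clear_save_dir() calls between checkpoint cases. A minimal sketch of that isolation pattern:

def test_isolated_case_dirs(tmp_path):
    # tmp_path is a pathlib.Path; one fresh subdirectory per case replaces
    # the old clear_save_dir() between checkpoint scenarios
    for case in ("1", "2", "3"):
        save_dir = tmp_path / case
        save_dir.mkdir()
        (save_dir / "checkpoint.ckpt").touch()

    assert sorted(p.name for p in tmp_path.iterdir()) == ["1", "2", "3"]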
def test_model_freeze_unfreeze():
tutils.reset_seed()
@ -354,7 +354,7 @@ def test_model_freeze_unfreeze():
model.unfreeze()
def test_multiple_val_dataloader():
def test_multiple_val_dataloader(tmpdir):
"""
Verify multiple val_dataloader
:return:
@ -372,6 +372,7 @@ def test_multiple_val_dataloader():
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_nb_epochs=1,
val_percent_check=0.1,
train_percent_check=1.0,
@ -393,7 +394,7 @@ def test_multiple_val_dataloader():
tutils.run_prediction(dataloader, trainer.model)
def test_multiple_test_dataloader():
def test_multiple_test_dataloader(tmpdir):
"""
Verify multiple test_dataloader
:return:
@ -411,6 +412,7 @@ def test_multiple_test_dataloader():
# logger file to get meta
trainer_options = dict(
default_save_path=tmpdir,
max_nb_epochs=1,
val_percent_check=0.1,
train_percent_check=0.1,

View File

@ -17,7 +17,6 @@ from pytorch_lightning.testing import (
)
# generate a list of random ports for each test
RANDOM_FILE_PATHS = list(np.random.randint(12000, 19000, 1000))
RANDOM_PORTS = list(np.random.randint(12000, 19000, 1000))
ROOT_SEED = 1234
torch.manual_seed(ROOT_SEED)
@ -26,8 +25,7 @@ RANDOM_SEEDS = list(np.random.randint(0, 10000, 1000))
def run_model_test_no_loggers(trainer_options, model, hparams, on_gpu=True, min_acc=0.50):
save_dir = init_save_dir()
trainer_options['default_save_path'] = save_dir
save_dir = trainer_options['default_save_path']
# fit model
trainer = Trainer(**trainer_options)
@ -49,14 +47,12 @@ def run_model_test_no_loggers(trainer_options, model, hparams, on_gpu=True, min_
trainer.model = pretrained_model
trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()
clear_save_dir()
def run_model_test(trainer_options, model, hparams, on_gpu=True):
save_dir = init_save_dir()
save_dir = trainer_options['default_save_path']
# logger file to get meta
logger = get_test_tube_logger(False)
logger = get_test_tube_logger(save_dir, False)
# logger file to get weights
checkpoint = init_checkpoint_callback(logger)
@ -87,8 +83,6 @@ def run_model_test(trainer_options, model, hparams, on_gpu=True):
trainer.hpc_save(save_dir, logger)
trainer.hpc_load(save_dir, on_gpu=on_gpu)
clear_save_dir()
def get_hparams(continue_training=False, hpc_exp_number=0):
root_dir = os.path.dirname(os.path.realpath(__file__))
@ -126,35 +120,12 @@ def get_model(use_test_model=False, lbfgs=False):
return model, hparams
def get_test_tube_logger(debug=True, version=None):
def get_test_tube_logger(save_dir, debug=True, version=None):
# set up logger object without actually saving logs
root_dir = os.path.dirname(os.path.realpath(__file__))
save_dir = os.path.join(root_dir, 'save_dir')
logger = TestTubeLogger(save_dir, name='lightning_logs', debug=False, version=version)
return logger
def init_save_dir():
root_dir = os.path.dirname(os.path.realpath(__file__))
save_dir = os.path.join(root_dir, 'tests', 'save_dir')
if os.path.exists(save_dir):
n = RANDOM_FILE_PATHS.pop()
shutil.move(save_dir, save_dir + f'_{n}')
os.makedirs(save_dir, exist_ok=True)
return save_dir
def clear_save_dir():
root_dir = os.path.dirname(os.path.realpath(__file__))
save_dir = os.path.join(root_dir, 'tests', 'save_dir')
if os.path.exists(save_dir):
n = RANDOM_FILE_PATHS.pop()
shutil.move(save_dir, save_dir + f'_{n}')
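
The two deleted helpers above reimplemented the fixture by hand: init_save_dir minted a fresh directory by shuffling any existing one aside under a random suffix, and clear_save_dir did the same after the test. With tmpdir, uniqueness and cleanup are pytest's job, and the directory name embeds the test name, which helps when inspecting failed runs. A short sketch of the guarantee the helpers were approximating:

def test_dirs_are_unique_a(tmpdir):
    # pytest names the directory after the test (plus a numeric suffix),
    # so two tests cannot collide the way a shared save_dir could
    assert "test_dirs_are_unique_a" in str(tmpdir)

def test_dirs_are_unique_b(tmpdir):
    assert "test_dirs_are_unique_b" in str(tmpdir)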
def load_model(exp, root_weights_dir, module_class=LightningTemplateModel):
# load trained model
tags_path = exp.get_data_path(exp.name, exp.version)