missing tests default_root_dir=tmpdir (#6314)

* default_root_dir=tmpdir

* miss
Jirka Borovec 2021-03-04 20:23:12 +01:00 committed by GitHub
parent 4f904556e4
commit b9cf1223b9
9 changed files with 72 additions and 59 deletions
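
The change is mechanical but worth spelling out: a test that builds a Trainer without default_root_dir lets Lightning fall back to os.getcwd(), so running the suite litters the working tree with lightning_logs/ and checkpoint files. A minimal sketch of the pattern this commit applies throughout, assuming Lightning ~1.2 and the test suite's BoringModel helper (the import path is an assumption, not taken from this diff):

# Minimal sketch of the pattern this commit enforces.
from pytorch_lightning import Trainer
from tests.helpers.boring_model import BoringModel  # assumed import path


def test_fit_writes_into_tmpdir(tmpdir):  # hypothetical test name
    # pytest's built-in `tmpdir` fixture gives each test a fresh temporary directory.
    model = BoringModel()
    # default_root_dir redirects logs and checkpoints away from os.getcwd().
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.fit(model)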

View File

@@ -167,10 +167,7 @@ def test_callbacks_state_resume_from_checkpoint(tmpdir):
     def get_trainer_args():
         checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="val_loss", save_last=True)
         trainer_args = dict(
-            default_root_dir=tmpdir, max_steps=1, logger=False, callbacks=[
-                checkpoint,
-                callback_capture,
-            ]
+            default_root_dir=tmpdir, max_steps=1, logger=False, callbacks=[checkpoint, callback_capture]
         )
         assert checkpoint.best_model_path == ""
         assert checkpoint.best_model_score is None

View File

@@ -105,8 +105,16 @@ def test_sync_batchnorm_ddp(tmpdir):
     dm.setup(stage=None)

     model = SyncBNModule(gpu_count=2, bn_targets=bn_outputs)
+    ddp = DDPSpawnPlugin(
+        parallel_devices=[torch.device("cuda", 0), torch.device("cuda", 1)],
+        num_nodes=1,
+        sync_batchnorm=True,
+        cluster_environment=TorchElasticEnvironment(),
+        find_unused_parameters=True
+    )

     trainer = Trainer(
+        default_root_dir=tmpdir,
         gpus=2,
         num_nodes=1,
         accelerator='ddp_spawn',
@@ -115,15 +123,7 @@ def test_sync_batchnorm_ddp(tmpdir):
         sync_batchnorm=True,
         num_sanity_val_steps=0,
         replace_sampler_ddp=False,
-        plugins=[
-            DDPSpawnPlugin(
-                parallel_devices=[torch.device("cuda", 0), torch.device("cuda", 1)],
-                num_nodes=1,
-                sync_batchnorm=True,
-                cluster_environment=TorchElasticEnvironment(),
-                find_unused_parameters=True
-            )
-        ]
+        plugins=[ddp]
     )

     trainer.fit(model, dm)

View File

@@ -133,7 +133,11 @@ def test_deepspeed_precision_choice(amp_backend, tmpdir):
     """

     trainer = Trainer(
-        fast_dev_run=True, default_root_dir=tmpdir, plugins='deepspeed', amp_backend=amp_backend, precision=16
+        fast_dev_run=True,
+        default_root_dir=tmpdir,
+        plugins='deepspeed',
+        amp_backend=amp_backend,
+        precision=16,
     )

     assert isinstance(trainer.accelerator.training_type_plugin, DeepSpeedPlugin)
@@ -178,13 +182,11 @@ def test_deepspeed_defaults(tmpdir):

 @RunIf(deepspeed=True)
 def test_invalid_deepspeed_defaults_no_precision(tmpdir):
-    """
-    Test to ensure that using defaults, if precision is not set to 16, we throw an exception.
-    """
+    """Test to ensure that using defaults, if precision is not set to 16, we throw an exception."""
     model = BoringModel()
     trainer = Trainer(
-        fast_dev_run=True,
         default_root_dir=tmpdir,
+        fast_dev_run=True,
         plugins='deepspeed',
     )
     with pytest.raises(
with pytest.raises(
@@ -195,9 +197,7 @@ def test_invalid_deepspeed_defaults_no_precision(tmpdir):

 @RunIf(min_gpus=1, deepspeed=True)
 def test_warn_deepspeed_override_backward(tmpdir):
-    """
-    Test to ensure that if the backward hook in the LightningModule is overridden, we throw a warning.
-    """
+    """Test to ensure that if the backward hook in the LightningModule is overridden, we throw a warning."""

     class TestModel(BoringModel):
@@ -205,17 +205,21 @@ def test_warn_deepspeed_override_backward(tmpdir):
             return loss.backward()

     model = TestModel()
-    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir, plugins=DeepSpeedPlugin(), gpus=1, precision=16)
+    trainer = Trainer(
+        fast_dev_run=True,
+        default_root_dir=tmpdir,
+        plugins=DeepSpeedPlugin(),
+        gpus=1,
+        precision=16,
+    )
     with pytest.warns(UserWarning, match='Overridden backward hook in the LightningModule will be ignored'):
         trainer.fit(model)


 @RunIf(min_gpus=1, deepspeed=True)
 def test_deepspeed_run_configure_optimizers(tmpdir):
-    """
-    Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation),
-    whilst using configure_optimizers for optimizers and schedulers.
-    """
+    """Test end to end that deepspeed works with defaults (without ZeRO as that requires compilation),
+    whilst using configure_optimizers for optimizers and schedulers."""

     class TestModel(BoringModel):
@@ -234,7 +238,7 @@ def test_deepspeed_run_configure_optimizers(tmpdir):
         default_root_dir=tmpdir,
         gpus=1,
         fast_dev_run=True,
-        precision=16
+        precision=16,
     )
     trainer.fit(model)
@@ -267,7 +271,7 @@ def test_deepspeed_config(tmpdir, deepspeed_zero_config):
         default_root_dir=tmpdir,
         gpus=1,
         fast_dev_run=True,
-        precision=16
+        precision=16,
     )
     trainer.fit(model)
@@ -278,9 +282,7 @@ def test_deepspeed_config(tmpdir, deepspeed_zero_config):

 @RunIf(min_gpus=1, deepspeed=True)
 def test_deepspeed_custom_precision_params(tmpdir):
-    """
-    Ensure if we modify the FP16 parameters via the DeepSpeedPlugin, the deepspeed config contains these changes.
-    """
+    """Ensure if we modify the FP16 parameters via the DeepSpeedPlugin, the deepspeed config contains these changes."""

     class TestModel(BoringModel):
@@ -293,24 +295,15 @@ def test_deepspeed_custom_precision_params(tmpdir):
             raise SystemExit()

     model = TestModel()
-    trainer = Trainer(
-        plugins=[
-            DeepSpeedPlugin(
-                loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10
-            )
-        ],
-        precision=16,
-        gpus=1
-    )
+    ds = DeepSpeedPlugin(loss_scale=10, initial_scale_power=10, loss_scale_window=10, hysteresis=10, min_loss_scale=10)
+    trainer = Trainer(default_root_dir=tmpdir, plugins=[ds], precision=16, gpus=1)
     with pytest.raises(SystemExit):
         trainer.fit(model)


 @RunIf(min_gpus=1, deepspeed=True)
 def test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config):
-    """
-    Ensure if we use a config and turn off cpu_offload, that this is set to False within the config.
-    """
+    """Ensure if we use a config and turn off cpu_offload, that this is set to False within the config."""

     deepspeed_zero_config['zero_optimization']['cpu_offload'] = False
@@ -321,7 +314,12 @@ def test_deepspeed_assert_config_zero_offload_disabled(tmpdir, deepspeed_zero_config):
             raise SystemExit()

     model = TestModel()
-    trainer = Trainer(plugins=[DeepSpeedPlugin(config=deepspeed_zero_config)], precision=16, gpus=1)
+    trainer = Trainer(
+        plugins=[DeepSpeedPlugin(config=deepspeed_zero_config)],
+        precision=16,
+        gpus=1,
+        default_root_dir=tmpdir,
+    )
     with pytest.raises(SystemExit):
         trainer.fit(model)
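
For context on the custom-precision test above: the DeepSpeedPlugin keyword arguments correspond to DeepSpeed's documented fp16 config options. A hedged sketch of the config section the test expects the plugin to produce (key names are DeepSpeed's; the exact structure emitted by the plugin, and the assertion below, are assumptions rather than the test's verbatim code):

# Illustrative only: the fp16 section the loss-scale arguments should map onto.
expected_fp16 = {
    "enabled": True,
    "loss_scale": 10,
    "initial_scale_power": 10,
    "loss_scale_window": 10,
    "hysteresis": 10,
    "min_loss_scale": 10,
}
# hypothetical check, using the attribute chain seen earlier in this file
assert trainer.accelerator.training_type_plugin.config["fp16"] == expected_fp16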

View File

@@ -38,6 +38,7 @@ def test_rpc_choice(tmpdir, ddp_backend, gpus, num_processes):
     model = BoringModel()
     trainer = Trainer(
+        default_root_dir=str(tmpdir),
         fast_dev_run=True,
         gpus=gpus,
         num_processes=num_processes,
@@ -76,7 +77,8 @@ def test_rpc_function_calls_ddp(tmpdir):
         max_epochs=max_epochs,
         gpus=2,
         distributed_backend='ddp',
-        plugins=[plugin]
+        plugins=[plugin],
+        default_root_dir=tmpdir,
     )
     trainer.fit(model)
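
One detail in the first hunk of this file: default_root_dir is passed as str(tmpdir). pytest's tmpdir fixture yields a py.path.local object rather than a plain str, and casting is the conservative choice for any code path that expects a string path. A quick illustration (hypothetical test, not from this diff):

# pytest's `tmpdir` is a py.path.local, not a str.
def test_tmpdir_type(tmpdir):  # hypothetical
    assert not isinstance(tmpdir, str)
    assert isinstance(str(tmpdir), str)  # e.g. '/tmp/pytest-of-user/pytest-0/test_tmpdir_type0'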

View File

@@ -75,7 +75,7 @@ def training_step_scalar_with_step_end(tmpdir):
     model.training_step_end = model.training_step_end__scalar
     model.val_dataloader = None

-    trainer = Trainer(fast_dev_run=True, weights_summary=None)
+    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, weights_summary=None)
     trainer.fit(model)

     # make sure correct steps were called
@@ -165,7 +165,11 @@ def test_train_step_epoch_end_scalar(tmpdir):
     model.training_epoch_end = model.training_epoch_end__scalar
     model.val_dataloader = None

-    trainer = Trainer(max_epochs=1, weights_summary=None)
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        weights_summary=None,
+    )
     trainer.fit(model)

     # make sure correct steps were called
@@ -222,13 +226,13 @@ def test_dpp_reduce_mean_pbar(tmpdir):

     trainer = Trainer(
         max_epochs=1,
-        default_root_dir=os.getcwd(),
+        default_root_dir=tmpdir,
         limit_train_batches=10,
         limit_test_batches=2,
         limit_val_batches=2,
         accelerator=distributed_backend,
         gpus=2,
-        precision=32
+        precision=32,
     )

     trainer.fit(model)

View File

@@ -478,6 +478,7 @@ def test_auto_add_dataloader_idx(tmpdir, add_dataloader_idx):
     """ test that auto_add_dataloader_idx argument works """

     class TestModel(BoringModel):
+
         def val_dataloader(self):
             dl = super().val_dataloader()
             return [dl, dl]
@@ -495,10 +496,7 @@ def test_auto_add_dataloader_idx(tmpdir, add_dataloader_idx):
     model = TestModel()
     model.validation_epoch_end = None

-    trainer = Trainer(
-        default_root_dir=tmpdir,
-        max_steps=5
-    )
+    trainer = Trainer(default_root_dir=tmpdir, max_steps=5)
     trainer.fit(model)

     logged = trainer.logged_metrics

View File

@@ -411,7 +411,7 @@ def test_different_batch_types_for_sizing(tmpdir):
     assert generated == expected


-def test_validation_step_with_string_data_logging():
+def test_validation_step_with_string_data_logging(tmpdir):

     class TestModel(BoringModel):
@@ -436,7 +436,7 @@ def test_validation_step_with_string_data_logging(tmpdir):
     # model
     model = TestModel()
     trainer = Trainer(
-        default_root_dir=os.getcwd(),
+        default_root_dir=tmpdir,
         limit_train_batches=1,
         limit_val_batches=1,
         max_epochs=1,
@@ -491,7 +491,7 @@ def test_nested_datasouce_batch(tmpdir):
     # model
     model = TestModel()
     trainer = Trainer(
-        default_root_dir=os.getcwd(),
+        default_root_dir=tmpdir,
         limit_train_batches=1,
         limit_val_batches=1,
         max_epochs=1,

View File

@@ -472,7 +472,11 @@ def test_resume_from_checkpoint_epoch_restored(monkeypatch, tmpdir, tmpdir_serve
     state = pl_load(ckpt)

     # Resume training
-    new_trainer = Trainer(resume_from_checkpoint=ckpt, max_epochs=2)
+    new_trainer = Trainer(
+        default_root_dir=tmpdir,
+        resume_from_checkpoint=ckpt,
+        max_epochs=2,
+    )
     new_trainer.fit(next_model)
     assert state["global_step"] + next_model.num_batches_seen == trainer.num_training_batches * trainer.max_epochs
     assert next_model.num_on_load_checkpoint_called == 1
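
The first assertion in this hunk encodes the resume bookkeeping: the global step stored in the checkpoint plus the batches seen after resuming must add up to the total batches over all epochs. A worked example with assumed numbers:

# Worked example (numbers assumed for illustration): with 64 training batches per
# epoch and a checkpoint saved after epoch 1, state["global_step"] == 64, the
# resumed run (max_epochs=2) processes the remaining 64 batches, and
# 64 + 64 == trainer.num_training_batches * trainer.max_epochs == 64 * 2.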

View File

@@ -34,7 +34,12 @@ def test_num_training_batches(tmpdir):
     """
     # when we have fewer batches in the dataloader we should use those instead of the limit
     model = EvalModelTemplate()
-    trainer = Trainer(limit_val_batches=100, limit_train_batches=100, max_epochs=1)
+    trainer = Trainer(
+        limit_val_batches=100,
+        limit_train_batches=100,
+        max_epochs=1,
+        default_root_dir=tmpdir,
+    )
     trainer.fit(model)

     assert len(model.train_dataloader()) == 10
@@ -45,7 +50,12 @@ def test_num_training_batches(tmpdir):
     # when we have more batches in the dataloader we should limit them
     model = EvalModelTemplate()
-    trainer = Trainer(limit_val_batches=7, limit_train_batches=7, max_epochs=1)
+    trainer = Trainer(
+        limit_val_batches=7,
+        limit_train_batches=7,
+        max_epochs=1,
+        default_root_dir=tmpdir,
+    )
     trainer.fit(model)

     assert len(model.train_dataloader()) == 10
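
A follow-up thought, not part of this commit: an autouse fixture in conftest.py could make the temporary directory the working directory for every test, so even a forgotten default_root_dir would still resolve there, since Trainer falls back to os.getcwd() when the argument is omitted. A hypothetical sketch:

# Hypothetical conftest.py guard (not in this commit): run each test with tmpdir
# as CWD so a Trainer constructed without default_root_dir still writes there.
import os

import pytest


@pytest.fixture(autouse=True)
def run_in_tmpdir(tmpdir):
    old_cwd = os.getcwd()
    os.chdir(str(tmpdir))
    yield
    os.chdir(old_cwd)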