Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408)

Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
Author: Jv Kyle Eclarin, 2022-01-12 00:47:01 -05:00 (committed by GitHub)
parent 7c93198dd6
commit d2d284fd6e
4 changed files with 11 additions and 5 deletions
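
The pattern applied across all four test files is the same: the accelerator-specific count flags (gpus=N, num_processes=N, ipus=N) are replaced by an explicit accelerator=... choice plus a devices=N count. A minimal before/after sketch of the Trainer call, illustrative only and assuming a pytorch_lightning release recent enough to accept both flag styles:

    from pytorch_lightning import Trainer

    # Old style being phased out: the device count is implied by the flag name.
    # trainer = Trainer(num_processes=2)  # two CPU processes
    # trainer = Trainer(gpus=2)           # two GPUs

    # New style used in this commit: name the accelerator, pass the count via `devices`.
    trainer_cpu = Trainer(accelerator="cpu", devices=2)
    # trainer_gpu = Trainer(accelerator="gpu", devices=2)  # requires two visible GPUs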


@@ -115,7 +115,8 @@ def test_top_k_ddp(save_mock, tmpdir, k, epochs, val_check_interval, expected):
         enable_model_summary=False,
         val_check_interval=val_check_interval,
         strategy="ddp",
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         limit_train_batches=64,
         limit_val_batches=32,
     )


@@ -75,9 +75,11 @@ def test_resume_legacy_checkpoints(tmpdir, pl_version: str):
     model = ClassificationModel()
     es = EarlyStopping(monitor="val_acc", mode="max", min_delta=0.005)
     stop = LimitNbEpochs(1)
     trainer = Trainer(
         default_root_dir=str(tmpdir),
-        gpus=int(torch.cuda.is_available()),
+        accelerator="auto",
+        devices=1,
         precision=(16 if torch.cuda.is_available() else 32),
         callbacks=[es, stop],
         max_epochs=21,

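A note on the hunk above: gpus=int(torch.cuda.is_available()) previously meant "one GPU if CUDA is visible, otherwise a single CPU process". The replacement keeps that behaviour, assuming only CPU and GPU accelerators are in play, because accelerator="auto" resolves to the GPU accelerator when one is available and falls back to CPU otherwise, while devices=1 pins the run to a single device either way:

    from pytorch_lightning import Trainer

    # Old: trainer = Trainer(gpus=int(torch.cuda.is_available()))
    # New: "auto" picks GPU when visible, else CPU; devices=1 keeps it single-device.
    trainer = Trainer(accelerator="auto", devices=1)
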

@@ -396,7 +396,8 @@ def test_model_checkpoint_no_extraneous_invocations(tmpdir):
     model_checkpoint = ModelCheckpointTestInvocations(monitor="early_stop_on", expected_count=num_epochs, save_top_k=-1)
     trainer = Trainer(
         strategy="ddp_spawn",
-        num_processes=2,
+        accelerator="cpu",
+        devices=2,
         default_root_dir=tmpdir,
         callbacks=[model_checkpoint],
         max_epochs=num_epochs,


@@ -40,7 +40,7 @@ def test_model_torch_save_ddp_cpu(tmpdir):
     model = BoringModel()
     num_epochs = 1
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", num_processes=2, logger=False
+        default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="cpu", devices=2, logger=False
     )
     temp_path = os.path.join(tmpdir, "temp.pt")
     trainer.fit(model)
@@ -55,7 +55,9 @@ def test_model_torch_save_ddp_cuda(tmpdir):
     """Test to ensure torch save does not fail for model and trainer using gpu ddp."""
     model = BoringModel()
     num_epochs = 1
-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", gpus=2)
+    trainer = Trainer(
+        default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="gpu", devices=2
+    )
     temp_path = os.path.join(tmpdir, "temp.pt")
     trainer.fit(model)