From d2d284fd6e3e8f53e9a44ab233771850af1e4dab Mon Sep 17 00:00:00 2001
From: Jv Kyle Eclarin
Date: Wed, 12 Jan 2022 00:47:01 -0500
Subject: [PATCH] Update `tests/checkpointing/*.py` to use `devices` instead of `gpus` or `ipus` (#11408)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Carlos Mocholí
---
 tests/checkpointing/test_checkpoint_callback_frequency.py | 3 ++-
 tests/checkpointing/test_legacy_checkpoints.py            | 4 +++-
 tests/checkpointing/test_model_checkpoint.py              | 3 ++-
 tests/checkpointing/test_torch_saving.py                  | 6 ++++--
 4 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/tests/checkpointing/test_checkpoint_callback_frequency.py b/tests/checkpointing/test_checkpoint_callback_frequency.py
index 66ac5648b8..90665a6db4 100644
--- a/tests/checkpointing/test_checkpoint_callback_frequency.py
+++ b/tests/checkpointing/test_checkpoint_callback_frequency.py
@@ -115,7 +115,8 @@ def test_top_k_ddp(save_mock, tmpdir, k, epochs, val_check_interval, expected):
         enable_model_summary=False,
         val_check_interval=val_check_interval,
         strategy="ddp",
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
         limit_train_batches=64,
         limit_val_batches=32,
     )
diff --git a/tests/checkpointing/test_legacy_checkpoints.py b/tests/checkpointing/test_legacy_checkpoints.py
index 63008d6ef0..e26f02603f 100644
--- a/tests/checkpointing/test_legacy_checkpoints.py
+++ b/tests/checkpointing/test_legacy_checkpoints.py
@@ -75,9 +75,11 @@ def test_resume_legacy_checkpoints(tmpdir, pl_version: str):
     model = ClassificationModel()
     es = EarlyStopping(monitor="val_acc", mode="max", min_delta=0.005)
     stop = LimitNbEpochs(1)
+
     trainer = Trainer(
         default_root_dir=str(tmpdir),
-        gpus=int(torch.cuda.is_available()),
+        accelerator="auto",
+        devices=1,
         precision=(16 if torch.cuda.is_available() else 32),
         callbacks=[es, stop],
         max_epochs=21,
diff --git a/tests/checkpointing/test_model_checkpoint.py b/tests/checkpointing/test_model_checkpoint.py
index a332490585..0371d02d4b 100644
--- a/tests/checkpointing/test_model_checkpoint.py
+++ b/tests/checkpointing/test_model_checkpoint.py
@@ -396,7 +396,8 @@ def test_model_checkpoint_no_extraneous_invocations(tmpdir):
     model_checkpoint = ModelCheckpointTestInvocations(monitor="early_stop_on", expected_count=num_epochs, save_top_k=-1)
     trainer = Trainer(
         strategy="ddp_spawn",
-        num_processes=2,
+        accelerator="cpu",
+        devices=2,
         default_root_dir=tmpdir,
         callbacks=[model_checkpoint],
         max_epochs=num_epochs,
diff --git a/tests/checkpointing/test_torch_saving.py b/tests/checkpointing/test_torch_saving.py
index f9634a9dad..5f108a210a 100644
--- a/tests/checkpointing/test_torch_saving.py
+++ b/tests/checkpointing/test_torch_saving.py
@@ -40,7 +40,7 @@ def test_model_torch_save_ddp_cpu(tmpdir):
     model = BoringModel()
     num_epochs = 1
     trainer = Trainer(
-        default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", num_processes=2, logger=False
+        default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="cpu", devices=2, logger=False
     )
     temp_path = os.path.join(tmpdir, "temp.pt")
     trainer.fit(model)
@@ -55,7 +55,9 @@ def test_model_torch_save_ddp_cuda(tmpdir):
     """Test to ensure torch save does not fail for model and trainer using gpu ddp."""
     model = BoringModel()
     num_epochs = 1
-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", gpus=2)
+    trainer = Trainer(
+        default_root_dir=tmpdir, max_epochs=num_epochs, strategy="ddp_spawn", accelerator="gpu", devices=2
+    )
     temp_path = os.path.join(tmpdir, "temp.pt")
     trainer.fit(model)