diff --git a/tests/tuner/test_auto_gpu_select.py b/tests/tuner/test_auto_gpu_select.py
index c8b510f65e..f57a88c787 100644
--- a/tests/tuner/test_auto_gpu_select.py
+++ b/tests/tuner/test_auto_gpu_select.py
@@ -22,6 +22,7 @@
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.runif import RunIf
 
+# TODO: add pytest.deprecated_call @daniellepintz
 @RunIf(min_gpus=2)
 @pytest.mark.parametrize(
     ["auto_select_gpus", "gpus", "expected_error"],
diff --git a/tests/tuner/test_scale_batch_size.py b/tests/tuner/test_scale_batch_size.py
index 31d3dd3dd3..13112ee9f4 100644
--- a/tests/tuner/test_scale_batch_size.py
+++ b/tests/tuner/test_scale_batch_size.py
@@ -127,7 +127,9 @@ def test_auto_scale_batch_size_trainer_arg(tmpdir, scale_arg):
     tutils.reset_seed()
     before_batch_size = 2
     model = BatchSizeModel(batch_size=before_batch_size)
-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=scale_arg, gpus=1)
+    trainer = Trainer(
+        default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=scale_arg, accelerator="gpu", devices=1
+    )
     trainer.tune(model)
     after_batch_size = model.batch_size
     assert before_batch_size != after_batch_size, "Batch size was not altered after running auto scaling of batch size"
@@ -169,7 +171,7 @@ def test_auto_scale_batch_size_set_model_attribute(tmpdir, use_hparams):
     model_class = HparamsBatchSizeModel if use_hparams else BatchSizeModel
     model = model_class(**hparams)
 
-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=True, gpus=1)
+    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=True, accelerator="gpu", devices=1)
     trainer.tune(model, datamodule_fit)
     after_batch_size = model.hparams.batch_size if use_hparams else model.batch_size
     assert trainer.datamodule == datamodule_fit
@@ -239,7 +241,9 @@ def test_error_on_dataloader_passed_to_fit(tmpdir):
 def test_auto_scale_batch_size_with_amp(tmpdir):
     before_batch_size = 2
     model = BatchSizeModel(batch_size=before_batch_size)
-    trainer = Trainer(default_root_dir=tmpdir, max_steps=1, auto_scale_batch_size=True, gpus=1, precision=16)
+    trainer = Trainer(
+        default_root_dir=tmpdir, max_steps=1, auto_scale_batch_size=True, accelerator="gpu", devices=1, precision=16
+    )
     trainer.tune(model)
     after_batch_size = model.batch_size
     assert trainer.amp_backend == AMPType.NATIVE