Update `tests/tuner/*.py` to use `devices` instead of `gpus` or `ipus` (#11520)
Co-authored-by: Jirka <jirka.borovec@seznam.cz>
parent d132a9c3b7
commit a34930b772
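Summary of the substitution: the tuner tests drop the device-specific `gpus=N` / `ipus=N` Trainer arguments in favour of the accelerator-agnostic pair `accelerator=...` plus `devices=N`. A minimal illustration of the pattern applied throughout the hunks below (constructing the Trainer with `accelerator="gpu"` assumes a CUDA device is available):

from pytorch_lightning import Trainer

# Before: device count baked into a device-specific flag.
# trainer = Trainer(gpus=1)                      # or ipus=8
# After: accelerator type and device count passed separately.
trainer = Trainer(accelerator="gpu", devices=1)  # or accelerator="ipu", devices=8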
@@ -22,6 +22,7 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.runif import RunIf


+# TODO: add pytest.deprecated_call @daniellepintz
 @RunIf(min_gpus=2)
 @pytest.mark.parametrize(
     ["auto_select_gpus", "gpus", "expected_error"],
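The TODO added above refers to `pytest.deprecated_call`, which asserts that a block of code emits a deprecation warning. A self-contained sketch of that mechanism (the `legacy_api` helper is hypothetical, used only to keep the example runnable without a GPU; it is not part of Lightning):

import warnings

import pytest


def legacy_api(gpus=None):
    # Hypothetical stand-in for an argument that is being phased out.
    if gpus is not None:
        warnings.warn("`gpus` is deprecated, use `accelerator` and `devices`", DeprecationWarning)


def test_legacy_api_warns():
    # The block passes only if a DeprecationWarning (or PendingDeprecationWarning) is raised inside it.
    with pytest.deprecated_call():
        legacy_api(gpus=2)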
@@ -127,7 +127,9 @@ def test_auto_scale_batch_size_trainer_arg(tmpdir, scale_arg):
     tutils.reset_seed()
     before_batch_size = 2
     model = BatchSizeModel(batch_size=before_batch_size)
-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=scale_arg, gpus=1)
+    trainer = Trainer(
+        default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=scale_arg, accelerator="gpu", devices=1
+    )
     trainer.tune(model)
     after_batch_size = model.batch_size
     assert before_batch_size != after_batch_size, "Batch size was not altered after running auto scaling of batch size"
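For context on what these tests exercise: in this release line, `trainer.tune(model)` with `auto_scale_batch_size` runs the batch-size finder, which grows the batch size trial by trial and writes the final value back onto the model's `batch_size` attribute (or `hparams.batch_size`, as the next hunk checks). A CPU-only sketch under that assumption; `TinyModel` and `RandomDataset` are illustrative stand-ins, not Lightning test helpers:

import torch
from torch.utils.data import DataLoader, Dataset

from pytorch_lightning import LightningModule, Trainer


class RandomDataset(Dataset):
    def __len__(self):
        return 256

    def __getitem__(self, idx):
        return torch.randn(32)


class TinyModel(LightningModule):
    def __init__(self, batch_size=2):
        super().__init__()
        self.batch_size = batch_size  # the attribute the batch-size finder tunes
        self.layer = torch.nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        return self.layer(batch).sum()

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

    def train_dataloader(self):
        return DataLoader(RandomDataset(), batch_size=self.batch_size)


model = TinyModel()
trainer = Trainer(max_epochs=1, auto_scale_batch_size=True)  # CPU run; no accelerator/devices needed
trainer.tune(model)
print(model.batch_size)  # updated in place by the batch-size finder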
@@ -169,7 +171,7 @@ def test_auto_scale_batch_size_set_model_attribute(tmpdir, use_hparams):
     model_class = HparamsBatchSizeModel if use_hparams else BatchSizeModel
     model = model_class(**hparams)

-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=True, gpus=1)
+    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, auto_scale_batch_size=True, accelerator="gpu", devices=1)
     trainer.tune(model, datamodule_fit)
     after_batch_size = model.hparams.batch_size if use_hparams else model.batch_size
     assert trainer.datamodule == datamodule_fit
@@ -239,7 +241,9 @@ def test_error_on_dataloader_passed_to_fit(tmpdir):
 def test_auto_scale_batch_size_with_amp(tmpdir):
     before_batch_size = 2
     model = BatchSizeModel(batch_size=before_batch_size)
-    trainer = Trainer(default_root_dir=tmpdir, max_steps=1, auto_scale_batch_size=True, gpus=1, precision=16)
+    trainer = Trainer(
+        default_root_dir=tmpdir, max_steps=1, auto_scale_batch_size=True, accelerator="gpu", devices=1, precision=16
+    )
     trainer.tune(model)
     after_batch_size = model.batch_size
     assert trainer.amp_backend == AMPType.NATIVE
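The last test also pins down the AMP backend: with `precision=16` and no explicit `amp_backend`, this release line selects native (`torch.cuda.amp`) mixed precision. A short sketch of that check outside the test suite (requires a CUDA device to construct the Trainer as configured):

from pytorch_lightning import Trainer
from pytorch_lightning.utilities import AMPType

trainer = Trainer(accelerator="gpu", devices=1, precision=16, max_steps=1, auto_scale_batch_size=True)
assert trainer.amp_backend == AMPType.NATIVE  # native AMP is the default backend for precision=16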