Update `tests/models/*.py` to use `devices` instead of `gpus` or `ipus` (#11470)
parent 92a2a6e951
commit 8b4abe4edb
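Every hunk below applies the same mapping: the backend-specific arguments (`gpus`, `num_processes`; `tpu_cores` in test names) give way to the unified `accelerator=.../devices=...` pair. As a minimal sketch of that mapping, assuming the PyTorch Lightning `Trainer` API targeted by this PR:

    from pytorch_lightning import Trainer

    # Before: device selection was spread across backend-specific flags.
    trainer = Trainer(gpus=2)                        # two GPUs
    trainer = Trainer(num_processes=2)               # two CPU processes

    # After: one accelerator/devices pair covers every backend.
    trainer = Trainer(accelerator="gpu", devices=2)  # two GPUs
    trainer = Trainer(accelerator="cpu", devices=2)  # two CPU processes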
@@ -77,13 +77,18 @@ class AMPTestModel(BoringModel):
     ],
 )
 @pytest.mark.parametrize("precision", [16, "bf16"])
-@pytest.mark.parametrize("num_processes", [1, 2])
-def test_amp_cpus(tmpdir, strategy, precision, num_processes):
+@pytest.mark.parametrize("devices", [1, 2])
+def test_amp_cpus(tmpdir, strategy, precision, devices):
     """Make sure combinations of AMP and training types work if supported."""
     tutils.reset_seed()

     trainer = Trainer(
-        default_root_dir=tmpdir, num_processes=num_processes, max_epochs=1, strategy=strategy, precision=precision
+        default_root_dir=tmpdir,
+        accelerator="cpu",
+        devices=devices,
+        max_epochs=1,
+        strategy=strategy,
+        precision=precision,
     )

     model = AMPTestModel()
@@ -97,12 +102,19 @@ def test_amp_cpus(tmpdir, strategy, precision, num_processes):
 @RunIf(min_gpus=2, min_torch="1.10")
 @pytest.mark.parametrize("strategy", [None, "dp", "ddp_spawn"])
 @pytest.mark.parametrize("precision", [16, "bf16"])
-@pytest.mark.parametrize("gpus", [1, 2])
-def test_amp_gpus(tmpdir, strategy, precision, gpus):
+@pytest.mark.parametrize("devices", [1, 2])
+def test_amp_gpus(tmpdir, strategy, precision, devices):
     """Make sure combinations of AMP and training types work if supported."""
     tutils.reset_seed()

-    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, gpus=gpus, strategy=strategy, precision=precision)
+    trainer = Trainer(
+        default_root_dir=tmpdir,
+        max_epochs=1,
+        accelerator="gpu",
+        devices=devices,
+        strategy=strategy,
+        precision=precision,
+    )

     model = AMPTestModel()
     trainer.fit(model)
@@ -141,7 +153,8 @@ def test_amp_gpu_ddp_slurm_managed(tmpdir):
     trainer = Trainer(
         default_root_dir=tmpdir,
         max_epochs=1,
-        gpus=[0],
+        accelerator="gpu",
+        devices=[0],
         strategy="ddp_spawn",
         precision=16,
         callbacks=[checkpoint],
@@ -195,7 +208,9 @@ def test_amp_with_apex(bwd_mock, tmpdir):
     model = CustomModel()
     model.training_epoch_end = None

-    trainer = Trainer(default_root_dir=tmpdir, max_steps=5, precision=16, amp_backend="apex", gpus=1)
+    trainer = Trainer(
+        default_root_dir=tmpdir, max_steps=5, precision=16, amp_backend="apex", accelerator="gpu", devices=1
+    )
     assert str(trainer.amp_backend) == "AMPType.APEX"
     trainer.fit(model)
     assert trainer.state.finished, f"Training failed with {trainer.state}"
@@ -136,8 +136,8 @@ def test_multi_cpu_model_ddp(tmpdir):
         max_epochs=1,
         limit_train_batches=0.4,
         limit_val_batches=0.2,
-        gpus=None,
-        num_processes=2,
+        accelerator="cpu",
+        devices=2,
         strategy="ddp_spawn",
     )

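The CPU hunk above is the one place where two old arguments collapse into the new pair: `gpus=None, num_processes=2` becomes `accelerator="cpu", devices=2`, i.e. `devices` counts CPU processes once the accelerator is `"cpu"`. A minimal sketch of the equivalence (`strategy="ddp_spawn"` is unchanged):

    from pytorch_lightning import Trainer

    # Deprecated spelling: process count via num_processes, gpus=None implied CPU.
    # trainer = Trainer(gpus=None, num_processes=2, strategy="ddp_spawn")

    # New spelling: accelerator picks the backend, devices picks the count.
    trainer = Trainer(accelerator="cpu", devices=2, strategy="ddp_spawn")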
@@ -47,7 +47,8 @@ def test_multi_gpu_none_backend(tmpdir):
         max_epochs=1,
         limit_train_batches=0.2,
         limit_val_batches=0.2,
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
     )

     dm = ClassifDataModule()
@@ -56,8 +57,8 @@ def test_multi_gpu_none_backend(tmpdir):


 @RunIf(min_gpus=2)
-@pytest.mark.parametrize("gpus", [1, [0], [1]])
-def test_single_gpu_model(tmpdir, gpus):
+@pytest.mark.parametrize("devices", [1, [0], [1]])
+def test_single_gpu_model(tmpdir, devices):
     """Make sure single GPU works (DP mode)."""
     trainer_options = dict(
         default_root_dir=tmpdir,
@@ -65,7 +66,8 @@ def test_single_gpu_model(tmpdir, gpus):
         max_epochs=1,
         limit_train_batches=0.1,
         limit_val_batches=0.1,
-        gpus=gpus,
+        accelerator="gpu",
+        devices=devices,
     )

     model = BoringModel()
@@ -94,7 +96,7 @@ def mocked_device_count_0(monkeypatch):

 # Asking for a gpu when non are available will result in a MisconfigurationException
 @pytest.mark.parametrize(
-    ["gpus", "expected_root_gpu", "strategy"],
+    ["devices", "expected_root_gpu", "strategy"],
     [
         (1, None, "ddp"),
         (3, None, "ddp"),
@@ -105,13 +107,13 @@ def mocked_device_count_0(monkeypatch):
         ("-1", None, "ddp"),
     ],
 )
-def test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, strategy):
+def test_root_gpu_property_0_raising(mocked_device_count_0, devices, expected_root_gpu, strategy):
     with pytest.raises(MisconfigurationException):
-        Trainer(gpus=gpus, strategy=strategy)
+        Trainer(accelerator="gpu", devices=devices, strategy=strategy)


 @pytest.mark.parametrize(
-    ["gpus", "expected_root_gpu"],
+    ["devices", "expected_root_gpu"],
     [
         pytest.param(None, None, id="No gpus, expect gpu root device to be None"),
         pytest.param([0], 0, id="Oth gpu, expect gpu root device to be 0."),
@@ -120,12 +122,12 @@ def test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, strategy):
         pytest.param([1, 2], 1, id="[1, 2] gpus, expect gpu root device to be 1."),
     ],
 )
-def test_determine_root_gpu_device(gpus, expected_root_gpu):
-    assert device_parser.determine_root_gpu_device(gpus) == expected_root_gpu
+def test_determine_root_gpu_device(devices, expected_root_gpu):
+    assert device_parser.determine_root_gpu_device(devices) == expected_root_gpu


 @pytest.mark.parametrize(
-    ["gpus", "expected_gpu_ids"],
+    ["devices", "expected_gpu_ids"],
     [
         (None, None),
         (0, None),
@@ -143,20 +145,20 @@ def test_determine_root_gpu_device(gpus, expected_root_gpu):
         pytest.param("-1", list(range(PRETEND_N_OF_GPUS)), id="'-1' - use all gpus"),
     ],
 )
-def test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):
-    assert device_parser.parse_gpu_ids(gpus) == expected_gpu_ids
+def test_parse_gpu_ids(mocked_device_count, devices, expected_gpu_ids):
+    assert device_parser.parse_gpu_ids(devices) == expected_gpu_ids


-@pytest.mark.parametrize("gpus", [0.1, -2, False, [-1], [None], ["0"], [0, 0]])
-def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):
+@pytest.mark.parametrize("devices", [0.1, -2, False, [-1], [None], ["0"], [0, 0]])
+def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, devices):
     with pytest.raises(MisconfigurationException):
-        device_parser.parse_gpu_ids(gpus)
+        device_parser.parse_gpu_ids(devices)


-@pytest.mark.parametrize("gpus", [[1, 2, 19], -1, "-1"])
-def test_parse_gpu_fail_on_non_existent_id(mocked_device_count_0, gpus):
+@pytest.mark.parametrize("devices", [[1, 2, 19], -1, "-1"])
+def test_parse_gpu_fail_on_non_existent_id(mocked_device_count_0, devices):
     with pytest.raises(MisconfigurationException):
-        device_parser.parse_gpu_ids(gpus)
+        device_parser.parse_gpu_ids(devices)


 def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):
@@ -164,10 +166,10 @@ def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):
         device_parser.parse_gpu_ids([1, 2, 19])


-@pytest.mark.parametrize("gpus", [-1, "-1"])
-def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_count_0, gpus):
+@pytest.mark.parametrize("devices", [-1, "-1"])
+def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_count_0, devices):
     with pytest.raises(MisconfigurationException):
-        device_parser.parse_gpu_ids(gpus)
+        device_parser.parse_gpu_ids(devices)


 @mock.patch.dict(
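The parametrizations above double as a specification of `device_parser` semantics. A short sketch of the expectations they encode, assuming the mocked device count used by these tests (`PRETEND_N_OF_GPUS`, defined elsewhere in the file) is in effect:

    from pytorch_lightning.utilities import device_parser

    # Encoded in the parametrize tables above:
    assert device_parser.parse_gpu_ids(None) is None   # no request -> None
    assert device_parser.parse_gpu_ids(0) is None      # 0 means "no GPUs"
    # -1 or "-1" selects every visible GPU (and raises if none are visible):
    # device_parser.parse_gpu_ids("-1") == list(range(PRETEND_N_OF_GPUS))
    assert device_parser.determine_root_gpu_device([1, 2]) == 1  # root = first id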
@@ -198,7 +200,7 @@ def test_torchelastic_gpu_parsing(mocked_device_count, mocked_is_available, gpus

 @RunIf(min_gpus=1)
 def test_single_gpu_batch_parse():
-    trainer = Trainer(gpus=1)
+    trainer = Trainer(accelerator="gpu", devices=1)

     # non-transferrable types
     primitive_objects = [None, {}, [], 1.0, "x", [None, 2], {"x": (1, 2), "y": None}]
@@ -203,7 +203,8 @@ def test_transfer_batch_hook_ddp(tmpdir):
         max_epochs=1,
         enable_model_summary=False,
         strategy="ddp",
-        gpus=2,
+        accelerator="gpu",
+        devices=2,
     )
     trainer.fit(model)

@@ -96,7 +96,8 @@ def test_model_saves_on_multi_gpu(tmpdir):
         max_epochs=1,
         limit_train_batches=10,
         limit_val_batches=10,
-        gpus=[0, 1],
+        accelerator="gpu",
+        devices=[0, 1],
         strategy="ddp_spawn",
         enable_progress_bar=False,
     )
@@ -399,7 +399,8 @@ def test_running_test_pretrained_model_distrib_dp(tmpdir):
         limit_val_batches=5,
         callbacks=[checkpoint],
         logger=logger,
-        gpus=[0, 1],
+        accelerator="gpu",
+        devices=[0, 1],
         strategy="dp",
         default_root_dir=tmpdir,
     )
@@ -445,7 +446,8 @@ def test_running_test_pretrained_model_distrib_ddp_spawn(tmpdir):
         limit_val_batches=2,
         callbacks=[checkpoint],
         logger=logger,
-        gpus=[0, 1],
+        accelerator="gpu",
+        devices=[0, 1],
         strategy="ddp_spawn",
         default_root_dir=tmpdir,
     )
@@ -564,7 +566,7 @@ def test_dp_resume(tmpdir):
     model = CustomClassificationModelDP(lr=0.1)
     dm = ClassifDataModule()

-    trainer_options = dict(max_epochs=1, gpus=2, strategy="dp", default_root_dir=tmpdir)
+    trainer_options = dict(max_epochs=1, accelerator="gpu", devices=2, strategy="dp", default_root_dir=tmpdir)

     # get logger
     logger = tutils.get_default_logger(tmpdir)
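The dict-style configuration above changes the same way as the keyword form; presumably the options are unpacked later via `Trainer(**trainer_options)` as in the surrounding tests (an assumption here, since that call sits outside the hunk):

    from pytorch_lightning import Trainer

    trainer_options = dict(max_epochs=1, accelerator="gpu", devices=2, strategy="dp")
    trainer = Trainer(**trainer_options)  # the options dict is unpacked into the constructor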
@@ -50,7 +50,7 @@ class SerialLoaderBoringModel(BoringModel):

 @RunIf(tpu=True)
 @pl_multi_process_test
-def test_model_tpu_cores_1(tmpdir):
+def test_model_tpu_devices_1(tmpdir):
     """Make sure model trains on TPU."""
     tutils.reset_seed()
     trainer_options = dict(
@@ -90,7 +90,7 @@ def test_model_tpu_index(tmpdir, tpu_core):

 @RunIf(tpu=True)
 @pl_multi_process_test
-def test_model_tpu_cores_8(tmpdir):
+def test_model_tpu_devices_8(tmpdir):
     """Make sure model trains on TPU."""
     tutils.reset_seed()
     trainer_options = dict(
@@ -110,7 +110,7 @@ def test_model_tpu_cores_8(tmpdir):

 @RunIf(tpu=True)
 @pl_multi_process_test
-def test_model_16bit_tpu_cores_1(tmpdir):
+def test_model_16bit_tpu_devices_1(tmpdir):
     """Make sure model trains on TPU."""
     tutils.reset_seed()
     trainer_options = dict(
@@ -152,7 +152,7 @@ def test_model_16bit_tpu_index(tmpdir, tpu_core):

 @RunIf(tpu=True)
 @pl_multi_process_test
-def test_model_16bit_tpu_cores_8(tmpdir):
+def test_model_16bit_tpu_devices_8(tmpdir):
     """Make sure model trains on TPU."""
     tutils.reset_seed()
     trainer_options = dict(
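The TPU hunks show only the test renames (`tpu_cores_*` to `devices_*`); the elided `trainer_options` bodies presumably switch to the same pair. A hedged sketch of the presumed new spelling, assuming the standard TPU mapping:

    from pytorch_lightning import Trainer

    # Old spelling (deprecated): Trainer(tpu_cores=8)
    # Assumed companion change; the actual option bodies are elided in the hunks above:
    trainer = Trainer(accelerator="tpu", devices=8)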