Remove `AcceleratorConnector.num_gpus` and deprecate `Trainer.num_gpus` (#12384)
parent caed77f155
commit 31c68d107e
@@ -527,7 +527,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 - Deprecated `Trainer.devices` in favor of `Trainer.num_devices` and `Trainer.device_ids` ([#12151](https://github.com/PyTorchLightning/pytorch-lightning/pull/12151))

 - Deprecated `Trainer.root_gpu` in favor of `Trainer.strategy.root_device.index` when GPU is used ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))

+- Deprecated `Trainer.num_gpus` in favor of `Trainer.num_devices` when GPU is used ([#12384](https://github.com/PyTorchLightning/pytorch-lightning/pull/12384))
+
 ### Removed
@@ -720,6 +723,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 - Removed `AcceleratorConnector.root_gpu` property ([#12262](https://github.com/PyTorchLightning/pytorch-lightning/pull/12262))

+- Removed `AcceleratorConnector.num_gpus` property ([#12384](https://github.com/PyTorchLightning/pytorch-lightning/pull/12384))
+
 ### Fixed

 - Fixed an issue where `ModelCheckpoint` could delete older checkpoints when `dirpath` has changed during resumed training ([#12045](https://github.com/PyTorchLightning/pytorch-lightning/pull/12045))
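For user code, the migration these changelog entries describe is mechanical: read the device count from `Trainer.num_devices` and gate it on the accelerator type. A minimal sketch of the replacement (assuming a v1.6-era install; the trainer arguments and variable name are illustrative, while the gating expression is the one this commit itself uses):

    from pytorch_lightning import Trainer
    from pytorch_lightning.accelerators import GPUAccelerator

    trainer = Trainer(accelerator="cpu", devices=1)

    # Before: n_gpus = trainer.num_gpus  (warns in v1.6, removed in v1.8)
    # After: the same value from non-deprecated properties.
    n_gpus = trainer.num_devices if isinstance(trainer.accelerator, GPUAccelerator) else 0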
@@ -815,12 +815,6 @@ class AcceleratorConnector:
             return self.devices
         return 0

-    @property
-    def num_gpus(self) -> int:
-        if isinstance(self.accelerator, GPUAccelerator):
-            return self.devices
-        return 0
-
     @property
     def gpus(self) -> Optional[Union[List[int], str, int]]:
        return self._gpus
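Anything that reached into the connector for this value now has to go through the `Trainer` surface instead. A hedged sketch of a cross-version guard (hypothetical: `_accelerator_connector` is a private attribute, and this is only for code that must straddle the removal):

    from pytorch_lightning import Trainer

    trainer = Trainer(accelerator="cpu", devices=1)
    # On releases before this commit the property exists; afterwards getattr
    # falls back to the default instead of raising AttributeError.
    num_gpus = getattr(trainer._accelerator_connector, "num_gpus", 0)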
@@ -2071,7 +2071,11 @@ class Trainer(

     @property
     def num_gpus(self) -> int:
-        return self._accelerator_connector.num_gpus
+        rank_zero_deprecation(
+            "`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
+            " Please use `Trainer.num_devices` instead."
+        )
+        return self.num_devices if isinstance(self.accelerator, GPUAccelerator) else 0

     @property
     def devices(self) -> int:
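The shim keeps `Trainer.num_gpus` working through v1.7 while warning on every access. A small sketch of what a caller observes (assuming a single-process run, where `rank_zero_deprecation` issues a standard Python warning; the trainer arguments are illustrative):

    import warnings

    from pytorch_lightning import Trainer

    trainer = Trainer(accelerator="cpu", devices=1)
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        n = trainer.num_gpus  # deprecated access path
    assert n == 0  # CPU accelerator, so the fallback branch returns 0
    assert any("`Trainer.num_gpus` was deprecated" in str(w.message) for w in caught)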
@@ -925,3 +925,40 @@ def test_root_gpu_property_0_passing(monkeypatch, gpus, expected_root_gpu, strat
         "Please use `Trainer.strategy.root_device.index` instead."
     ):
         assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
+
+
+@pytest.mark.parametrize(
+    ["gpus", "expected_num_gpus", "strategy"],
+    [
+        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
+        pytest.param(0, 0, None, id="0th gpu, expect 0 gpu to use."),
+        pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
+        pytest.param(-1, 16, "ddp", id="-1 - use all gpus"),
+        pytest.param("-1", 16, "ddp", id="'-1' - use all gpus"),
+        pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)"),
+    ],
+)
+def test_trainer_gpu_parse(monkeypatch, gpus, expected_num_gpus, strategy):
+    monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
+    monkeypatch.setattr(torch.cuda, "device_count", lambda: 16)
+    with pytest.deprecated_call(
+        match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
+        " Please use `Trainer.num_devices` instead."
+    ):
+        assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
+
+
+@pytest.mark.parametrize(
+    ["gpus", "expected_num_gpus", "strategy"],
+    [
+        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
+        pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
+    ],
+)
+def test_trainer_num_gpu_0(monkeypatch, gpus, expected_num_gpus, strategy):
+    monkeypatch.setattr(torch.cuda, "device_count", lambda: 0)
+    with pytest.deprecated_call(
+        match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
+        " Please use `Trainer.num_devices` instead."
+    ):
+        assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
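These new tests lean on `pytest.deprecated_call(match=...)`, which fails unless the body raises a `DeprecationWarning` (or a subclass) whose message matches the given regex; note that the two `match` strings above are joined by implicit string concatenation. A self-contained sketch of the same pattern, with a hypothetical `old_api` standing in for the deprecated property:

    import warnings

    import pytest


    def old_api() -> int:
        # Hypothetical stand-in for a deprecated property.
        warnings.warn("`old_api` was deprecated", DeprecationWarning)
        return 0


    def test_old_api_warns():
        with pytest.deprecated_call(match="`old_api` was deprecated"):
            assert old_api() == 0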
@@ -92,32 +92,6 @@ def mocked_device_count_0(monkeypatch):
     monkeypatch.setattr(torch.cuda, "device_count", device_count)


-@pytest.mark.parametrize(
-    ["gpus", "expected_num_gpus", "strategy"],
-    [
-        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
-        pytest.param(0, 0, None, id="0th gpu, expect 0 gpu to use."),
-        pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
-        pytest.param(-1, PRETEND_N_OF_GPUS, "ddp", id="-1 - use all gpus"),
-        pytest.param("-1", PRETEND_N_OF_GPUS, "ddp", id="'-1' - use all gpus"),
-        pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)"),
-    ],
-)
-def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, strategy):
-    assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
-
-
-@pytest.mark.parametrize(
-    ["gpus", "expected_num_gpus", "strategy"],
-    [
-        pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
-        pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
-    ],
-)
-def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, strategy):
-    assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
-
-
 # Asking for a gpu when none are available will result in a MisconfigurationException
 @pytest.mark.parametrize(
     ["gpus", "expected_root_gpu", "strategy"],
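The deleted tests relied on module-level fixtures (`mocked_device_count`, `mocked_device_count_0`); their replacements patch `torch.cuda` directly with pytest's built-in `monkeypatch` fixture. A minimal sketch of that technique (the test name is illustrative):

    import torch


    def test_fake_gpu_visibility(monkeypatch):
        # Pretend 16 CUDA devices exist without real hardware; the patch is
        # undone automatically when the test finishes.
        monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
        monkeypatch.setattr(torch.cuda, "device_count", lambda: 16)
        assert torch.cuda.is_available() and torch.cuda.device_count() == 16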
@@ -1222,7 +1222,8 @@ def test_trainer_config_accelerator(
     assert isinstance(trainer.strategy, strategy_cls)
     assert strategy_cls.strategy_name == strategy_name
     assert isinstance(trainer.accelerator, accelerator_cls)
-    assert trainer.num_gpus == num_gpus
+    trainer_num_gpus = trainer.num_devices if isinstance(trainer.accelerator, GPUAccelerator) else 0
+    assert trainer_num_gpus == num_gpus


 def test_trainer_subclassing():
@@ -2097,7 +2098,8 @@ def test_trainer_config_strategy(monkeypatch, trainer_kwargs, strategy_cls, stra
     assert isinstance(trainer.strategy, strategy_cls)
     assert strategy_cls.strategy_name == strategy_name
     assert isinstance(trainer.accelerator, accelerator_cls)
-    assert trainer.num_gpus == num_gpus
+    trainer_num_gpus = trainer.num_devices if isinstance(trainer.accelerator, GPUAccelerator) else 0
+    assert trainer_num_gpus == num_gpus
     assert trainer.num_nodes == trainer_kwargs.get("num_nodes", 1)