remove deprecation of gpu string parsing behavior (#8770)
parent d063059d03
commit e541803636
@@ -33,6 +33,9 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 ### Changed
 
+- Parsing of the `gpus` Trainer argument has changed: `gpus="n"` (str) no longer selects the GPU index n and instead selects the first n devices. ([#8770](https://github.com/PyTorchLightning/pytorch-lightning/pull/8770))
+
 
 - Replace `iteration_count` and other index attributes in the loops with progress dataclasses ([#8477](https://github.com/PyTorchLightning/pytorch-lightning/pull/8477))
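
For quick reference, a minimal sketch of the behaviour this changelog entry describes, using the helper that the commit touches below. `torch.cuda.device_count` is mocked so the checks can run on a CPU-only machine; the expected values follow the docs table and tests in this diff.

    from unittest import mock

    from pytorch_lightning.utilities import device_parser

    # Pretend four CUDA devices are visible so the parsed ids pass sanitization.
    with mock.patch("torch.cuda.device_count", return_value=4):
        assert device_parser.parse_gpu_ids("3") == [0, 1, 2]  # first 3 devices, same as gpus=3
        assert device_parser.parse_gpu_ids("0") is None       # nothing requested, run on CPU
        assert device_parser.parse_gpu_ids("1, 3") == [1, 3]  # explicit indices still work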
@@ -216,19 +216,15 @@ Note in particular the difference between `gpus=0`, `gpus=[0]` and `gpus="0"`.
 +---------------+-----------+---------------------+---------------------------------+
 | [1, 3]        | list      | [1, 3]              | GPUs 1 and 3                    |
 +---------------+-----------+---------------------+---------------------------------+
-| "0"           | str       | [0]                 | GPU 0                           |
+| "0"           | str       | None                | CPU                             |
 +---------------+-----------+---------------------+---------------------------------+
-| "3"           | str       | [3]                 | GPU 3 (will change in v1.5)     |
+| "3"           | str       | [0, 1, 2]           | first 3 GPUs                    |
 +---------------+-----------+---------------------+---------------------------------+
 | "1, 3"        | str       | [1, 3]              | GPUs 1 and 3                    |
 +---------------+-----------+---------------------+---------------------------------+
 | "-1"          | str       | [0, 1, 2, ...]      | all available GPUs              |
 +---------------+-----------+---------------------+---------------------------------+
 
-.. warning::
-    The behavior for :code:`gpus="3"` (str) will change. Currently it selects the GPU with index 3, but will
-    select the first 3 GPUs from v1.5.
-
 .. note::
 
     When specifying number of gpus as an integer ``gpus=k``, setting the trainer flag
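
For quick orientation, the table rows above map onto `Trainer` calls roughly as follows (a usage sketch only, assuming a machine with at least four visible GPUs):

    from pytorch_lightning import Trainer

    Trainer(gpus=3)          # int: first 3 devices
    Trainer(gpus="3")        # str: now also the first 3 devices
    Trainer(gpus=[1, 3])     # GPUs 1 and 3
    Trainer(gpus="1, 3")     # same selection given as a string
    Trainer(gpus="0")        # no GPUs, run on CPU
    Trainer(gpus=-1)         # all available GPUs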
@@ -11,15 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import operator
 from typing import Any, List, MutableSequence, Optional, Tuple, Union
 
 import torch
 
 from pytorch_lightning.plugins.environments import TorchElasticEnvironment
-from pytorch_lightning.utilities import _TPU_AVAILABLE, rank_zero_deprecation
+from pytorch_lightning.utilities import _TPU_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from pytorch_lightning.utilities.imports import _compare_version
 
 
 def determine_root_gpu_device(gpus: List[int]) -> Optional[int]:
@@ -72,11 +70,7 @@ def parse_gpu_ids(gpus: Optional[Union[int, str, List[int]]]) -> Optional[List[i
     _check_data_type(gpus)
 
     # Handle the case when no gpus are requested
-    if gpus is None or isinstance(gpus, int) and gpus == 0:
-        return None
-
-    if _compare_version("pytorch_lightning", operator.ge, "1.5") and isinstance(gpus, str) and gpus.strip() == "0":
-        # TODO: in v1.5 combine this with the above if statement
+    if gpus is None or isinstance(gpus, int) and gpus == 0 or str(gpus).strip() == "0":
         return None
 
     # We know user requested GPUs therefore if some of the
@@ -134,16 +128,7 @@ def _normalize_parse_gpu_string_input(s: Union[int, str, List[int]]) -> Union[in
         return -1
     if "," in s:
         return [int(x.strip()) for x in s.split(",") if len(x) > 0]
-    num_gpus = int(s.strip())
-    if _compare_version("pytorch_lightning", operator.lt, "1.5"):
-        rank_zero_deprecation(
-            f"Parsing of the Trainer argument gpus='{s}' (string) will change in the future."
-            " In the current version of Lightning, this will select"
-            f" CUDA device with index {num_gpus}, but from v1.5 it will select gpus"
-            f" {list(range(num_gpus))} (same as gpus={s} (int))."
-        )
-        return [num_gpus]
-    return num_gpus
+    return int(s.strip())
 
 
 def _sanitize_gpu_ids(gpus: List[int]) -> List[int]:
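
Taken together, the two hunks above reduce the string handling in the parser to plain int parsing. A sketch of the resulting helper (the leading guard clauses sit outside the hunk and are assumed from context):

    from typing import List, Union

    def _normalize_parse_gpu_string_input(s: Union[int, str, List[int]]) -> Union[int, List[int]]:
        if not isinstance(s, str):
            return s
        if s == "-1":
            return -1  # -1 means "all available GPUs"
        if "," in s:
            # comma-separated indices, e.g. "1, 3" -> [1, 3]
            return [int(x.strip()) for x in s.split(",") if len(x) > 0]
        # a bare numeric string now behaves like the equivalent int: "3" -> 3 -> first 3 devices
        return int(s.strip())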
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Test deprecated functionality which will be removed in v1.5.0"""
-import operator
 import os
 from typing import Any, Dict
 from unittest import mock
@@ -26,8 +25,6 @@ from pytorch_lightning.core.decorators import auto_move_data
 from pytorch_lightning.loggers import WandbLogger
 from pytorch_lightning.plugins import DeepSpeedPlugin
 from pytorch_lightning.profiler import AdvancedProfiler, BaseProfiler, PyTorchProfiler, SimpleProfiler
-from pytorch_lightning.utilities import device_parser
-from pytorch_lightning.utilities.imports import _compare_version
 from tests.deprecated_api import no_deprecated_call
 from tests.helpers import BoringDataModule, BoringModel
 from tests.helpers.runif import RunIf
@@ -204,25 +201,6 @@ def test_v1_5_0_lighting_module_grad_norm(tmpdir):
         model.grad_norm(2)
 
 
-@pytest.mark.xfail(
-    condition=_compare_version("pytorch_lightning", operator.ge, "1.5"), reason="parsing of string will change in v1.5"
-)
-@mock.patch("torch.cuda.device_count", return_value=4)
-def test_v1_5_0_trainer_gpus_str_parsing(*_):
-    # TODO: when removing this, make sure docs in docs/advanced/multi-gpu.rst reflect the new
-    # behavior regarding GPU selection. Ping @awaelchli if unsure.
-    with pytest.deprecated_call(match=r"Parsing of the Trainer argument gpus='3' .* will change."):
-        Trainer(gpus="3", accelerator="ddp_spawn")
-
-    with pytest.deprecated_call(match=r"Parsing of the Trainer argument gpus='3' .* will change."):
-        gpus = device_parser.parse_gpu_ids("3")
-        assert gpus == [3]
-
-    with pytest.deprecated_call(match=r"Parsing of the Trainer argument gpus='0' .* will change."):
-        gpus = device_parser.parse_gpu_ids("0")
-        assert gpus == [0]
-
-
 def test_v1_5_0_datamodule_setter():
     model = BoringModel()
     datamodule = BoringDataModule()
@@ -195,8 +195,8 @@ def test_determine_root_gpu_device(gpus, expected_root_gpu):
         pytest.param([0], [0]),
         pytest.param([1, 3], [1, 3]),
         pytest.param((1, 3), [1, 3]),
-        pytest.param("0", None, marks=pytest.mark.skipif(PL_VERSION_LT_1_5, reason="available from v1.5")),
-        pytest.param("3", [0, 1, 2], marks=pytest.mark.skipif(PL_VERSION_LT_1_5, reason="available from v1.5")),
+        pytest.param("0", None),
+        pytest.param("3", [0, 1, 2]),
         pytest.param("1, 3", [1, 3]),
         pytest.param("2,", [2]),
         pytest.param("-1", list(range(PRETEND_N_OF_GPUS)), id="'-1' - use all gpus"),