Remove deprecated code in `pl.utilities.distributed` (#16390)

Carlos Mocholí 2023-01-17 12:01:33 +01:00 committed by Luca Antiga
parent 886ad49a55
commit 47c38c75ba
3 changed files with 3 additions and 142 deletions

CHANGELOG.md

@@ -64,6 +64,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
  * env_prefix
  * env_parse
- Removed the deprecated code in `pl.utilities.distributed` ([#16390](https://github.com/Lightning-AI/lightning/pull/16390))
- Marked the `forward_module` argument as required ([#16386](https://github.com/Lightning-AI/lightning/pull/16386))
  * Removed the deprecated `pl_module` argument from the distributed module wrappers
  * Removed the deprecated `pytorch_lightning.overrides.base.unwrap_lightning_module` function
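
The removed helpers were thin deprecation shims that forwarded to internal `lightning_fabric` functions, so downstream code that still calls them can copy the small implementation or use `torch.distributed` directly, as the warnings suggested. A minimal sketch of such a local replacement (the helper names and the simplified availability check are illustrative assumptions, not part of this commit):

import torch
import torch.distributed


def distributed_available() -> bool:
    # Local stand-in for the removed `distributed_available` shim.
    # Simplified assumption: only the torch.distributed state is checked here
    # (the removed wrapper also accounted for TPU processes).
    return torch.distributed.is_available() and torch.distributed.is_initialized()


def gather_all_tensors(tensor: torch.Tensor) -> list:
    # Local stand-in for the removed `gather_all_tensors` shim, assuming every
    # rank contributes a tensor of identical shape and dtype.
    gathered = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())]
    torch.distributed.all_gather(gathered, tensor)
    return gathered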

pytorch_lightning/utilities/distributed.py

@@ -17,16 +17,8 @@ from typing import Any, Callable, Dict, Optional
import torch
from torch.nn.parallel.distributed import DistributedDataParallel
from lightning_fabric.utilities.distributed import _all_gather_ddp_if_available as new_all_gather_ddp_if_available
from lightning_fabric.utilities.distributed import _distributed_available as new_distributed_available
from lightning_fabric.utilities.distributed import _gather_all_tensors as new_gather_all_tensors
from lightning_fabric.utilities.distributed import (
    _get_default_process_group_backend_for_device as new_get_default_process_group_backend_for_device,
)
from lightning_fabric.utilities.distributed import _init_dist_connection as new_init_dist_connection
from lightning_fabric.utilities.distributed import _sync_ddp as new_sync_ddp
from lightning_fabric.utilities.distributed import _sync_ddp_if_available as new_sync_ddp_if_available
from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_deprecation, rank_zero_info
from pytorch_lightning.utilities.rank_zero import rank_zero_debug, rank_zero_info


def register_ddp_comm_hook(
@@ -150,80 +142,3 @@ def _collect_states_on_rank_zero(state: Dict[str, Any]) -> Dict[int, Any]:
    if not new_distributed_available():
        return {0: state}
    return {rank: _broadcast_object_list(state, rank) for rank in range(torch.distributed.get_world_size())}


def all_gather_ddp_if_available(*args: Any, **kwargs: Any) -> Any:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.all_gather_ddp_if_available` has been deprecated in v1.8.0 and will"
        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
    )
    return new_all_gather_ddp_if_available(*args, **kwargs)


def distributed_available() -> Any:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.distributed_available` has been deprecated in v1.8.0 and will"
        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
    )
    return new_distributed_available()


def gather_all_tensors(*args: Any, **kwargs: Any) -> Any:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.gather_all_tensors` has been deprecated in v1.8.0 and will"
        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
    )
    return new_gather_all_tensors(*args, **kwargs)


def get_default_process_group_backend_for_device(*args: Any, **kwargs: Any) -> Any:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.get_default_process_group_backend_for_device` has been deprecated"
        " in v1.8.0 and will be removed in v2.0.0. This function is internal but you can copy over its implementation."
        " Use `lightning_fabric.utilities.distributed.get_default_process_group_backend_for_device` instead."
    )
    return new_get_default_process_group_backend_for_device(*args, **kwargs)


def init_dist_connection(*args: Any, **kwargs: Any) -> Any:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.init_dist_connection` has been deprecated in v1.8.0 and will"
        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
    )
    return new_init_dist_connection(*args, **kwargs)


def sync_ddp(*args: Any, **kwargs: Any) -> Any:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.sync_ddp` has been deprecated in v1.8.0 and will"
        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
    )
    return new_sync_ddp(*args, **kwargs)


def sync_ddp_if_available(*args: Any, **kwargs: Any) -> Any:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.sync_ddp_if_available` has been deprecated in v1.8.0 and will"
        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
    )
    return new_sync_ddp_if_available(*args, **kwargs)


def tpu_distributed() -> bool:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.tpu_distributed` has been deprecated in v1.8.0 and will"
        " be removed in v2.0.0. This function is internal but you can copy over its implementation."
    )
    from lightning_fabric.accelerators.tpu import _tpu_distributed

    return _tpu_distributed()


def rank_zero_only(*args: Any, **kwargs: Any) -> Any:
    rank_zero_deprecation(
        "`pytorch_lightning.utilities.distributed.rank_zero_only` has been deprecated in v1.8.1 and will"
        " be removed in v2.0.0. You can import it from `pytorch_lightning.utilities` instead."
    )
    from pytorch_lightning.utilities.rank_zero import rank_zero_only as new_rank_zero_only

    return new_rank_zero_only(*args, **kwargs)
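
The last two shims name their replacements directly in the deprecation messages: `rank_zero_only` moved to `pytorch_lightning.utilities`, and `tpu_distributed` forwarded to the internal `lightning_fabric.accelerators.tpu._tpu_distributed`. A minimal before/after sketch of the import change for downstream code (illustrative only, not part of this diff):

# Before this commit (emitted a deprecation warning since v1.8.1):
#   from pytorch_lightning.utilities.distributed import rank_zero_only
# After this commit:
from pytorch_lightning.utilities import rank_zero_only


@rank_zero_only
def log_once(message: str) -> None:
    # Executes only on the global rank-zero process; other ranks return None.
    print(message)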

tests: deprecated-API removal tests

@@ -22,7 +22,6 @@ from torch.utils.data import DataLoader
from pytorch_lightning.accelerators.cpu import CPUAccelerator
from pytorch_lightning.core.mixins.device_dtype_mixin import DeviceDtypeModuleMixin
from pytorch_lightning.demos.boring_classes import RandomDataset
from pytorch_lightning.plugins.environments import LightningEnvironment
from pytorch_lightning.strategies.utils import on_colab_kaggle
from pytorch_lightning.utilities.apply_func import (
    apply_to_collection,
@@ -44,16 +43,6 @@ from pytorch_lightning.utilities.device_parser import (
    parse_hpus,
    parse_tpu_cores,
)
from pytorch_lightning.utilities.distributed import (
    all_gather_ddp_if_available,
    distributed_available,
    gather_all_tensors,
    get_default_process_group_backend_for_device,
    init_dist_connection,
    sync_ddp,
    sync_ddp_if_available,
    tpu_distributed,
)
from pytorch_lightning.utilities.optimizer import optimizer_to_device, optimizers_to_device
from pytorch_lightning.utilities.seed import pl_worker_init_function, reset_seed, seed_everything
from pytorch_lightning.utilities.xla_device import inner_f, pl_multi_process, XLADeviceUtils
@@ -88,11 +77,6 @@ def test_v1_10_deprecated_xla_device_utilities():
    with pytest.deprecated_call(match="xla_device.XLADeviceUtils.tpu_device_exists` has been deprecated in v1.8.0"):
        XLADeviceUtils.tpu_device_exists()

    from pytorch_lightning.utilities.distributed import tpu_distributed

    with pytest.deprecated_call(match="tpu_distributed` has been deprecated in v1.8.0"):
        tpu_distributed()


def test_v1_10_deprecated_apply_func_utilities():
    with pytest.deprecated_call(match="apply_func.apply_to_collection` has been deprecated in v1.8.0"):
@@ -162,39 +146,6 @@
        parse_tpu_cores(None)


def test_v1_10_deprecated_distributed_utilities():
    with pytest.deprecated_call(match="distributed.all_gather_ddp_if_available` has been deprecated in v1.8.0"):
        all_gather_ddp_if_available(torch.tensor(1))
    with pytest.deprecated_call(match="distributed.distributed_available` has been deprecated in v1.8.0"):
        distributed_available()
    with mock.patch("torch.distributed.get_world_size", return_value=2), mock.patch(
        "torch.distributed.barrier"
    ), mock.patch("torch.distributed.all_gather"):
        with pytest.deprecated_call(match="distributed.gather_all_tensors` has been deprecated in v1.8.0"):
            gather_all_tensors(torch.tensor(1))
    with pytest.deprecated_call(
        match="distributed.get_default_process_group_backend_for_device` has been deprecated in v1.8.0"
    ):
        get_default_process_group_backend_for_device(torch.device("cpu"))
    with mock.patch("torch.distributed.is_initialized", return_value=True):
        with pytest.deprecated_call(match="distributed.init_dist_connection` has been deprecated in v1.8.0"):
            init_dist_connection(LightningEnvironment(), "gloo")
    with pytest.deprecated_call(match="distributed.sync_ddp_if_available` has been deprecated in v1.8.0"):
        sync_ddp_if_available(torch.tensor(1))
    with mock.patch("torch.distributed.barrier"), mock.patch("torch.distributed.all_reduce"):
        with pytest.deprecated_call(match="distributed.sync_ddp` has been deprecated in v1.8.0"):
            sync_ddp(torch.tensor(1))
    with pytest.deprecated_call(match="distributed.tpu_distributed` has been deprecated in v1.8.0"):
        tpu_distributed()


def test_v1_10_deprecated_optimizer_utilities():
    with pytest.deprecated_call(match="optimizer.optimizers_to_device` has been deprecated in v1.8.0"):
        optimizers_to_device([torch.optim.Adam(torch.nn.Linear(1, 1).parameters())], "cpu")
@@ -217,10 +168,3 @@ def test_v1_10_deprecated_seed_utilities():
def test_v1_10_deprecated_accelerator_setup_environment_method():
    with pytest.deprecated_call(match="`Accelerator.setup_environment` has been deprecated in v1.8.0"):
        CPUAccelerator().setup_environment(torch.device("cpu"))


def test_v1_8_1_deprecated_rank_zero_only():
    from pytorch_lightning.utilities.distributed import rank_zero_only

    with pytest.deprecated_call(match="rank_zero_only` has been deprecated in v1.8.1"):
        rank_zero_only(lambda: None)
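
With the shims deleted, a follow-up test along these lines could assert that the old entry points no longer resolve (a hypothetical sketch, not part of this commit; the test name is invented):

import pytest


def test_removed_distributed_helpers_are_gone():
    # After #16390, these names no longer exist in `pytorch_lightning.utilities.distributed`.
    with pytest.raises(ImportError):
        from pytorch_lightning.utilities.distributed import all_gather_ddp_if_available  # noqa: F401
    with pytest.raises(ImportError):
        from pytorch_lightning.utilities.distributed import rank_zero_only  # noqa: F401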