diff --git a/docs/source/advanced/advanced_gpu.rst b/docs/source/advanced/advanced_gpu.rst
index 4fb5a5ace2..7018e6cf35 100644
--- a/docs/source/advanced/advanced_gpu.rst
+++ b/docs/source/advanced/advanced_gpu.rst
@@ -291,7 +291,7 @@ Below we show an example of running `ZeRO-Offload `_.

 For this, all data pre-loading should be done on the main process inside :meth:`DataModule.__init__`.
-As a result, all tensor-data will get automatically shared when using the :class:`~pytorch_lightning.plugins.DDPSpawnStrategy` training type strategy:
+As a result, all tensor-data will get automatically shared when using the :class:`~pytorch_lightning.strategies.DDPSpawnStrategy` training type strategy:

 .. warning::
diff --git a/docs/source/common/trainer.rst b/docs/source/common/trainer.rst
index 03204fecd2..efa720f5eb 100644
--- a/docs/source/common/trainer.rst
+++ b/docs/source/common/trainer.rst
@@ -1416,7 +1416,7 @@ Supports passing different training strategies with aliases (ddp, ddp_spawn, etc

 .. code-block:: python

-    from pytorch_lightning.plugins import DDPStrategy
+    from pytorch_lightning.strategies import DDPStrategy


     class CustomDDPStrategy(DDPStrategy):
diff --git a/docs/source/extensions/accelerators.rst b/docs/source/extensions/accelerators.rst
index 386c7853e7..be835bc196 100644
--- a/docs/source/extensions/accelerators.rst
+++ b/docs/source/extensions/accelerators.rst
@@ -23,7 +23,8 @@ One to handle differences from the training routine and one to handle different

     from pytorch_lightning import Trainer
     from pytorch_lightning.accelerators import GPUAccelerator
-    from pytorch_lightning.plugins import NativeMixedPrecisionPlugin, DDPStrategy
+    from pytorch_lightning.plugins import NativeMixedPrecisionPlugin
+    from pytorch_lightning.strategies import DDPStrategy

     accelerator = GPUAccelerator()
     precision_plugin = NativeMixedPrecisionPlugin(precision=16, device="cuda")
diff --git a/docs/source/extensions/plugins.rst b/docs/source/extensions/plugins.rst
index 79db10c6ef..14c2118b74 100644
--- a/docs/source/extensions/plugins.rst
+++ b/docs/source/extensions/plugins.rst
@@ -60,7 +60,7 @@ Expert users may choose to extend an existing plugin by overriding its methods .

 .. code-block:: python

-    from pytorch_lightning.plugins import DDPStrategy
+    from pytorch_lightning.strategies import DDPStrategy


     class CustomDDPStrategy(DDPStrategy):
diff --git a/docs/source/guides/speed.rst b/docs/source/guides/speed.rst
index d024d2f327..9b8f9dbe9d 100644
--- a/docs/source/guides/speed.rst
+++ b/docs/source/guides/speed.rst
@@ -86,7 +86,7 @@ This by default comes with a performance hit, and can be disabled in most cases.

 .. code-block:: python

-    from pytorch_lightning.plugins import DDPStrategy
+    from pytorch_lightning.strategies import DDPStrategy

     trainer = pl.Trainer(
         gpus=2,
@@ -95,7 +95,7 @@ This by default comes with a performance hit, and can be disabled in most cases.

 .. code-block:: python

-    from pytorch_lightning.plugins import DDPSpawnStrategy
+    from pytorch_lightning.strategies import DDPSpawnStrategy

     trainer = pl.Trainer(
         gpus=2,
diff --git a/docs/source/starter/lightning_lite.rst b/docs/source/starter/lightning_lite.rst
index 759c913cdc..ee00831125 100644
--- a/docs/source/starter/lightning_lite.rst
+++ b/docs/source/starter/lightning_lite.rst
@@ -389,7 +389,7 @@ Additionally, you can pass in your custom training type strategy by configuring
 .. code-block:: python

-    from pytorch_lightning.plugins import DeepSpeedStrategy
+    from pytorch_lightning.strategies import DeepSpeedStrategy

     lite = Lite(strategy=DeepSpeedStrategy(stage=2), accelerator="gpu", devices=2)
diff --git a/pytorch_lightning/lite/lite.py b/pytorch_lightning/lite/lite.py
index 668dc91421..8161666cf5 100644
--- a/pytorch_lightning/lite/lite.py
+++ b/pytorch_lightning/lite/lite.py
@@ -26,7 +26,8 @@ from torch.utils.data import DataLoader, DistributedSampler, RandomSampler, Sequ

 from pytorch_lightning.accelerators.accelerator import Accelerator
 from pytorch_lightning.lite.wrappers import _LiteDataLoader, _LiteModule, _LiteOptimizer
-from pytorch_lightning.plugins import DDPSpawnStrategy, DeepSpeedStrategy, PLUGIN_INPUT, Strategy, TPUSpawnStrategy
+from pytorch_lightning.plugins import PLUGIN_INPUT
+from pytorch_lightning.strategies import DDPSpawnStrategy, DeepSpeedStrategy, Strategy, TPUSpawnStrategy
 from pytorch_lightning.strategies.training_type_plugin import TBroadcast
 from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
 from pytorch_lightning.utilities import _AcceleratorType, _StrategyType, move_data_to_device
diff --git a/pytorch_lightning/lite/wrappers.py b/pytorch_lightning/lite/wrappers.py
index ca91086089..b4a1bed0aa 100644
--- a/pytorch_lightning/lite/wrappers.py
+++ b/pytorch_lightning/lite/wrappers.py
@@ -20,7 +20,8 @@ from torch.optim import Optimizer
 from torch.utils.data import DataLoader

 from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin
-from pytorch_lightning.plugins import PrecisionPlugin, Strategy
+from pytorch_lightning.plugins import PrecisionPlugin
+from pytorch_lightning.strategies import Strategy
 from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device
diff --git a/pytorch_lightning/loops/dataloader/prediction_loop.py b/pytorch_lightning/loops/dataloader/prediction_loop.py
index 7d4c95cca8..0bcc81c63b 100644
--- a/pytorch_lightning/loops/dataloader/prediction_loop.py
+++ b/pytorch_lightning/loops/dataloader/prediction_loop.py
@@ -5,7 +5,7 @@ from torch.utils.data import DataLoader

 from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop
 from pytorch_lightning.loops.epoch.prediction_epoch_loop import PredictionEpochLoop
-from pytorch_lightning.plugins import DDPSpawnStrategy
+from pytorch_lightning.strategies import DDPSpawnStrategy
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.types import _PREDICT_OUTPUT
diff --git a/pytorch_lightning/loops/utilities.py b/pytorch_lightning/loops/utilities.py
index 32dc9b805a..e2fac63a8f 100644
--- a/pytorch_lightning/loops/utilities.py
+++ b/pytorch_lightning/loops/utilities.py
@@ -22,7 +22,7 @@ import torch
 from torch.optim import Optimizer

 import pytorch_lightning as pl
-from pytorch_lightning.plugins import ParallelStrategy
+from pytorch_lightning.strategies import ParallelStrategy
 from pytorch_lightning.utilities import rank_zero_warn
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher
diff --git a/tests/accelerators/test_accelerator_connector.py b/tests/accelerators/test_accelerator_connector.py
index 3c9e54866a..bc8807c3da 100644
--- a/tests/accelerators/test_accelerator_connector.py
+++ b/tests/accelerators/test_accelerator_connector.py
@@ -24,7 +24,14 @@ from pytorch_lightning import Trainer
 from pytorch_lightning.accelerators.accelerator import Accelerator
 from pytorch_lightning.accelerators.cpu import CPUAccelerator
 from pytorch_lightning.accelerators.gpu import GPUAccelerator
-from pytorch_lightning.plugins import (
+from pytorch_lightning.plugins import PrecisionPlugin
+from pytorch_lightning.plugins.environments import (
+    KubeflowEnvironment,
+    LightningEnvironment,
+    SLURMEnvironment,
+    TorchElasticEnvironment,
+)
+from pytorch_lightning.strategies import (
     DataParallelStrategy,
     DDP2Strategy,
     DDPShardedStrategy,
@@ -33,15 +40,8 @@ from pytorch_lightning.plugins import (
     DDPStrategy,
     DeepSpeedStrategy,
     ParallelStrategy,
-    PrecisionPlugin,
     SingleDeviceStrategy,
 )
-from pytorch_lightning.plugins.environments import (
-    KubeflowEnvironment,
-    LightningEnvironment,
-    SLURMEnvironment,
-    TorchElasticEnvironment,
-)
 from pytorch_lightning.utilities import _AcceleratorType, _StrategyType
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.runif import RunIf
@@ -97,7 +97,7 @@ def test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_accelerator_choice_ddp_slurm(*_):
     with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
         trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2)
@@ -122,7 +122,7 @@ def test_accelerator_choice_ddp_slurm(*_):
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_accelerator_choice_ddp2_slurm(*_):
     with pytest.deprecated_call(match=r"accelerator='ddp2'\)` has been deprecated in v1.5"):
         trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2)
@@ -147,7 +147,7 @@ def test_accelerator_choice_ddp2_slurm(*_):
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=1)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_accelerator_choice_ddp_te(*_):
     with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
         trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2)
@@ -171,7 +171,7 @@ def test_accelerator_choice_ddp_te(*_):
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=1)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_accelerator_choice_ddp2_te(*_):
     with pytest.deprecated_call(match=r"accelerator='ddp2'\)` has been deprecated in v1.5"):
         trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2)
@@ -186,7 +186,7 @@
     os.environ, {"WORLD_SIZE": "2", "LOCAL_WORLD_SIZE": "2", "RANK": "1", "LOCAL_RANK": "1", "GROUP_RANK": "0"}
 )
 @mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_accelerator_choice_ddp_cpu_te(*_):
     trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=2)
     assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -209,7 +209,7 @@ def test_accelerator_choice_ddp_cpu_te(*_):
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=1)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_accelerator_choice_ddp_kubeflow(*_):
     with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
         trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=1)
@@ -231,7 +231,7 @@
     },
 )
 @mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_accelerator_choice_ddp_cpu_kubeflow(*_):
     trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=1)
     assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -253,7 +253,7 @@
     },
 )
 @mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_accelerator_choice_ddp_cpu_slurm(*_):
     trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=2)
     assert trainer._accelerator_connector._is_slurm_managing_tasks()
@@ -333,7 +333,7 @@ def test_accelerator_choice_ddp_cpu_custom_cluster(_, tmpdir):
     },
 )
 @mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_custom_accelerator(device_count_mock, setup_distributed_mock):
     class Accel(Accelerator):
         pass
@@ -374,7 +374,7 @@ def test_custom_accelerator(device_count_mock, setup_distributed_mock):
     },
 )
 @mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_dist_backend_accelerator_mapping(*_):
     trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", num_processes=2)
     assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -707,7 +707,7 @@ def test_strategy_choice_ddp_spawn(cuda_available_mock, device_count_mock):
         "SLURM_LOCALID": "1",
     },
 )
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 @pytest.mark.parametrize("strategy", ["ddp", DDPStrategy()])
 def test_strategy_choice_ddp_slurm(setup_distributed_mock, strategy):
     trainer = Trainer(fast_dev_run=True, strategy=strategy, gpus=2)
@@ -732,7 +732,7 @@ def test_strategy_choice_ddp_slurm(setup_distributed_mock, strategy):
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 @pytest.mark.parametrize("strategy", ["ddp2", DDP2Strategy()])
 def test_strategy_choice_ddp2_slurm(set_device_mock, device_count_mock, setup_distributed_mock, strategy):
     trainer = Trainer(fast_dev_run=True, strategy=strategy, gpus=2)
@@ -757,7 +757,7 @@ def test_strategy_choice_ddp2_slurm(set_device_mock, device_count_mock, setup_di
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_strategy_choice_ddp_te(*_):
     trainer = Trainer(fast_dev_run=True, strategy="ddp", gpus=2)
     assert isinstance(trainer.accelerator, GPUAccelerator)
@@ -780,7 +780,7 @@ def test_strategy_choice_ddp_te(*_):
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_strategy_choice_ddp2_te(*_):
     trainer = Trainer(fast_dev_run=True, strategy="ddp2", gpus=2)
     assert isinstance(trainer.accelerator, GPUAccelerator)
@@ -794,7 +794,7 @@
     os.environ, {"WORLD_SIZE": "2", "LOCAL_WORLD_SIZE": "2", "RANK": "1", "LOCAL_RANK": "1", "GROUP_RANK": "0"}
 )
 @mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_strategy_choice_ddp_cpu_te(*_):
     trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", num_processes=2)
     assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -817,7 +817,7 @@ def test_strategy_choice_ddp_cpu_te(*_):
 )
 @mock.patch("torch.cuda.set_device")
 @mock.patch("torch.cuda.device_count", return_value=1)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_strategy_choice_ddp_kubeflow(*_):
     trainer = Trainer(fast_dev_run=True, strategy="ddp", gpus=1)
     assert isinstance(trainer.accelerator, GPUAccelerator)
@@ -838,7 +838,7 @@ def test_strategy_choice_ddp_kubeflow(*_):
     },
 )
 @mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 def test_strategy_choice_ddp_cpu_kubeflow(*_):
     trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", num_processes=2)
     assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -860,7 +860,7 @@ def test_strategy_choice_ddp_cpu_kubeflow(*_):
     },
 )
 @mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
 @pytest.mark.parametrize("strategy", ["ddp", DDPStrategy()])
 def test_strategy_choice_ddp_cpu_slurm(device_count_mock, setup_distributed_mock, strategy):
     trainer = Trainer(fast_dev_run=True, strategy=strategy, num_processes=2)
diff --git a/tests/accelerators/test_cpu.py b/tests/accelerators/test_cpu.py
index fbb935e128..28011aa497 100644
--- a/tests/accelerators/test_cpu.py
+++ b/tests/accelerators/test_cpu.py
@@ -8,9 +8,9 @@ import torch
 import pytorch_lightning as pl
 from pytorch_lightning import Trainer
 from pytorch_lightning.accelerators import CPUAccelerator
-from pytorch_lightning.plugins import SingleDeviceStrategy
 from pytorch_lightning.plugins.io.torch_plugin import TorchCheckpointIO
 from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin
+from pytorch_lightning.strategies import SingleDeviceStrategy
 from tests.helpers.boring_model import BoringModel
diff --git a/tests/accelerators/test_ipu.py b/tests/accelerators/test_ipu.py
index 3e9b727dd6..3a250de38a 100644
--- a/tests/accelerators/test_ipu.py
+++ b/tests/accelerators/test_ipu.py
@@ -21,7 +21,8 @@ import torch.nn.functional as F
 from pytorch_lightning import Callback, seed_everything, Trainer
 from pytorch_lightning.accelerators import CPUAccelerator, IPUAccelerator
 from pytorch_lightning.core.lightning import LightningModule
-from pytorch_lightning.plugins import IPUPrecisionPlugin, IPUStrategy
+from pytorch_lightning.plugins import IPUPrecisionPlugin
+from pytorch_lightning.strategies.ipu import IPUStrategy
 from pytorch_lightning.trainer.states import RunningStage, TrainerFn
 from pytorch_lightning.trainer.supporters import CombinedLoader
 from pytorch_lightning.utilities import _AcceleratorType, _IPU_AVAILABLE
diff --git a/tests/accelerators/test_tpu.py b/tests/accelerators/test_tpu.py
index 5f5a9b7d84..a4eb26a4bc 100644
--- a/tests/accelerators/test_tpu.py
+++ b/tests/accelerators/test_tpu.py
@@ -23,7 +23,8 @@ from torch.utils.data import DataLoader
 from pytorch_lightning import Trainer
 from pytorch_lightning.accelerators.cpu import CPUAccelerator
 from pytorch_lightning.accelerators.tpu import TPUAccelerator
-from pytorch_lightning.plugins import DDPStrategy, TPUPrecisionPlugin, TPUSpawnStrategy, XLACheckpointIO
+from pytorch_lightning.plugins import TPUPrecisionPlugin, XLACheckpointIO
+from pytorch_lightning.strategies import DDPStrategy, TPUSpawnStrategy
 from pytorch_lightning.utilities import find_shared_parameters
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.boring_model import BoringModel, RandomDataset
diff --git a/tests/benchmarks/test_sharded_parity.py b/tests/benchmarks/test_sharded_parity.py
index 97b4038159..7c932224ec 100644
--- a/tests/benchmarks/test_sharded_parity.py
+++ b/tests/benchmarks/test_sharded_parity.py
@@ -19,7 +19,7 @@ import pytest
 import torch

 from pytorch_lightning import seed_everything, Trainer
-from pytorch_lightning.plugins import DDPSpawnShardedStrategy
+from pytorch_lightning.strategies import DDPSpawnShardedStrategy
 from tests.helpers.boring_model import BoringModel, RandomDataset
 from tests.helpers.runif import RunIf
diff --git a/tests/callbacks/test_stochastic_weight_avg.py b/tests/callbacks/test_stochastic_weight_avg.py
index cfb6847a18..ace2d35964 100644
--- a/tests/callbacks/test_stochastic_weight_avg.py
+++ b/tests/callbacks/test_stochastic_weight_avg.py
@@ -22,8 +22,7 @@ from torch.utils.data import DataLoader

 from pytorch_lightning import LightningModule, Trainer
 from pytorch_lightning.callbacks import StochasticWeightAveraging
-from pytorch_lightning.plugins import DDPSpawnStrategy
-from pytorch_lightning.strategies import Strategy
+from pytorch_lightning.strategies import DDPSpawnStrategy, Strategy
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset
 from tests.helpers.runif import RunIf
diff --git a/tests/deprecated_api/test_remove_1-7.py b/tests/deprecated_api/test_remove_1-7.py
index 45c58e810c..13bebcc9e3 100644
--- a/tests/deprecated_api/test_remove_1-7.py
+++ b/tests/deprecated_api/test_remove_1-7.py
@@ -27,7 +27,6 @@ from pytorch_lightning.callbacks.progress import ProgressBar
 from pytorch_lightning.callbacks.xla_stats_monitor import XLAStatsMonitor
 from pytorch_lightning.loggers import LoggerCollection, TestTubeLogger
 from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper
-from pytorch_lightning.plugins import SingleDeviceStrategy
 from pytorch_lightning.plugins.environments import (
     KubeflowEnvironment,
     LightningEnvironment,
@@ -35,6 +34,7 @@ from pytorch_lightning.plugins.environments import (
     SLURMEnvironment,
     TorchElasticEnvironment,
 )
+from pytorch_lightning.strategies import SingleDeviceStrategy
 from tests.callbacks.test_callbacks import OldStatefulCallback
 from tests.deprecated_api import _soft_unimport_module
 from tests.helpers import BoringModel
diff --git a/tests/lite/test_lite.py b/tests/lite/test_lite.py
index 3f147da99c..0f9a3b885d 100644
--- a/tests/lite/test_lite.py
+++ b/tests/lite/test_lite.py
@@ -25,7 +25,8 @@ from torch.utils.data import DataLoader, DistributedSampler, Sampler

 from pytorch_lightning.lite import LightningLite
 from pytorch_lightning.lite.wrappers import _LiteDataLoader, _LiteModule, _LiteOptimizer
-from pytorch_lightning.plugins import DeepSpeedStrategy, PrecisionPlugin, Strategy
+from pytorch_lightning.plugins import PrecisionPlugin
+from pytorch_lightning.strategies import DeepSpeedStrategy, Strategy
 from pytorch_lightning.utilities import _StrategyType
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.seed import pl_worker_init_function
diff --git a/tests/models/test_sync_batchnorm.py b/tests/models/test_sync_batchnorm.py
index d490fbdae4..cb1cba0646 100644
--- a/tests/models/test_sync_batchnorm.py
+++ b/tests/models/test_sync_batchnorm.py
@@ -17,8 +17,8 @@ import torch.nn as nn
 import torch.nn.functional as F

 from pytorch_lightning import LightningModule, seed_everything, Trainer
-from pytorch_lightning.plugins import DDPSpawnStrategy
 from pytorch_lightning.plugins.environments import LightningEnvironment
+from pytorch_lightning.strategies import DDPSpawnStrategy
 from pytorch_lightning.utilities import FLOAT16_EPSILON
 from tests.helpers.datamodules import MNISTDataModule
 from tests.helpers.runif import RunIf
diff --git a/tests/models/test_tpu.py b/tests/models/test_tpu.py
index 833d6dd316..236f022536 100644
--- a/tests/models/test_tpu.py
+++ b/tests/models/test_tpu.py
@@ -24,7 +24,7 @@ import tests.helpers.utils as tutils
 from pytorch_lightning import Trainer
 from pytorch_lightning.accelerators import TPUAccelerator
 from pytorch_lightning.callbacks import EarlyStopping
-from pytorch_lightning.plugins import TPUSpawnStrategy
+from pytorch_lightning.strategies import TPUSpawnStrategy
 from pytorch_lightning.trainer.connectors.logger_connector.result import _Sync
 from pytorch_lightning.utilities import _AcceleratorType, _TPU_AVAILABLE
 from pytorch_lightning.utilities.distributed import ReduceOp
diff --git a/tests/plugins/test_checkpoint_io_plugin.py b/tests/plugins/test_checkpoint_io_plugin.py
index 7765afb07c..0ce073a34d 100644
--- a/tests/plugins/test_checkpoint_io_plugin.py
+++ b/tests/plugins/test_checkpoint_io_plugin.py
@@ -19,7 +19,8 @@ import torch

 from pytorch_lightning import Trainer
 from pytorch_lightning.callbacks import ModelCheckpoint
-from pytorch_lightning.plugins import CheckpointIO, SingleDeviceStrategy
+from pytorch_lightning.plugins import CheckpointIO
+from pytorch_lightning.strategies import SingleDeviceStrategy
 from pytorch_lightning.utilities.types import _PATH
 from tests.helpers.boring_model import BoringModel
diff --git a/tests/plugins/test_cluster_integration.py b/tests/plugins/test_cluster_integration.py
index 3694db9ccf..56c967fecf 100644
--- a/tests/plugins/test_cluster_integration.py
+++ b/tests/plugins/test_cluster_integration.py
@@ -18,8 +18,8 @@ import pytest
 import torch

 from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDP2Strategy, DDPShardedStrategy, DDPStrategy, DeepSpeedStrategy
 from pytorch_lightning.plugins.environments import LightningEnvironment, SLURMEnvironment, TorchElasticEnvironment
+from pytorch_lightning.strategies import DDP2Strategy, DDPShardedStrategy, DDPStrategy, DeepSpeedStrategy
 from pytorch_lightning.utilities import rank_zero_only
 from tests.helpers.runif import RunIf
diff --git a/tests/plugins/test_custom_plugin.py b/tests/plugins/test_custom_plugin.py
index bf20f6ac93..f94a44fdb0 100644
--- a/tests/plugins/test_custom_plugin.py
+++ b/tests/plugins/test_custom_plugin.py
@@ -18,7 +18,7 @@ import pytest
 import torch

 from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDPStrategy, SingleDeviceStrategy
+from pytorch_lightning.strategies import DDPStrategy, SingleDeviceStrategy
 from tests.helpers import BoringModel
 from tests.helpers.runif import RunIf
diff --git a/tests/plugins/test_ddp_fully_sharded_with_full_state_dict.py b/tests/plugins/test_ddp_fully_sharded_with_full_state_dict.py
index 98a65caaca..bc3a6c427c 100644
--- a/tests/plugins/test_ddp_fully_sharded_with_full_state_dict.py
+++ b/tests/plugins/test_ddp_fully_sharded_with_full_state_dict.py
@@ -7,7 +7,8 @@ import torch

 from pytorch_lightning import Trainer
 from pytorch_lightning.callbacks import ModelCheckpoint
-from pytorch_lightning.plugins import DDPFullyShardedStrategy, FullyShardedNativeMixedPrecisionPlugin
+from pytorch_lightning.plugins import FullyShardedNativeMixedPrecisionPlugin
+from pytorch_lightning.strategies import DDPFullyShardedStrategy
 from pytorch_lightning.utilities import _FAIRSCALE_FULLY_SHARDED_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.boring_model import BoringModel
diff --git a/tests/plugins/test_ddp_plugin.py b/tests/plugins/test_ddp_plugin.py
index 028d1a719f..fa47380c7d 100644
--- a/tests/plugins/test_ddp_plugin.py
+++ b/tests/plugins/test_ddp_plugin.py
@@ -19,8 +19,8 @@ import torch
 from torch.nn.parallel import DistributedDataParallel

 from pytorch_lightning import LightningModule, Trainer
-from pytorch_lightning.plugins import DDPStrategy
 from pytorch_lightning.plugins.environments import LightningEnvironment
+from pytorch_lightning.strategies import DDPStrategy
 from pytorch_lightning.trainer.states import TrainerFn
 from tests.helpers.boring_model import BoringModel
 from tests.helpers.runif import RunIf
diff --git a/tests/plugins/test_ddp_plugin_with_comm_hook.py b/tests/plugins/test_ddp_plugin_with_comm_hook.py
index 7b4cfff592..3a5f4c0710 100644
--- a/tests/plugins/test_ddp_plugin_with_comm_hook.py
+++ b/tests/plugins/test_ddp_plugin_with_comm_hook.py
@@ -14,7 +14,7 @@ import torch

 from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDPSpawnStrategy, DDPStrategy
+from pytorch_lightning.strategies import DDPSpawnStrategy, DDPStrategy
 from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8, _TORCH_GREATER_EQUAL_1_10
 from tests.helpers import BoringModel
 from tests.helpers.runif import RunIf
diff --git a/tests/plugins/test_ddp_spawn_plugin.py b/tests/plugins/test_ddp_spawn_plugin.py
index 48eda9091b..cd5b37e2e6 100644
--- a/tests/plugins/test_ddp_spawn_plugin.py
+++ b/tests/plugins/test_ddp_spawn_plugin.py
@@ -19,7 +19,7 @@ import torch
 from torch.nn.parallel.distributed import DistributedDataParallel

 from pytorch_lightning import LightningModule, Trainer
-from pytorch_lightning.plugins import DDPSpawnStrategy
+from pytorch_lightning.strategies import DDPSpawnStrategy
 from pytorch_lightning.trainer.states import TrainerFn
 from tests.helpers.boring_model import BoringDataModule, BoringModel
 from tests.helpers.runif import RunIf
diff --git a/tests/plugins/test_deepspeed_plugin.py b/tests/plugins/test_deepspeed_plugin.py
index fed7a11b3f..c349e40b2a 100644
--- a/tests/plugins/test_deepspeed_plugin.py
+++ b/tests/plugins/test_deepspeed_plugin.py
@@ -15,7 +15,8 @@ from torchmetrics import Accuracy

 from pytorch_lightning import LightningDataModule, LightningModule, seed_everything, Trainer
 from pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint
-from pytorch_lightning.plugins import DeepSpeedPrecisionPlugin, DeepSpeedStrategy
+from pytorch_lightning.plugins import DeepSpeedPrecisionPlugin
+from pytorch_lightning.strategies import DeepSpeedStrategy
 from pytorch_lightning.strategies.deepspeed import LightningDeepSpeedModule
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE
diff --git a/tests/plugins/test_plugins_registry.py b/tests/plugins/test_plugins_registry.py
index d071c3843c..60e819f917 100644
--- a/tests/plugins/test_plugins_registry.py
+++ b/tests/plugins/test_plugins_registry.py
@@ -14,8 +14,8 @@ import pytest

 from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import (
-    CheckpointIO,
+from pytorch_lightning.plugins import CheckpointIO, TrainingTypePluginsRegistry
+from pytorch_lightning.strategies import (
     DDPFullyShardedStrategy,
     DDPShardedStrategy,
     DDPSpawnShardedStrategy,
@@ -23,7 +23,6 @@ from pytorch_lightning.plugins import (
     DDPStrategy,
     DeepSpeedStrategy,
     TPUSpawnStrategy,
-    TrainingTypePluginsRegistry,
 )
 from tests.helpers.runif import RunIf
diff --git a/tests/plugins/test_sharded_plugin.py b/tests/plugins/test_sharded_plugin.py
index 5b9b98000d..2b7df57877 100644
--- a/tests/plugins/test_sharded_plugin.py
+++ b/tests/plugins/test_sharded_plugin.py
@@ -6,7 +6,7 @@ import pytest
 import torch

 from pytorch_lightning import LightningModule, Trainer
-from pytorch_lightning.plugins import DDPShardedStrategy, DDPSpawnShardedStrategy
+from pytorch_lightning.strategies import DDPShardedStrategy, DDPSpawnShardedStrategy
 from pytorch_lightning.trainer.states import TrainerFn
 from pytorch_lightning.utilities import _FAIRSCALE_AVAILABLE
 from tests.helpers.boring_model import BoringModel
@@ -231,7 +231,7 @@ def test_configure_ddp(tmpdir):


 @RunIf(skip_windows=True, fairscale=True)
-@mock.patch("pytorch_lightning.plugins.DDPShardedStrategy._wrap_optimizers", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers", autospec=True)
 @pytest.mark.parametrize("cls", [DDPShardedStrategy, DDPSpawnShardedStrategy])
 def test_custom_kwargs_sharded(tmpdir, cls):
     """Tests to ensure that if custom kwargs are passed, they are set correctly."""
@@ -248,7 +248,7 @@ def test_custom_kwargs_sharded(tmpdir, cls):


 @RunIf(skip_windows=True, fairscale=True)
-@mock.patch("pytorch_lightning.plugins.DDPShardedStrategy._wrap_optimizers", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers", autospec=True)
 @pytest.mark.parametrize(["params", "expected_buffer_size"], [(dict(), 0), (dict(reduce_buffer_size=128), 128)])
 @pytest.mark.parametrize("num_nodes", [1, 2])
 def test_custom_kwargs_sharded_reduce_buffer_size(tmpdir, params, expected_buffer_size, num_nodes):
diff --git a/tests/plugins/test_single_device_plugin.py b/tests/plugins/test_single_device_plugin.py
index 835065ac5c..0e5ba39753 100644
--- a/tests/plugins/test_single_device_plugin.py
+++ b/tests/plugins/test_single_device_plugin.py
@@ -14,7 +14,7 @@ import torch

 from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import SingleDeviceStrategy
+from pytorch_lightning.strategies import SingleDeviceStrategy
 from tests.helpers.boring_model import BoringModel
 from tests.helpers.runif import RunIf
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 743b1cea35..52bd2305d7 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -37,7 +37,7 @@ from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
 from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv
 from pytorch_lightning.loggers import TensorBoardLogger
 from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler
-from pytorch_lightning.plugins import (
+from pytorch_lightning.strategies import (
     DataParallelStrategy,
     DDP2Strategy,
     DDPFullyShardedStrategy,
diff --git a/tests/utilities/test_deepspeed_collate_checkpoint.py b/tests/utilities/test_deepspeed_collate_checkpoint.py
index 7911eaa8ea..02e05a97bd 100644
--- a/tests/utilities/test_deepspeed_collate_checkpoint.py
+++ b/tests/utilities/test_deepspeed_collate_checkpoint.py
@@ -16,7 +16,7 @@ import os

 import torch

 from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
 from pytorch_lightning.utilities.deepspeed import convert_zero_checkpoint_to_fp32_state_dict
 from tests.helpers.boring_model import BoringModel
 from tests.helpers.runif import RunIf
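
Every hunk in this patch makes the same mechanical move: strategy classes are now imported from pytorch_lightning.strategies, while precision plugins, checkpoint IO plugins, and cluster environments stay under pytorch_lightning.plugins. The mock.patch targets in the tests change together with the imports, since patching resolves a class through its module path. For downstream code that has to run against releases from both before and after this move, a minimal sketch of a compatibility shim (the try/except helper below is illustrative only and is not part of this patch):

    # Hypothetical shim: prefer the new module location, fall back to the old
    # one. Before this patch, DDPStrategy was importable from
    # pytorch_lightning.plugins, as the removed import lines above show.
    try:
        from pytorch_lightning.strategies import DDPStrategy  # post-move layout
    except ImportError:
        from pytorch_lightning.plugins import DDPStrategy  # pre-move layout

    import pytorch_lightning as pl

    # Usage is unchanged either way: pass the strategy instance to the Trainer.
    trainer = pl.Trainer(gpus=2, strategy=DDPStrategy())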