Update strategy import statements (#11231)

Author: Adrian Wälchli, 2021-12-23 08:26:28 +01:00, committed by GitHub
parent dedfde6859
commit c210e338ef
36 changed files with 92 additions and 85 deletions
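
In short, this commit changes where strategy classes are imported from: they move out of the pytorch_lightning.plugins namespace into the dedicated pytorch_lightning.strategies package, while non-strategy plugins keep their existing import path. A minimal before/after sketch of the pattern (class names taken from the diffs below):

.. code-block:: python

    # Old import location, used before this commit:
    # from pytorch_lightning.plugins import DDPStrategy, DeepSpeedStrategy

    # New import location introduced here:
    from pytorch_lightning.strategies import DDPStrategy, DeepSpeedStrategy

    # Precision and checkpoint IO plugins keep their original path:
    from pytorch_lightning.plugins import PrecisionPlugin, CheckpointIO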


@@ -291,7 +291,7 @@ Below we show an example of running `ZeRO-Offload <https://www.deepspeed.ai/tuto
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
model = MyModel()
trainer = Trainer(gpus=4, strategy="deepspeed_stage_2_offload", precision=16)
@@ -310,7 +310,7 @@ You can also modify the ZeRO-Offload parameters via the plugin as below.
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
model = MyModel()
trainer = Trainer(
@@ -335,7 +335,7 @@ For even more speed benefit, DeepSpeed offers an optimized CPU version of ADAM c
import pytorch_lightning
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
from deepspeed.ops.adam import DeepSpeedCPUAdam
@@ -379,7 +379,7 @@ Also please have a look at our :ref:`deepspeed-zero-stage-3-tips` which contains
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
from deepspeed.ops.adam import FusedAdam
@@ -403,7 +403,7 @@ You can also use the Lightning Trainer to run predict or evaluate with DeepSpeed
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
class MyModel(pl.LightningModule):
@@ -429,7 +429,7 @@ This reduces the time taken to initialize very large models, as well as ensure w
import torch.nn as nn
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
from deepspeed.ops.adam import FusedAdam
@@ -467,7 +467,7 @@ DeepSpeed ZeRO Stage 3 Offloads optimizer state, gradients to the host CPU to re
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
# Enable CPU Offloading
model = MyModel()
@@ -496,7 +496,7 @@ Additionally, DeepSpeed supports offloading to NVMe drives for even larger model
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
# Enable CPU Offloading
model = MyModel()
@@ -541,7 +541,7 @@ This saves memory when training larger models, however requires using a checkpoi
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
import deepspeed
@@ -564,7 +564,7 @@ This saves memory when training larger models, however requires using a checkpoi
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
import deepspeed
@@ -644,7 +644,7 @@ In some cases you may want to define your own DeepSpeed Config, to access all pa
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
deepspeed_config = {
"zero_allow_untested_optimizer": True,
@@ -687,7 +687,7 @@ We support taking the config as a json formatted file:
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
model = MyModel()
trainer = Trainer(gpus=4, strategy=DeepSpeedStrategy("/path/to/deepspeed_config.json"), precision=16)
@@ -722,7 +722,7 @@ This can reduce peak memory usage and throughput as saved memory will be equal t
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDPStrategy
+from pytorch_lightning.strategies import DDPStrategy
model = MyModel()
trainer = Trainer(gpus=4, strategy=DDPStrategy(gradient_as_bucket_view=True))
@@ -741,7 +741,7 @@ Enable `FP16 Compress Hook for multi-node throughput improvement <https://pytorc
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDPStrategy
+from pytorch_lightning.strategies import DDPStrategy
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as default,
powerSGD_hook as powerSGD,
@@ -760,7 +760,7 @@ Enable `PowerSGD for multi-node throughput improvement <https://pytorch.org/docs
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDPStrategy
+from pytorch_lightning.strategies import DDPStrategy
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD
model = MyModel()
@@ -786,7 +786,7 @@ Combine hooks for accumulated benefit:
.. code-block:: python
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDPStrategy
+from pytorch_lightning.strategies import DDPStrategy
from torch.distributed.algorithms.ddp_comm_hooks import (
default_hooks as default,
powerSGD_hook as powerSGD,


@@ -61,7 +61,7 @@ You can also use pure 16-bit training, where the weights are also in 16-bit prec
.. code-block:: python
import pytorch_lightning as pl
-from pytorch_lightning.plugins import IPUStrategy
+from pytorch_lightning.strategies import IPUStrategy
model = MyLightningModule()
model = model.half()
@@ -80,7 +80,7 @@ IPUs provide further optimizations to speed up training. By using the ``IPUStrat
.. code-block:: python
import pytorch_lightning as pl
-from pytorch_lightning.plugins import IPUStrategy
+from pytorch_lightning.strategies import IPUStrategy
model = MyLightningModule()
trainer = pl.Trainer(ipus=8, strategy=IPUStrategy(device_iterations=32))
@@ -92,7 +92,7 @@ Note that by default we return the last device iteration loss. You can override
import poptorch
import pytorch_lightning as pl
-from pytorch_lightning.plugins import IPUStrategy
+from pytorch_lightning.strategies import IPUStrategy
model = MyLightningModule()
inference_opts = poptorch.Options()
@@ -121,7 +121,7 @@ Lightning supports dumping all reports to a directory to open using the tool.
.. code-block:: python
import pytorch_lightning as pl
-from pytorch_lightning.plugins import IPUStrategy
+from pytorch_lightning.strategies import IPUStrategy
model = MyLightningModule()
trainer = pl.Trainer(ipus=8, strategy=IPUStrategy(autoreport_dir="report_dir/"))


@@ -25,7 +25,7 @@ Additionally, you can pass your custom registered training type plugins to the `
.. code-block:: python
-from pytorch_lightning.plugins import DDPStrategy, TrainingTypePluginsRegistry, CheckpointIO
+from pytorch_lightning.strategies import DDPStrategy, TrainingTypePluginsRegistry, CheckpointIO
class CustomCheckpointIO(CheckpointIO):


@@ -178,7 +178,7 @@ For example, when training Graph Neural Networks, a common strategy is to load t
A simple way to prevent redundant dataset replicas is to rely on :obj:`torch.multiprocessing` to share the `data automatically between spawned processes via shared memory <https://pytorch.org/docs/stable/notes/multiprocessing.html>`_.
For this, all data pre-loading should be done on the main process inside :meth:`DataModule.__init__`.
-As a result, all tensor-data will get automatically shared when using the :class:`~pytorch_lightning.plugins.DDPSpawnStrategy` training type strategy:
+As a result, all tensor-data will get automatically shared when using the :class:`~pytorch_lightning.strategies.DDPSpawnStrategy` training type strategy:
.. warning::
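
The passage above describes a concrete how-to: create all tensor data once in the DataModule constructor, on the main process, so that workers spawned by DDPSpawnStrategy receive the tensors through shared memory rather than loading their own copies. A minimal sketch of that pattern, using an illustrative datamodule with randomly generated tensors (not part of this commit):

.. code-block:: python

    import torch
    from pytorch_lightning import LightningDataModule
    from torch.utils.data import DataLoader, TensorDataset


    class PreloadedDataModule(LightningDataModule):
        def __init__(self):
            super().__init__()
            # Created once on the main process; torch.multiprocessing shares these
            # tensors with the spawned processes via shared memory instead of
            # duplicating them per process.
            self.inputs = torch.randn(10_000, 32)  # illustrative data, not from the diff
            self.targets = torch.randint(0, 10, (10_000,))

        def train_dataloader(self):
            return DataLoader(TensorDataset(self.inputs, self.targets), batch_size=64)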


@@ -1416,7 +1416,7 @@ Supports passing different training strategies with aliases (ddp, ddp_spawn, etc
.. code-block:: python
-from pytorch_lightning.plugins import DDPStrategy
+from pytorch_lightning.strategies import DDPStrategy
class CustomDDPStrategy(DDPStrategy):


@@ -23,7 +23,8 @@ One to handle differences from the training routine and one to handle different
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import GPUAccelerator
-from pytorch_lightning.plugins import NativeMixedPrecisionPlugin, DDPStrategy
+from pytorch_lightning.plugins import NativeMixedPrecisionPlugin
+from pytorch_lightning.strategies import DDPStrategy
accelerator = GPUAccelerator()
precision_plugin = NativeMixedPrecisionPlugin(precision=16, device="cuda")


@@ -60,7 +60,7 @@ Expert users may choose to extend an existing plugin by overriding its methods .
.. code-block:: python
-from pytorch_lightning.plugins import DDPStrategy
+from pytorch_lightning.strategies import DDPStrategy
class CustomDDPStrategy(DDPStrategy):


@@ -86,7 +86,7 @@ This by default comes with a performance hit, and can be disabled in most cases.
.. code-block:: python
-from pytorch_lightning.plugins import DDPStrategy
+from pytorch_lightning.strategies import DDPStrategy
trainer = pl.Trainer(
gpus=2,
@@ -95,7 +95,7 @@ This by default comes with a performance hit, and can be disabled in most cases.
.. code-block:: python
-from pytorch_lightning.plugins import DDPSpawnStrategy
+from pytorch_lightning.strategies import DDPSpawnStrategy
trainer = pl.Trainer(
gpus=2,


@@ -389,7 +389,7 @@ Additionally, you can pass in your custom training type strategy by configuring
.. code-block:: python
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
lite = Lite(strategy=DeepSpeedStrategy(stage=2), accelerator="gpu", devices=2)


@@ -26,7 +26,8 @@ from torch.utils.data import DataLoader, DistributedSampler, RandomSampler, Sequ
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.lite.wrappers import _LiteDataLoader, _LiteModule, _LiteOptimizer
-from pytorch_lightning.plugins import DDPSpawnStrategy, DeepSpeedStrategy, PLUGIN_INPUT, Strategy, TPUSpawnStrategy
+from pytorch_lightning.plugins import PLUGIN_INPUT
+from pytorch_lightning.strategies import DDPSpawnStrategy, DeepSpeedStrategy, Strategy, TPUSpawnStrategy
from pytorch_lightning.strategies.training_type_plugin import TBroadcast
from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
from pytorch_lightning.utilities import _AcceleratorType, _StrategyType, move_data_to_device


@@ -20,7 +20,8 @@ from torch.optim import Optimizer
from torch.utils.data import DataLoader
from pytorch_lightning.core.mixins import DeviceDtypeModuleMixin
-from pytorch_lightning.plugins import PrecisionPlugin, Strategy
+from pytorch_lightning.plugins import PrecisionPlugin
+from pytorch_lightning.strategies import Strategy
from pytorch_lightning.utilities.apply_func import apply_to_collection, move_data_to_device


@@ -5,7 +5,7 @@ from torch.utils.data import DataLoader
from pytorch_lightning.loops.dataloader.dataloader_loop import DataLoaderLoop
from pytorch_lightning.loops.epoch.prediction_epoch_loop import PredictionEpochLoop
-from pytorch_lightning.plugins import DDPSpawnStrategy
+from pytorch_lightning.strategies import DDPSpawnStrategy
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.types import _PREDICT_OUTPUT


@@ -22,7 +22,7 @@ import torch
from torch.optim import Optimizer
import pytorch_lightning as pl
-from pytorch_lightning.plugins import ParallelStrategy
+from pytorch_lightning.strategies import ParallelStrategy
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.fetching import AbstractDataFetcher, DataLoaderIterDataFetcher


@@ -24,7 +24,14 @@ from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.accelerator import Accelerator
from pytorch_lightning.accelerators.cpu import CPUAccelerator
from pytorch_lightning.accelerators.gpu import GPUAccelerator
-from pytorch_lightning.plugins import (
+from pytorch_lightning.plugins import PrecisionPlugin
+from pytorch_lightning.plugins.environments import (
+KubeflowEnvironment,
+LightningEnvironment,
+SLURMEnvironment,
+TorchElasticEnvironment,
+)
+from pytorch_lightning.strategies import (
DataParallelStrategy,
DDP2Strategy,
DDPShardedStrategy,
@@ -33,15 +40,8 @@ from pytorch_lightning.plugins import (
DDPStrategy,
DeepSpeedStrategy,
ParallelStrategy,
-PrecisionPlugin,
SingleDeviceStrategy,
)
-from pytorch_lightning.plugins.environments import (
-KubeflowEnvironment,
-LightningEnvironment,
-SLURMEnvironment,
-TorchElasticEnvironment,
-)
from pytorch_lightning.utilities import _AcceleratorType, _StrategyType
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.runif import RunIf
@@ -97,7 +97,7 @@ def test_accelerator_choice_ddp_spawn(cuda_available_mock, device_count_mock):
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_slurm(*_):
with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2)
@@ -122,7 +122,7 @@ def test_accelerator_choice_ddp_slurm(*_):
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp2_slurm(*_):
with pytest.deprecated_call(match=r"accelerator='ddp2'\)` has been deprecated in v1.5"):
trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2)
@@ -147,7 +147,7 @@ def test_accelerator_choice_ddp2_slurm(*_):
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_te(*_):
with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=2)
@@ -171,7 +171,7 @@ def test_accelerator_choice_ddp_te(*_):
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp2_te(*_):
with pytest.deprecated_call(match=r"accelerator='ddp2'\)` has been deprecated in v1.5"):
trainer = Trainer(fast_dev_run=True, accelerator="ddp2", gpus=2)
@@ -186,7 +186,7 @@ def test_accelerator_choice_ddp2_te(*_):
os.environ, {"WORLD_SIZE": "2", "LOCAL_WORLD_SIZE": "2", "RANK": "1", "LOCAL_RANK": "1", "GROUP_RANK": "0"}
)
@mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_te(*_):
trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=2)
assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -209,7 +209,7 @@ def test_accelerator_choice_ddp_cpu_te(*_):
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_kubeflow(*_):
with pytest.deprecated_call(match=r"accelerator='ddp'\)` has been deprecated in v1.5"):
trainer = Trainer(fast_dev_run=True, accelerator="ddp", gpus=1)
@@ -231,7 +231,7 @@ def test_accelerator_choice_ddp_kubeflow(*_):
},
)
@mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_kubeflow(*_):
trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=1)
assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -253,7 +253,7 @@ def test_accelerator_choice_ddp_cpu_kubeflow(*_):
},
)
@mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_accelerator_choice_ddp_cpu_slurm(*_):
trainer = Trainer(fast_dev_run=True, accelerator="ddp_cpu", num_processes=2)
assert trainer._accelerator_connector._is_slurm_managing_tasks()
@@ -333,7 +333,7 @@ def test_accelerator_choice_ddp_cpu_custom_cluster(_, tmpdir):
},
)
@mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_custom_accelerator(device_count_mock, setup_distributed_mock):
class Accel(Accelerator):
pass
@@ -374,7 +374,7 @@ def test_custom_accelerator(device_count_mock, setup_distributed_mock):
},
)
@mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_dist_backend_accelerator_mapping(*_):
trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", num_processes=2)
assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -707,7 +707,7 @@ def test_strategy_choice_ddp_spawn(cuda_available_mock, device_count_mock):
"SLURM_LOCALID": "1",
},
)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@pytest.mark.parametrize("strategy", ["ddp", DDPStrategy()])
def test_strategy_choice_ddp_slurm(setup_distributed_mock, strategy):
trainer = Trainer(fast_dev_run=True, strategy=strategy, gpus=2)
@@ -732,7 +732,7 @@ def test_strategy_choice_ddp_slurm(setup_distributed_mock, strategy):
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@pytest.mark.parametrize("strategy", ["ddp2", DDP2Strategy()])
def test_strategy_choice_ddp2_slurm(set_device_mock, device_count_mock, setup_distributed_mock, strategy):
trainer = Trainer(fast_dev_run=True, strategy=strategy, gpus=2)
@@ -757,7 +757,7 @@ def test_strategy_choice_ddp2_slurm(set_device_mock, device_count_mock, setup_di
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_te(*_):
trainer = Trainer(fast_dev_run=True, strategy="ddp", gpus=2)
assert isinstance(trainer.accelerator, GPUAccelerator)
@@ -780,7 +780,7 @@ def test_strategy_choice_ddp_te(*_):
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=2)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp2_te(*_):
trainer = Trainer(fast_dev_run=True, strategy="ddp2", gpus=2)
assert isinstance(trainer.accelerator, GPUAccelerator)
@@ -794,7 +794,7 @@ def test_strategy_choice_ddp2_te(*_):
os.environ, {"WORLD_SIZE": "2", "LOCAL_WORLD_SIZE": "2", "RANK": "1", "LOCAL_RANK": "1", "GROUP_RANK": "0"}
)
@mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_cpu_te(*_):
trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", num_processes=2)
assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -817,7 +817,7 @@ def test_strategy_choice_ddp_cpu_te(*_):
)
@mock.patch("torch.cuda.set_device")
@mock.patch("torch.cuda.device_count", return_value=1)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_kubeflow(*_):
trainer = Trainer(fast_dev_run=True, strategy="ddp", gpus=1)
assert isinstance(trainer.accelerator, GPUAccelerator)
@@ -838,7 +838,7 @@ def test_strategy_choice_ddp_kubeflow(*_):
},
)
@mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
def test_strategy_choice_ddp_cpu_kubeflow(*_):
trainer = Trainer(fast_dev_run=True, strategy="ddp_spawn", num_processes=2)
assert isinstance(trainer.accelerator, CPUAccelerator)
@@ -860,7 +860,7 @@ def test_strategy_choice_ddp_cpu_kubeflow(*_):
},
)
@mock.patch("torch.cuda.device_count", return_value=0)
-@mock.patch("pytorch_lightning.plugins.DDPStrategy.setup_distributed", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPStrategy.setup_distributed", autospec=True)
@pytest.mark.parametrize("strategy", ["ddp", DDPStrategy()])
def test_strategy_choice_ddp_cpu_slurm(device_count_mock, setup_distributed_mock, strategy):
trainer = Trainer(fast_dev_run=True, strategy=strategy, num_processes=2)


@@ -8,9 +8,9 @@ import torch
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import CPUAccelerator
-from pytorch_lightning.plugins import SingleDeviceStrategy
from pytorch_lightning.plugins.io.torch_plugin import TorchCheckpointIO
from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin
+from pytorch_lightning.strategies import SingleDeviceStrategy
from tests.helpers.boring_model import BoringModel


@@ -21,7 +21,8 @@ import torch.nn.functional as F
from pytorch_lightning import Callback, seed_everything, Trainer
from pytorch_lightning.accelerators import CPUAccelerator, IPUAccelerator
from pytorch_lightning.core.lightning import LightningModule
-from pytorch_lightning.plugins import IPUPrecisionPlugin, IPUStrategy
+from pytorch_lightning.plugins import IPUPrecisionPlugin
+from pytorch_lightning.strategies.ipu import IPUStrategy
from pytorch_lightning.trainer.states import RunningStage, TrainerFn
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.utilities import _AcceleratorType, _IPU_AVAILABLE


@@ -23,7 +23,8 @@ from torch.utils.data import DataLoader
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators.cpu import CPUAccelerator
from pytorch_lightning.accelerators.tpu import TPUAccelerator
-from pytorch_lightning.plugins import DDPStrategy, TPUPrecisionPlugin, TPUSpawnStrategy, XLACheckpointIO
+from pytorch_lightning.plugins import TPUPrecisionPlugin, XLACheckpointIO
+from pytorch_lightning.strategies import DDPStrategy, TPUSpawnStrategy
from pytorch_lightning.utilities import find_shared_parameters
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel, RandomDataset


@@ -19,7 +19,7 @@ import pytest
import torch
from pytorch_lightning import seed_everything, Trainer
-from pytorch_lightning.plugins import DDPSpawnShardedStrategy
+from pytorch_lightning.strategies import DDPSpawnShardedStrategy
from tests.helpers.boring_model import BoringModel, RandomDataset
from tests.helpers.runif import RunIf


@@ -22,8 +22,7 @@ from torch.utils.data import DataLoader
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import StochasticWeightAveraging
-from pytorch_lightning.plugins import DDPSpawnStrategy
-from pytorch_lightning.strategies import Strategy
+from pytorch_lightning.strategies import DDPSpawnStrategy, Strategy
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel, RandomDataset, RandomIterableDataset
from tests.helpers.runif import RunIf


@@ -27,7 +27,6 @@ from pytorch_lightning.callbacks.progress import ProgressBar
from pytorch_lightning.callbacks.xla_stats_monitor import XLAStatsMonitor
from pytorch_lightning.loggers import LoggerCollection, TestTubeLogger
from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper
-from pytorch_lightning.plugins import SingleDeviceStrategy
from pytorch_lightning.plugins.environments import (
KubeflowEnvironment,
LightningEnvironment,
@@ -35,6 +34,7 @@ from pytorch_lightning.plugins.environments import (
SLURMEnvironment,
TorchElasticEnvironment,
)
+from pytorch_lightning.strategies import SingleDeviceStrategy
from tests.callbacks.test_callbacks import OldStatefulCallback
from tests.deprecated_api import _soft_unimport_module
from tests.helpers import BoringModel


@@ -25,7 +25,8 @@ from torch.utils.data import DataLoader, DistributedSampler, Sampler
from pytorch_lightning.lite import LightningLite
from pytorch_lightning.lite.wrappers import _LiteDataLoader, _LiteModule, _LiteOptimizer
-from pytorch_lightning.plugins import DeepSpeedStrategy, PrecisionPlugin, Strategy
+from pytorch_lightning.plugins import PrecisionPlugin
+from pytorch_lightning.strategies import DeepSpeedStrategy, Strategy
from pytorch_lightning.utilities import _StrategyType
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.seed import pl_worker_init_function


@@ -17,8 +17,8 @@ import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning import LightningModule, seed_everything, Trainer
-from pytorch_lightning.plugins import DDPSpawnStrategy
from pytorch_lightning.plugins.environments import LightningEnvironment
+from pytorch_lightning.strategies import DDPSpawnStrategy
from pytorch_lightning.utilities import FLOAT16_EPSILON
from tests.helpers.datamodules import MNISTDataModule
from tests.helpers.runif import RunIf


@@ -24,7 +24,7 @@ import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import TPUAccelerator
from pytorch_lightning.callbacks import EarlyStopping
-from pytorch_lightning.plugins import TPUSpawnStrategy
+from pytorch_lightning.strategies import TPUSpawnStrategy
from pytorch_lightning.trainer.connectors.logger_connector.result import _Sync
from pytorch_lightning.utilities import _AcceleratorType, _TPU_AVAILABLE
from pytorch_lightning.utilities.distributed import ReduceOp


@@ -19,7 +19,8 @@ import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
-from pytorch_lightning.plugins import CheckpointIO, SingleDeviceStrategy
+from pytorch_lightning.plugins import CheckpointIO
+from pytorch_lightning.strategies import SingleDeviceStrategy
from pytorch_lightning.utilities.types import _PATH
from tests.helpers.boring_model import BoringModel


@@ -18,8 +18,8 @@ import pytest
import torch
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDP2Strategy, DDPShardedStrategy, DDPStrategy, DeepSpeedStrategy
from pytorch_lightning.plugins.environments import LightningEnvironment, SLURMEnvironment, TorchElasticEnvironment
+from pytorch_lightning.strategies import DDP2Strategy, DDPShardedStrategy, DDPStrategy, DeepSpeedStrategy
from pytorch_lightning.utilities import rank_zero_only
from tests.helpers.runif import RunIf


@@ -18,7 +18,7 @@ import pytest
import torch
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDPStrategy, SingleDeviceStrategy
+from pytorch_lightning.strategies import DDPStrategy, SingleDeviceStrategy
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf


@@ -7,7 +7,8 @@ import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
-from pytorch_lightning.plugins import DDPFullyShardedStrategy, FullyShardedNativeMixedPrecisionPlugin
+from pytorch_lightning.plugins import FullyShardedNativeMixedPrecisionPlugin
+from pytorch_lightning.strategies import DDPFullyShardedStrategy
from pytorch_lightning.utilities import _FAIRSCALE_FULLY_SHARDED_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel


@@ -19,8 +19,8 @@ import torch
from torch.nn.parallel import DistributedDataParallel
from pytorch_lightning import LightningModule, Trainer
-from pytorch_lightning.plugins import DDPStrategy
from pytorch_lightning.plugins.environments import LightningEnvironment
+from pytorch_lightning.strategies import DDPStrategy
from pytorch_lightning.trainer.states import TrainerFn
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf


@@ -14,7 +14,7 @@
import torch
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DDPSpawnStrategy, DDPStrategy
+from pytorch_lightning.strategies import DDPSpawnStrategy, DDPStrategy
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_8, _TORCH_GREATER_EQUAL_1_10
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf


@@ -19,7 +19,7 @@ import torch
from torch.nn.parallel.distributed import DistributedDataParallel
from pytorch_lightning import LightningModule, Trainer
-from pytorch_lightning.plugins import DDPSpawnStrategy
+from pytorch_lightning.strategies import DDPSpawnStrategy
from pytorch_lightning.trainer.states import TrainerFn
from tests.helpers.boring_model import BoringDataModule, BoringModel
from tests.helpers.runif import RunIf


@@ -15,7 +15,8 @@ from torchmetrics import Accuracy
from pytorch_lightning import LightningDataModule, LightningModule, seed_everything, Trainer
from pytorch_lightning.callbacks import Callback, LearningRateMonitor, ModelCheckpoint
-from pytorch_lightning.plugins import DeepSpeedPrecisionPlugin, DeepSpeedStrategy
+from pytorch_lightning.plugins import DeepSpeedPrecisionPlugin
+from pytorch_lightning.strategies import DeepSpeedStrategy
from pytorch_lightning.strategies.deepspeed import LightningDeepSpeedModule
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE


@@ -14,8 +14,8 @@
import pytest
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import (
-CheckpointIO,
+from pytorch_lightning.plugins import CheckpointIO, TrainingTypePluginsRegistry
+from pytorch_lightning.strategies import (
DDPFullyShardedStrategy,
DDPShardedStrategy,
DDPSpawnShardedStrategy,
@@ -23,7 +23,6 @@ from pytorch_lightning.plugins import (
DDPStrategy,
DeepSpeedStrategy,
TPUSpawnStrategy,
-TrainingTypePluginsRegistry,
)
from tests.helpers.runif import RunIf


@@ -6,7 +6,7 @@ import pytest
import torch
from pytorch_lightning import LightningModule, Trainer
-from pytorch_lightning.plugins import DDPShardedStrategy, DDPSpawnShardedStrategy
+from pytorch_lightning.strategies import DDPShardedStrategy, DDPSpawnShardedStrategy
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import _FAIRSCALE_AVAILABLE
from tests.helpers.boring_model import BoringModel
@@ -231,7 +231,7 @@ def test_configure_ddp(tmpdir):
@RunIf(skip_windows=True, fairscale=True)
-@mock.patch("pytorch_lightning.plugins.DDPShardedStrategy._wrap_optimizers", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers", autospec=True)
@pytest.mark.parametrize("cls", [DDPShardedStrategy, DDPSpawnShardedStrategy])
def test_custom_kwargs_sharded(tmpdir, cls):
"""Tests to ensure that if custom kwargs are passed, they are set correctly."""
@@ -248,7 +248,7 @@ def test_custom_kwargs_sharded(tmpdir, cls):
@RunIf(skip_windows=True, fairscale=True)
-@mock.patch("pytorch_lightning.plugins.DDPShardedStrategy._wrap_optimizers", autospec=True)
+@mock.patch("pytorch_lightning.strategies.DDPShardedStrategy._wrap_optimizers", autospec=True)
@pytest.mark.parametrize(["params", "expected_buffer_size"], [(dict(), 0), (dict(reduce_buffer_size=128), 128)])
@pytest.mark.parametrize("num_nodes", [1, 2])
def test_custom_kwargs_sharded_reduce_buffer_size(tmpdir, params, expected_buffer_size, num_nodes):


@@ -14,7 +14,7 @@
import torch
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import SingleDeviceStrategy
+from pytorch_lightning.strategies import SingleDeviceStrategy
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf


@@ -37,7 +37,7 @@ from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.core.saving import load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.overrides.distributed import IndexBatchSamplerWrapper, UnrepeatedDistributedSampler
-from pytorch_lightning.plugins import (
+from pytorch_lightning.strategies import (
DataParallelStrategy,
DDP2Strategy,
DDPFullyShardedStrategy,


@@ -16,7 +16,7 @@ import os
import torch
from pytorch_lightning import Trainer
-from pytorch_lightning.plugins import DeepSpeedStrategy
+from pytorch_lightning.strategies import DeepSpeedStrategy
from pytorch_lightning.utilities.deepspeed import convert_zero_checkpoint_to_fp32_state_dict
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf