parent 22985d2f43
commit 0f9134e043

@@ -1,4 +1,4 @@
-name: Recurent events
+name: Recurrent events
 
 # https://jasonet.co/posts/scheduled-actions/
 # https://github.community/t/distinct-job-for-each-schedule/17811/2

@@ -194,12 +194,7 @@ To visualize the profiled operation, you can either:
 
 """
 
-from pytorch_lightning.profiler.profilers import (
-    AdvancedProfiler,
-    BaseProfiler,
-    PassThroughProfiler,
-    SimpleProfiler,
-)
+from pytorch_lightning.profiler.profilers import AdvancedProfiler, BaseProfiler, PassThroughProfiler, SimpleProfiler
 from pytorch_lightning.profiler.pytorch import PyTorchProfiler
 
 __all__ = [

@@ -34,7 +34,7 @@ from pytorch_lightning.plugins import (
 from pytorch_lightning.plugins.environments import ClusterEnvironment, SLURMEnvironment, TorchElasticEnvironment
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def test_accelerator_choice_cpu(tmpdir):

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
-import platform
 from unittest.mock import patch
 
 import pytest
@@ -21,7 +20,7 @@ import torch
 from pytorch_lightning import Trainer
 from tests.accelerators import ddp_model, DDPLauncher
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 from tests.utilities.distributed import call_training_script
 
 CLI_ARGS = '--max_epochs 1 --gpus 2 --accelerator ddp'
@@ -83,7 +82,7 @@ def test_cli_to_pass(tmpdir, args=None):
     return '1'
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 @pytest.mark.skipif(torch.cuda.is_available(), reason="test doesn't requires GPU machine")
 def test_torch_distributed_backend_env_variables(tmpdir):
     """

@@ -20,8 +20,8 @@ from pytorch_lightning.trainer import Trainer
 from pytorch_lightning.trainer.states import TrainerState
 from tests.helpers import BoringModel
 from tests.helpers.datamodules import ClassifDataModule
+from tests.helpers.runif import RunIf
 from tests.helpers.simple_models import ClassificationModel
-from tests.helpers.skipif import RunIf
 
 
 @RunIf(min_gpus=2)

@@ -21,8 +21,8 @@ from pytorch_lightning.callbacks import EarlyStopping
 from pytorch_lightning.core import memory
 from tests.helpers import BoringModel
 from tests.helpers.datamodules import ClassifDataModule
+from tests.helpers.runif import RunIf
 from tests.helpers.simple_models import ClassificationModel
-from tests.helpers.skipif import RunIf
 
 PRETEND_N_OF_GPUS = 16
 

@@ -13,7 +13,6 @@
 # limitations under the License.
 import os
 import pickle
-import sys
 from unittest import mock
 
 import cloudpickle
@@ -27,6 +26,7 @@ from pytorch_lightning.trainer.states import TrainerState
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers import BoringModel
 from tests.helpers.datamodules import ClassifDataModule
+from tests.helpers.runif import RunIf
 from tests.helpers.simple_models import ClassificationModel
 
 
@@ -374,13 +374,13 @@ class EarlyStoppingModel(BoringModel):
                      3,
                      'ddp_cpu',
                      2,
-                     marks=pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")),
+                     marks=RunIf(skip_windows=True)),
         pytest.param([EarlyStopping(monitor='cba', patience=3),
                       EarlyStopping(monitor='abc')],
                      3,
                      'ddp_cpu',
                      2,
-                     marks=pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")),
+                     marks=RunIf(skip_windows=True)),
     ],
 )
 def test_multiple_early_stopping_callbacks(callbacks, expected_stop_epoch, accelerator, num_processes, tmpdir):

@@ -24,7 +24,7 @@ from pytorch_lightning.loggers.csv_logs import ExperimentWriter
 from pytorch_lightning.trainer.states import TrainerState
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @RunIf(min_gpus=1)

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
-import platform
 from collections import OrderedDict
 from logging import INFO
 from unittest import mock
@@ -27,7 +26,7 @@ from pytorch_lightning import seed_everything, Trainer
 from pytorch_lightning.callbacks import ModelPruning
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 class TestModel(BoringModel):
@@ -169,13 +168,12 @@ def test_pruning_callback_ddp(tmpdir, use_global_unstructured, parameters_to_prune):
 )
 
 
-@RunIf(min_gpus=2)
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(min_gpus=2, skip_windows=True)
 def test_pruning_callback_ddp_spawn(tmpdir):
     train_with_pruning_callback(tmpdir, use_global_unstructured=True, accelerator="ddp_spawn", gpus=2)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_pruning_callback_ddp_cpu(tmpdir):
     train_with_pruning_callback(tmpdir, parameters_to_prune=True, accelerator="ddp_cpu", num_processes=2)
 

@@ -21,8 +21,8 @@ from pytorch_lightning.callbacks import QuantizationAwareTraining
 from pytorch_lightning.metrics.functional.mean_relative_error import mean_relative_error
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.datamodules import RegressDataModule
+from tests.helpers.runif import RunIf
 from tests.helpers.simple_models import RegressionModel
-from tests.helpers.skipif import RunIf
 
 
 @pytest.mark.parametrize("observe", ['average', pytest.param('histogram', marks=RunIf(min_torch="1.5"))])

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
-import platform
 from unittest import mock
 
 import pytest
@@ -24,7 +23,7 @@ from pytorch_lightning import Trainer
 from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_6
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers import BoringModel, RandomDataset
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 if _TORCH_GREATER_EQUAL_1_6:
     from pytorch_lightning.callbacks import StochasticWeightAveraging
@@ -128,8 +127,7 @@ def test_swa_callback_ddp_spawn(tmpdir):
     train_with_swa(tmpdir, accelerator="ddp_spawn", gpus=2)
 
 
-@RunIf(min_torch="1.6.0")
-@pytest.mark.skipif(platform.system() == "Windows", reason="ddp_cpu is not available on Windows")
+@RunIf(min_torch="1.6.0", skip_windows=True)
 def test_swa_callback_ddp_cpu(tmpdir):
     train_with_swa(tmpdir, accelerator="ddp_cpu", num_processes=2)
 

@@ -12,14 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
-import platform
 
-import pytest
 import torch
 
 from pytorch_lightning import Trainer
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def test_model_torch_save(tmpdir):
@@ -39,7 +37,7 @@ def test_model_torch_save(tmpdir):
     trainer = torch.load(temp_path)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_model_torch_save_ddp_cpu(tmpdir):
     """Test to ensure torch save does not fail for model and trainer using cpu ddp."""
     model = BoringModel()

@@ -26,8 +26,8 @@ from pytorch_lightning.trainer.states import TrainerState
 from pytorch_lightning.utilities.model_helpers import is_overridden
 from tests.helpers import BoringDataModule, BoringModel
 from tests.helpers.datamodules import ClassifDataModule
+from tests.helpers.runif import RunIf
 from tests.helpers.simple_models import ClassificationModel
-from tests.helpers.skipif import RunIf
 from tests.helpers.utils import reset_seed, set_random_master_port
 
 

@@ -16,7 +16,7 @@ import torch
 
 from pytorch_lightning.core.decorators import auto_move_data
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @RunIf(min_gpus=1)

@@ -21,7 +21,7 @@ from pytorch_lightning.utilities import _NATIVE_AMP_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers import BoringModel
 from tests.helpers.advanced_models import ParityModuleRNN
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 class EmptyModule(LightningModule):

@@ -11,9 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import sys
 
 import pytest
 import torch
-import torch.distributed as dist
 import torch.multiprocessing as mp
@@ -21,6 +19,7 @@ import torch.multiprocessing as mp
 import tests.helpers.utils as tutils
 from pytorch_lightning.core.step_result import Result
 from pytorch_lightning.metrics import Metric
+from tests.helpers.runif import RunIf
 
 
 class DummyMetric(Metric):
@@ -90,7 +89,7 @@ def _ddp_test_fn(rank, worldsize):
         assert epoch_expected[k] == epoch_log[k]
 
 
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+@RunIf(skip_windows=True)
 def test_result_reduce_ddp():
     """Make sure result logging works with DDP"""
     tutils.reset_seed()

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import random
-import sys
 from pathlib import Path
 
 import pytest
@@ -26,7 +25,7 @@ from pytorch_lightning import Trainer
 from pytorch_lightning.core.step_result import Result
 from pytorch_lightning.trainer.states import TrainerState
 from tests.helpers import BoringDataModule, BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def _setup_ddp(rank, worldsize):
@@ -49,7 +48,7 @@ def _ddp_test_fn(rank, worldsize, result_cls: Result):
 
 
 @pytest.mark.parametrize("result_cls", [Result])
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+@RunIf(skip_windows=True)
 def test_result_reduce_ddp(result_cls):
     """Make sure result logging works with DDP"""
     tutils.reset_seed()

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """Test deprecated functionality which will be removed in v1.4.0"""
-import sys
 
 import pytest
 import torch
@@ -28,7 +27,7 @@ from pytorch_lightning.plugins import DDPSpawnPlugin
 from pytorch_lightning.plugins.environments import TorchElasticEnvironment
 from tests.deprecated_api import _soft_unimport_module
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def test_v1_4_0_deprecated_trainer_attributes():
@@ -178,8 +177,7 @@ class CustomDDPPlugin(DDPSpawnPlugin):
         assert isinstance(self.model.module, LightningDistributedModule)
 
 
-@RunIf(min_gpus=2)
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+@RunIf(min_gpus=2, skip_windows=True)
 def test_v1_4_0_deprecated_lightning_distributed_data_parallel(tmpdir):
     model = BoringModel()
     trainer = Trainer(

@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import sys
 from distutils.version import LooseVersion
 from typing import Optional
 
@@ -31,13 +32,22 @@ class RunIf:
             assert arg1 > 0.0
     """
 
-    def __new__(self, *args, min_gpus: int = 0, min_torch: Optional[str] = None, quantization: bool = False, **kwargs):
+    def __new__(
+        self,
+        *args,
+        min_gpus: int = 0,
+        min_torch: Optional[str] = None,
+        quantization: bool = False,
+        skip_windows: bool = False,
+        **kwargs
+    ):
         """
         Args:
             args: native pytest.mark.skipif arguments
             min_gpus: min number of gpus required to run test
             min_torch: minimum pytorch version to run test
             quantization: if `torch.quantization` package is required to run test
+            skip_windows: skip test for Windows platform (typically for some limited torch functionality)
             kwargs: native pytest.mark.skipif keyword arguments
         """
         conditions = []
@@ -57,6 +67,10 @@ class RunIf:
             conditions.append(not _TORCH_QUANTIZE_AVAILABLE or _miss_default)
             reasons.append("missing PyTorch quantization")
 
+        if skip_windows:
+            conditions.append(sys.platform == "win32")
+            reasons.append("unimplemented on Windows")
+
         reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
         return pytest.mark.skipif(
             *args,

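A minimal usage sketch of the skip_windows flag introduced above; the test names and bodies below are hypothetical, while the decorator forms and import path are exactly the ones this commit applies across the test suite:

    from tests.helpers.runif import RunIf


    @RunIf(skip_windows=True)
    def test_something_ddp_cpu(tmpdir):  # hypothetical test
        # replaces the former
        # @pytest.mark.skipif(platform.system() == "Windows", reason=...) decorator
        ...


    @RunIf(min_gpus=2, skip_windows=True)
    def test_something_multi_gpu(tmpdir):  # hypothetical test
        # conditions accumulate: skipped on Windows or with fewer than 2 GPUs
        ...
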
@@ -14,7 +14,6 @@
 import inspect
 import os
 import pickle
-import platform
 from unittest import mock
 from unittest.mock import ANY
 
@@ -34,6 +33,7 @@ from pytorch_lightning.loggers import (
 from pytorch_lightning.loggers.base import DummyExperiment
 from pytorch_lightning.trainer.states import TrainerState
 from tests.helpers import BoringModel
+from tests.helpers.runif import RunIf
 from tests.loggers.test_comet import _patch_comet_atexit
 from tests.loggers.test_mlflow import mock_mlflow_run_creation
 
@@ -332,7 +332,7 @@ class RankZeroLoggerCheck(Callback):
         TestTubeLogger,
     ]
 )
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_logger_created_on_rank_zero_only(tmpdir, monkeypatch, logger_class):
     """ Test that loggers get replaced by dummy loggers on global rank > 0"""
     _patch_comet_atexit(monkeypatch)

@@ -18,7 +18,7 @@ import torch
 
 from pytorch_lightning.metrics.compositional import CompositionalMetric
 from pytorch_lightning.metrics.metric import Metric
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 class DummyMetric(Metric):

@@ -1,9 +1,8 @@
-import sys
-
 import pytest
 import torch
 
 from pytorch_lightning.metrics import Metric
+from tests.helpers.runif import RunIf
 from tests.metrics.test_metric import Dummy
 from tests.metrics.utils import setup_ddp
 
@@ -40,7 +39,7 @@ def _test_ddp_sum_cat(rank, worldsize):
     assert dummy.bar == worldsize
 
 
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+@RunIf(skip_windows=True)
 @pytest.mark.parametrize("process", [_test_ddp_cat, _test_ddp_sum, _test_ddp_sum_cat])
 def test_ddp(process):
     torch.multiprocessing.spawn(process, args=(2, ), nprocs=2)
@@ -66,7 +65,7 @@ def _test_non_contiguous_tensors(rank, worldsize):
     metric.update(torch.randn(10, 5)[:, 0])
 
 
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+@RunIf(skip_windows=True)
 def test_non_contiguous_tensors():
     """ Test that gather_all operation works for non contiguous tensors """
     torch.multiprocessing.spawn(_test_non_contiguous_tensors, args=(2, ), nprocs=2)

@@ -9,7 +9,7 @@ import torch
 from torch import nn
 
 from pytorch_lightning.metrics.metric import Metric, MetricCollection
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 torch.manual_seed(42)
 

@@ -25,7 +25,7 @@ from pytorch_lightning.trainer.states import TrainerState
 from pytorch_lightning.utilities import _APEX_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 class AMPTestModel(BoringModel):

@@ -12,9 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
-import platform
 
 import pytest
 import torch
 
 import tests.helpers.pipelines as tpipes
@@ -24,6 +22,7 @@ from pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint
 from pytorch_lightning.trainer.states import TrainerState
 from tests.helpers import BoringModel
 from tests.helpers.datamodules import ClassifDataModule
+from tests.helpers.runif import RunIf
 from tests.helpers.simple_models import ClassificationModel
 
 
@@ -126,7 +125,7 @@ def test_early_stopping_cpu_model(tmpdir):
     model.unfreeze()
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_multi_cpu_model_ddp(tmpdir):
     """Make sure DDP works."""
     tutils.set_random_master_port()

@@ -25,8 +25,8 @@ from pytorch_lightning.utilities import device_parser
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers import BoringModel
 from tests.helpers.datamodules import ClassifDataModule
+from tests.helpers.runif import RunIf
 from tests.helpers.simple_models import ClassificationModel
-from tests.helpers.skipif import RunIf
 
 PRETEND_N_OF_GPUS = 16
 

@@ -22,7 +22,7 @@ import torch
 from pytorch_lightning import Callback, Trainer
 from pytorch_lightning.trainer.states import TrainerState
 from tests.helpers import BoringModel, RandomDataset
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @pytest.mark.parametrize('max_steps', [1, 2, 3])

@@ -13,7 +13,6 @@
 # limitations under the License.
 import json
 import os
-import platform
 import shlex
 import subprocess
 import sys
@@ -32,7 +31,7 @@ from pytorch_lightning.trainer.states import TrainerState
 from pytorch_lightning.utilities import _APEX_AVAILABLE, _HOROVOD_AVAILABLE, _NATIVE_AMP_AVAILABLE
 from tests.helpers import BoringModel
 from tests.helpers.advanced_models import BasicGAN
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 if _HOROVOD_AVAILABLE:
     import horovod
@@ -67,7 +66,7 @@ def _run_horovod(trainer_options, on_gpu=False):
     assert exit_code == 0
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_horovod_cpu(tmpdir):
     """Test Horovod running multi-process on CPU."""
     trainer_options = dict(
@@ -84,7 +83,7 @@ def test_horovod_cpu(tmpdir):
     _run_horovod(trainer_options)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_horovod_cpu_implicit(tmpdir):
     """Test Horovod without specifying a backend, inferring from env set by `horovodrun`."""
     trainer_options = dict(
@@ -100,9 +99,8 @@ def test_horovod_cpu_implicit(tmpdir):
     _run_horovod(trainer_options)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
 @pytest.mark.skipif(not _HOROVOD_NCCL_AVAILABLE, reason="test requires Horovod with NCCL support")
-@RunIf(min_gpus=2)
+@RunIf(min_gpus=2, skip_windows=True)
 def test_horovod_multi_gpu(tmpdir):
     """Test Horovod with multi-GPU support."""
     trainer_options = dict(
@@ -121,9 +119,8 @@ def test_horovod_multi_gpu(tmpdir):
 
 
 @pytest.mark.skip(reason="Horovod has a problem with broadcast when using apex?")
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
 @pytest.mark.skipif(not _HOROVOD_NCCL_AVAILABLE, reason="test requires Horovod with NCCL support")
-@RunIf(min_gpus=2)
+@RunIf(min_gpus=2, skip_windows=True)
 @pytest.mark.skipif(not _APEX_AVAILABLE, reason="test requires apex")
 def test_horovod_apex(tmpdir):
     """Test Horovod with multi-GPU support using apex amp."""
@@ -145,9 +142,8 @@ def test_horovod_apex(tmpdir):
 
 
 @pytest.mark.skip(reason="Skip till Horovod fixes integration with Native torch.cuda.amp")
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
 @pytest.mark.skipif(not _HOROVOD_NCCL_AVAILABLE, reason="test requires Horovod with NCCL support")
-@RunIf(min_gpus=2)
+@RunIf(min_gpus=2, skip_windows=True)
 @pytest.mark.skipif(not _NATIVE_AMP_AVAILABLE, reason="test requires torch.cuda.amp")
 def test_horovod_amp(tmpdir):
     """Test Horovod with multi-GPU support using native amp."""
@@ -168,9 +164,8 @@ def test_horovod_amp(tmpdir):
     _run_horovod(trainer_options, on_gpu=True)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
 @pytest.mark.skipif(not _HOROVOD_NCCL_AVAILABLE, reason="test requires Horovod with NCCL support")
-@RunIf(min_gpus=1)
+@RunIf(min_gpus=1, skip_windows=True)
 def test_horovod_transfer_batch_to_gpu(tmpdir):
 
     class TestTrainingStepModel(BoringModel):
@@ -198,7 +193,7 @@ def test_horovod_transfer_batch_to_gpu(tmpdir):
     tpipes.run_model_test_without_loggers(trainer_options, model)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_horovod_multi_optimizer(tmpdir):
     model = BasicGAN()
 
@@ -233,7 +228,7 @@ def test_horovod_multi_optimizer(tmpdir):
 # TODO: unclear Horovod failure...
 @pytest.mark.skip(reason="unclear Horovod failure...")
 @pytest.mark.skipif(not _HOROVOD_AVAILABLE, reason="Horovod is unavailable")
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_result_reduce_horovod(tmpdir):
     """Make sure result logging works with Horovod.
 
@@ -284,7 +279,7 @@ def test_result_reduce_horovod(tmpdir):
 # TODO: unclear Horovod failure...
 @pytest.mark.skip(reason="unclear Horovod failure...")
 @pytest.mark.skipif(not _HOROVOD_AVAILABLE, reason="Horovod is unavailable")
-@pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_accuracy_metric_horovod():
     num_batches = 10
     batch_size = 16
@@ -335,7 +330,7 @@ def test_accuracy_metric_horovod():
     horovod.run(_compute_batch, np=2)
 
 
-# @pytest.mark.skipif(platform.system() == "Windows", reason="Horovod is not supported on Windows")
+# @RunIf(skip_windows=True)
 # def test_horovod_multi_optimizer_with_scheduling_stepping(tmpdir):
 # model = BoringModel()
 # model.configure_optimizers = model.configure_optimizers__multiple_schedulers

@@ -22,7 +22,7 @@ import tests.helpers.pipelines as tpipes
 import tests.helpers.utils as tutils
 from pytorch_lightning import Trainer
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def test_model_saves_with_input_sample(tmpdir):

@@ -30,8 +30,8 @@ from pytorch_lightning.callbacks import ModelCheckpoint
 from pytorch_lightning.trainer.states import RunningStage, TrainerState
 from tests.helpers import BoringModel
 from tests.helpers.datamodules import ClassifDataModule
+from tests.helpers.runif import RunIf
 from tests.helpers.simple_models import ClassificationModel
-from tests.helpers.skipif import RunIf
 
 
 class ModelTrainerPropertyParity(Callback):

@@ -24,7 +24,7 @@ from pytorch_lightning.plugins.environments import TorchElasticEnvironment
 from pytorch_lightning.trainer.states import TrainerState
 from pytorch_lightning.utilities import FLOAT16_EPSILON
 from tests.helpers.datamodules import MNISTDataModule
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 from tests.helpers.utils import set_random_master_port
 
 

@@ -19,7 +19,7 @@ import torch
 from tests.helpers import BoringModel
 from tests.helpers.advanced_models import BasicGAN, ParityModuleRNN
 from tests.helpers.datamodules import MNISTDataModule
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @pytest.mark.parametrize("modelclass", [

@@ -13,7 +13,7 @@ from pytorch_lightning.overrides.data_parallel import (
 )
 from pytorch_lightning.trainer.states import RunningStage
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @pytest.mark.parametrize("wrapper_class", [

@@ -8,7 +8,7 @@ from pytorch_lightning import Trainer
 from pytorch_lightning.plugins import NativeMixedPrecisionPlugin
 from pytorch_lightning.utilities import _NATIVE_AMP_AVAILABLE
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @pytest.mark.skipif(not _NATIVE_AMP_AVAILABLE, reason="Minimal PT version is set to 1.6")

@@ -12,7 +12,7 @@ from pytorch_lightning.plugins.training_type.deepspeed import LightningDeepSpeedModule
 from pytorch_lightning.utilities import _APEX_AVAILABLE, _DEEPSPEED_AVAILABLE, _NATIVE_AMP_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def test_deepspeed_lightning_module(tmpdir):

@@ -9,7 +9,7 @@ from pytorch_lightning.callbacks import Callback
 from pytorch_lightning.plugins.training_type.rpc_sequential import RPCPlugin
 from pytorch_lightning.utilities import _RPC_AVAILABLE
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @mock.patch.dict(

@@ -24,7 +24,7 @@ from pytorch_lightning.plugins.training_type.rpc_sequential import RPCSequentialPlugin
 from pytorch_lightning.utilities import _FAIRSCALE_PIPE_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.boring_model import RandomDataset
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @pytest.mark.skipif(not _FAIRSCALE_PIPE_AVAILABLE, reason="test requires FairScale to be installed")

@@ -1,5 +1,4 @@
 import os
-import platform
 
 import pytest
 import torch
@@ -10,7 +9,7 @@ from pytorch_lightning.plugins import DDPShardedPlugin, DDPSpawnShardedPlugin
 from pytorch_lightning.utilities import _APEX_AVAILABLE, _FAIRSCALE_AVAILABLE, _NATIVE_AMP_AVAILABLE
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @pytest.mark.parametrize(["accelerator"], [("ddp_sharded", ), ("ddp_sharded_spawn", )])
@@ -90,7 +89,7 @@ def test_ddp_choice_sharded_amp(tmpdir, accelerator):
     trainer.fit(model)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 @pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
 def test_ddp_sharded_plugin_checkpoint_cpu(tmpdir):
     """
@@ -114,8 +113,7 @@ def test_ddp_sharded_plugin_checkpoint_cpu(tmpdir):
         assert torch.equal(ddp_param.to("cpu"), shard_param)
 
 
-@RunIf(min_gpus=2)
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(min_gpus=2, skip_windows=True)
 @pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
 def test_ddp_sharded_plugin_checkpoint_multi_gpu(tmpdir):
     """
@@ -139,8 +137,7 @@ def test_ddp_sharded_plugin_checkpoint_multi_gpu(tmpdir):
         assert torch.equal(ddp_param.to("cpu"), shard_param)
 
 
-@RunIf(min_gpus=2)
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(min_gpus=2, skip_windows=True)
 @pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
 def test_ddp_sharded_plugin_finetune(tmpdir):
     """
@@ -162,7 +159,7 @@ def test_ddp_sharded_plugin_finetune(tmpdir):
     trainer.fit(saved_model)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 @pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
 def test_ddp_sharded_plugin_resume_from_checkpoint(tmpdir):
     """
@@ -194,8 +191,7 @@ def test_ddp_sharded_plugin_resume_from_checkpoint(tmpdir):
 
 @pytest.mark.skip(reason="Not a critical test, skip till drone CI performance improves.")
 @pytest.mark.skip(reason="Currently unsupported restarting training on different number of devices.")
-@RunIf(min_gpus=2)
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(min_gpus=2, skip_windows=True)
 @pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
 def test_ddp_sharded_plugin_resume_from_checkpoint_downsize_gpus(tmpdir):
     """
@@ -225,8 +221,7 @@ def test_ddp_sharded_plugin_resume_from_checkpoint_downsize_gpus(tmpdir):
     trainer.fit(model)
 
 
-@RunIf(min_gpus=1)
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(min_gpus=1, skip_windows=True)
 @pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
 def test_ddp_sharded_plugin_resume_from_checkpoint_gpu_to_cpu(tmpdir):
     """
@@ -256,7 +251,7 @@ def test_ddp_sharded_plugin_resume_from_checkpoint_gpu_to_cpu(tmpdir):
     trainer.fit(model)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 @pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
 @pytest.mark.skipif(
     not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
@@ -275,8 +270,7 @@ def test_ddp_sharded_plugin_test(tmpdir):
     trainer.test(model)
 
 
-@RunIf(min_gpus=2)
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(min_gpus=2, skip_windows=True)
 @pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
 def test_ddp_sharded_plugin_test_multigpu(tmpdir):
     """

@@ -22,7 +22,7 @@ import torch
 from pytorch_lightning import Trainer
 from tests.helpers import BoringModel
 from tests.helpers.deterministic_model import DeterministicModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def test_training_step_scalar(tmpdir):

@@ -12,14 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import platform
 from unittest import mock
 
-import pytest
-
 from pytorch_lightning import Trainer
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 class TestModel(BoringModel):
@@ -33,7 +30,7 @@ class TestModel(BoringModel):
         assert logged_times == expected, msg
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 def test_global_zero_only_logging_ddp_cpu(tmpdir):
     """
     Makes sure logging only happens from root zero

@@ -29,7 +29,7 @@ from pytorch_lightning.trainer.connectors.logger_connector.callback_hook_validator
 from pytorch_lightning.trainer.connectors.logger_connector.metrics_holder import MetricsHolder
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers.boring_model import BoringModel, RandomDataset
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def decorator_with_arguments(fx_name: str = '', hook_fx_name: str = None) -> Callable:

@@ -31,7 +31,7 @@ from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
 from pytorch_lightning.core.lightning import LightningModule
 from tests.helpers.boring_model import BoringModel, RandomDictDataset, RandomDictStringDataset
 from tests.helpers.deterministic_model import DeterministicModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})

@@ -26,7 +26,7 @@ from pytorch_lightning import seed_everything, Trainer
 from pytorch_lightning.callbacks import Callback
 from pytorch_lightning.utilities import _APEX_AVAILABLE
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})

@@ -11,14 +11,11 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import sys
-
-import pytest
 
 from pytorch_lightning import Trainer
 from tests.accelerators import DDPLauncher
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 class TrainerGetModel(BoringModel):
@@ -47,7 +44,7 @@ def test_get_model(tmpdir):
     trainer.fit(model)
 
 
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+@RunIf(skip_windows=True)
 def test_get_model_ddp_cpu(tmpdir):
     """
     Tests that `trainer.lightning_module` extracts the model correctly when using ddp on cpu
@@ -86,8 +83,7 @@ def test_get_model_gpu(tmpdir):
     trainer.fit(model)
 
 
-@RunIf(min_gpus=1)
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+@RunIf(min_gpus=1, skip_windows=True)
 @DDPLauncher.run("--accelerator [accelerator]", max_epochs=["1"], accelerator=["ddp", "ddp_spawn"])
 def test_get_model_ddp_gpu(tmpdir, args=None):
     """

@@ -20,7 +20,7 @@ from torch.utils.data.sampler import BatchSampler, SequentialSampler
 from pytorch_lightning import Trainer
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.helpers import BoringModel, RandomDataset
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 class IndexedRandomDataset(RandomDataset):

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import os
-import platform
 from unittest import mock
 from unittest.mock import patch
 
@@ -30,7 +29,7 @@ from pytorch_lightning.utilities.data import has_iterable_dataset, has_len
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.base import EvalModelTemplate
 from tests.helpers.boring_model import BoringModel, RandomDataset
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def test_fit_train_loader_only(tmpdir):
@@ -600,7 +599,7 @@ def test_error_on_zero_len_dataloader(tmpdir):
         trainer.fit(model)
 
 
-@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
+@RunIf(skip_windows=True)
 @pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
 @patch('pytorch_lightning.trainer.data_loading.multiprocessing.cpu_count', return_value=4)
 def test_warning_with_few_workers(mock, tmpdir, ckpt_path):
@@ -646,7 +645,7 @@ def test_warning_with_few_workers(mock, tmpdir, ckpt_path):
         trainer.test(**test_options)
 
 
-@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
+@RunIf(skip_windows=True)
 @pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
 @patch('pytorch_lightning.trainer.data_loading.multiprocessing.cpu_count', return_value=4)
 def test_warning_with_few_workers_multi_loader(mock, tmpdir, ckpt_path):
@@ -808,8 +807,7 @@ class DistribSamplerCallback(Callback):
         assert not test_sampler.shuffle
 
 
-@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
-@RunIf(min_gpus=2)
+@RunIf(min_gpus=2, skip_windows=True)
 def test_dataloader_distributed_sampler(tmpdir):
     """ Test DistributedSampler and it's arguments for DDP backend """
 
@@ -836,8 +834,7 @@ class ModelWithDataLoaderDistributedSampler(EvalModelTemplate):
         )
 
 
-@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
-@RunIf(min_gpus=2)
+@RunIf(min_gpus=2, skip_windows=True)
 def test_dataloader_distributed_sampler_already_attached(tmpdir):
     """ Test DistributedSampler and it's arguments for DDP backend when DistSampler already included on dataloader """
 

@@ -14,7 +14,6 @@
 import math
 import os
 import pickle
-import platform
 import sys
 from argparse import Namespace
 from copy import deepcopy
@@ -42,7 +41,7 @@ from pytorch_lightning.utilities.cloud_io import load as pl_load
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.base import EvalModelTemplate
 from tests.helpers import BoringModel, RandomDataset
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @pytest.fixture
@@ -221,8 +220,14 @@ def test_trainer_accumulate_grad_batches_zero_grad(tmpdir, accumulate_grad_batches):
 @pytest.mark.parametrize(
     ["accumulate_grad_batches", "limit_train_batches"],
     [
-        ({1: 2, 3: 4}, 1.0),
-        ({1: 2, 3: 4}, 0.5),  # not to be divisible by accumulate_grad_batches on purpose
+        ({
+            1: 2,
+            3: 4
+        }, 1.0),
+        ({
+            1: 2,
+            3: 4
+        }, 0.5),  # not to be divisible by accumulate_grad_batches on purpose
         (3, 1.0),
         (3, 0.8),  # not to be divisible by accumulate_grad_batches on purpose
         (4, 1.0),
@@ -1436,8 +1441,7 @@ def test_trainer_predict_ddp(tmpdir):
     predict(tmpdir, "ddp", 2, None, plugins=["ddp_sharded"])
 
 
-@RunIf(min_gpus=2)
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(min_gpus=2, skip_windows=True)
 @pytest.mark.skipif(
     not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
 )
@@ -1453,7 +1457,7 @@ def test_trainer_predict_1_gpu(tmpdir):
     predict(tmpdir, None, 1, None)
 
 
-@pytest.mark.skipif(platform.system() == "Windows", reason="Distributed training is not supported on Windows")
+@RunIf(skip_windows=True)
 @pytest.mark.skipif(
     not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
 )

@@ -22,7 +22,7 @@ import pytest
 import tests.helpers.utils as tutils
 from pytorch_lightning import Trainer
 from pytorch_lightning.utilities import argparse
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @mock.patch('argparse.ArgumentParser.parse_args')

@@ -16,7 +16,7 @@ import torch
 import pytorch_lightning as pl
 import tests.helpers.utils as tutils
 from tests.base import EvalModelTemplate
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @RunIf(min_gpus=2)

@@ -25,7 +25,7 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from tests.base import EvalModelTemplate
 from tests.helpers import BoringModel
 from tests.helpers.datamodules import MNISTDataModule
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def test_num_training_batches(tmpdir):

@@ -19,7 +19,7 @@ import torch
 from pytorch_lightning import Trainer
 from pytorch_lightning.tuner.auto_gpu_select import pick_multiple_gpus
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 @RunIf(min_gpus=2)

@@ -8,7 +8,7 @@ import torch
 from pytorch_lightning import seed_everything, Trainer
 from pytorch_lightning.utilities import AllGatherGrad
 from tests.helpers.boring_model import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def setup_ddp(rank, world_size):
@@ -42,14 +42,13 @@ def _test_all_gather_ddp(rank, world_size):
     assert torch.allclose(grad2, tensor2.grad)
 
 
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
+@RunIf(skip_windows=True)
 def test_all_gather_ddp():
     world_size = 3
     torch.multiprocessing.spawn(_test_all_gather_ddp, args=(world_size, ), nprocs=world_size)
 
 
-@pytest.mark.skipif(sys.platform == "win32", reason="DDP not available on windows")
-@RunIf(min_gpus=2)
+@RunIf(min_gpus=2, skip_windows=True)
 @pytest.mark.skipif(
     not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
 )

@@ -17,7 +17,7 @@ import torchtext
 from torchtext.data.example import Example
 
 from pytorch_lightning.utilities.apply_func import move_data_to_device
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 def _get_torchtext_data_iterator(include_lengths=False):

@@ -18,7 +18,7 @@ import torch.nn as nn
 from pytorch_lightning import Callback, Trainer
 from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
 from tests.helpers import BoringModel
-from tests.helpers.skipif import RunIf
+from tests.helpers.runif import RunIf
 
 
 class SubSubModule(DeviceDtypeModuleMixin):

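For reference, a sketch of the pattern RunIf.__new__ relies on above: parallel condition/reason lists are filtered together and collapsed into a single pytest.mark.skipif. The hunk above is cut off before the final combination, so combining the conditions with any() and the reason join format are assumptions here:

    import sys

    import pytest

    conditions = [sys.platform == "win32"]
    reasons = ["unimplemented on Windows"]

    # keep only the reasons whose condition actually triggered
    reasons = [rs for cond, rs in zip(conditions, reasons) if cond]
    skip_mark = pytest.mark.skipif(any(conditions), reason=", ".join(reasons))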