Remove legacy pytest markers (#9761)

Carlos Mocholí 2021-09-29 19:08:26 +02:00 committed by GitHub
parent 19008ce98f
commit 32003159f0
3 changed files with 1 addition and 33 deletions

setup.cfg

@@ -24,11 +24,6 @@ addopts =
     --strict
     --doctest-modules
     --color=yes
-markers =
-    slow
-    remote_data
-    filterwarnings
-    gpus_param_tests
 junit_duration_report = call
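
The four deleted markers were declared here but evidently legacy: the only remaining uses (`gpus_param_tests`, removed below) go with them. A minimal sketch of why the declaration and the decorators must be removed together, assuming pytest's `--strict` behavior of rejecting unregistered markers; `test_hypothetical` is an illustrative name, not from this diff:

```python
# With --strict kept in addopts (see the hunk above), every marker used in a
# test must be declared under `markers =`. Once `gpus_param_tests` is no longer
# declared, a leftover decorator fails collection instead of being ignored.
import pytest

@pytest.mark.gpus_param_tests  # undeclared after this commit -> "Unknown pytest.mark" error
def test_hypothetical():
    ...
```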

tests/conftest.py

@@ -14,13 +14,12 @@
 import os
 import sys
 import threading
-from functools import partial, wraps
+from functools import partial
 from http.server import SimpleHTTPRequestHandler
 from pathlib import Path
 
 import pytest
 import torch.distributed
-import torch.multiprocessing as mp
 
 from pytorch_lightning.plugins.environments.lightning_environment import find_free_network_port
 from tests import _PATH_DATASETS
@@ -88,21 +87,6 @@ def teardown_process_group():
         torch.distributed.destroy_process_group()
 
 
-def pytest_configure(config):
-    config.addinivalue_line("markers", "spawn: spawn test in a separate process using torch.multiprocessing.spawn")
-
-
-@pytest.mark.tryfirst
-def pytest_pyfunc_call(pyfuncitem):
-    if pyfuncitem.get_closest_marker("spawn"):
-        testfunction = pyfuncitem.obj
-        funcargs = pyfuncitem.funcargs
-        testargs = tuple(funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames)
-
-        mp.spawn(wraps, (testfunction, testargs))
-        return True
-
-
 @pytest.fixture
 def tmpdir_server(tmpdir):
     if sys.version_info >= (3, 7):
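
The deleted `spawn` hook appears to have been dead code: `mp.spawn(wraps, ...)` passed `functools.wraps` as the spawn target, so the child process built a decorator instead of running the test body. For reference, a minimal working version of the same idea, assuming `torch.multiprocessing.spawn`'s `fn(process_idx, *args)` calling convention; `run_test_in_subprocess` is a hypothetical helper, not project code:

```python
import pytest
import torch.multiprocessing as mp


def run_test_in_subprocess(process_idx, fn, args):
    # mp.spawn invokes the target as fn(process_idx, *args).
    fn(*args)


@pytest.hookimpl(tryfirst=True)  # modern spelling of the deprecated @pytest.mark.tryfirst
def pytest_pyfunc_call(pyfuncitem):
    if pyfuncitem.get_closest_marker("spawn"):
        testargs = tuple(pyfuncitem.funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames)
        mp.spawn(run_test_in_subprocess, (pyfuncitem.obj, testargs))
        return True  # tell pytest the call phase was handled here
```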

tests/models/test_gpu.py

@@ -92,7 +92,6 @@ def mocked_device_count_0(monkeypatch):
     monkeypatch.setattr(torch.cuda, "device_count", device_count)
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize(
     ["gpus", "expected_num_gpus", "distributed_backend"],
     [
@@ -108,7 +107,6 @@ def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distrib
     assert Trainer(gpus=gpus, accelerator=distributed_backend).num_gpus == expected_num_gpus
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize(
     ["gpus", "expected_num_gpus", "distributed_backend"],
     [
@@ -120,7 +118,6 @@ def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distr
     assert Trainer(gpus=gpus, accelerator=distributed_backend).num_gpus == expected_num_gpus
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize(
     ["gpus", "expected_root_gpu", "distributed_backend"],
     [
@@ -136,7 +133,6 @@ def test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distrib
     assert Trainer(gpus=gpus, accelerator=distributed_backend).root_gpu == expected_root_gpu
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize(
     ["gpus", "expected_root_gpu", "distributed_backend"],
     [
@@ -150,7 +146,6 @@ def test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_
 
 
 # Asking for a gpu when non are available will result in a MisconfigurationException
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize(
     ["gpus", "expected_root_gpu", "distributed_backend"],
     [
@@ -168,7 +163,6 @@ def test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_
         Trainer(gpus=gpus, accelerator=distributed_backend)
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize(
     ["gpus", "expected_root_gpu"],
     [
@@ -183,7 +177,6 @@ def test_determine_root_gpu_device(gpus, expected_root_gpu):
     assert device_parser.determine_root_gpu_device(gpus) == expected_root_gpu
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize(
     ["gpus", "expected_gpu_ids"],
     [
@@ -206,7 +199,6 @@ def test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):
     assert device_parser.parse_gpu_ids(gpus) == expected_gpu_ids
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize(
     ["gpus"],
     [
@@ -225,20 +217,17 @@ def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):
         device_parser.parse_gpu_ids(gpus)
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize("gpus", [[1, 2, 19], -1, "-1"])
 def test_parse_gpu_fail_on_non_existent_id(mocked_device_count_0, gpus):
     with pytest.raises(MisconfigurationException):
         device_parser.parse_gpu_ids(gpus)
 
 
-@pytest.mark.gpus_param_tests
 def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):
     with pytest.raises(MisconfigurationException):
         device_parser.parse_gpu_ids([1, 2, 19])
 
 
-@pytest.mark.gpus_param_tests
 @pytest.mark.parametrize("gpus", [-1, "-1"])
 def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_count_0, gpus):
     with pytest.raises(MisconfigurationException):
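
All eleven deletions in this file are the same change: dropping the now-unregistered `gpus_param_tests` marker while leaving the parametrized tests untouched. These tests never needed real GPUs, since they monkeypatch the reported device count. A self-contained sketch of that pattern, assuming nothing from this repo (`parse_gpu_count` and `two_fake_gpus` are illustrative stand-ins for `device_parser` and the `mocked_device_count` fixture):

```python
import pytest
import torch


def parse_gpu_count(gpus):
    # Toy stand-in for the device-parsing logic under test.
    if gpus == -1:
        return torch.cuda.device_count()
    if isinstance(gpus, int):
        return gpus
    return len(gpus)


@pytest.fixture
def two_fake_gpus(monkeypatch):
    # Pretend two CUDA devices exist, so the test runs on CPU-only CI.
    monkeypatch.setattr(torch.cuda, "device_count", lambda: 2)


@pytest.mark.parametrize(["gpus", "expected"], [(1, 1), ([0, 1], 2), (-1, 2)])
def test_parse_gpu_count(two_fake_gpus, gpus, expected):
    assert parse_gpu_count(gpus) == expected
```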