Remove unnecessary `pytest.param` usage (#9760)

Carlos Mocholí 2021-09-30 04:42:11 +02:00 committed by GitHub
parent 8c9cb0c133
commit 7f95fd04d7
8 changed files with 63 additions and 96 deletions
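For context: `pytest.param` only earns its keep when a case needs extra metadata such as an explicit `id` or a `marks` entry; a bare tuple (or a single value, for one-argument parametrizations) is collected identically and reads shorter, which is all this commit exploits. Below is a minimal sketch of the pattern, assuming stock pytest only; the test names and values are illustrative and not taken from the changed files.

import pytest

# Old style: pytest.param wraps every case even though no id/marks are attached.
@pytest.mark.parametrize(
    ["gpus", "expected"],
    [pytest.param(1, [0]), pytest.param(3, [0, 1, 2])],
)
def test_old_style(gpus, expected):
    assert len(expected) == gpus

# New style: plain tuples produce the same parametrized cases.
@pytest.mark.parametrize(
    ["gpus", "expected"],
    [(1, [0]), (3, [0, 1, 2])],
)
def test_new_style(gpus, expected):
    assert len(expected) == gpus

# pytest.param stays useful when a case carries an id or a mark.
@pytest.mark.parametrize(
    "gpus",
    [1, pytest.param(-1, id="use all gpus"), pytest.param(99, marks=pytest.mark.skip)],
)
def test_keeps_param(gpus):
    assert gpus != 0

The few `pytest.param` entries that survive in the hunks below (the `id="-1 - use all gpus"` cases) follow exactly this exception.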

View File

@@ -149,13 +149,13 @@ def test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_
@pytest.mark.parametrize(
["gpus", "expected_root_gpu", "distributed_backend"],
[
pytest.param(1, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param([1, 2], None, "ddp"),
pytest.param([0, 1], None, "ddp"),
pytest.param(-1, None, "ddp"),
pytest.param("-1", None, "ddp"),
(1, None, "ddp"),
(3, None, "ddp"),
(3, None, "ddp"),
([1, 2], None, "ddp"),
([0, 1], None, "ddp"),
(-1, None, "ddp"),
("-1", None, "ddp"),
],
)
def test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
@@ -180,18 +180,18 @@ def test_determine_root_gpu_device(gpus, expected_root_gpu):
@pytest.mark.parametrize(
["gpus", "expected_gpu_ids"],
[
pytest.param(None, None),
pytest.param(0, None),
pytest.param(1, [0]),
pytest.param(3, [0, 1, 2]),
(None, None),
(0, None),
(1, [0]),
(3, [0, 1, 2]),
pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id="-1 - use all gpus"),
pytest.param([0], [0]),
pytest.param([1, 3], [1, 3]),
pytest.param((1, 3), [1, 3]),
pytest.param("0", None),
pytest.param("3", [0, 1, 2]),
pytest.param("1, 3", [1, 3]),
pytest.param("2,", [2]),
([0], [0]),
([1, 3], [1, 3]),
((1, 3), [1, 3]),
("0", None),
("3", [0, 1, 2]),
("1, 3", [1, 3]),
("2,", [2]),
pytest.param("-1", list(range(PRETEND_N_OF_GPUS)), id="'-1' - use all gpus"),
],
)
@@ -199,19 +199,7 @@ def test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):
assert device_parser.parse_gpu_ids(gpus) == expected_gpu_ids
@pytest.mark.parametrize(
["gpus"],
[
pytest.param(0.1),
pytest.param(-2),
pytest.param(False),
pytest.param([]),
pytest.param([-1]),
pytest.param([None]),
pytest.param(["0"]),
pytest.param([0, 0]),
],
)
@pytest.mark.parametrize("gpus", [0.1, -2, False, [], [-1], [None], ["0"], [0, 0]])
def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids(gpus)
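The last hunk above also shows the single-argument case: when `parametrize` receives only one argument name, the one-element name list and the per-case wrappers can collapse into a plain string plus a flat value list. A hedged sketch of that equivalence, with a made-up argument name (`value` is not from the diff):

import pytest

# Verbose single-argument form: one-element name list, one wrapper per case.
@pytest.mark.parametrize(["value"], [pytest.param(0.1), pytest.param(-2)])
def test_verbose(value):
    assert value != 0

# Compact, equivalent form: a bare argument name and a flat list of values.
@pytest.mark.parametrize("value", [0.1, -2])
def test_compact(value):
    assert value != 0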

View File

@@ -253,7 +253,7 @@ def test_dataloaders_passed_to_fit(tmpdir):
@pytest.mark.parametrize(
["tpu_cores", "expected_tpu_id"],
[pytest.param(1, None), pytest.param(8, None), pytest.param([1], 1), pytest.param([8], 8)],
[(1, None), (8, None), ([1], 1), ([8], 8)],
)
@RunIf(tpu=True)
def test_tpu_id_to_be_as_expected(tpu_cores, expected_tpu_id):
@@ -301,17 +301,17 @@ def test_broadcast_on_tpu():
@pytest.mark.parametrize(
["tpu_cores", "expected_tpu_id", "error_expected"],
[
pytest.param(1, None, False),
pytest.param(8, None, False),
pytest.param([1], 1, False),
pytest.param([8], 8, False),
pytest.param("1,", 1, False),
pytest.param("1", None, False),
pytest.param("9, ", 9, True),
pytest.param([9], 9, True),
pytest.param([0], 0, True),
pytest.param(2, None, True),
pytest.param(10, None, True),
(1, None, False),
(8, None, False),
([1], 1, False),
([8], 8, False),
("1,", 1, False),
("1", None, False),
("9, ", 9, True),
([9], 9, True),
([0], 0, True),
(2, None, True),
(10, None, True),
],
)
@RunIf(tpu=True)
@@ -327,7 +327,7 @@ def test_tpu_choice(tmpdir, tpu_cores, expected_tpu_id, error_expected):
@pytest.mark.parametrize(
["cli_args", "expected"],
[pytest.param("--tpu_cores=8", {"tpu_cores": 8}), pytest.param("--tpu_cores=1,", {"tpu_cores": "1,"})],
[("--tpu_cores=8", {"tpu_cores": 8}), ("--tpu_cores=1,", {"tpu_cores": "1,"})],
)
@RunIf(tpu=True)
@pl_multi_process_test

View File

@@ -52,9 +52,7 @@ def simple_profiler():
return SimpleProfiler()
@pytest.mark.parametrize(
["action", "expected"], [pytest.param("a", [3, 1]), pytest.param("b", [2]), pytest.param("c", [1])]
)
@pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
def test_simple_profiler_durations(simple_profiler, action: str, expected: list):
"""Ensure the reported durations are reasonably accurate."""
@@ -67,9 +65,7 @@ def test_simple_profiler_durations(simple_profiler, action: str, expected: list)
np.testing.assert_allclose(simple_profiler.recorded_durations[action], expected, rtol=0.2)
@pytest.mark.parametrize(
["action", "expected"], [pytest.param("a", [3, 1]), pytest.param("b", [2]), pytest.param("c", [1])]
)
@pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
def test_simple_profiler_iterable_durations(simple_profiler, action: str, expected: list):
"""Ensure the reported durations are reasonably accurate."""
iterable = _sleep_generator(expected)
@@ -163,9 +159,7 @@ def advanced_profiler(tmpdir):
return AdvancedProfiler(dirpath=tmpdir, filename="profiler")
@pytest.mark.parametrize(
["action", "expected"], [pytest.param("a", [3, 1]), pytest.param("b", [2]), pytest.param("c", [1])]
)
@pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
def test_advanced_profiler_durations(advanced_profiler, action: str, expected: list):
for duration in expected:
@@ -179,9 +173,7 @@ def test_advanced_profiler_durations(advanced_profiler, action: str, expected: l
np.testing.assert_allclose(recored_total_duration, expected_total_duration, rtol=0.2)
@pytest.mark.parametrize(
["action", "expected"], [pytest.param("a", [3, 1]), pytest.param("b", [2]), pytest.param("c", [1])]
)
@pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
def test_advanced_profiler_iterable_durations(advanced_profiler, action: str, expected: list):
"""Ensure the reported durations are reasonably accurate."""
iterable = _sleep_generator(expected)

View File

@@ -1012,7 +1012,7 @@ def test_batch_size_smaller_than_num_gpus(tmpdir):
@pytest.mark.parametrize(
["multiple_trainloader_mode", "num_training_batches"],
[pytest.param("min_size", 5), pytest.param("max_size_cycle", 10)],
[("min_size", 5), ("max_size_cycle", 10)],
)
def test_fit_multiple_train_loaders(tmpdir, multiple_trainloader_mode, num_training_batches):
"""Integration test for multple train loaders."""

View File

@@ -113,23 +113,15 @@ def test_add_argparse_args_redefined_error(cli_args: list, monkeypatch):
@pytest.mark.parametrize(
["cli_args", "expected"],
[
pytest.param(
"--auto_lr_find --auto_scale_batch_size power", {"auto_lr_find": True, "auto_scale_batch_size": "power"}
),
pytest.param(
("--auto_lr_find --auto_scale_batch_size power", {"auto_lr_find": True, "auto_scale_batch_size": "power"}),
(
"--auto_lr_find any_string --auto_scale_batch_size",
{"auto_lr_find": "any_string", "auto_scale_batch_size": True},
),
pytest.param(
"--auto_lr_find TRUE --auto_scale_batch_size FALSE", {"auto_lr_find": True, "auto_scale_batch_size": False}
),
pytest.param(
"--auto_lr_find t --auto_scale_batch_size ON", {"auto_lr_find": True, "auto_scale_batch_size": True}
),
pytest.param(
"--auto_lr_find 0 --auto_scale_batch_size n", {"auto_lr_find": False, "auto_scale_batch_size": False}
),
pytest.param(
("--auto_lr_find TRUE --auto_scale_batch_size FALSE", {"auto_lr_find": True, "auto_scale_batch_size": False}),
("--auto_lr_find t --auto_scale_batch_size ON", {"auto_lr_find": True, "auto_scale_batch_size": True}),
("--auto_lr_find 0 --auto_scale_batch_size n", {"auto_lr_find": False, "auto_scale_batch_size": False}),
(
"",
{
# These parameters are marked as Optional[...] in Trainer.__init__, with None as default.
@@ -175,7 +167,7 @@ def test_argparse_args_parsing_fast_dev_run(cli_args, expected):
@pytest.mark.parametrize(
["cli_args", "expected_parsed", "expected_device_ids"],
[pytest.param("", None, None), pytest.param("--gpus 1", 1, [0]), pytest.param("--gpus 0,", "0,", [0])],
[("", None, None), ("--gpus 1", 1, [0]), ("--gpus 0,", "0,", [0])],
)
@RunIf(min_gpus=1)
def test_argparse_args_parsing_gpus(cli_args, expected_parsed, expected_device_ids):
@@ -195,10 +187,10 @@ def test_argparse_args_parsing_gpus(cli_args, expected_parsed, expected_device_i
@pytest.mark.parametrize(
["cli_args", "extra_args"],
[
pytest.param({}, {}),
pytest.param({"logger": False}, {}),
pytest.param({"logger": False}, {"logger": True}),
pytest.param({"logger": False}, {"checkpoint_callback": True}),
({}, {}),
({"logger": False}, {}),
({"logger": False}, {"logger": True}),
({"logger": False}, {"checkpoint_callback": True}),
],
)
def test_init_from_argparse_args(cli_args, extra_args):

View File

@@ -51,7 +51,7 @@ def _get_torchtext_data_iterator(include_lengths=False):
@pytest.mark.parametrize("include_lengths", [False, True])
@pytest.mark.parametrize(["device"], [pytest.param(torch.device("cuda", 0))])
@pytest.mark.parametrize("device", [torch.device("cuda", 0)])
@RunIf(min_gpus=1)
def test_batch_move_data_to_device_torchtext_include_lengths(include_lengths, device):
data_iterator, _ = _get_torchtext_data_iterator(include_lengths=include_lengths)

View File

@@ -46,10 +46,8 @@ class DeviceAssertCallback(Callback):
assert model.device == model.module.module.device
@pytest.mark.parametrize(
["dst_dtype"], [pytest.param(torch.float), pytest.param(torch.double), pytest.param(torch.half)]
)
@pytest.mark.parametrize(["dst_device"], [pytest.param(torch.device("cpu")), pytest.param(torch.device("cuda", 0))])
@pytest.mark.parametrize("dst_dtype", [torch.float, torch.double, torch.half])
@pytest.mark.parametrize("dst_device", [torch.device("cpu"), torch.device("cuda", 0)])
@RunIf(min_gpus=1)
def test_submodules_device_and_dtype(dst_device, dst_dtype):
"""Test that the device and dtype property updates propagate through mixed nesting of regular nn.Modules and
@@ -85,11 +83,11 @@ def test_submodules_multi_gpu_ddp_spawn(tmpdir):
@pytest.mark.parametrize(
["device"],
"device",
[
pytest.param(None), # explicitly call without an index to see if the returning device contains an index
pytest.param(0),
pytest.param(torch.device("cuda", 0)),
None, # explicitly call without an index to see if the returning device contains an index
0,
torch.device("cuda", 0),
],
)
@RunIf(min_gpus=1)

View File

@@ -160,10 +160,7 @@ def test_empty_model_summary_shapes(mode: str):
@RunIf(min_gpus=1)
@pytest.mark.parametrize("mode", ["full", "top"])
@pytest.mark.parametrize(
["device"],
[pytest.param(torch.device("cpu")), pytest.param(torch.device("cuda", 0)), pytest.param(torch.device("cuda", 0))],
)
@pytest.mark.parametrize("device", [torch.device("cpu"), torch.device("cuda", 0)])
def test_linear_model_summary_shapes(device, mode):
"""Test that the model summary correctly computes the input- and output shapes."""
model = UnorderedModel().to(device)
@@ -247,13 +244,13 @@ def test_summary_with_scripted_modules(mode):
@pytest.mark.parametrize(
["example_input", "expected_size"],
[
pytest.param([], UNKNOWN_SIZE),
pytest.param((1, 2, 3), [UNKNOWN_SIZE] * 3),
pytest.param(torch.tensor(0), UNKNOWN_SIZE),
pytest.param(dict(tensor=torch.zeros(1, 2, 3)), UNKNOWN_SIZE),
pytest.param(torch.zeros(2, 3, 4), [2, 3, 4]),
pytest.param([torch.zeros(2, 3), torch.zeros(4, 5)], [[2, 3], [4, 5]]),
pytest.param((torch.zeros(2, 3), torch.zeros(4, 5)), [[2, 3], [4, 5]]),
([], UNKNOWN_SIZE),
((1, 2, 3), [UNKNOWN_SIZE] * 3),
(torch.tensor(0), UNKNOWN_SIZE),
(dict(tensor=torch.zeros(1, 2, 3)), UNKNOWN_SIZE),
(torch.zeros(2, 3, 4), [2, 3, 4]),
([torch.zeros(2, 3), torch.zeros(4, 5)], [[2, 3], [4, 5]]),
((torch.zeros(2, 3), torch.zeros(4, 5)), [[2, 3], [4, 5]]),
],
)
def test_example_input_array_types(example_input, expected_size, mode):