diff --git a/tests/callbacks/test_callbacks.py b/tests/callbacks/test_callbacks.py
index 96e1973ccd..8ea1163e07 100644
--- a/tests/callbacks/test_callbacks.py
+++ b/tests/callbacks/test_callbacks.py
@@ -1,6 +1,7 @@
 from pathlib import Path
 
 import pytest
+import torch
 
 import tests.base.utils as tutils
 from pytorch_lightning import Callback
@@ -8,7 +9,6 @@ from pytorch_lightning import Trainer, LightningModule
 from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
 from pytorch_lightning.loggers import TensorBoardLogger
 from tests.base import EvalModelTemplate
-import torch
 
 
 def test_early_stopping_functionality(tmpdir):
diff --git a/tests/conftest.py b/tests/conftest.py
index 216bbe171d..91312fc848 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,9 +1,9 @@
+import sys
+import threading
 from functools import wraps, partial
 from http.server import SimpleHTTPRequestHandler
-import sys
 
 import pytest
-import threading
 import torch.multiprocessing as mp
 
 
diff --git a/tests/core/test_decorators.py b/tests/core/test_decorators.py
index 0f35a1630e..5725b7a339 100644
--- a/tests/core/test_decorators.py
+++ b/tests/core/test_decorators.py
@@ -1,8 +1,8 @@
 import pytest
 import torch
 
-from tests.base import EvalModelTemplate
 from pytorch_lightning.core.decorators import auto_move_data
+from tests.base import EvalModelTemplate
 
 
 @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
diff --git a/tests/metrics/functional/test_classification.py b/tests/metrics/functional/test_classification.py
index 01dbdd6dc6..e4e6a5112e 100644
--- a/tests/metrics/functional/test_classification.py
+++ b/tests/metrics/functional/test_classification.py
@@ -1,6 +1,15 @@
+from functools import partial
+
 import pytest
 import torch
-from functools import partial
+from sklearn.metrics import (
+    accuracy_score as sk_accuracy,
+    precision_score as sk_precision,
+    recall_score as sk_recall,
+    f1_score as sk_f1_score,
+    fbeta_score as sk_fbeta_score,
+    confusion_matrix as sk_confusion_matrix
+)
 
 from pytorch_lightning import seed_everything
 from pytorch_lightning.metrics.functional.classification import (
@@ -24,15 +33,6 @@ from pytorch_lightning.metrics.functional.classification import (
     auc,
 )
 
-from sklearn.metrics import (
-    accuracy_score as sk_accuracy,
-    precision_score as sk_precision,
-    recall_score as sk_recall,
-    f1_score as sk_f1_score,
-    fbeta_score as sk_fbeta_score,
-    confusion_matrix as sk_confusion_matrix
-)
-
 
 @pytest.mark.parametrize(['sklearn_metric', 'torch_metric'], [
     (sk_accuracy, accuracy),
diff --git a/tests/models/test_cpu.py b/tests/models/test_cpu.py
index 051db81d1b..c31e157a71 100644
--- a/tests/models/test_cpu.py
+++ b/tests/models/test_cpu.py
@@ -1,6 +1,5 @@
 import os
 import platform
-from collections import namedtuple
 
 import pytest
 import torch
@@ -9,8 +8,8 @@ from packaging.version import parse as version_parse
 import tests.base.utils as tutils
 from pytorch_lightning import Trainer
 from pytorch_lightning.callbacks import EarlyStopping
-from tests.base import EvalModelTemplate
 from pytorch_lightning.callbacks import ModelCheckpoint
+from tests.base import EvalModelTemplate
 
 
 def test_cpu_slurm_save_load(tmpdir):
@@ -231,53 +230,6 @@ def test_running_test_no_val(tmpdir):
     tutils.assert_ok_model_acc(trainer)
 
 
-@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
-def test_single_gpu_batch_parse():
-    trainer = Trainer()
-
-    # batch is just a tensor
-    batch = torch.rand(2, 3)
-    batch = trainer.transfer_batch_to_gpu(batch, 0)
-    assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'
-
-    # tensor list
-    batch = [torch.rand(2, 3), torch.rand(2, 3)]
-    batch = trainer.transfer_batch_to_gpu(batch, 0)
-    assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
-    assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'
-
-    # tensor list of lists
-    batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
-    batch = trainer.transfer_batch_to_gpu(batch, 0)
-    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
-    assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'
-
-    # tensor dict
-    batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
-    batch = trainer.transfer_batch_to_gpu(batch, 0)
-    assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
-    assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'
-
-    # tuple of tensor list and list of tensor dict
-    batch = ([torch.rand(2, 3) for _ in range(2)],
-             [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
-    batch = trainer.transfer_batch_to_gpu(batch, 0)
-    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
-
-    assert batch[1][0]['a'].device.index == 0
-    assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'
-
-    assert batch[1][0]['b'].device.index == 0
-    assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'
-
-    # namedtuple of tensor
-    BatchType = namedtuple('BatchType', ['a', 'b'])
-    batch = [BatchType(a=torch.rand(2, 3), b=torch.rand(2, 3)) for _ in range(2)]
-    batch = trainer.transfer_batch_to_gpu(batch, 0)
-    assert batch[0].a.device.index == 0
-    assert batch[0].a.type() == 'torch.cuda.FloatTensor'
-
-
 def test_simple_cpu(tmpdir):
     """Verify continue training session on CPU."""
     model = EvalModelTemplate()
diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py
index 80249a727c..3fd44265ad 100644
--- a/tests/models/test_gpu.py
+++ b/tests/models/test_gpu.py
@@ -1,4 +1,4 @@
-import os
+from collections import namedtuple
 
 import pytest
 import torch
@@ -239,3 +239,50 @@ def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):
 def test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):
     with pytest.raises(MisconfigurationException):
         parse_gpu_ids(gpus)
+
+
+@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
+def test_single_gpu_batch_parse():
+    trainer = Trainer()
+
+    # batch is just a tensor
+    batch = torch.rand(2, 3)
+    batch = trainer.transfer_batch_to_gpu(batch, 0)
+    assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'
+
+    # tensor list
+    batch = [torch.rand(2, 3), torch.rand(2, 3)]
+    batch = trainer.transfer_batch_to_gpu(batch, 0)
+    assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
+    assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'
+
+    # tensor list of lists
+    batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
+    batch = trainer.transfer_batch_to_gpu(batch, 0)
+    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
+    assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'
+
+    # tensor dict
+    batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
+    batch = trainer.transfer_batch_to_gpu(batch, 0)
+    assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
+    assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'
+
+    # tuple of tensor list and list of tensor dict
+    batch = ([torch.rand(2, 3) for _ in range(2)],
+             [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
+    batch = trainer.transfer_batch_to_gpu(batch, 0)
+    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
+
+    assert batch[1][0]['a'].device.index == 0
+    assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'
+
+    assert batch[1][0]['b'].device.index == 0
+    assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'
+
+    # namedtuple of tensor
+    BatchType = namedtuple('BatchType', ['a', 'b'])
+    batch = [BatchType(a=torch.rand(2, 3), b=torch.rand(2, 3)) for _ in range(2)]
+    batch = trainer.transfer_batch_to_gpu(batch, 0)
+    assert batch[0].a.device.index == 0
+    assert batch[0].a.type() == 'torch.cuda.FloatTensor'
diff --git a/tests/models/test_grad_norm.py b/tests/models/test_grad_norm.py
index 9140eef162..7a6659ecfa 100644
--- a/tests/models/test_grad_norm.py
+++ b/tests/models/test_grad_norm.py
@@ -1,12 +1,9 @@
-import torch
-import pytest
 import numpy as np
+import pytest
 
-from pytorch_lightning import Trainer, seed_everything
-
+from pytorch_lightning import Trainer
 from pytorch_lightning.loggers import LightningLoggerBase
 from pytorch_lightning.utilities import rank_zero_only
-
 from tests.base import EvalModelTemplate
 from tests.base.utils import reset_seed
 
diff --git a/tests/models/test_hparams.py b/tests/models/test_hparams.py
index eef38c4656..6a704c3f76 100644
--- a/tests/models/test_hparams.py
+++ b/tests/models/test_hparams.py
@@ -1,7 +1,8 @@
 import os
-import sys
+import pickle
 from argparse import Namespace
 
+import cloudpickle
 import pytest
 import torch
 from omegaconf import OmegaConf, Container
@@ -9,8 +10,6 @@ from omegaconf import OmegaConf, Container
 from pytorch_lightning import Trainer, LightningModule
 from pytorch_lightning.utilities import AttributeDict
 from tests.base import EvalModelTemplate
-import pickle
-import cloudpickle
 
 
 class SaveHparamsModel(EvalModelTemplate):
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index e397f6f132..ea35f899c0 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -2,8 +2,8 @@ import glob
 import math
 import os
 import pickle
-import types
 import sys
+import types
 from argparse import Namespace
 from pathlib import Path
 
@@ -18,8 +18,8 @@ from pytorch_lightning.core.saving import (
     load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv)
 from pytorch_lightning.loggers import TensorBoardLogger
 from pytorch_lightning.trainer.logging import TrainerLoggingMixin
-from pytorch_lightning.utilities.io import load as pl_load
 from pytorch_lightning.utilities.exceptions import MisconfigurationException
+from pytorch_lightning.utilities.io import load as pl_load
 from tests.base import EvalModelTemplate
 
 