cleaning tests (#2201)

parent ff45842041
commit db7bb4c348
@@ -1,6 +1,7 @@
from pathlib import Path

import pytest
import torch

import tests.base.utils as tutils
from pytorch_lightning import Callback

@@ -8,7 +9,6 @@ from pytorch_lightning import Trainer, LightningModule
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from tests.base import EvalModelTemplate
import torch


def test_early_stopping_functionality(tmpdir):

@@ -1,9 +1,9 @@
import sys
import threading
from functools import wraps, partial
from http.server import SimpleHTTPRequestHandler

import sys
import pytest
import threading
import torch.multiprocessing as mp


@@ -1,8 +1,8 @@
import pytest
import torch

from tests.base import EvalModelTemplate
from pytorch_lightning.core.decorators import auto_move_data
from tests.base import EvalModelTemplate


@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")

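The `auto_move_data` imported in this hunk is the decorator that moves a method's tensor arguments onto the module's device before the call runs. A small, illustrative usage sketch (the module and shapes below are made up and are not part of this diff):

import torch
from pytorch_lightning import LightningModule
from pytorch_lightning.core.decorators import auto_move_data

class InferenceModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    @auto_move_data
    def forward(self, x):
        # x has been moved to self.device before this body runs
        return self.layer(x)

model = InferenceModel()
out = model(torch.rand(4, 32))  # a CPU tensor is fine even if the model sits on a GPU
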
@@ -1,6 +1,15 @@
from functools import partial

import pytest
import torch
from functools import partial
from sklearn.metrics import (
    accuracy_score as sk_accuracy,
    precision_score as sk_precision,
    recall_score as sk_recall,
    f1_score as sk_f1_score,
    fbeta_score as sk_fbeta_score,
    confusion_matrix as sk_confusion_matrix
)

from pytorch_lightning import seed_everything
from pytorch_lightning.metrics.functional.classification import (

@@ -24,15 +33,6 @@ from pytorch_lightning.metrics.functional.classification import (
    auc,
)

from sklearn.metrics import (
    accuracy_score as sk_accuracy,
    precision_score as sk_precision,
    recall_score as sk_recall,
    f1_score as sk_f1_score,
    fbeta_score as sk_fbeta_score,
    confusion_matrix as sk_confusion_matrix
)


@pytest.mark.parametrize(['sklearn_metric', 'torch_metric'], [
    (sk_accuracy, accuracy),

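The parametrized test continuing in this hunk pairs each Lightning functional metric with its scikit-learn counterpart and checks them on the same inputs. A minimal standalone sketch of one such check, assuming the functional `accuracy` imported above (sample size and tolerance are illustrative choices):

import torch
from sklearn.metrics import accuracy_score as sk_accuracy
from pytorch_lightning import seed_everything
from pytorch_lightning.metrics.functional.classification import accuracy

seed_everything(42)                      # reproducible fake labels
pred = torch.randint(0, 2, (100,))       # fake binary predictions
target = torch.randint(0, 2, (100,))     # fake binary targets

sk_score = sk_accuracy(target.numpy(), pred.numpy())
pl_score = accuracy(pred, target)        # returns a scalar tensor

# the two implementations should agree up to floating-point precision
assert abs(float(pl_score) - sk_score) < 1e-6
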
@@ -1,6 +1,5 @@
import os
import platform
from collections import namedtuple

import pytest
import torch

@@ -9,8 +8,8 @@ from packaging.version import parse as version_parse
import tests.base.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from tests.base import EvalModelTemplate
from pytorch_lightning.callbacks import ModelCheckpoint
from tests.base import EvalModelTemplate


def test_cpu_slurm_save_load(tmpdir):

@@ -231,53 +230,6 @@ def test_running_test_no_val(tmpdir):
    tutils.assert_ok_model_acc(trainer)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_single_gpu_batch_parse():
    trainer = Trainer()

    # batch is just a tensor
    batch = torch.rand(2, 3)
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'

    # tensor list
    batch = [torch.rand(2, 3), torch.rand(2, 3)]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
    assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'

    # tensor list of lists
    batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
    assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'

    # tensor dict
    batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
    assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'

    # tuple of tensor list and list of tensor dict
    batch = ([torch.rand(2, 3) for _ in range(2)],
             [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'

    assert batch[1][0]['a'].device.index == 0
    assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'

    assert batch[1][0]['b'].device.index == 0
    assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'

    # namedtuple of tensor
    BatchType = namedtuple('BatchType', ['a', 'b'])
    batch = [BatchType(a=torch.rand(2, 3), b=torch.rand(2, 3)) for _ in range(2)]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0].a.device.index == 0
    assert batch[0].a.type() == 'torch.cuda.FloatTensor'


def test_simple_cpu(tmpdir):
    """Verify continue training session on CPU."""
    model = EvalModelTemplate()

@@ -1,4 +1,4 @@
import os
from collections import namedtuple

import pytest
import torch

@@ -239,3 +239,50 @@ def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):
def test_parse_gpu_returns_None_when_no_devices_are_available(mocked_device_count_0, gpus):
    with pytest.raises(MisconfigurationException):
        parse_gpu_ids(gpus)


@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_single_gpu_batch_parse():
    trainer = Trainer()

    # batch is just a tensor
    batch = torch.rand(2, 3)
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'

    # tensor list
    batch = [torch.rand(2, 3), torch.rand(2, 3)]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
    assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'

    # tensor list of lists
    batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
    assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'

    # tensor dict
    batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
    assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'

    # tuple of tensor list and list of tensor dict
    batch = ([torch.rand(2, 3) for _ in range(2)],
             [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'

    assert batch[1][0]['a'].device.index == 0
    assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'

    assert batch[1][0]['b'].device.index == 0
    assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'

    # namedtuple of tensor
    BatchType = namedtuple('BatchType', ['a', 'b'])
    batch = [BatchType(a=torch.rand(2, 3), b=torch.rand(2, 3)) for _ in range(2)]
    batch = trainer.transfer_batch_to_gpu(batch, 0)
    assert batch[0].a.device.index == 0
    assert batch[0].a.type() == 'torch.cuda.FloatTensor'

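The test added above exercises `Trainer.transfer_batch_to_gpu`, which walks arbitrarily nested batch containers (lists, dicts, tuples, namedtuples) and moves every tensor it finds to the given GPU. A rough, illustrative sketch of that kind of recursion (not Lightning's actual implementation):

import torch

def move_to_device(batch, device):
    # tensors are moved directly
    if torch.is_tensor(batch):
        return batch.to(device)
    # namedtuples are rebuilt field by field (checked before plain tuples)
    if isinstance(batch, tuple) and hasattr(batch, '_fields'):
        return type(batch)(*(move_to_device(x, device) for x in batch))
    # lists and tuples are rebuilt element by element
    if isinstance(batch, (list, tuple)):
        return type(batch)(move_to_device(x, device) for x in batch)
    # dicts are rebuilt value by value
    if isinstance(batch, dict):
        return {k: move_to_device(v, device) for k, v in batch.items()}
    # anything else is returned untouched
    return batch
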
@@ -1,12 +1,9 @@
import torch
import pytest
import numpy as np
import pytest

from pytorch_lightning import Trainer, seed_everything

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.utilities import rank_zero_only

from tests.base import EvalModelTemplate
from tests.base.utils import reset_seed

@@ -1,7 +1,8 @@
import os
import sys
import pickle
from argparse import Namespace

import cloudpickle
import pytest
import torch
from omegaconf import OmegaConf, Container

@@ -9,8 +10,6 @@ from omegaconf import OmegaConf, Container
from pytorch_lightning import Trainer, LightningModule
from pytorch_lightning.utilities import AttributeDict
from tests.base import EvalModelTemplate
import pickle
import cloudpickle


class SaveHparamsModel(EvalModelTemplate):

@@ -2,8 +2,8 @@ import glob
import math
import os
import pickle
import types
import sys
import types
from argparse import Namespace
from pathlib import Path

@@ -18,8 +18,8 @@ from pytorch_lightning.core.saving import (
    load_hparams_from_tags_csv, load_hparams_from_yaml, save_hparams_to_tags_csv)
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.trainer.logging import TrainerLoggingMixin
from pytorch_lightning.utilities.io import load as pl_load
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.io import load as pl_load
from tests.base import EvalModelTemplate
