added single gpu train test
This commit is contained in:
parent
afa4548b12
commit
c7e8436083
|
@ -27,6 +27,34 @@ np.random.seed(SEED)
|
||||||
# TESTS
|
# TESTS
|
||||||
# ------------------------------------------------------------------------
|
# ------------------------------------------------------------------------
|
||||||
|
|
||||||
|
def test_amp_single_gpu():
    """
    Verify that AMP (16-bit) training works on a single GPU with the
    'dp' backend.

    Skips with a warning (instead of failing) when no CUDA device is
    available, so the suite still passes on CPU-only nodes.

    :return: None
    """
    # One available GPU is all this test needs; the original copy-pasted
    # a 2+ GPU requirement from the DDP test, which made this test skip
    # on every single-GPU machine.
    if not torch.cuda.is_available():
        warnings.warn('test_amp_single_gpu cannot run. '
                      'Rerun on a GPU node to run this test')
        return

    hparams = get_hparams()
    model = LightningTestModel(hparams)

    # Single device ([0]) + dp backend + AMP enabled for one quick epoch.
    trainer_options = dict(
        progress_bar=True,
        max_nb_epochs=1,
        gpus=[0],
        distributed_backend='dp',
        use_amp=True
    )

    run_gpu_model_test(trainer_options, model, hparams)
||||||
|
|
||||||
def test_cpu_restore_training():
|
def test_cpu_restore_training():
|
||||||
"""
|
"""
|
||||||
Verify continue training session on CPU
|
Verify continue training session on CPU
|
||||||
|
|
Loading…
Reference in New Issue