import os
import warnings
from argparse import Namespace

import numpy as np
import torch

from pl_examples import LightningTemplateModel
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.logging import TestTubeLogger, TensorBoardLogger
from pytorch_lightning.testing import LightningTestModel

# generate a list of random ports and seeds for the tests
RANDOM_PORTS = list(np.random.randint(12000, 19000, 1000))
ROOT_SEED = 1234
torch.manual_seed(ROOT_SEED)
np.random.seed(ROOT_SEED)
RANDOM_SEEDS = list(np.random.randint(0, 10000, 1000))
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))


def run_model_test_no_loggers(trainer_options, model, min_acc=0.50):
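    """Fit `model` with `trainer_options` (no explicit logger setup), reload the trained weights and re-check accuracy."""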
    save_dir = trainer_options['default_save_path']

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'trainer failed to complete the fit'

    # test model loading
    pretrained_model = load_model(trainer.logger,
                                  trainer.checkpoint_callback.filepath,
                                  path_expt=trainer_options.get('default_save_path'))

    # test new model accuracy
    for dataloader in model.test_dataloader():
        run_prediction(dataloader, pretrained_model, min_acc=min_acc)

    if trainer.use_ddp:
        # on hpc this would work fine... but need to hack it for the purpose of the test
        trainer.model = pretrained_model
        trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()


def run_model_test(trainer_options, model, on_gpu=True):
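    """Fit `model` with a test-tube logger and checkpoint callback, then verify reload, prediction accuracy and HPC save/load.

    Illustrative usage (assumes `save_dir` is a temporary directory created by the test)::

        model, hparams = get_model()
        run_model_test(dict(default_save_path=save_dir), model, on_gpu=False)
    """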
    save_dir = trainer_options['default_save_path']

    # logger file to get meta
    logger = get_test_tube_logger(save_dir, False)

    # logger file to get weights
    checkpoint = init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options['checkpoint_callback'] = checkpoint
    trainer_options['logger'] = logger

    # fit model
    trainer = Trainer(**trainer_options)
    result = trainer.fit(model)

    # correct result and ok accuracy
    assert result == 1, 'trainer failed to complete the fit'

    # test model loading
    pretrained_model = load_model(logger, trainer.checkpoint_callback.filepath)

    # test new model accuracy
    for dataloader in model.test_dataloader():
        run_prediction(dataloader, pretrained_model)

    if trainer.use_ddp or trainer.use_ddp2:
        # on hpc this would work fine... but need to hack it for the purpose of the test
        trainer.model = pretrained_model
        trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()

    # test HPC loading / saving
    trainer.hpc_save(save_dir, logger)
    trainer.hpc_load(save_dir, on_gpu=on_gpu)


def get_hparams(continue_training=False, hpc_exp_number=0):
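    """Build a `Namespace` of default hyperparameters for the template/test models."""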
    root_dir = os.path.dirname(os.path.realpath(__file__))

    args = {
        'drop_prob': 0.2,
        'batch_size': 32,
        'in_features': 28 * 28,
        'learning_rate': 0.001 * 8,
        'optimizer_name': 'adam',
        'data_root': os.path.join(root_dir, 'mnist'),
        'out_features': 10,
        'hidden_dim': 1000,
    }

    if continue_training:
        args['test_tube_do_checkpoint_load'] = True
        args['hpc_exp_number'] = hpc_exp_number

    hparams = Namespace(**args)
    return hparams


def get_model(use_test_model=False, lbfgs=False):
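    """Return a `(model, hparams)` tuple, optionally using the test model or the LBFGS optimizer."""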
    # set up model with these hyperparams
    hparams = get_hparams()
    if lbfgs:
        setattr(hparams, 'optimizer_name', 'lbfgs')
        setattr(hparams, 'learning_rate', 0.002)

    if use_test_model:
        model = LightningTestModel(hparams)
    else:
        model = LightningTemplateModel(hparams)

    return model, hparams


def get_test_tube_logger(save_dir, debug=True, version=None):
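    """Create a `TestTubeLogger` rooted at `save_dir` (debug mode does not write logs to disk)."""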
    # set up logger object without actually saving logs
    logger = TestTubeLogger(save_dir, name='lightning_logs', debug=debug, version=version)
    return logger


def get_data_path(expt_logger, path_dir=None):
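    """Resolve the experiment data directory for a logger (or a bare experiment), regardless of logger type."""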
    # some calls pass only the experiment, not the full logger
    expt = expt_logger.experiment if hasattr(expt_logger, 'experiment') else expt_logger

    # each logger has to have these attributes
    name, version = expt_logger.name, expt_logger.version

    # only the test-tube experiment has such an attribute
    if hasattr(expt, 'get_data_path'):
        return expt.get_data_path(name, version)

    # the other experiments...
    if not path_dir:
        path_dir = ROOT_PATH
    path_expt = os.path.join(path_dir, name, 'version_%s' % version)

    # check whether the versioned sub-folder exists (the typical case for test-tube)
    if not os.path.isdir(path_expt):
        path_expt = path_dir
    return path_expt


def load_model(exp, root_weights_dir, module_class=LightningTemplateModel, path_expt=None):
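    """Load a trained model from the first checkpoint found in `root_weights_dir` and the logger's tags csv."""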
    # load trained model
    path_expt_dir = get_data_path(exp, path_dir=path_expt)
    tags_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_CSV_TAGS)

    checkpoints = [x for x in os.listdir(root_weights_dir) if '.ckpt' in x]
    weights_dir = os.path.join(root_weights_dir, checkpoints[0])

    trained_model = module_class.load_from_metrics(weights_path=weights_dir,
                                                   tags_csv=tags_path)

    assert trained_model is not None, 'loading model failed'

    return trained_model


def run_prediction(dataloader, trained_model, dp=False, min_acc=0.50):
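    """Run the model on a single batch from `dataloader` and assert its accuracy exceeds `min_acc`."""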
    # run prediction on 1 batch
    for batch in dataloader:
        break

    x, y = batch
    x = x.view(x.size(0), -1)

    if dp:
        output = trained_model(batch, 0)
        acc = output['val_acc']
        acc = torch.mean(acc).item()
    else:
        y_hat = trained_model(x)

        # acc
        labels_hat = torch.argmax(y_hat, dim=1)
        acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)

    assert acc > min_acc, f'this model is expected to get > {min_acc} in test set (it got {acc})'


def assert_ok_model_acc(trainer, key='test_acc', thr=0.4):
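    """Assert that the metric `key` reported in the trainer progress bar exceeds `thr`."""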
    # the model is expected to exceed the `thr` accuracy on the given metric
    acc = trainer.training_tqdm_dict[key]
    assert acc > thr, f'Model failed to get expected {thr} accuracy. {key} = {acc}'


def can_run_gpu_test():
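    """Return True if at least two GPUs are available; otherwise warn and return False."""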
    if not torch.cuda.is_available():
        warnings.warn('test_multi_gpu_model_ddp cannot run.'
                      ' Rerun on a GPU node to run this test')
        return False
    if not torch.cuda.device_count() > 1:
        warnings.warn('test_multi_gpu_model_ddp cannot run.'
                      ' Rerun on a node with 2+ GPUs to run this test')
        return False
    return True


def reset_seed():
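    """Seed torch and numpy with the next seed from the pre-generated pool."""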
    seed = RANDOM_SEEDS.pop()
    torch.manual_seed(seed)
    np.random.seed(seed)


def set_random_master_port():
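    """Point `MASTER_PORT` at the next port from the pre-generated pool (used by the ddp tests)."""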
    port = RANDOM_PORTS.pop()
    os.environ['MASTER_PORT'] = str(port)


def init_checkpoint_callback(logger, path_dir=None):
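    """Create a `ModelCheckpoint` callback writing into the logger's `checkpoints` directory."""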
    exp_path = get_data_path(logger, path_dir=path_dir)
    ckpt_dir = os.path.join(exp_path, 'checkpoints')
    checkpoint = ModelCheckpoint(ckpt_dir)
    return checkpoint