# lightning/docs/source/examples/fully_featured_trainer.py
#
# Fully featured trainer example: trains a registered model from
# command-line arguments, locally (CPU / single GPU / multi GPU) or
# on a SLURM cluster via test_tube.
import os
import sys

import numpy as np
from time import sleep
import torch
from test_tube import HyperOptArgumentParser, Experiment, SlurmCluster

from pytorch_lightning.models.trainer import Trainer
from pytorch_lightning.utils.arg_parse import add_default_args
from pytorch_lightning.utils.pt_callbacks import EarlyStopping, ModelCheckpoint

# Fix both RNG seeds so repeated runs are reproducible.
SEED = 2334
torch.manual_seed(SEED)
np.random.seed(SEED)

# ---------------------
# DEFINE MODEL HERE
# ---------------------
from docs.source.examples.example_model import ExampleModel
# ---------------------

# Registry mapping the --model_name CLI value to its model class.
AVAILABLE_MODELS = {
    'model_template': ExampleModel
}

"""
Allows training by using command line arguments

Run by:
# TYPE YOUR RUN COMMAND HERE
"""
def main_local(hparams):
    """Run a single training locally: no cluster handle, no results dict."""
    main(hparams, None, None)
def main(hparams, cluster, results_dict):
    """
    Main training routine specific for this project.

    :param hparams: parsed hyperparameter namespace (from HyperOptArgumentParser)
    :param cluster: SlurmCluster handle when launched on SLURM, else None
    :param results_dict: unused; kept to match the test_tube cluster-callback signature
    """
    # Decide the compute device, honoring an explicit CUDA opt-out.
    on_gpu = torch.cuda.is_available() and not hparams.disable_cuda
    device = 'cuda' if on_gpu else 'cpu'

    # Plain attribute assignment instead of explicit __setattr__ dunder calls.
    hparams.device = device
    hparams.on_gpu = on_gpu
    hparams.nb_gpus = torch.cuda.device_count()
    hparams.inference_mode = hparams.model_load_weights_path is not None

    # delay each training start to not overwrite logs
    process_position, current_gpu = TRAINING_MODEL.get_process_position(hparams.gpus)
    sleep(process_position + 1)

    # init experiment next to this script
    log_dir = os.path.dirname(os.path.realpath(__file__))
    exp = Experiment(
        name='test_tube_exp',
        debug=True,
        save_dir=log_dir,
        version=0,
        autosave=False,
        description='test demo'
    )
    exp.argparse(hparams)
    exp.save()

    # build model
    print('loading model...')
    model = TRAINING_MODEL(hparams)
    print('model built')

    # callbacks
    early_stop = EarlyStopping(
        monitor=hparams.early_stop_metric,
        patience=hparams.early_stop_patience,
        verbose=True,
        mode=hparams.early_stop_mode
    )

    # os.path.join instead of hand-rolled '/'-formatting
    model_save_path = os.path.join(hparams.model_save_path, exp.name, str(exp.version))
    checkpoint = ModelCheckpoint(
        filepath=model_save_path,
        save_function=None,
        save_best_only=True,
        verbose=True,
        monitor=hparams.model_save_monitor_value,
        mode=hparams.model_save_monitor_mode
    )

    # configure trainer
    trainer = Trainer(
        experiment=exp,
        cluster=cluster,
        checkpoint_callback=checkpoint,
        early_stop_callback=early_stop,
    )

    # train model
    trainer.fit(model)
def get_default_parser(strategy, root_dir):
    """Build a HyperOptArgumentParser pre-loaded with the project's default args."""
    model_names = list(AVAILABLE_MODELS.keys())
    parser = HyperOptArgumentParser(strategy=strategy, add_help=False)
    add_default_args(parser, root_dir, possible_model_names=model_names, rand_seed=SEED)
    return parser
def get_model_name(args):
    """
    Extract the value of a ``model_name`` flag from a raw argv-style list.

    :param args: list of command-line tokens (e.g. ``sys.argv``)
    :return: the flag's value, or None when the flag is absent or has no value
    """
    for i, arg in enumerate(args):
        if 'model_name' in arg:
            # Support the '--model_name=value' form (previously this
            # wrongly returned the following, unrelated token).
            if '=' in arg:
                return arg.split('=', 1)[1]
            # Guard against a trailing flag with no value (previously
            # this indexed blindly and raised IndexError).
            if i + 1 < len(args):
                return args[i + 1]
            return None
    return None
def optimize_on_cluster(hyperparams):
    """Submit one SLURM job per hyperparameter trial via test_tube."""
    # enable cluster training
    cluster = SlurmCluster(
        hyperparam_optimizer=hyperparams,
        log_path=hyperparams.tt_save_path,
        test_tube_exp_name=hyperparams.tt_name,
    )

    # email for cluster coms
    cluster.notify_job_status(email='add_email_here', on_done=True, on_fail=True)

    # configure cluster resources
    cluster.per_experiment_nb_gpus = hyperparams.per_experiment_nb_gpus
    cluster.job_time = '48:00:00'
    cluster.gpu_type = '1080ti'
    cluster.memory_mb_per_node = 48000

    # any modules for code to run in env
    cluster.add_command('source activate pytorch_lightning')

    # display name: first three chars of the experiment name's first '_'-token
    job_display_name = hyperparams.tt_name.split('_')[0][0:3]

    # run hopt
    print('submitting jobs...')
    cluster.optimize_parallel_cluster_gpu(
        main,
        nb_trials=hyperparams.nb_hopt_trials,
        job_name=job_display_name,
    )
if __name__ == '__main__':
    # pick the model class from argv, falling back to the template
    model_name = get_model_name(sys.argv)
    if model_name is None:
        model_name = 'model_template'

    # use default args
    root_dir = os.path.split(os.path.dirname(sys.modules['__main__'].__file__))[0]
    parent_parser = get_default_parser(strategy='random_search', root_dir=root_dir)

    # allow model to overwrite or extend args
    TRAINING_MODEL = AVAILABLE_MODELS[model_name]
    parser = TRAINING_MODEL.add_model_specific_args(parent_parser, root_dir)
    hyperparams = parser.parse_args()

    # format GPU layout
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"

    # RUN TRAINING
    if hyperparams.on_cluster:
        # run on HPC cluster
        print('RUNNING ON SLURM CLUSTER')
        gpu_ids = hyperparams.gpus.split(';')
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(gpu_ids)
        optimize_on_cluster(hyperparams)
    elif hyperparams.gpus is None:
        # run on cpu
        print('RUNNING ON CPU')
        main(hyperparams, None, None)
    elif hyperparams.single_run_gpu:
        # run on 1 gpu
        gpu_ids = hyperparams.gpus.split(';')
        print(f'RUNNING 1 TRIAL ON GPU. gpu: {gpu_ids[0]}')
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_ids[0]
        main(hyperparams, None, None)
    else:
        # multiple GPUs on same machine
        gpu_ids = hyperparams.gpus.split(';')
        print(f'RUNNING MULTI GPU. GPU ids: {gpu_ids}')
        hyperparams.optimize_parallel_gpu(
            main_local,
            gpu_ids=gpu_ids,
            nb_trials=hyperparams.nb_hopt_trials,
            nb_workers=len(gpu_ids)
        )