cleaned up demos

commit 9fc01e3fd3
parent c86524b0cc
@@ -1,4 +1,4 @@
-from .new_project_templates.lightning_module_template import LightningTemplateModel
+from .basic_examples.lightning_module_template import LightningTemplateModel
 
 __all__ = [
     'LightningTemplateModel'
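The hunk above keeps the package-level re-export intact after the demos moved from new_project_templates/ to basic_examples/. A minimal sanity check, assuming this is the examples package __init__ (inferred from the relative import; the diff omits file names) and the repo root is on PYTHONPATH:

    # hedged sketch: confirm the re-export resolves to the new location
    from examples import LightningTemplateModel
    assert LightningTemplateModel.__module__.endswith('basic_examples.lightning_module_template')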
@@ -1,15 +1,13 @@
 """
-16-bit single node, CPU example
 Runs a model on a single node across N-gpus.
 """
 import os
 import numpy as np
 import torch
 
-from test_tube import HyperOptArgumentParser, Experiment
+from argparse import ArgumentParser
 from pytorch_lightning import Trainer
-from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
 
-from examples.new_project_templates.lightning_module_template import LightningTemplateModel
+from examples.basic_examples.lightning_module_template import LightningTemplateModel
 
 SEED = 2334
 torch.manual_seed(SEED)
@@ -20,7 +18,6 @@ def main(hparams):
     """
     Main training routine specific for this project
     :param hparams:
-    :return:
     """
     # ------------------------
     # 1 INIT LIGHTNING MODEL
@@ -30,11 +27,7 @@ def main(hparams):
     # ------------------------
     # 2 INIT TRAINER
     # ------------------------
-    trainer = Trainer(
-        gpus=hparams.gpus,
-        use_amp=True,
-        distributed_backend='dp'
-    )
+    trainer = Trainer()
 
     # ------------------------
     # 3 START TRAINING
@@ -43,19 +36,14 @@ def main(hparams):
 
 
 if __name__ == '__main__':
 
-    # dirs
+    # ------------------------
+    # TRAINING ARGUMENTS
+    # ------------------------
+    # these are project-wide arguments
     root_dir = os.path.dirname(os.path.realpath(__file__))
+    parent_parser = ArgumentParser(add_help=False)
 
-    # although we user hyperOptParser, we are using it only as argparse right now
-    parent_parser = HyperOptArgumentParser(strategy='grid_search', add_help=False)
-
     # gpu args
     parent_parser.add_argument('--gpus', type=str, default='-1',
                                help='how many gpus to use in the node.'
                                     'value -1 uses all the gpus on the node')
 
-    # allow model to overwrite or extend args
+    # each LightningModule defines arguments relevant to it
     parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
     hyperparams = parser.parse_args()
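With these four hunks the CPU demo drops test_tube entirely: argument parsing is plain argparse, and a bare Trainer() runs on CPU with defaults. A hedged invocation, assuming the file now lives at examples/basic_examples/cpu_template.py (the path is implied by the new imports, not shown in the diff):

    python examples/basic_examples/cpu_template.py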
@@ -0,0 +1,78 @@
+"""
+Runs a model on a single node across N-gpus.
+"""
+import os
+import numpy as np
+import torch
+
+from argparse import ArgumentParser
+from pytorch_lightning import Trainer
+from examples.basic_examples.lightning_module_template import LightningTemplateModel
+
+SEED = 2334
+torch.manual_seed(SEED)
+np.random.seed(SEED)
+
+
+def main(hparams):
+    """
+    Main training routine specific for this project
+    :param hparams:
+    """
+    # ------------------------
+    # 1 INIT LIGHTNING MODEL
+    # ------------------------
+    model = LightningTemplateModel(hparams)
+
+    # ------------------------
+    # 2 INIT TRAINER
+    # ------------------------
+    trainer = Trainer(
+        gpus=hparams.gpus,
+        distributed_backend=hparams.distributed_backend,
+        use_amp=hparams.use_16bit
+    )
+
+    # ------------------------
+    # 3 START TRAINING
+    # ------------------------
+    trainer.fit(model)
+
+
+if __name__ == '__main__':
+    # ------------------------
+    # TRAINING ARGUMENTS
+    # ------------------------
+    # these are project-wide arguments
+
+    root_dir = os.path.dirname(os.path.realpath(__file__))
+    parent_parser = ArgumentParser(add_help=False)
+
+    # gpu args
+    parent_parser.add_argument(
+        '--gpus',
+        type=str,
+        default='-1',
+        help='any integer (number of GPUs to use) or -1 for all'
+    )
+    parent_parser.add_argument(
+        '--distributed_backend',
+        type=str,
+        default=None,
+        help='supports three options dp, ddp, ddp2'
+    )
+    parent_parser.add_argument(
+        '--use_16bit',
+        dest='use_16bit',
+        action='store_true',
+        help='if true uses 16 bit precision'
+    )
+
+    # each LightningModule defines arguments relevant to it
+    parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
+    hyperparams = parser.parse_args()
+
+    # ---------------------
+    # RUN TRAINING
+    # ---------------------
+    main(hyperparams)
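The new template surfaces the three Trainer knobs as CLI flags. A hedged usage sketch; the path examples/basic_examples/gpu_template.py is an assumption based on the import above:

    # DataParallel across 2 GPUs with 16-bit precision enabled
    python examples/basic_examples/gpu_template.py --gpus 2 --distributed_backend dp --use_16bit

    # the defaults: all visible GPUs ('-1'), no distributed backend, full precision
    python examples/basic_examples/gpu_template.py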
@@ -10,7 +10,7 @@ from test_tube import HyperOptArgumentParser, Experiment, SlurmCluster
 from pytorch_lightning import Trainer
 from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
 
-from examples.new_project_templates.lightning_module_template import LightningTemplateModel
+from examples.basic_examples.lightning_module_template import LightningTemplateModel
 
 PORT = np.random.randint(12000, 20000, 1)[0]
 SEED = 2334
@@ -7,7 +7,7 @@ import torch
 
 from test_tube import HyperOptArgumentParser, Experiment
 from pytorch_lightning import Trainer
-from examples.new_project_templates.lightning_module_template import LightningTemplateModel
+from examples.basic_examples.lightning_module_template import LightningTemplateModel
 
 SEED = 2334
 torch.manual_seed(SEED)
@@ -1,42 +0,0 @@
-"""
-Runs a model on a single node on CPU only..
-"""
-import os
-import numpy as np
-import torch
-
-from test_tube import HyperOptArgumentParser, Experiment
-from pytorch_lightning import Trainer
-from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
-
-from examples.new_project_templates.lightning_module_template import LightningTemplateModel
-
-SEED = 2334
-torch.manual_seed(SEED)
-np.random.seed(SEED)
-
-
-def main(hparams):
-    """
-    Main training routine specific for this project
-    :param hparams:
-    :return:
-    """
-    # ------------------------
-    # 1 INIT LIGHTNING MODEL
-    # ------------------------
-    model = LightningTemplateModel(hparams)
-
-    # ------------------------
-    # 2 INIT TRAINER
-    # ------------------------
-    trainer = Trainer()
-
-    # ------------------------
-    # 3 START TRAINING
-    # ------------------------
-    trainer.fit(model)
-
-
-if __name__ == '__main__':
-    main(hyperparams)
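Worth noting: this deleted demo's entry point called main(hyperparams) without ever defining hyperparams, so running it directly raised a NameError. The surviving cpu_template above builds the parser first; its corrected shape, reassembled from the earlier hunks:

    if __name__ == '__main__':
        root_dir = os.path.dirname(os.path.realpath(__file__))
        parent_parser = ArgumentParser(add_help=False)
        parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
        hyperparams = parser.parse_args()
        main(hyperparams)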
@@ -1,69 +0,0 @@
-"""
-Runs a model on a single node across N-gpus.
-"""
-import os
-import numpy as np
-import torch
-
-from test_tube import HyperOptArgumentParser, Experiment
-from pytorch_lightning import Trainer
-from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
-
-from examples.new_project_templates.lightning_module_template import LightningTemplateModel
-
-SEED = 2334
-torch.manual_seed(SEED)
-np.random.seed(SEED)
-
-
-def main(hparams):
-    """
-    Main training routine specific for this project
-    :param hparams:
-    :return:
-    """
-    # ------------------------
-    # 1 INIT LIGHTNING MODEL
-    # ------------------------
-    model = LightningTemplateModel(hparams)
-
-    # ------------------------
-    # 2 INIT TRAINER
-    # ------------------------
-    trainer = Trainer(
-        gpus=hparams.gpus,
-        distributed_backend=hparams.dist_backend
-    )
-
-    # ------------------------
-    # 3 START TRAINING
-    # ------------------------
-    trainer.fit(model)
-
-
-if __name__ == '__main__':
-
-    # dirs
-    root_dir = os.path.dirname(os.path.realpath(__file__))
-    demo_log_dir = os.path.join(root_dir, 'pt_lightning_demo_logs')
-    checkpoint_dir = os.path.join(demo_log_dir, 'model_weights')
-    test_tube_dir = os.path.join(demo_log_dir, 'test_tube_data')
-
-    # although we user hyperOptParser, we are using it only as argparse right now
-    parent_parser = HyperOptArgumentParser(strategy='grid_search', add_help=False)
-
-    # gpu args
-    parent_parser.add_argument('--gpus', type=str, default='-1',
-                               help='how many gpus to use in the node.'
-                                    ' value -1 uses all the gpus on the node')
-    parent_parser.add_argument('--dist_backend', type=str, default='ddp',
-                               help='When using multiple GPUs set Trainer(distributed_backend=dp) (or ddp)')
-
-    # allow model to overwrite or extend args
-    parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
-    hyperparams = parser.parse_args()
-
-    # ---------------------
-    # RUN TRAINING
-    # ---------------------
-    main(hyperparams)
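This deleted demo's --dist_backend flag (default 'ddp') corresponds to --distributed_backend (default None) in the new gpu_template. A rough translation of an old invocation; the old file name is unknown, so a placeholder stands in:

    # old (deleted):
    python <deleted_template>.py --gpus -1 --dist_backend ddp
    # new:
    python examples/basic_examples/gpu_template.py --gpus -1 --distributed_backend ddp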
@@ -1,94 +0,0 @@
-"""
-Runs a model on a single node across N-gpus using dataParallel
-"""
-import os
-import numpy as np
-import torch
-
-from test_tube import HyperOptArgumentParser, Experiment
-from pytorch_lightning import Trainer
-from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
-
-from examples.new_project_templates.lightning_module_template import LightningTemplateModel
-
-SEED = 2334
-torch.manual_seed(SEED)
-np.random.seed(SEED)
-
-
-def main(hparams):
-    """
-    Main training routine specific for this project
-    :param hparams:
-    :return:
-    """
-    # ------------------------
-    # 1 INIT LIGHTNING MODEL
-    # ------------------------
-    print('loading model...')
-    model = LightningTemplateModel(hparams)
-    print('model built')
-
-    # ------------------------
-    # 2 INIT Logger
-    # ------------------------
-    # init experiment
-    exp = Experiment(
-        name=hyperparams.experiment_name,
-        save_dir=hyperparams.test_tube_save_path,
-        autosave=False,
-        description='test demo'
-    )
-
-    exp.argparse(hparams)
-    exp.save()
-
-    # ------------------------
-    # 3 INIT TRAINER
-    # ------------------------
-    trainer = Trainer(
-        experiment=exp,
-        gpus=hparams.gpus,
-        distributed_backend=hparams.dist_backend,
-    )
-
-    # ------------------------
-    # 4 START TRAINING
-    # ------------------------
-    trainer.fit(model)
-
-
-if __name__ == '__main__':
-
-    # dirs
-    root_dir = os.path.dirname(os.path.realpath(__file__))
-    demo_log_dir = os.path.join(root_dir, 'pt_lightning_demo_logs')
-    checkpoint_dir = os.path.join(demo_log_dir, 'model_weights')
-    test_tube_dir = os.path.join(demo_log_dir, 'test_tube_data')
-
-    # although we user hyperOptParser, we are using it only as argparse right now
-    parent_parser = HyperOptArgumentParser(strategy='grid_search', add_help=False)
-
-    # gpu args
-    parent_parser.add_argument('--gpus', type=str, default='-1',
-                               help='how many gpus to use in the node.'
-                                    ' value -1 uses all the gpus on the node')
-    parent_parser.add_argument('--dist_backend', type=str, default='dp',
-                               help='When using multiple GPUs set Trainer(distributed_backend=dp) (or ddp)')
-    parent_parser.add_argument('--test_tube_save_path', type=str, default=test_tube_dir,
-                               help='where to save logs')
-    parent_parser.add_argument('--model_save_path', type=str, default=checkpoint_dir,
-                               help='where to save model')
-    parent_parser.add_argument('--experiment_name', type=str, default='pt_lightning_exp_a',
-                               help='test tube exp name')
-
-    # allow model to overwrite or extend args
-    parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
-    hyperparams = parser.parse_args()
-
-    # ---------------------
-    # RUN TRAINING
-    # ---------------------
-    # run on HPC cluster
-    print(f'RUNNING INTERACTIVE MODE ON GPUS. gpu ids: {hyperparams.gpus}')
-    main(hyperparams)
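Besides hand-wiring a test_tube Experiment into Trainer(experiment=exp), this deleted demo read the module-level hyperparams inside main(hparams) rather than its own argument, which only worked because the script defined hyperparams globally before calling main. Under the consolidated template the equivalent run is just flags, with logging left to the Trainer defaults:

    python examples/basic_examples/gpu_template.py --gpus 2 --distributed_backend dp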
@@ -1,53 +0,0 @@
-import os
-import sys
-
-from test_tube import HyperOptArgumentParser, Experiment
-from pytorch_lightning import Trainer
-from pytorch_lightning.utilities.arg_parse import add_default_args
-from pytorch_lightning.callbacks.pt_callbacks import EarlyStopping, ModelCheckpoint
-
-from examples.new_project_templates.lightning_module_template import LightningTemplateModel
-
-
-def main(hparams):
-    """
-    Main training routine specific for this project
-    :param hparams:
-    :return:
-    """
-    # init experiment
-    exp = Experiment(
-        name=hparams.tt_name,
-        debug=hparams.debug,
-        save_dir=hparams.tt_save_path,
-        version=hparams.hpc_exp_number,
-        autosave=False,
-        description=hparams.tt_description
-    )
-
-    exp.argparse(hparams)
-    exp.save()
-
-    # build model
-    model = LightningTemplateModel(hparams)
-
-    # configure trainer
-    trainer = Trainer(experiment=exp)
-
-    # train model
-    trainer.fit(model)
-
-
-if __name__ == '__main__':
-
-    # use default args given by lightning
-    root_dir = os.path.split(os.path.dirname(sys.modules['__main__'].__file__))[0]
-    parent_parser = HyperOptArgumentParser(strategy='random_search', add_help=False)
-    add_default_args(parent_parser, root_dir)
-
-    # allow model to overwrite or extend args
-    parser = LightningTemplateModel.add_model_specific_args(parent_parser)
-    hyperparams = parser.parse_args()
-
-    # train model
-    main(hyperparams)