From b74a3c510630e3d8b5ab0efb415c32c00d5ee7f0 Mon Sep 17 00:00:00 2001
From: Paweł Rzepiński
Date: Sun, 29 Mar 2020 21:29:48 +0200
Subject: [PATCH] Fix training resuming docs (#1265)

---
 docs/source/weights_loading.rst          | 10 ++++++----
 pytorch_lightning/trainer/training_io.py |  8 +-------
 2 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/docs/source/weights_loading.rst b/docs/source/weights_loading.rst
index 2aebea1b17..ec06c8f63f 100644
--- a/docs/source/weights_loading.rst
+++ b/docs/source/weights_loading.rst
@@ -84,9 +84,7 @@ To save your own checkpoint call:
 Checkpoint Loading
 ------------------
 
-You might want to not only load a model but also continue training it. Use this method to
-restore the trainer state as well. This will continue from the epoch and global step you last left off.
-However, the dataloaders will start from the first batch again (if you shuffled it shouldn't matter).
+To load a model along with its weights, biases and hyperparameters use following method:
 
 .. code-block:: python
 
@@ -95,4 +93,8 @@ However, the dataloaders will start from the first batch again (if you shuffled
     y_hat = model(x)
 
 A LightningModule is no different than a nn.Module. This means you can load it and use it for
-predictions as you would a nn.Module.
\ No newline at end of file
+predictions as you would a nn.Module.
+
+
+.. note:: To restore the trainer state as well use
+    :meth:`pytorch_lightning.trainer.trainer.Trainer.resume_from_checkpoint`.
\ No newline at end of file
diff --git a/pytorch_lightning/trainer/training_io.py b/pytorch_lightning/trainer/training_io.py
index 99264b7b5b..40238c2794 100644
--- a/pytorch_lightning/trainer/training_io.py
+++ b/pytorch_lightning/trainer/training_io.py
@@ -39,15 +39,9 @@ Lightning will restore the session if you pass a logger with the same version an
 .. code-block:: python
 
     from pytorch_lightning import Trainer
-    from pytorch_lightning.loggers import TestTubeLogger
 
-    logger = TestTubeLogger(
-        save_dir='./savepath',
-        version=1  # An existing version with a saved checkpoint
-    )
     trainer = Trainer(
-        logger=logger,
-        default_save_path='./savepath'
+        resume_from_checkpoint=PATH
    )

     # this fit call loads model weights and trainer state
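
For context, a minimal sketch of the workflow the patched docs describe: restoring only the model (weights, biases and hyperparameters) via ``LightningModule.load_from_checkpoint``, versus restoring the full trainer state via the ``resume_from_checkpoint`` Trainer argument shown in the diff. ``LitModel``, ``PATH`` and ``x`` are placeholder names, not part of the patch.

.. code-block:: python

    import torch
    from pytorch_lightning import Trainer

    # Placeholders: LitModel is any LightningModule subclass,
    # PATH points to a saved .ckpt file, x is an input batch.
    model = LitModel.load_from_checkpoint(PATH)
    model.eval()
    with torch.no_grad():
        y_hat = model(x)

    # Restore the trainer state as well (epoch, global step, optimizer states)
    # and continue training from where the checkpoint left off.
    trainer = Trainer(resume_from_checkpoint=PATH)
    trainer.fit(model)

The distinction is the point of the patch: ``load_from_checkpoint`` only rebuilds the model, while ``resume_from_checkpoint`` also restores the training loop state so ``fit`` continues rather than starting over.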