diff --git a/docs/source-pytorch/common/evaluation_intermediate.rst b/docs/source-pytorch/common/evaluation_intermediate.rst
index b780650cfe..35ae7ad3ba 100644
--- a/docs/source-pytorch/common/evaluation_intermediate.rst
+++ b/docs/source-pytorch/common/evaluation_intermediate.rst
@@ -41,10 +41,10 @@ To run the test set after training completes, use this method.
     # run full training
     trainer.fit(model)
 
-    # (1) load the best checkpoint automatically (lightning tracks this for you)
+    # (1) load the best checkpoint automatically (lightning tracks this for you during .fit())
     trainer.test(ckpt_path="best")
 
-    # (2) load the last available checkpoint
+    # (2) load the last available checkpoint (only works if `ModelCheckpoint(save_last=True)`)
     trainer.test(ckpt_path="last")
 
     # (3) test using a specific checkpoint
diff --git a/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py b/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py
index a2f983d6f9..6ee7c3be4e 100644
--- a/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py
+++ b/src/lightning/pytorch/trainer/connectors/checkpoint_connector.py
@@ -185,7 +185,7 @@ class _CheckpointConnector:
                 # not an error so it can be set and forget before the first `fit` run
                 rank_zero_warn(
                     f'.{fn}(ckpt_path="last") is set, but there is no last checkpoint available.'
-                    " No checkpoint will be loaded."
+                    " No checkpoint will be loaded. HINT: Set `ModelCheckpoint(..., save_last=True)`."
                )
                 return None
             ckpt_path = max(candidates_ts, key=candidates_ts.get)  # type: ignore[arg-type]
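For context, here is a minimal end-to-end sketch of the workflow this patch documents: `ckpt_path="best"` resolves because the `Trainer` tracks the best checkpoint during `.fit()`, while `ckpt_path="last"` only resolves when a `ModelCheckpoint` callback was created with `save_last=True`. The `TinyModel` module and random data below are hypothetical placeholders, not part of the patch.

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

import lightning.pytorch as pl
from lightning.pytorch.callbacks import ModelCheckpoint


class TinyModel(pl.LightningModule):
    """Hypothetical stand-in model, only here to exercise the checkpointing flow."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return torch.nn.functional.mse_loss(self.layer(x), y)

    def test_step(self, batch, batch_idx):
        x, y = batch
        self.log("test_loss", torch.nn.functional.mse_loss(self.layer(x), y))

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.01)


def make_loader():
    # Random regression data, just so .fit() and .test() have something to consume.
    x, y = torch.randn(64, 32), torch.randn(64, 1)
    return DataLoader(TensorDataset(x, y), batch_size=16)


# save_last=True is what writes a `last.ckpt`; without it, `ckpt_path="last"`
# falls into the warning branch patched above and loads nothing.
checkpoint_cb = ModelCheckpoint(save_last=True)

trainer = pl.Trainer(max_epochs=2, callbacks=[checkpoint_cb], logger=False)
trainer.fit(TinyModel(), make_loader())

# "best" is tracked by the trainer during .fit(); "last" resolves only because
# save_last=True was set on the ModelCheckpoint callback.
trainer.test(ckpt_path="best", dataloaders=make_loader())
trainer.test(ckpt_path="last", dataloaders=make_loader())
```

Without `save_last=True`, the second `trainer.test` call would hit the `rank_zero_warn` branch changed in this patch and skip loading a checkpoint, which is exactly the situation the new HINT text points the user toward fixing.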