From 5fe0dac119ca1b1e5c3d0971e6263d1fbf0c586c Mon Sep 17 00:00:00 2001
From: Adam Reeve
Date: Sat, 20 Nov 2021 06:26:50 +1300
Subject: [PATCH] Fix misleading ModelCheckpoint documentation on every_n_epochs parameter (#10421)

---
 pytorch_lightning/callbacks/model_checkpoint.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py
index e195072c97..33f872f3a9 100644
--- a/pytorch_lightning/callbacks/model_checkpoint.py
+++ b/pytorch_lightning/callbacks/model_checkpoint.py
@@ -114,8 +114,14 @@ class ModelCheckpoint(Callback):
             guaranteed to execute at the exact time specified, but should be close.
             This must be mutually exclusive with ``every_n_train_steps`` and ``every_n_epochs``.
         every_n_epochs: Number of epochs between checkpoints.
-            If ``every_n_epochs == None or every_n_epochs == 0``, we skip saving when the epoch ends.
-            To disable, set ``every_n_epochs = 0``. This value must be ``None`` or non-negative.
+            This value must be ``None`` or non-negative.
+            To disable saving after each epoch, set ``every_n_epochs = 0``.
+            If all of ``every_n_epochs``, ``every_n_train_steps`` and
+            ``train_time_interval`` are ``None``, we save a checkpoint at the end of every epoch
+            (equivalent to ``every_n_epochs = 1``).
+            If ``every_n_epochs == None`` and either ``every_n_train_steps != None`` or ``train_time_interval != None``,
+            saving at the end of each epoch is disabled
+            (equivalent to ``every_n_epochs = 0``).
             This must be mutually exclusive with ``every_n_train_steps`` and ``train_time_interval``.
         Setting both ``ModelCheckpoint(..., every_n_epochs=V, save_on_train_epoch_end=False)`` and
         ``Trainer(max_epochs=N, check_val_every_n_epoch=M)``
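As a usage sketch of the semantics documented above (not part of the patch; the ``dirpath`` and interval values below are illustrative assumptions), the interaction between ``every_n_epochs`` and ``every_n_train_steps`` looks like this:

    from pytorch_lightning import Trainer
    from pytorch_lightning.callbacks import ModelCheckpoint

    # Default: every_n_epochs, every_n_train_steps and train_time_interval are
    # all None, so a checkpoint is saved at the end of every epoch
    # (equivalent to every_n_epochs=1).
    default_ckpt = ModelCheckpoint(dirpath="checkpoints/")

    # Save a checkpoint at the end of every 2nd epoch.
    every_2_epochs = ModelCheckpoint(dirpath="checkpoints/", every_n_epochs=2)

    # every_n_epochs is left as None while every_n_train_steps is set, so
    # end-of-epoch saving is disabled (equivalent to every_n_epochs=0) and a
    # checkpoint is saved every 100 training steps instead.
    every_100_steps = ModelCheckpoint(dirpath="checkpoints/", every_n_train_steps=100)

    trainer = Trainer(max_epochs=10, callbacks=[every_2_epochs])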