diff --git a/pytorch_lightning/loops/epoch/training_epoch_loop.py b/pytorch_lightning/loops/epoch/training_epoch_loop.py
index bc378c6bed..9063e2153d 100644
--- a/pytorch_lightning/loops/epoch/training_epoch_loop.py
+++ b/pytorch_lightning/loops/epoch/training_epoch_loop.py
@@ -221,6 +221,8 @@ class TrainingEpochLoop(loops.Loop):
         self.trainer.call_hook('on_epoch_end')
         self.trainer.logger_connector.on_epoch_end()
 
+        self.update_lr_schedulers('epoch', update_plateau_schedulers=True)
+
         epoch_output = self._epoch_output
         # free memory
         self._epoch_output = None
diff --git a/pytorch_lightning/loops/fit_loop.py b/pytorch_lightning/loops/fit_loop.py
index a8eb44923a..15ebde94c8 100644
--- a/pytorch_lightning/loops/fit_loop.py
+++ b/pytorch_lightning/loops/fit_loop.py
@@ -233,8 +233,6 @@ class FitLoop(Loop):
         if self.epoch_loop.batches_seen == 0:
             return
 
-        self.epoch_loop.update_lr_schedulers('epoch', update_plateau_schedulers=True)
-
         did_train_only = not self.trainer.enable_validation or self.epoch_loop.val_loop.skip
         if did_train_only:
             self.global_step -= 1
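
For context, plateau-style schedulers step on an epoch-level metric, so the update has to run only after the epoch's hooks and logging have produced that metric, which is what moving `update_lr_schedulers('epoch', update_plateau_schedulers=True)` into the end of the training epoch loop achieves. Below is a minimal plain-PyTorch sketch of that ordering, not Lightning code; the model, optimizer, and `val_loss` value are placeholder assumptions for illustration only.

```python
import torch
from torch import nn, optim

# Hypothetical model and optimizer, purely for illustration.
model = nn.Linear(10, 1)
optimizer = optim.SGD(model.parameters(), lr=0.1)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", patience=2)

for epoch in range(5):
    # ... training batches would run here ...
    val_loss = torch.rand(1).item()  # placeholder for the epoch's monitored metric

    # The plateau scheduler is stepped only once the epoch-level metric exists,
    # mirroring the reordering in this diff: scheduler update after epoch-end
    # hooks/logging rather than in the outer fit loop.
    scheduler.step(val_loss)
```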