From 839019a3a7a8478dcea47240ae85c60e3c86d350 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Thu, 10 Jun 2021 15:02:14 +0200
Subject: [PATCH] Remove legacy teardown check in train loop (#7917)

---
 pytorch_lightning/trainer/training_loop.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py
index 793ac5d921..c013db5b48 100644
--- a/pytorch_lightning/trainer/training_loop.py
+++ b/pytorch_lightning/trainer/training_loop.py
@@ -50,7 +50,6 @@ class TrainLoop:
         self.trainer = trainer
         self.accumulated_loss = None
         self.warning_cache = WarningCache()
-        self._teardown_already_run = False
         self.running_loss = TensorRunningAccum(window_length=20)
         self._skip_backward = False
         self._optimizer_freq_cumsum = None
@@ -105,10 +104,6 @@ class TrainLoop:
         self.trainer.call_hook("on_train_start")
 
     def on_train_end(self):
-        if self._teardown_already_run:
-            return
-        self._teardown_already_run = True
-
         # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
         # when a checkpoint was saved at the last step
         self.global_step -= 1
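
For context, the guard this patch deletes implements a common run-once (idempotent teardown) pattern: a boolean flag turns every call after the first into a no-op. Below is a minimal standalone sketch of that pattern; the class name `LoopWithGuard` and the print statement are illustrative only and assume nothing else about TrainLoop.

class LoopWithGuard:
    def __init__(self) -> None:
        # Same flag name as the removed pytorch_lightning code.
        self._teardown_already_run = False

    def on_train_end(self) -> None:
        if self._teardown_already_run:
            return  # second and later calls are silently skipped
        self._teardown_already_run = True
        print("running teardown exactly once")


loop = LoopWithGuard()
loop.on_train_end()  # prints
loop.on_train_end()  # no-op

With the flag removed, the body of on_train_end runs unconditionally, so the loop presumably relies on the trainer invoking the hook exactly once per training run.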