From 9e932f4dfd44c96b4d5ba8c6eea6736ae61ce2eb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20Mochol=C3=AD?=
Date: Fri, 11 Jun 2021 02:38:30 +0200
Subject: [PATCH] Delete `on_after_backward` unused argument (#7925)

---
 pytorch_lightning/trainer/training_loop.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/trainer/training_loop.py b/pytorch_lightning/trainer/training_loop.py
index c013db5b48..ddab8f837b 100644
--- a/pytorch_lightning/trainer/training_loop.py
+++ b/pytorch_lightning/trainer/training_loop.py
@@ -246,7 +246,7 @@ class TrainLoop:
         opt_idx = int(np.argmax(self.optimizer_freq_cumsum > current_place_in_loop))
         return [(opt_idx, self.trainer.optimizers[opt_idx])]
 
-    def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
+    def on_after_backward(self, batch_idx, untouched_loss):
         # insert after step hook
         self.trainer.call_hook("on_after_backward")
 
@@ -760,7 +760,7 @@ class TrainLoop:
                     # hook - call this hook only
                     # when gradients have finished to accumulate
                     if not self.should_accumulate():
-                        self.on_after_backward(result.training_step_output, batch_idx, result.loss)
+                        self.on_after_backward(batch_idx, result.loss)
 
                     # check if loss or model weights are nan
                     if self.trainer.terminate_on_nan:
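
Note: the removed `training_step_output` argument was never forwarded to the user-facing hook; as the diff shows, the trainer invokes it via `self.trainer.call_hook("on_after_backward")` with no extra arguments, so the LightningModule hook signature is simply `on_after_backward(self)`. Below is a minimal sketch of overriding that hook to inspect gradients after `loss.backward()`; the module, layer sizes, and gradient check are illustrative assumptions, not part of this patch.

import torch
from torch.nn import functional as F
from pytorch_lightning import LightningModule


class DemoModule(LightningModule):
    # Illustrative module; layer sizes and the optimizer choice are arbitrary.

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def training_step(self, batch, batch_idx):
        x, y = batch
        return F.cross_entropy(self.layer(x), y)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

    def on_after_backward(self):
        # Called by the trainer right after loss.backward(); gradients are
        # populated at this point, so it is a natural place to inspect or log them.
        for name, param in self.named_parameters():
            if param.grad is not None and not torch.isfinite(param.grad).all():
                print(f"non-finite gradient detected in {name}")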