From a008801e25a6e93214b410b68ca1edf2e3ddae14 Mon Sep 17 00:00:00 2001
From: Robert Bracco <47190785+rbracco@users.noreply.github.com>
Date: Sun, 30 Oct 2022 19:58:27 -0400
Subject: [PATCH] Improve callback documentation for `outputs` and
 `accumulate_grad_batches` (Resolves #15315) (#15327)

Co-authored-by: Rohit Gupta
Co-authored-by: awaelchli
---
 src/pytorch_lightning/callbacks/callback.py | 7 ++++++-
 src/pytorch_lightning/core/module.py        | 4 ++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/pytorch_lightning/callbacks/callback.py b/src/pytorch_lightning/callbacks/callback.py
index 2f2ee17414..400d45d3b6 100644
--- a/src/pytorch_lightning/callbacks/callback.py
+++ b/src/pytorch_lightning/callbacks/callback.py
@@ -82,7 +82,12 @@ class Callback:
     def on_train_batch_end(
         self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: STEP_OUTPUT, batch: Any, batch_idx: int
     ) -> None:
-        """Called when the train batch ends."""
+        """Called when the train batch ends.
+
+        Note:
+            The value ``outputs["loss"]`` here will be the normalized value w.r.t. ``accumulate_grad_batches`` of the
+            loss returned from ``training_step``.
+        """
 
     def on_train_epoch_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
         """Called when the train epoch begins."""
diff --git a/src/pytorch_lightning/core/module.py b/src/pytorch_lightning/core/module.py
index c557d8fb0f..7710e4d5c6 100644
--- a/src/pytorch_lightning/core/module.py
+++ b/src/pytorch_lightning/core/module.py
@@ -661,6 +661,10 @@ class LightningModule(
 
         Note:
             The loss value shown in the progress bar is smoothed (averaged) over the last values,
             so it differs from the actual loss returned in train/validation step.
+
+        Note:
+            When ``accumulate_grad_batches`` > 1, the loss returned here will be automatically
+            normalized by ``accumulate_grad_batches`` internally.
         """
         rank_zero_warn("`training_step` must be implemented to be used with the Lightning Trainer")
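
The behavior documented by this patch can be verified with a short script. Below is a minimal, hypothetical sketch (the `ToyModule`, `NormalizationCheck`, and `raw_loss` names are invented for illustration, and it assumes a PyTorch Lightning 1.x-style `Trainer`). It checks that the loss delivered to `on_train_batch_end` is the `training_step` loss divided by `accumulate_grad_batches`:

```python
# Hypothetical illustration (not part of the patch): with
# accumulate_grad_batches=4, the loss a callback sees in on_train_batch_end
# should be the training_step loss divided by 4.
import torch
from torch.utils.data import DataLoader, TensorDataset

import pytorch_lightning as pl


class ToyModule(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(8, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self.layer(x), y)
        # Stash the un-normalized loss so the callback can compare against it.
        self.raw_loss = loss.detach()
        return loss

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)


class NormalizationCheck(pl.Callback):
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        # outputs["loss"] has already been divided by accumulate_grad_batches,
        # so this ratio should be ~4.0 for every batch.
        ratio = pl_module.raw_loss / outputs["loss"]
        print(f"batch {batch_idx}: raw / normalized = {ratio:.1f}")


if __name__ == "__main__":
    data = TensorDataset(torch.randn(64, 8), torch.randn(64, 1))
    trainer = pl.Trainer(
        max_epochs=1,
        accumulate_grad_batches=4,
        callbacks=[NormalizationCheck()],
        logger=False,
        enable_checkpointing=False,
        enable_progress_bar=False,
    )
    trainer.fit(ToyModule(), DataLoader(data, batch_size=8))
```

Running this should print a ratio of roughly 4.0 for every batch, matching the note this patch adds to `on_train_batch_end`.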