From 200ed9eb9f65dd476032f0af9a7fb4c1433935f8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adrian=20W=C3=A4lchli?=
Date: Wed, 15 Sep 2021 14:58:01 +0200
Subject: [PATCH] mark `OptimizerLoop.backward` method protected (#9514)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Carlos Mocholí
---
 CHANGELOG.md                                            | 1 +
 pytorch_lightning/loops/optimization/optimizer_loop.py  | 4 ++--
 tests/trainer/test_trainer.py                           | 8 ++++----
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b50a382f0b..0271ee6115 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,6 +77,7 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
     * Removed `TrainingBatchLoop.backward()`; manual optimization now calls directly into `Accelerator.backward()` and automatic optimization handles backward in new `OptimizerLoop` ([#9265](https://github.com/PyTorchLightning/pytorch-lightning/pull/9265))
     * Extracted `ManualOptimization` logic from `TrainingBatchLoop` into its own separate loop class ([#9266](https://github.com/PyTorchLightning/pytorch-lightning/pull/9266))
     * Added `OutputResult` and `ManualResult` classes ([#9437](https://github.com/PyTorchLightning/pytorch-lightning/pull/9437), [#9424](https://github.com/PyTorchLightning/pytorch-lightning/pull/9424))
+    * Marked `OptimizerLoop.backward` as protected ([#9514](https://github.com/PyTorchLightning/pytorch-lightning/pull/9514))

 - Added support for saving and loading state of multiple callbacks of the same type ([#7187](https://github.com/PyTorchLightning/pytorch-lightning/pull/7187))

diff --git a/pytorch_lightning/loops/optimization/optimizer_loop.py b/pytorch_lightning/loops/optimization/optimizer_loop.py
index 767e0f83ac..f5352c42b2 100644
--- a/pytorch_lightning/loops/optimization/optimizer_loop.py
+++ b/pytorch_lightning/loops/optimization/optimizer_loop.py
@@ -221,7 +221,7 @@ class OptimizerLoop(Loop):
         outputs, self.outputs = self.outputs, []  # free memory
         return outputs

-    def backward(
+    def _backward(
         self, loss: Tensor, optimizer: torch.optim.Optimizer, opt_idx: int, *args: Any, **kwargs: Any
     ) -> Tensor:
         """Performs the backward step.
@@ -337,7 +337,7 @@ class OptimizerLoop(Loop):
             return None

         def backward_fn(loss: Tensor) -> Tensor:
-            self.backward(loss, optimizer, opt_idx)
+            self._backward(loss, optimizer, opt_idx)

             # check if model weights are nan
             if self.trainer.terminate_on_nan:
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 3549730312..e48e93dcb3 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -961,7 +961,7 @@ def test_gradient_clipping_by_norm(tmpdir, precision):
         gradient_clip_val=1.0,
     )

-    old_backward = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.backward
+    old_backward = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._backward

     def backward(*args, **kwargs):
         # test that gradient is clipped correctly
@@ -971,7 +971,7 @@
         assert (grad_norm - 1.0).abs() < 0.01, f"Gradient norm != 1.0: {grad_norm}"
         return ret_val

-    trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.backward = backward
+    trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._backward = backward

     trainer.fit(model)

@@ -996,7 +996,7 @@ def test_gradient_clipping_by_value(tmpdir, precision):
         default_root_dir=tmpdir,
     )

-    old_backward = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.backward
+    old_backward = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._backward

     def backward(*args, **kwargs):
         # test that gradient is clipped correctly
@@ -1009,7 +1009,7 @@
         ), f"Gradient max value {grad_max} != grad_clip_val {grad_clip_val} ."
         return ret_val

-    trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.backward = backward
+    trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._backward = backward

     trainer.fit(model)
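
For downstream code or tests that hooked the public method, the practical effect of this patch is that `optimizer_loop.backward` is gone and `optimizer_loop._backward` must be wrapped instead, as the updated tests above do. The following is a minimal sketch of that pattern, not part of the patch: `BoringModel` is assumed to come from the repository's test helpers, and the wrapper only records calls rather than checking gradient clipping.

from pytorch_lightning import Trainer
from tests.helpers.boring_model import BoringModel  # assumption: Lightning's test helper model

model = BoringModel()
trainer = Trainer(fast_dev_run=1, gradient_clip_val=1.0)

optimizer_loop = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop
old_backward = optimizer_loop._backward  # was `optimizer_loop.backward` before #9514
calls = []

def backward(*args, **kwargs):
    # delegate to the original implementation and record that the hook ran
    calls.append(args)
    return old_backward(*args, **kwargs)

optimizer_loop._backward = backward
trainer.fit(model)
assert calls, "the wrapped _backward hook was never called"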