From 965fc03b4cff39028e90634b2ddcb2e483f6c3c1 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Fri, 16 Feb 2024 17:26:55 +0000
Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 tests/tests_pytorch/models/test_hooks.py | 82 ++++++++++++------------
 1 file changed, 40 insertions(+), 42 deletions(-)

diff --git a/tests/tests_pytorch/models/test_hooks.py b/tests/tests_pytorch/models/test_hooks.py
index a022154800..596d08641d 100644
--- a/tests/tests_pytorch/models/test_hooks.py
+++ b/tests/tests_pytorch/models/test_hooks.py
@@ -266,48 +266,46 @@ class HookedModel(BoringModel):
         using_deepspeed = kwargs.get("strategy") == "deepspeed"
         out = []
         for i in range(current_batch, batches):
-            out.extend(
-                [
-                    {"name": "on_before_batch_transfer", "args": (ANY, 0)},
-                    {"name": "transfer_batch_to_device", "args": (ANY, device, 0)},
-                    {"name": "on_after_batch_transfer", "args": (ANY, 0)},
-                    {"name": "Callback.on_train_batch_start", "args": (trainer, model, ANY, i)},
-                    {"name": "on_train_batch_start", "args": (ANY, i)},
-                    {"name": "forward", "args": (ANY,)},
-                    {"name": "training_step", "args": (ANY, i)},
-                    {"name": "Callback.on_before_zero_grad", "args": (trainer, model, ANY)},
-                    {"name": "on_before_zero_grad", "args": (ANY,)},
-                    {"name": "optimizer_zero_grad", "args": (current_epoch, i, ANY)},
-                    {"name": "Callback.on_before_backward", "args": (trainer, model, ANY)},
-                    {"name": "on_before_backward", "args": (ANY,)},
-                    # DeepSpeed handles backward internally
-                    *([{"name": "backward", "args": (ANY,)}] if not using_deepspeed else []),
-                    {"name": "Callback.on_after_backward", "args": (trainer, model)},
-                    {"name": "on_after_backward"},
-                    # note: unscaling happens here in the case of AMP
-                    {"name": "Callback.on_before_optimizer_step", "args": (trainer, model, ANY)},
-                    {"name": "on_before_optimizer_step", "args": (ANY,)},
-                    {
-                        "name": "clip_gradients",
-                        "args": (ANY,),
-                        "kwargs": {"gradient_clip_val": None, "gradient_clip_algorithm": None},
-                    },
-                    {
-                        "name": "configure_gradient_clipping",
-                        "args": (ANY,),
-                        "kwargs": {"gradient_clip_val": None, "gradient_clip_algorithm": None},
-                    },
-                    # this is after because it refers to the `LightningModule.optimizer_step` hook which encapsulates
-                    # the actual call to `Precision.optimizer_step`
-                    {
-                        "name": "optimizer_step",
-                        "args": (current_epoch, i, ANY, ANY),
-                    },
-                    *([{"name": "lr_scheduler_step", "args": ANY}] if i == (trainer.num_training_batches - 1) else []),
-                    {"name": "Callback.on_train_batch_end", "args": (trainer, model, {"loss": ANY}, ANY, i)},
-                    {"name": "on_train_batch_end", "args": ({"loss": ANY}, ANY, i)},
-                ]
-            )
+            out.extend([
+                {"name": "on_before_batch_transfer", "args": (ANY, 0)},
+                {"name": "transfer_batch_to_device", "args": (ANY, device, 0)},
+                {"name": "on_after_batch_transfer", "args": (ANY, 0)},
+                {"name": "Callback.on_train_batch_start", "args": (trainer, model, ANY, i)},
+                {"name": "on_train_batch_start", "args": (ANY, i)},
+                {"name": "forward", "args": (ANY,)},
+                {"name": "training_step", "args": (ANY, i)},
+                {"name": "Callback.on_before_zero_grad", "args": (trainer, model, ANY)},
+                {"name": "on_before_zero_grad", "args": (ANY,)},
+                {"name": "optimizer_zero_grad", "args": (current_epoch, i, ANY)},
+                {"name": "Callback.on_before_backward", "args": (trainer, model, ANY)},
+                {"name": "on_before_backward", "args": (ANY,)},
+                # DeepSpeed handles backward internally
+                *([{"name": "backward", "args": (ANY,)}] if not using_deepspeed else []),
+                {"name": "Callback.on_after_backward", "args": (trainer, model)},
+                {"name": "on_after_backward"},
+                # note: unscaling happens here in the case of AMP
+                {"name": "Callback.on_before_optimizer_step", "args": (trainer, model, ANY)},
+                {"name": "on_before_optimizer_step", "args": (ANY,)},
+                {
+                    "name": "clip_gradients",
+                    "args": (ANY,),
+                    "kwargs": {"gradient_clip_val": None, "gradient_clip_algorithm": None},
+                },
+                {
+                    "name": "configure_gradient_clipping",
+                    "args": (ANY,),
+                    "kwargs": {"gradient_clip_val": None, "gradient_clip_algorithm": None},
+                },
+                # this is after because it refers to the `LightningModule.optimizer_step` hook which encapsulates
+                # the actual call to `Precision.optimizer_step`
+                {
+                    "name": "optimizer_step",
+                    "args": (current_epoch, i, ANY, ANY),
+                },
+                *([{"name": "lr_scheduler_step", "args": ANY}] if i == (trainer.num_training_batches - 1) else []),
+                {"name": "Callback.on_train_batch_end", "args": (trainer, model, {"loss": ANY}, ANY, i)},
+                {"name": "on_train_batch_end", "args": ({"loss": ANY}, ANY, i)},
+            ])
         return out
 
     @staticmethod