# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from contextlib import suppress
from typing import Any, Optional

import pytorch_lightning as pl
from pytorch_lightning.loops.base import Loop
from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop
from pytorch_lightning.loops.epoch.training_epoch_loop import TrainingEpochLoop
from pytorch_lightning.trainer.connectors.logger_connector.result import ResultCollection
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import rank_zero_info

log = logging.getLogger(__name__)


class FitLoop(Loop):
    """This Loop iterates over the epochs to run the training.

    Args:
        min_epochs: The minimum number of epochs
        max_epochs: The maximum number of epochs
        min_steps: The minimum number of steps
        max_steps: The maximum number of steps

    .. note::
        If neither the minimum epochs nor steps are specified, the minimum number of epochs is set to 1,
        and if neither the maximum steps nor epochs are specified, the maximum number of epochs is set to 1000.
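
    Example:
        How the epoch/step defaults resolve (illustrative)::

            FitLoop()                # max_epochs -> 1000, min_epochs -> 1
            FitLoop(max_steps=100)   # max_epochs stays None; training stops on the step budget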
"""
def __init__(
        self,
        min_epochs: Optional[int] = None,
        max_epochs: Optional[int] = None,
        min_steps: Optional[int] = None,
        max_steps: Optional[int] = None
    ):
        super().__init__()
        self.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
        self.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
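        # the heavy lifting is delegated: the epoch loop runs the batches of a single epoch
        # (enforcing the step budgets above), while the val loop runs validation when requested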
self.epoch_loop = TrainingEpochLoop(min_steps, max_steps)
        self.val_loop = EvaluationLoop()

@property
    def results(self) -> ResultCollection:
        if self.trainer.training:
            return self.epoch_loop.results
        elif self.trainer.validating:
            return self.val_loop.results
        raise RuntimeError("`FitLoop.results` is only defined while training or validating")

@property
    def current_epoch(self) -> int:
        """Return the current epoch"""
        return self.iteration_count

    @current_epoch.setter
    def current_epoch(self, value: int) -> None:
        """Setter for the current epoch"""
        self.iteration_count = value

@property
    def global_step(self) -> int:
        """Returns the global step"""
        return self.epoch_loop.global_step

    @global_step.setter
    def global_step(self, value: int) -> None:
        """Sets the global step (forwards to epoch_loop)"""
        self.epoch_loop.global_step = value

@property
    def total_batch_idx(self) -> int:
        """Returns the total number of batches already run (across all epochs)"""
        return self.epoch_loop.total_batch_idx

@property
    def batch_idx(self) -> int:
        """Returns the number of batches already run within this epoch"""
        return self.epoch_loop.iteration_count

@property
    def split_idx(self) -> int:
        """Returns the index of the current batch split (within the current batch) for truncated BPTT"""
        return self.epoch_loop.split_idx

@property
    def min_steps(self) -> int:
        # TODO(@justusschock): Why aren't we using the attribute in this class?
        """Returns the minimum number of steps to run"""
        return self.epoch_loop.min_steps

@property
    def max_steps(self) -> int:
        """Returns the maximum number of steps to run"""
        return self.epoch_loop.max_steps

@max_steps.setter
    def max_steps(self, value: int) -> None:
        """Sets the maximum number of steps (forwards to epoch_loop)"""
        # TODO(@awaelchli): This setter is required by debugging connector (fast dev run), should be avoided
        self.epoch_loop.max_steps = value

@property
    def running_loss(self) -> TensorRunningAccum:
        """Returns the running loss"""
        return self.epoch_loop.batch_loop.running_loss

@property
    def _skip_backward(self) -> bool:
        """Determines whether the loop will skip backward during automatic optimization."""
        return self.epoch_loop.batch_loop._skip_backward

@_skip_backward.setter
    def _skip_backward(self, value: bool) -> None:
        """Determines whether the loop will skip backward during automatic optimization."""
        self.epoch_loop.batch_loop._skip_backward = value

@property
    def done(self) -> bool:
        """Evaluates when to leave the loop.

        Returns True if trainer.should_stop was set (e.g. by early stopping)
        or if the maximum number of steps or epochs is reached.
        """
        # TODO(@awaelchli): move the step tracking and part of these conditions into the training loop
        stop_steps = self.max_steps is not None and self.global_step >= self.max_steps
        stop_epochs = self.max_epochs is not None and self.current_epoch >= self.max_epochs
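        # a stop request (e.g. from early stopping) is honored only once the
        # ``min_epochs``/``min_steps`` requirements, if set, have been met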
should_stop = False
        if self.trainer.should_stop:
            # early stopping
            met_min_epochs = self.current_epoch >= self.min_epochs if self.min_epochs else True
            met_min_steps = self.global_step >= self.min_steps if self.min_steps else True
            if met_min_epochs and met_min_steps:
                should_stop = True
            else:
                log.info(
                    'Trainer was signaled to stop but the required minimum epochs'
                    f' ({self.min_epochs}) or minimum steps ({self.min_steps}) have'
                    ' not been met. Training will continue...'
                )
        self.trainer.should_stop = should_stop

return stop_steps or should_stop or stop_epochs
@property
    def skip(self) -> bool:
        """Whether we should skip the training and immediately return from the call to :meth:`run`."""
        return self.done or self.trainer.num_training_batches == 0

def connect(self, trainer: 'pl.Trainer', *args: Any, **kwargs: Any) -> None:
        """Connects the loop with necessary arguments like the trainer"""
super().connect(trainer, *args, **kwargs)
        self.epoch_loop.connect(trainer)
        self.val_loop.connect(trainer)

def reset(self) -> None:
        """Resets the internal state of this loop"""

def on_run_start(self) -> None:
        """Calls the ``on_train_start`` hook."""
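        # move the stored metric states to the same device as the model before training starts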
self.results.to(device=self.trainer.lightning_module.device)
        self.trainer.call_hook("on_train_start")

def on_advance_start(self) -> None:
        """Prepares the dataloader for training and calls the hooks ``on_epoch_start`` and ``on_train_epoch_start``"""
        model = self.trainer.lightning_module

        # reset train dataloader
        if self.current_epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
            self.trainer.reset_train_dataloader(model)

# TODO: specify the possible exception
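        # (most likely an ``AttributeError`` when the sampler does not define ``set_epoch``)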
        with suppress(Exception):
            # set seed for distributed sampler (enables shuffling for each epoch)
            self.trainer.train_dataloader.sampler.set_epoch(self.current_epoch)

        # update the gradient accumulation factor according to the accumulation_scheduler
        self.trainer.accumulation_scheduler.on_train_epoch_start(self.trainer, self.trainer.lightning_module)

        # track the accumulated loss over the gradient accumulation window, one entry per batch
        self.epoch_loop.batch_loop.accumulated_loss = TensorRunningAccum(
            window_length=self.trainer.accumulate_grad_batches
        )

def advance(self) -> None:
        """Runs one whole epoch."""
        train_dataloader = self.trainer.accelerator.process_dataloader(self.trainer.train_dataloader)
        train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)

        with self.trainer.profiler.profile("run_training_epoch"):
            # run train epoch
            epoch_output = self.epoch_loop.run(train_dataloader)

            if epoch_output is None:
                return

            # the global step is manually decreased here due to backwards compatibility with existing loggers
            # as they expect that the same step is used when logging epoch end metrics even when the batch loop has
            # finished. this means the attribute does not exactly track the number of optimizer steps applied.
            # TODO(@carmocca): deprecate and rename so users don't get confused
            self.global_step -= 1
            # log epoch metrics
            self.trainer.logger_connector.update_train_epoch_metrics()
            self.global_step += 1

def on_advance_end(self) -> None:
        """Updates the LR schedulers and does some internal bookkeeping"""
        if self.epoch_loop.batches_seen == 0:
            return

        self.epoch_loop.update_lr_schedulers('epoch', update_plateau_schedulers=True)

        did_train_only = self.trainer.disable_validation or self.trainer.evaluation_loop.skip
if did_train_only:
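            # as in ``on_run_end``, temporarily decrement the global step so the checkpoint
            # check runs with the step at which the last batch actually finished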
            self.global_step -= 1
            self._check_checkpoint_callback(True)
            self.global_step += 1

def on_run_end(self) -> None:
        """Runs teardown logic and calls the ``on_train_end`` hook"""
        # NOTE: the iteration_count/current_epoch is already incremented, but Lightning today
        # does not increment the current epoch at the last epoch run in Trainer.fit.
        # To simulate that current behavior, we decrement here.
        # TODO: must be fixed by https://github.com/PyTorchLightning/pytorch-lightning/issues/5007
        self.current_epoch -= 1

        # trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
        # when a checkpoint was saved at the last step
        self.epoch_loop.global_step -= 1
        # TODO: see discussion/rework https://github.com/PyTorchLightning/pytorch-lightning/issues/7406
        self._check_checkpoint_callback(should_update=True, is_last=True)
        self.epoch_loop.global_step += 1

# hook
        self.trainer.call_hook("on_train_end")

        # todo: TPU with 8 cores hangs in flush with TensorBoard; this might apply to all loggers.
        # It might be related to XLA tensors being blocked when moving them to the CPU.
        # kill loggers
        if self.trainer.logger is not None:
            self.trainer.logger.finalize("success")

        # summarize profile results
        self.trainer.profiler.describe()

        # give accelerators a chance to finish
        self.trainer.accelerator.on_train_end()

        # reset bookkeeping
        self.trainer._running_stage = None

def should_accumulate(self) -> bool:
        """Whether the gradients should be accumulated"""
        return self.epoch_loop.batch_loop.should_accumulate()

def _check_checkpoint_callback(self, should_update: bool, is_last: bool = False):
        """Checks if checkpointing needs to be done"""
        # TODO: bake this logic into the ModelCheckpoint callback
        if should_update and self.trainer.checkpoint_connector.has_trained:
            callbacks = self.trainer.checkpoint_callbacks

            if is_last and any(cb.save_last and cb.verbose for cb in callbacks):
                rank_zero_info("Saving latest checkpoint...")

model = self.trainer.lightning_module
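            # ``ModelCheckpoint`` implements its saving logic in ``on_validation_end``, so calling
            # that hook here triggers a checkpoint check even when no validation loop has run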
            for cb in callbacks:
                cb.on_validation_end(self.trainer, model)