lightning/pytorch_lightning/loops/batch/training_batch_loop.py

# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Tuple

import numpy as np
from deprecate import void
from torch import Tensor
from torch.optim import Optimizer

from pytorch_lightning.loops.base import Loop
from pytorch_lightning.loops.batch.manual import ManualOptimization
from pytorch_lightning.loops.optimizer.optimizer_loop import OptimizerLoop
from pytorch_lightning.trainer.supporters import TensorRunningAccum
from pytorch_lightning.utilities import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
from pytorch_lightning.utilities.warnings import WarningCache


class TrainingBatchLoop(Loop):
    """Runs over a single batch of data."""

    def __init__(self) -> None:
        super().__init__()
        self.accumulated_loss: Optional[Tensor] = None
        self.batch_outputs: Optional[List[List[STEP_OUTPUT]]] = None
        self.running_loss: TensorRunningAccum = TensorRunningAccum(window_length=20)
        # the current split index when the batch gets split into chunks in truncated backprop through time
        self.split_idx: Optional[int] = None
        self.optimizer_loop = OptimizerLoop()
        self.manual_loop = ManualOptimization()

        self._warning_cache: WarningCache = WarningCache()
        self._optimizer_freq_cumsum: Optional[int] = None
        self._remaining_splits: Optional[List[Any]] = None

    @property
    def done(self) -> bool:
        """Returns whether all batch splits have been processed already."""
        return len(self._remaining_splits) == 0

    @property
    def optimizer_freq_cumsum(self) -> int:
        """Returns the cumulative sum of the optimizer frequencies."""
        if self._optimizer_freq_cumsum is None:
            self._optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
        return self._optimizer_freq_cumsum

    def connect(
        self, optimizer_loop: Optional["Loop"] = None, manual_loop: Optional[ManualOptimization] = None
    ) -> None:
        if optimizer_loop is not None:
            self.optimizer_loop = optimizer_loop
        if manual_loop is not None:
            self.manual_loop = manual_loop
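
    # Rough usage sketch (hypothetical custom loop): ``connect`` lets a user swap in their own sub-loops before
    # fitting, e.g.
    #
    #   batch_loop = trainer.fit_loop.epoch_loop.batch_loop
    #   batch_loop.connect(optimizer_loop=MyOptimizerLoop())  # MyOptimizerLoop: a user-defined OptimizerLoop subclass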

    def run(self, batch: Any, batch_idx: int) -> AttributeDict:
        """Runs all the data splits and the ``on_batch_start`` and ``on_train_batch_start`` hooks.

        Args:
            batch: the current batch to run the train step on
            batch_idx: the index of the current batch
        """
        if batch is None:
            self._warning_cache.warn("train_dataloader yielded None. If this was on purpose, ignore this warning...")
            return AttributeDict(signal=0, training_step_output=[[]])

        # hook
        self.trainer.logger_connector.on_batch_start()
        response = self.trainer.call_hook("on_batch_start")
        if response == -1:
            return AttributeDict(signal=-1)

        # hook
        response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, 0)
        if response == -1:
            return AttributeDict(signal=-1)

        self.trainer.fit_loop.epoch_loop.batch_progress.increment_started()

        super().run(batch, batch_idx)
        output = AttributeDict(signal=0, training_step_output=self.batch_outputs)
        self.batch_outputs = None  # free memory
        return output
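
    # Illustrative note: the ``signal`` field follows the hook convention where -1 means "stop". A user hook
    # returning -1, e.g.
    #
    #   def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
    #       if batch_idx > 100:  # hypothetical skip condition
    #           return -1
    #
    # short-circuits ``run`` above with ``AttributeDict(signal=-1)``, which the epoch loop treats as a request
    # to skip the remaining batches of the current epoch.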

    def reset(self) -> None:
        """Resets the loop state."""
        self.batch_outputs = [[] for _ in range(len(self.trainer.optimizers))]

    def on_run_start(self, batch: Any, batch_idx: int):
        """Splits the data into tbptt splits.

        Args:
            batch: the current batch to run the train step on
            batch_idx: the index of the current batch
        """
        void(batch_idx)
        self._remaining_splits = list(enumerate(self._tbptt_split_batch(batch)))

    def advance(self, batch, batch_idx):
        """Runs the train step together with optimization (if necessary) on the current batch split.

        Args:
            batch: the current batch to run the training on (this is not the split!)
            batch_idx: the index of the current batch
        """
        void(batch)
        split_idx, split_batch = self._remaining_splits.pop(0)
        self.split_idx = split_idx

        # let logger connector extract current batch size
        self.trainer.logger_connector.on_train_split_start(batch_idx, split_idx, split_batch)

        if self.trainer.lightning_module.automatic_optimization:
            # in automatic optimization, hand over execution to the OptimizerLoop
            optimizers = [optimizer for _, optimizer in self.get_active_optimizers(batch_idx)]
            batch_outputs = self.optimizer_loop.run(split_batch, optimizers, batch_idx)
            # combine outputs from each optimizer
            for k in range(len(batch_outputs)):
                self.batch_outputs[k].extend(batch_outputs[k])
        else:
            # in manual optimization, hand over execution to the ManualOptimization loop
            result = self.manual_loop.run(split_batch, batch_idx)
            if result is not None and result.loss is not None:
                self.batch_outputs[0].append(result.drop_closure_loss())
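
    # Shape of the collected outputs (illustrative): with two optimizers and a batch split into two tbptt chunks,
    # ``advance`` runs twice and ``self.batch_outputs`` ends up grouped per optimizer, roughly
    #
    #   [[out_opt0_split0, out_opt0_split1],   # optimizer 0
    #    [out_opt1_split0, out_opt1_split1]]   # optimizer 1
    #
    # whereas manual optimization only ever appends to index 0.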

    def on_run_end(self) -> None:
        self.optimizer_loop._hiddens = None
        # this is not necessary as the manual loop runs for only 1 iteration, but just in case
        self.manual_loop._hiddens = None

    def teardown(self) -> None:
        # release memory
        self._remaining_splits = None

    def num_active_optimizers(self, batch_idx: Optional[int] = None) -> int:
        """Gets the number of active optimizers based on their frequency."""
        return len(self.get_active_optimizers(batch_idx))

    def _tbptt_split_batch(self, batch: Any) -> List[Any]:
        """Splits a single batch into a list of sequence steps for tbptt.

        Args:
            batch: the current batch to split
        """
        tbptt_steps = self.trainer.lightning_module.truncated_bptt_steps
        if tbptt_steps == 0:
            return [batch]

        model_ref = self.trainer.lightning_module
        with self.trainer.profiler.profile("tbptt_split_batch"):
            splits = model_ref.tbptt_split_batch(batch, tbptt_steps)
        return splits
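
    # Sketch of the default splitting behaviour (the ``tbptt_split_batch`` hook can be overridden by the user):
    # tensors in the batch are sliced along the time dimension (dim 1) into chunks of ``truncated_bptt_steps``.
    # Assuming a sequence length of 6 and ``truncated_bptt_steps = 2``:
    #
    #   batch = [x, y]                              # x: (B, 6, F), y: (B, 6)
    #   splits = model.tbptt_split_batch(batch, 2)  # -> 3 splits, each holding (B, 2, ...) slices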

    def _update_running_loss(self, current_loss: Tensor) -> None:
        """Updates the running loss value with the current value."""
        if self.trainer.lightning_module.automatic_optimization:
            # track total loss for logging (avoid mem leaks)
            self.accumulated_loss.append(current_loss)

        accumulated_loss = self.accumulated_loss.mean()

        if accumulated_loss is not None:
            # calculate running loss for display
            self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)

        # reset for next set of accumulated grads
        self.accumulated_loss.reset()
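
    # Worked example (assumed numbers): with ``accumulate_grad_batches = 4`` the losses reaching this method have
    # already been scaled by 1/4 for accumulation, say [0.25, 0.25, 0.25, 0.25]. Their mean (0.25) multiplied by
    # ``accumulate_grad_batches`` recovers ~1.0, the unscaled loss that ``running_loss`` shows in the progress bar
    # (smoothed over its window of 20 entries).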

    def get_active_optimizers(self, batch_idx: Optional[int] = None) -> List[Tuple[int, Optimizer]]:
        """Returns the currently active optimizers. When multiple optimizers are used with different frequencies,
        only one of the optimizers is active at a time.

        Returns:
            A list of tuples (opt_idx, optimizer) of currently active optimizers.
        """
        if not self.trainer.optimizer_frequencies:
            # call training_step once per optimizer
            return list(enumerate(self.trainer.optimizers))

        optimizers_loop_length = self.optimizer_freq_cumsum[-1]
        current_place_in_loop = batch_idx % optimizers_loop_length

        # find the optimizer index by looking for the first {item > current_place} in the cumsum list
        opt_idx = int(np.argmax(self.optimizer_freq_cumsum > current_place_in_loop))
        return [(opt_idx, self.trainer.optimizers[opt_idx])]
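
    # Worked example (illustrative): ``optimizer_frequencies = [2, 1]`` gives ``optimizer_freq_cumsum == [2, 3]``
    # and a loop length of 3, so
    #
    #   batch_idx % 3 in (0, 1)  ->  argmax([2, 3] > place) == 0  ->  optimizer 0 is active
    #   batch_idx % 3 == 2       ->  argmax([2, 3] > place) == 1  ->  optimizer 1 is active
    #
    # i.e. optimizer 0 steps on two batches for every one batch handled by optimizer 1.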