lightning/pytorch_lightning/strategies/single_device.py


# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Union
import torch
import pytorch_lightning as pl
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.precision import PrecisionPlugin
from pytorch_lightning.strategies.training_type_plugin import Strategy
from pytorch_lightning.utilities import _XLA_AVAILABLE


class SingleDeviceStrategy(Strategy):
"""Strategy that handles communication on a single device."""

    def __init__(
        self,
        device: torch.device,
        accelerator: Optional["pl.accelerators.accelerator.Accelerator"] = None,
        checkpoint_io: Optional[CheckpointIO] = None,
        precision_plugin: Optional[PrecisionPlugin] = None,
    ):
        super().__init__(accelerator=accelerator, checkpoint_io=checkpoint_io, precision_plugin=precision_plugin)
        self.device: torch.device = device
        self.global_rank = 0
        self.local_rank = 0
        self.world_size = 1

    @property
    def on_tpu(self) -> bool:
        return self.root_device.type == "xla" and _XLA_AVAILABLE

    @property
    def on_gpu(self) -> bool:
        return self.root_device.type == "cuda" and torch.cuda.is_available()

    def reduce(self, tensor: Union[Any, torch.Tensor], *args: Any, **kwargs: Any) -> Union[Any, torch.Tensor]:
        """Reduces a tensor from several distributed processes to one aggregated tensor. As this strategy only
        operates with a single device, the reduction is simply the identity.

        Args:
            tensor: the tensor to sync and reduce
            *args: ignored
            **kwargs: ignored

        Returns:
            the unmodified input, as no reduction is needed for single-process operation
        """
        return tensor

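    # Minimal sketch of the identity behaviour above (assumes an existing ``strategy`` instance):
    #
    #     t = torch.ones(2, device=strategy.root_device)
    #     assert strategy.reduce(t) is t  # no cross-process reduction on a single device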
    def all_gather(self, tensor: torch.Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> torch.Tensor:
        """Perform an ``all_gather`` on all processes.

        With a single device there is nothing to gather, so the input tensor is returned unchanged.
        """
        return tensor

    @property
    def root_device(self) -> torch.device:
        return self.device

    def model_to_device(self) -> None:
        self.model.to(self.root_device)

    def setup(self, trainer: "pl.Trainer") -> None:
        self.model_to_device()
        super().setup(trainer)

    @property
    def is_global_zero(self) -> bool:
        return True

    def barrier(self, *args, **kwargs) -> None:
        pass

    def broadcast(self, obj: object, src: int = 0) -> object:
        return obj

    def teardown(self) -> None:
        super().teardown()
        if self.on_gpu:
            # GPU teardown
            self.lightning_module.cpu()
            # clean up memory
            torch.cuda.empty_cache()