# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re

import torch
import torch.distributed as torch_distrib

from pytorch_lightning import _logger as log
from pytorch_lightning.accelerators.base_backend import Accelerator
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.cloud_io import atomic_save
from pytorch_lightning.utilities.distributed import rank_zero_only, rank_zero_warn
from pytorch_lightning.utilities.seed import seed_everything

try:
    from hydra.core.hydra_config import HydraConfig
    from hydra.utils import get_original_cwd, to_absolute_path
except ImportError:
    HYDRA_AVAILABLE = False
else:
    HYDRA_AVAILABLE = True


class DDPBase(Accelerator):
    """Base class for the DDP-style backends; concrete backends implement the
    rank, device, and device-id hooks declared at the bottom of this class."""

    def __init__(self, trainer):
        super().__init__(trainer)

    def training_step(self, args):
        if self.trainer.amp_backend == AMPType.NATIVE:
            # native AMP: run the forward pass under autocast
            with torch.cuda.amp.autocast():
                output = self.trainer.model(*args)
        else:
            output = self.trainer.model(*args)
        return output

    def validation_step(self, args):
        output = self.training_step(args)
        return output

    def test_step(self, args):
        output = self.training_step(args)
        return output

    def barrier(self, name: str = None):
        torch_distrib.barrier()

    def early_stopping_should_stop(self, pl_module):
        # sum the per-rank stop votes; stop only when the vote is unanimous
        stop = torch.tensor(int(self.trainer.should_stop), device=pl_module.device)
        torch_distrib.all_reduce(stop, op=torch_distrib.ReduceOp.SUM)
        torch_distrib.barrier()
        should_stop = stop == self.trainer.world_size
        return should_stop
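
    # A worked example of the consensus rule above, assuming a hypothetical
    # 3-process job where two ranks vote to stop and one does not:
    #
    #     votes = [1, 1, 0]    # per-rank int(should_stop)
    #     total = sum(votes)   # what all_reduce(SUM) leaves on every rank
    #     total == len(votes)  # False -> training continues everywhere
    #
    # so training stops only once every rank has asked to stop.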

    def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):
        if self.trainer.distributed_backend.lower() not in ['ddp_spawn', 'ddp_cpu', 'tpu']:
            return

        # track the best model path
        best_model_path = None
        if self.trainer.checkpoint_callback is not None:
            best_model_path = self.trainer.checkpoint_callback.best_model_path

        if self.trainer.global_rank == 0 and mp_queue is not None:
            rank_zero_warn('cleaning up ddp environment...')

            # todo, pass complete checkpoint as state dictionary
            mp_queue.put(best_model_path)
            mp_queue.put(results)

            # save the last weights (escape the dot so only the extension matches)
            last_path = None
            if not self.trainer.testing and best_model_path is not None and len(best_model_path) > 0:
                last_path = re.sub(r'\.ckpt', '.tmp_end.ckpt', best_model_path)
                atomic_save(model.state_dict(), last_path)
            mp_queue.put(last_path)
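
    # The parent process is expected to drain the queue in the same order as
    # the puts above; a minimal sketch of the consuming side (the spawn
    # plumbing around it lives in the concrete backends, not in this file):
    #
    #     best_model_path = mp_queue.get()
    #     results = mp_queue.get()
    #     last_path = mp_queue.get()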

    def ddp_train_tmp(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):
        """
        Entry point for ddp

        Args:
            process_idx: index of this process on its node
            mp_queue: multiprocessing queue used to hand results back to the parent process
            model: the LightningModule to train
            is_master: whether this process is the master of its node (forwarded to ``model_to_device``)
            proc_offset: offset added to ``process_idx`` before the ranks are computed

        Returns:
            the results of ``train_or_test`` on global rank 0, ``None`` on all other ranks
        """
        # make each spawned process deterministic when a global seed was set
        seed = os.environ.get("PL_GLOBAL_SEED")
        if seed is not None:
            seed_everything(int(seed))

        # offset the process id if requested
        process_idx = process_idx + proc_offset

        # show the progress bar only on node 0, process 0
        if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
            self.trainer.progress_bar_callback.disable()

        # determine which process we are and world size
        self.set_world_ranks(process_idx)

        # set warning rank
        rank_zero_only.rank = self.trainer.global_rank

        # set up the server using proc 0's ip address
        # try to init for 20 times at max in case ports are taken
        # where to store ip_table
        model.trainer = self.trainer
        model.init_ddp_connection(
            self.trainer.global_rank,
            self.trainer.world_size,
            self.trainer.is_slurm_managing_tasks
        )

        # call setup after the ddp process has connected
        self.trainer.call_setup_hook(model)

        # on global rank 0 let everyone know training is starting
        if self.trainer.is_global_zero:
            log.info('-' * 100)
            log.info(f'distributed_backend={self.trainer.distributed_backend}')
            log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
            log.info('-' * 100)

        # call sync_bn before .cuda(), configure_apex and configure_ddp
        if self.trainer.sync_batchnorm:
            model = model.configure_sync_batchnorm(model)

        # move the model to the correct device
        self.model_to_device(model, process_idx, is_master)

        # CHOOSE OPTIMIZER
        # allow for lr schedulers as well
        optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
        self.trainer.optimizers = optimizers
        self.trainer.lr_schedulers = lr_schedulers
        self.trainer.optimizer_frequencies = optimizer_frequencies

        # set model properties before going into wrapper
        self.trainer.model_connector.copy_trainer_model_properties(model)

        # AMP - run through amp wrapper before going to distributed DP
        model, optimizers = self.trainer.precision_connector.connect(model, optimizers)

        # device ids change depending on the DDP setup
        device_ids = self.get_device_ids()

        # allow user to configure ddp
        model = model.configure_ddp(model, device_ids)

        # set up training routine
        self.trainer.train_loop.setup_training(model)

        # train or test
        results = self.train_or_test()

        # get original model
        model = self.trainer.get_model()

        # persist info in ddp_spawn
        self.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)

        # clean up memory
        torch.cuda.empty_cache()

        # only global rank 0 returns results to the caller
        if self.trainer.global_rank == 0:
            return results
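
    # How a spawn-style subclass typically launches this entry point; a sketch
    # assuming torch.multiprocessing and a SimpleQueue (the real call sites
    # live in the concrete backends, not in this file):
    #
    #     import torch.multiprocessing as mp
    #
    #     smp = mp.get_context('spawn')
    #     mp_queue = smp.SimpleQueue()
    #     mp.spawn(self.ddp_train_tmp, nprocs=self.trainer.num_processes,
    #              args=(mp_queue, model))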

    def set_world_ranks(self, process_idx):
        raise NotImplementedError('to create a ddp backend, please implement set_world_ranks')

    def model_to_device(self, model, process_idx, is_master):
        raise NotImplementedError('to create a ddp backend, please implement model_to_device')

    def get_device_ids(self):
        raise NotImplementedError('to create a ddp backend, please implement get_device_ids')
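

# A hypothetical minimal subclass, shown only to illustrate the three hooks a
# concrete backend fills in; the rank arithmetic and single-GPU placement below
# are assumptions for this sketch, not the code of any shipped backend:
#
#     class ExampleDDPBackend(DDPBase):
#         def set_world_ranks(self, process_idx):
#             self.trainer.local_rank = process_idx
#             self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx
#             self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes
#
#         def model_to_device(self, model, process_idx, is_master):
#             self.trainer.root_gpu = process_idx
#             torch.cuda.set_device(self.trainer.root_gpu)
#             model.cuda(self.trainer.root_gpu)
#
#         def get_device_ids(self):
#             return [self.trainer.root_gpu]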