refactor 4 (#2711)

* refactor ddp_spawn
William Falcon 2020-07-25 22:06:18 -04:00 committed by GitHub
parent 44d85c1219
commit 0d96b2698a
3 changed files with 182 additions and 28 deletions

pytorch_lightning/accelerator_backends/__init__.py

@@ -1,3 +1,4 @@
from pytorch_lightning.accelerator_backends.gpu_backend import GPUBackend
from pytorch_lightning.accelerator_backends.tpu_backend import TPUBackend
from pytorch_lightning.accelerator_backends.dp_backend import DataParallelBackend
+from pytorch_lightning.accelerator_backends.ddp_spawn_backend import DDPSpawnBackend

pytorch_lightning/accelerator_backends/ddp_spawn_backend.py (new file)

@@ -0,0 +1,172 @@
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import os
import torch
import torch.multiprocessing as mp
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning import _logger as log
try:
    from apex import amp
except ImportError:
    APEX_AVAILABLE = False
else:
    APEX_AVAILABLE = True

class DDPSpawnBackend(object):

    def __init__(self, trainer):
        self.trainer = trainer
        self.q = None

    def setup(self):
        self.trainer.set_random_port()

        # pass in a state q
        smp = mp.get_context('spawn')
        self.q = smp.SimpleQueue()

    def train(self, model, nprocs):
        mp.spawn(self.ddp_train, nprocs=nprocs, args=(self.q, model,))

    def teardown(self, model):
        # restore main state with best weights
        best_path = self.q.get()
        results = self.q.get()
        last_path = self.q.get()

        # transfer back the best path to the trainer
        self.trainer.checkpoint_callback.best_model_path = best_path

        # load last weights
        if last_path is not None and not self.trainer.testing:
            ckpt = torch.load(last_path, map_location=lambda storage, loc: storage)
            model.load_state_dict(ckpt)

        self.trainer.model = model
        return results

    def ddp_train(self, process_idx, q, model, is_master=False, proc_offset=0):
        """
        Entry point for ddp

        Args:
            process_idx: index of the spawned process (its local rank before any offset)
            q: multiprocessing queue used to send the best/last checkpoint paths and results back to the main process
            model: the LightningModule to train
            is_master: if True, resolve the GPU index from CUDA_VISIBLE_DEVICES instead of the process index
            proc_offset: offset added to process_idx
        """
        # offset the process id if requested
        process_idx = process_idx + proc_offset

        # show progressbar only on progress_rank 0
        if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
            self.trainer.progress_bar_callback.disable()

        # determine which process we are and world size
        if self.trainer.use_ddp:
            self.trainer.local_rank = process_idx
            self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx
            self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes

        elif self.trainer.use_ddp2:
            self.trainer.local_rank = self.trainer.node_rank
            self.trainer.global_rank = self.trainer.node_rank
            self.trainer.world_size = self.trainer.num_nodes

        # set warning rank
        rank_zero_only.rank = self.trainer.global_rank

        # set up server using proc 0's ip address
        # try to init for 20 times at max in case ports are taken
        # where to store ip_table
        model.trainer = self.trainer
        model.init_ddp_connection(
            self.trainer.global_rank,
            self.trainer.world_size,
            self.trainer.is_slurm_managing_tasks
        )

        # call setup after the ddp process has connected
        if not self.trainer.testing:
            self.trainer.setup('fit')
            model.setup('fit')

        # on global rank 0, let everyone know training is starting
        if self.trainer.is_global_zero:
            log.info('-' * 100)
            log.info(f'distributed_backend={self.trainer.distributed_backend}')
            log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
            log.info('-' * 100)

        # CHOOSE OPTIMIZER
        # allow for lr schedulers as well
        optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
        self.trainer.optimizers = optimizers
        self.trainer.lr_schedulers = lr_schedulers
        self.trainer.optimizer_frequencies = optimizer_frequencies

        # MODEL
        # copy model to each gpu
        if self.trainer.on_gpu:
            gpu_idx = process_idx

            if is_master:
                # source of truth is cuda for gpu idx
                gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
                gpu_idx = int(gpus[self.trainer.local_rank])

            self.trainer.root_gpu = gpu_idx
            torch.cuda.set_device(self.trainer.root_gpu)
            model.cuda(self.trainer.root_gpu)

        # set model properties before going into wrapper
        self.trainer.copy_trainer_model_properties(model)

        # AMP
        # run through amp wrapper before going to distributed DP
        # TODO: remove with dropping NVIDIA AMP support
        native_amp_available = hasattr(torch.cuda, "amp") and hasattr(torch.cuda.amp, "autocast")
        if self.trainer.use_amp and not native_amp_available:
            model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
            self.trainer.optimizers = optimizers
            self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)

        # DDP and DDP-spawn use the root GPU only; DDP2 uses all GPUs on the machine
        if self.trainer.distributed_backend == 'ddp' or self.trainer.distributed_backend == 'ddp_spawn':
            device_ids = [self.trainer.root_gpu]
        elif self.trainer.use_ddp2:
            device_ids = self.trainer.data_parallel_device_ids
        else:  # includes ddp_cpu
            device_ids = None

        # allow user to configure ddp
        model = model.configure_ddp(model, device_ids)

        # continue training routine
        results = self.trainer.run_pretrain_routine(model)

        # get original model
        model = self.trainer.get_model()

        # persist info in ddp_spawn
        self.trainer.transfer_ddp_spawn_state_on_fit_end(model, q, results)

        # clean up memory
        torch.cuda.empty_cache()
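
Note: the handoff in teardown() above relies on ddp_train / transfer_ddp_spawn_state_on_fit_end pushing exactly three items onto the queue in a fixed order (best checkpoint path, results, last checkpoint path). The body of transfer_ddp_spawn_state_on_fit_end is not part of this diff, so the sketch below is only a minimal, standalone illustration of that spawn-plus-SimpleQueue handshake; the worker function and the paths/payload it sends are hypothetical stand-ins.

import torch.multiprocessing as mp


def worker(process_idx, q):
    # stand-in for ddp_train: only one process reports back, mirroring the
    # single best_path/results/last_path read in DDPSpawnBackend.teardown()
    if process_idx == 0:
        q.put('/tmp/best.ckpt')    # best_model_path (hypothetical)
        q.put({'val_loss': 0.1})   # training results (hypothetical)
        q.put('/tmp/last.ckpt')    # last checkpoint path (hypothetical)


if __name__ == '__main__':
    smp = mp.get_context('spawn')
    q = smp.SimpleQueue()

    # blocks until every spawned process exits, like DDPSpawnBackend.train()
    mp.spawn(worker, nprocs=2, args=(q,))

    # read back in the same order the worker wrote, like teardown()
    best_path = q.get()
    results = q.get()
    last_path = q.get()
    print(best_path, results, last_path)

If the producer and consumer disagreed on the order or number of items, teardown() would block on q.get(), which is why the three puts and three gets have to stay in lockstep.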

pytorch_lightning/trainer/trainer.py

@@ -51,7 +51,7 @@ from pytorch_lightning.utilities import parsing, rank_zero_info, rank_zero_only,
from pytorch_lightning.utilities.debugging import InternalDebugger
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.trainer.configuration_validator import ConfigValidator
-from pytorch_lightning.accelerator_backends import GPUBackend, TPUBackend, DataParallelBackend
+from pytorch_lightning.accelerator_backends import GPUBackend, TPUBackend, DataParallelBackend, DDPSpawnBackend

# warnings to ignore in trainer
warnings.filterwarnings(
@@ -1055,10 +1055,16 @@ class Trainer(
                self.ddp_train(process_idx=task, q=None, model=model)

            elif self.distributed_backend == 'ddp_cpu':
-                results = self.__run_ddp_spawn(model, nprocs=self.num_processes)
+                self.accelerator_backend = DDPSpawnBackend(self)
+                self.accelerator_backend.setup()
+                self.accelerator_backend.train(model, nprocs=self.num_processes)
+                results = self.accelerator_backend.teardown(model)

            elif self.distributed_backend == 'ddp_spawn':
-                results = self.__run_ddp_spawn(model, nprocs=self.num_processes)
+                self.accelerator_backend = DDPSpawnBackend(self)
+                self.accelerator_backend.setup()
+                self.accelerator_backend.train(model, nprocs=self.num_processes)
+                results = self.accelerator_backend.teardown(model)

            elif self.distributed_backend == 'ddp':
                self.set_random_port()
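
Both branches above repeat the same four-call sequence, which is the lifecycle contract this accelerator-backend refactor establishes: construct the backend, set it up, spawn training, then tear down and collect results. A compressed sketch of that contract, using the names from the diff (the run_ddp_spawn_backend helper itself is hypothetical):

def run_ddp_spawn_backend(trainer, model, nprocs):
    # hypothetical helper restating the setup -> train -> teardown contract
    backend = DDPSpawnBackend(trainer)
    backend.setup()                       # reserve a port and create the state queue
    backend.train(model, nprocs=nprocs)   # mp.spawn(ddp_train, ...)
    return backend.teardown(model)        # read best/last checkpoint paths and results off the queue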
@@ -1116,31 +1122,6 @@ class Trainer(
        # used for testing or when we need to know that training succeeded
        return results or 1

-    def __run_ddp_spawn(self, model, nprocs):
-        self.set_random_port()
-
-        # pass in a state q
-        smp = mp.get_context('spawn')
-        q = smp.SimpleQueue()
-
-        mp.spawn(self.ddp_train, nprocs=nprocs, args=(q, model,))
-
-        # restore main state with best weights
-        best_path = q.get()
-        results = q.get()
-        last_path = q.get()
-
-        # transfer back the best path to the trainer
-        self.checkpoint_callback.best_model_path = best_path
-
-        # load last weights
-        if last_path is not None and not self.testing:
-            ckpt = torch.load(last_path, map_location=lambda storage, loc: storage)
-            model.load_state_dict(ckpt)
-
-        self.model = model
-        return results
-
    def can_prepare_data(self):
        if self.prepare_data_per_node:
            return self.local_rank == 0
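
For reference, a minimal sketch of how a user would exercise the new DDPSpawnBackend path through the public API, assuming the Trainer and LightningModule interface as of this commit; TinyModel and everything inside it are hypothetical stand-ins, not part of the diff.

import torch
from torch.utils.data import DataLoader, TensorDataset
import pytorch_lightning as pl


class TinyModel(pl.LightningModule):
    """Hypothetical minimal LightningModule, just enough for Trainer.fit()."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)

    def forward(self, x):
        return self.layer(x)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.cross_entropy(self(x), y)
        return {'loss': loss}

    def train_dataloader(self):
        x = torch.randn(64, 32)
        y = torch.randint(0, 2, (64,))
        return DataLoader(TensorDataset(x, y), batch_size=16)

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)


if __name__ == '__main__':
    # 'ddp_cpu' spawns num_processes workers via DDPSpawnBackend;
    # with GPUs available, distributed_backend='ddp_spawn' plus gpus=2 takes the same path
    trainer = pl.Trainer(distributed_backend='ddp_cpu', num_processes=2, max_epochs=1)
    trainer.fit(TinyModel())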