From e3528afae3f178cf9d5d8ea6bc3f8a876646054a Mon Sep 17 00:00:00 2001
From: Phil
Date: Wed, 12 Aug 2020 12:34:59 +0200
Subject: [PATCH] Move optimizer creation after device placement for ddp backends. (#2904)

---
 pytorch_lightning/accelerators/ddp2_backend.py      | 14 +++++++-------
 pytorch_lightning/accelerators/ddp_backend.py       | 14 +++++++-------
 .../accelerators/ddp_spawn_backend.py               | 14 +++++++-------
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/pytorch_lightning/accelerators/ddp2_backend.py b/pytorch_lightning/accelerators/ddp2_backend.py
index 85bda4cd8d..aa7d16a10c 100644
--- a/pytorch_lightning/accelerators/ddp2_backend.py
+++ b/pytorch_lightning/accelerators/ddp2_backend.py
@@ -106,13 +106,6 @@ class DDP2Backend(object):
         log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
         log.info('-' * 100)
 
-        # CHOOSE OPTIMIZER
-        # allow for lr schedulers as well
-        optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
-        self.trainer.optimizers = optimizers
-        self.trainer.lr_schedulers = lr_schedulers
-        self.trainer.optimizer_frequencies = optimizer_frequencies
-
         # MODEL
         # copy model to each gpu
         if self.trainer.on_gpu:
@@ -130,6 +123,13 @@ class DDP2Backend(object):
             torch.cuda.set_device(self.trainer.root_gpu)
             model.cuda(self.trainer.root_gpu)
 
+        # CHOOSE OPTIMIZER
+        # allow for lr schedulers as well
+        optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
+        self.trainer.optimizers = optimizers
+        self.trainer.lr_schedulers = lr_schedulers
+        self.trainer.optimizer_frequencies = optimizer_frequencies
+
         # set model properties before going into wrapper
         self.trainer.copy_trainer_model_properties(model)
 
diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py
index e499feda65..241e480e1b 100644
--- a/pytorch_lightning/accelerators/ddp_backend.py
+++ b/pytorch_lightning/accelerators/ddp_backend.py
@@ -169,13 +169,6 @@ class DDPBackend(object):
         log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
         log.info('-' * 100)
 
-        # CHOOSE OPTIMIZER
-        # allow for lr schedulers as well
-        optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
-        self.trainer.optimizers = optimizers
-        self.trainer.lr_schedulers = lr_schedulers
-        self.trainer.optimizer_frequencies = optimizer_frequencies
-
         # call sync_bn before .cuda(), configure_apex and configure_ddp
         if self.trainer.sync_batchnorm:
             model = model.configure_sync_batchnorm(model)
@@ -197,6 +190,13 @@ class DDPBackend(object):
             torch.cuda.set_device(self.trainer.root_gpu)
             model.cuda(self.trainer.root_gpu)
 
+        # CHOOSE OPTIMIZER
+        # allow for lr schedulers as well
+        optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
+        self.trainer.optimizers = optimizers
+        self.trainer.lr_schedulers = lr_schedulers
+        self.trainer.optimizer_frequencies = optimizer_frequencies
+
         # set model properties before going into wrapper
         self.trainer.copy_trainer_model_properties(model)
 
diff --git a/pytorch_lightning/accelerators/ddp_spawn_backend.py b/pytorch_lightning/accelerators/ddp_spawn_backend.py
index 9ed68f6608..abcc235e36 100644
--- a/pytorch_lightning/accelerators/ddp_spawn_backend.py
+++ b/pytorch_lightning/accelerators/ddp_spawn_backend.py
@@ -110,13 +110,6 @@ class DDPSpawnBackend(object):
         log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
         log.info('-' * 100)
 
-        # CHOOSE OPTIMIZER
-        # allow for lr schedulers as well
-        optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
-        self.trainer.optimizers = optimizers
-        self.trainer.lr_schedulers = lr_schedulers
-        self.trainer.optimizer_frequencies = optimizer_frequencies
-
         # call sync_bn before .cuda(), configure_apex and configure_ddp
         if self.trainer.sync_batchnorm:
             model = model.configure_sync_batchnorm(model)
@@ -129,6 +122,13 @@ class DDPSpawnBackend(object):
             torch.cuda.set_device(self.trainer.root_gpu)
             model.cuda(self.trainer.root_gpu)
 
+        # CHOOSE OPTIMIZER
+        # allow for lr schedulers as well
+        optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
+        self.trainer.optimizers = optimizers
+        self.trainer.lr_schedulers = lr_schedulers
+        self.trainer.optimizer_frequencies = optimizer_frequencies
+
         # set model properties before going into wrapper
         self.trainer.copy_trainer_model_properties(model)
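The patch applies the same reordering to all three DDP backends: the model is
first placed on its device (torch.cuda.set_device(...), model.cuda(...)), and
only afterwards does the trainer build the optimizers and LR schedulers via
init_optimizers(model). Below is a minimal sketch of that ordering in plain
PyTorch; the toy model and SGD optimizer are illustrative assumptions, not
Lightning's internal API.

    import torch
    from torch import nn, optim

    model = nn.Linear(10, 2)

    # 1) Device placement first: move the parameters to the target device.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)

    # 2) Only then construct the optimizer, so its param_groups are built
    #    from the parameters as they exist on the target device (this mirrors
    #    the ordering the patch establishes in the DDP backends).
    optimizer = optim.SGD(model.parameters(), lr=0.1)

    # Quick smoke test of the resulting setup.
    x = torch.randn(4, 10, device=device)
    loss = model(x).sum()
    loss.backward()
    optimizer.step()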