From e86b191691729800c259318b8db7008a376a3fb7 Mon Sep 17 00:00:00 2001
From: William Falcon
Date: Thu, 18 Jul 2019 11:20:11 -0400
Subject: [PATCH] set dp as default backend

---
 pytorch_lightning/models/trainer.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pytorch_lightning/models/trainer.py b/pytorch_lightning/models/trainer.py
index 308963007e..d4d2469efd 100644
--- a/pytorch_lightning/models/trainer.py
+++ b/pytorch_lightning/models/trainer.py
@@ -311,6 +311,7 @@ class Trainer(TrainerIO):
 
         # when using multi-node or DDP within a node start each module in a separate process
         if self.use_ddp:
+            print('using ddp')
             # must copy only the meta of the exp so it survives pickle/unpickle when going to new process
             self.experiment = self.experiment.get_meta_copy()
             mp.spawn(self.ddp_train, nprocs=len(self.data_parallel_device_ids), args=(model, ))
@@ -318,6 +319,7 @@ class Trainer(TrainerIO):
         # 1 gpu or dp option triggers training using DP module
         # easier to avoid NCCL issues
         elif self.use_dp:
+            print('using dp')
            self.dp_train(model)
 
         # ON CPU
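
Note for review context: below is a minimal, standalone sketch of the backend
dispatch these two hunks instrument. The use_ddp/use_dp flags, the dp_train
and ddp_train names, the if/elif ordering, and the mp.spawn call mirror the
diff; the toy training functions, the device_ids default, and the fit()
wrapper are illustrative assumptions, not the real Trainer API.

    import torch.multiprocessing as mp

    def ddp_train(gpu_idx, model):
        # in the real Trainer this runs the full training loop; here it
        # only shows that each device gets its own spawned process
        print(f'ddp worker {gpu_idx} started for {model!r}')

    def dp_train(model):
        # DP keeps everything in the parent process (no NCCL involved)
        print(f'dp training {model!r} in the parent process')

    def fit(model, use_ddp=False, use_dp=True, device_ids=(0, 1)):
        # same if/elif ordering as the patched Trainer code
        if use_ddp:
            print('using ddp')
            # mirror of the diff: one spawned process per device
            mp.spawn(ddp_train, nprocs=len(device_ids), args=(model,))
        elif use_dp:
            print('using dp')
            dp_train(model)

    if __name__ == '__main__':
        fit('toy-model')  # prints 'using dp', then trains in-process

With dp as the default (per the commit subject), a plain fit(model) call takes
the elif branch; only an explicit use_ddp=True pays the cost of pickling state
into per-GPU worker processes, which is why the diff copies only the
experiment's meta before mp.spawn.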