From 7da133d91d9cdf513dc2d1f5d16e1ed91372cdb5 Mon Sep 17 00:00:00 2001
From: William Falcon
Date: Sun, 21 Jul 2019 20:06:03 -0400
Subject: [PATCH] fixed ddp crash

---
 pytorch_lightning/models/trainer.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pytorch_lightning/models/trainer.py b/pytorch_lightning/models/trainer.py
index 8108e01d82..8b8267c18f 100644
--- a/pytorch_lightning/models/trainer.py
+++ b/pytorch_lightning/models/trainer.py
@@ -178,9 +178,9 @@ class Trainer(TrainerIO):
         self.use_ddp = distributed_backend == 'ddp'
 
         # use ddp automatically if nb_gpu_nodes > 1
-        if nb_gpu_nodes > 1:
+        if nb_gpu_nodes > 1 and self.use_dp:
             self.use_ddp = True
-            self.use_ddp = False
+            self.use_dp = False
             w = 'DataParallel does not support nb_gpu_nodes > 1. ' \
                 'Switching to DistributedDataParallel for you. ' \
                 'To silence this warning set distributed_backend=ddp'