From cc43d07db1ab77385feff04c01f040c5cad805a9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adrian=20W=C3=A4lchli?=
Date: Sat, 5 Feb 2022 07:10:16 +0100
Subject: [PATCH] Remove legacy dead code in DDP script launch (#11678)

Co-authored-by: Jirka
Co-authored-by: Carlos Mocholi
---
 pytorch_lightning/strategies/ddp.py | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/pytorch_lightning/strategies/ddp.py b/pytorch_lightning/strategies/ddp.py
index b5c986cd5c..e3281f3ad3 100644
--- a/pytorch_lightning/strategies/ddp.py
+++ b/pytorch_lightning/strategies/ddp.py
@@ -57,7 +57,7 @@ from pytorch_lightning.utilities.distributed import (
     sync_ddp_if_available,
 )
 from pytorch_lightning.utilities.enums import _StrategyType
-from pytorch_lightning.utilities.exceptions import DeadlockDetectedException, MisconfigurationException
+from pytorch_lightning.utilities.exceptions import DeadlockDetectedException
 from pytorch_lightning.utilities.seed import reset_seed
 from pytorch_lightning.utilities.types import STEP_OUTPUT
 
@@ -211,13 +211,6 @@ class DDPStrategy(ParallelStrategy):
         else:  # Script called as `python -m a.b.c`
             command = [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:]
 
-        # the visible devices tell us how many GPUs we want to use.
-        # when the trainer script was called the device has already been scoped by the time
-        # code reaches this point. so, to call the scripts, we need to leave cuda visible devices alone
-        # but forward the GPUs selected via environment variables
-        if self.parallel_devices is None:
-            raise MisconfigurationException("you selected (distribute_backend = ddp) but did not set Trainer(gpus=?)")
-
         os.environ["WORLD_SIZE"] = f"{self.num_processes * self.num_nodes}"
 
         self.interactive_ddp_procs = []
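
Note (illustrative, not part of the patch): the second hunk sits in the logic that re-launches the current training script once per additional process and forwards the world size through the environment. Below is a minimal Python sketch of that relaunch pattern under stated assumptions; launch_child_scripts, num_processes, and num_nodes are hypothetical stand-ins for the strategy's attributes, not the actual DDPStrategy API.

import os
import subprocess
import sys

import __main__


def launch_child_scripts(num_processes: int, num_nodes: int) -> list:
    # Rebuild the command that started this process, mirroring the diff context:
    # plain `python script.py ...` vs. module-style `python -m a.b.c ...`.
    if __main__.__spec__ is None:
        command = [sys.executable, os.path.abspath(sys.argv[0])] + sys.argv[1:]
    else:
        command = [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:]

    # Forward the world size via the environment, as in the assignment kept by
    # the patch (os.environ["WORLD_SIZE"] = f"{self.num_processes * self.num_nodes}").
    env = os.environ.copy()
    env["WORLD_SIZE"] = f"{num_processes * num_nodes}"

    # Spawn one child per remaining local rank; rank 0 is the current process.
    procs = []
    for local_rank in range(1, num_processes):
        child_env = env.copy()
        child_env["LOCAL_RANK"] = str(local_rank)
        procs.append(subprocess.Popen(command, env=child_env))
    return procs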