diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py
index 8369130778..79e498f6e1 100644
--- a/pytorch_lightning/trainer/connectors/accelerator_connector.py
+++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py
@@ -176,11 +176,6 @@ class AcceleratorConnector:
         self._training_type_plugin_resolved = False
         self.accelerator = self.select_accelerator()
 
-        # init flags for SLURM+DDP to work
-        self.world_size = 1
-        self.interactive_ddp_procs = []
-        self.global_rank = 0
-
         # benchmarking
         # TODO: should this be moved to GPU accelerator?
         torch.backends.cudnn.benchmark = self.benchmark
@@ -1001,14 +996,6 @@
             # likely not on slurm, so set the slurm managed flag to false
             self.is_slurm_managing_tasks = False
 
-        # used for tests only, set this flag to simulate slurm managing a task
-        try:
-            should_fake = int(os.environ["FAKE_SLURM_MANAGING_TASKS"])
-            if should_fake:
-                self.is_slurm_managing_tasks = True
-        except Exception:
-            pass
-
         # notify user the that slurm is managing tasks
         if self.is_slurm_managing_tasks:
             rank_zero_info("Multi-processing is handled by Slurm.")