From b256d6acd3179cddeeee5c2e279d4c9725c9ab76 Mon Sep 17 00:00:00 2001
From: Santiago Castro
Date: Wed, 28 Jul 2021 01:06:45 -0700
Subject: [PATCH] Avoid unnecessary list creation (#8595)

---
 pytorch_lightning/accelerators/gpu.py         | 2 +-
 pytorch_lightning/loggers/base.py             | 4 ++--
 pytorch_lightning/profiler/simple.py          | 2 +-
 pytorch_lightning/utilities/debugging.py      | 2 +-
 tests/callbacks/test_stochastic_weight_avg.py | 2 +-
 tests/checkpointing/test_model_checkpoint.py  | 2 +-
 tests/trainer/test_trainer.py                 | 2 +-
 7 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/pytorch_lightning/accelerators/gpu.py b/pytorch_lightning/accelerators/gpu.py
index 2c90676f6e..e25a5fa3c4 100644
--- a/pytorch_lightning/accelerators/gpu.py
+++ b/pytorch_lightning/accelerators/gpu.py
@@ -49,7 +49,7 @@ class GPUAccelerator(Accelerator):
     def set_nvidia_flags(local_rank: int) -> None:
         # set the correct cuda visible devices (using pci order)
         os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
-        all_gpu_ids = ",".join([str(x) for x in range(torch.cuda.device_count())])
+        all_gpu_ids = ",".join(str(x) for x in range(torch.cuda.device_count()))
         devices = os.getenv("CUDA_VISIBLE_DEVICES", all_gpu_ids)
         _log.info(f"LOCAL_RANK: {local_rank} - CUDA_VISIBLE_DEVICES: [{devices}]")
 
diff --git a/pytorch_lightning/loggers/base.py b/pytorch_lightning/loggers/base.py
index fc232b579b..21fb72642d 100644
--- a/pytorch_lightning/loggers/base.py
+++ b/pytorch_lightning/loggers/base.py
@@ -417,11 +417,11 @@ class LoggerCollection(LightningLoggerBase):
 
     @property
     def name(self) -> str:
-        return "_".join([str(logger.name) for logger in self._logger_iterable])
+        return "_".join(str(logger.name) for logger in self._logger_iterable)
 
     @property
     def version(self) -> str:
-        return "_".join([str(logger.version) for logger in self._logger_iterable])
+        return "_".join(str(logger.version) for logger in self._logger_iterable)
 
 
 class DummyExperiment:
diff --git a/pytorch_lightning/profiler/simple.py b/pytorch_lightning/profiler/simple.py
index fbd2fb005a..8616f72345 100644
--- a/pytorch_lightning/profiler/simple.py
+++ b/pytorch_lightning/profiler/simple.py
@@ -88,7 +88,7 @@ class SimpleProfiler(BaseProfiler):
         if self.extended:
 
             if len(self.recorded_durations) > 0:
-                max_key = np.max([len(k) for k in self.recorded_durations.keys()])
+                max_key = max(len(k) for k in self.recorded_durations.keys())
 
                 def log_row(action, mean, num_calls, total, per):
                     row = f"{sep}{action:<{max_key}s}\t| {mean:<15}\t|"
diff --git a/pytorch_lightning/utilities/debugging.py b/pytorch_lightning/utilities/debugging.py
index edd6ef44b6..3585480d02 100644
--- a/pytorch_lightning/utilities/debugging.py
+++ b/pytorch_lightning/utilities/debugging.py
@@ -163,7 +163,7 @@ class InternalDebugger:
 
     @property
     def num_seen_sanity_check_batches(self):
-        count = len([x for x in self.saved_val_losses if x["sanity_check"]])
+        count = sum(1 for x in self.saved_val_losses if x["sanity_check"])
         return count
 
     @property
diff --git a/tests/callbacks/test_stochastic_weight_avg.py b/tests/callbacks/test_stochastic_weight_avg.py
index c33383e71a..eed7ccd635 100644
--- a/tests/callbacks/test_stochastic_weight_avg.py
+++ b/tests/callbacks/test_stochastic_weight_avg.py
@@ -210,7 +210,7 @@ def test_trainer_and_stochastic_weight_avg(tmpdir, use_callbacks: bool, stochast
     )
     trainer.fit(model)
     if use_callbacks or stochastic_weight_avg:
-        assert len([cb for cb in trainer.callbacks if isinstance(cb, StochasticWeightAveraging)]) == 1
+        assert sum(1 for cb in trainer.callbacks if isinstance(cb, StochasticWeightAveraging)) == 1
         assert trainer.callbacks[0]._swa_lrs == (1e-3 if use_callbacks else 0.1)
     else:
         assert all(not isinstance(cb, StochasticWeightAveraging) for cb in trainer.callbacks)
diff --git a/tests/checkpointing/test_model_checkpoint.py b/tests/checkpointing/test_model_checkpoint.py
index 6efb674c5a..f9a199a7eb 100644
--- a/tests/checkpointing/test_model_checkpoint.py
+++ b/tests/checkpointing/test_model_checkpoint.py
@@ -1039,7 +1039,7 @@ def test_configure_model_checkpoint(tmpdir):
 
     # default configuration
     trainer = Trainer(checkpoint_callback=True, callbacks=[], **kwargs)
-    assert len([c for c in trainer.callbacks if isinstance(c, ModelCheckpoint)]) == 1
+    assert sum(1 for c in trainer.callbacks if isinstance(c, ModelCheckpoint)) == 1
     assert isinstance(trainer.checkpoint_callback, ModelCheckpoint)
 
     # custom callback passed to callbacks list, checkpoint_callback=True is ignored
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index e0f909a7b8..4ea233c8c2 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -556,7 +556,7 @@ def test_trainer_min_steps_and_min_epochs_not_reached(tmpdir, caplog):
     trainer.fit(model)
 
     message = f"minimum epochs ({min_epochs}) or minimum steps (None) has not been met. Training will continue"
-    num_messages = len([record.message for record in caplog.records if message in record.message])
+    num_messages = sum(1 for record in caplog.records if message in record.message)
     assert num_messages == min_epochs - 2
     assert model.training_step_invoked == min_epochs * 2
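
The pattern throughout this patch is the same: when a comprehension's result is consumed exactly once by ",".join(...) or max(...), the intermediate list is never needed, and a count expressed as len([...]) can become sum(1 for ...). A minimal standalone sketch of the before/after shapes; the data below is made up for illustration and does not come from the Lightning codebase:

    # Illustrative data only; these names mirror the patch's call sites but are hypothetical.
    loggers = ["tb", "csv", "wandb"]
    recorded_durations = {"on_train_batch_end": [0.1, 0.2], "optimizer_step": [0.3]}
    saved_val_losses = [{"sanity_check": True}, {"sanity_check": False}]

    # Before: each call builds a throwaway list just to iterate over it once.
    name = "_".join([str(x) for x in loggers])
    max_key = max([len(k) for k in recorded_durations.keys()])
    count = len([x for x in saved_val_losses if x["sanity_check"]])

    # After: a generator expression feeds the consumer lazily, and counting
    # with sum(1 for ...) never materializes the matching elements at all.
    name = "_".join(str(x) for x in loggers)
    max_key = max(len(k) for k in recorded_durations.keys())
    count = sum(1 for x in saved_val_losses if x["sanity_check"])

    assert name == "tb_csv_wandb"
    assert max_key == len("on_train_batch_end")  # 18, the longest key
    assert count == 1

One caveat: in CPython, str.join materializes its argument into a sequence internally, so for the join call sites the change is mostly stylistic; the real memory savings are in max(...) and the sum(1 for ...) counts, which consume the generator in constant extra space.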