Avoid unnecessary list creation (#8595)

Santiago Castro 2021-07-28 01:06:45 -07:00 committed by GitHub
parent df16713745
commit b256d6acd3
7 changed files with 8 additions and 8 deletions


@@ -49,7 +49,7 @@ class GPUAccelerator(Accelerator):
     def set_nvidia_flags(local_rank: int) -> None:
         # set the correct cuda visible devices (using pci order)
         os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
-        all_gpu_ids = ",".join([str(x) for x in range(torch.cuda.device_count())])
+        all_gpu_ids = ",".join(str(x) for x in range(torch.cuda.device_count()))
         devices = os.getenv("CUDA_VISIBLE_DEVICES", all_gpu_ids)
         _log.info(f"LOCAL_RANK: {local_rank} - CUDA_VISIBLE_DEVICES: [{devices}]")


@@ -417,11 +417,11 @@ class LoggerCollection(LightningLoggerBase):
     @property
     def name(self) -> str:
-        return "_".join([str(logger.name) for logger in self._logger_iterable])
+        return "_".join(str(logger.name) for logger in self._logger_iterable)

     @property
     def version(self) -> str:
-        return "_".join([str(logger.version) for logger in self._logger_iterable])
+        return "_".join(str(logger.version) for logger in self._logger_iterable)


class DummyExperiment:


@@ -88,7 +88,7 @@ class SimpleProfiler(BaseProfiler):
         if self.extended:
             if len(self.recorded_durations) > 0:
-                max_key = np.max([len(k) for k in self.recorded_durations.keys()])
+                max_key = max(len(k) for k in self.recorded_durations.keys())

                 def log_row(action, mean, num_calls, total, per):
                     row = f"{sep}{action:<{max_key}s}\t| {mean:<15}\t|"


@@ -163,7 +163,7 @@ class InternalDebugger:
     @property
     def num_seen_sanity_check_batches(self):
-        count = len([x for x in self.saved_val_losses if x["sanity_check"]])
+        count = sum(1 for x in self.saved_val_losses if x["sanity_check"])
         return count

     @property
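The len([... if ...]) pattern builds a throwaway list only to count its elements; sum(1 for ... if ...) counts the matches in constant extra memory. The same rewrite repeats in the three test hunks below. A minimal sketch, assuming hypothetical loss records:

    saved_val_losses = [{"sanity_check": True}, {"sanity_check": False}, {"sanity_check": True}]
    count = sum(1 for x in saved_val_losses if x["sanity_check"])  # counts matches without an intermediate list
    assert count == 2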


@@ -210,7 +210,7 @@ def test_trainer_and_stochastic_weight_avg(tmpdir, use_callbacks: bool, stochastic_weight_avg: bool):
     )
     trainer.fit(model)
     if use_callbacks or stochastic_weight_avg:
-        assert len([cb for cb in trainer.callbacks if isinstance(cb, StochasticWeightAveraging)]) == 1
+        assert sum(1 for cb in trainer.callbacks if isinstance(cb, StochasticWeightAveraging)) == 1
         assert trainer.callbacks[0]._swa_lrs == (1e-3 if use_callbacks else 0.1)
     else:
         assert all(not isinstance(cb, StochasticWeightAveraging) for cb in trainer.callbacks)


@@ -1039,7 +1039,7 @@ def test_configure_model_checkpoint(tmpdir):
     # default configuration
     trainer = Trainer(checkpoint_callback=True, callbacks=[], **kwargs)
-    assert len([c for c in trainer.callbacks if isinstance(c, ModelCheckpoint)]) == 1
+    assert sum(1 for c in trainer.callbacks if isinstance(c, ModelCheckpoint)) == 1
     assert isinstance(trainer.checkpoint_callback, ModelCheckpoint)

     # custom callback passed to callbacks list, checkpoint_callback=True is ignored


@@ -556,7 +556,7 @@ def test_trainer_min_steps_and_min_epochs_not_reached(tmpdir, caplog):
     trainer.fit(model)
     message = f"minimum epochs ({min_epochs}) or minimum steps (None) has not been met. Training will continue"
-    num_messages = len([record.message for record in caplog.records if message in record.message])
+    num_messages = sum(1 for record in caplog.records if message in record.message)
     assert num_messages == min_epochs - 2
     assert model.training_step_invoked == min_epochs * 2