From e63968ab8867a5aae8ab9d883f186ce379199cff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mochol=C3=AD?= Date: Mon, 26 Jul 2021 14:38:12 +0200 Subject: [PATCH] Add `pyupgrade` to `pre-commit` (#8557) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/prepare-nightly_version.py | 2 +- .github/prune-packages.py | 2 +- .pre-commit-config.yaml | 7 +++++++ docs/source/conf.py | 5 ++--- pl_examples/basic_examples/dali_image_classifier.py | 4 ++-- pl_examples/domain_templates/reinforce_learn_Qnet.py | 4 ++-- pytorch_lightning/callbacks/pruning.py | 6 +++++- pytorch_lightning/core/memory.py | 4 ++-- pytorch_lightning/core/mixins/hparams_mixin.py | 2 +- pytorch_lightning/core/saving.py | 2 +- pytorch_lightning/loggers/base.py | 5 ++--- pytorch_lightning/loggers/csv_logs.py | 5 ++--- pytorch_lightning/loggers/wandb.py | 2 +- pytorch_lightning/overrides/torch_distributed.py | 2 +- .../plugins/precision/precision_plugin.py | 3 +-- pytorch_lightning/plugins/training_type/horovod.py | 2 +- pytorch_lightning/setup_tools.py | 2 +- .../trainer/connectors/accelerator_connector.py | 2 +- pytorch_lightning/trainer/supporters.py | 12 ++++++------ pytorch_lightning/trainer/trainer.py | 2 +- pytorch_lightning/tuner/lr_finder.py | 6 +++--- pytorch_lightning/utilities/debugging.py | 2 +- pytorch_lightning/utilities/parsing.py | 2 +- requirements/adjust_versions.py | 2 +- tests/callbacks/test_early_stopping.py | 2 +- tests/callbacks/test_lr_monitor.py | 2 +- tests/callbacks/test_pruning.py | 2 +- tests/checkpointing/test_model_checkpoint.py | 4 ++-- tests/conftest.py | 2 +- tests/core/test_lightning_module.py | 6 ++---- tests/core/test_lightning_optimizer.py | 2 +- tests/loggers/test_csv.py | 2 +- tests/models/test_gpu.py | 2 +- tests/models/test_horovod.py | 6 +++--- tests/profiler/test_profiler.py | 4 ++-- tests/trainer/logging_/test_logger_connector.py | 2 +- tests/trainer/test_supporters.py | 8 ++++---- tests/trainer/test_trainer.py | 4 ++-- 38 files changed, 70 insertions(+), 65 deletions(-) diff --git a/.github/prepare-nightly_version.py b/.github/prepare-nightly_version.py index 06e7c9cdbf..3b8eb41397 100644 --- a/.github/prepare-nightly_version.py +++ b/.github/prepare-nightly_version.py @@ -11,7 +11,7 @@ now = datetime.datetime.now() now_date = now.strftime("%Y%m%d") print(f"prepare init '{_PATH_INFO}' - replace version by {now_date}") -with open(_PATH_INFO, "r") as fp: +with open(_PATH_INFO) as fp: init = fp.read() init = re.sub(r'__version__ = [\d\.\w\'"]+', f'__version__ = "{now_date}"', init) with open(_PATH_INFO, "w") as fp: diff --git a/.github/prune-packages.py b/.github/prune-packages.py index 25499cf4e5..fb0e6018d3 100644 --- a/.github/prune-packages.py +++ b/.github/prune-packages.py @@ -3,7 +3,7 @@ from pprint import pprint def main(req_file: str, *pkgs): - with open(req_file, "r") as fp: + with open(req_file) as fp: lines = fp.readlines() for pkg in pkgs: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dc43d591a7..682fac3570 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -45,6 +45,13 @@ repos: )$ - id: detect-private-key + - repo: https://github.com/asottile/pyupgrade + rev: v2.23.0 + hooks: + - id: pyupgrade + args: [--py36-plus] + name: Upgrade code + - repo: https://github.com/PyCQA/isort rev: 5.9.2 hooks: diff --git a/docs/source/conf.py b/docs/source/conf.py index 0e2679d2ad..7e2d938a0a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # 
Configuration file for the Sphinx documentation builder. # @@ -48,7 +47,7 @@ HelperCLI.copy_notebooks(PATH_RAW_NB, PATH_IPYNB) def _transform_changelog(path_in: str, path_out: str) -> None: - with open(path_in, "r") as fp: + with open(path_in) as fp: chlog_lines = fp.readlines() # enrich short subsub-titles to be unique chlog_ver = "" @@ -291,7 +290,7 @@ def setup(app): def package_list_from_file(file): """List up package name (not containing version and extras) from a package list file""" mocked_packages = [] - with open(file, "r") as fp: + with open(file) as fp: for ln in fp.readlines(): # Example: `tqdm>=4.41.0` => `tqdm` # `[` is for package with extras diff --git a/pl_examples/basic_examples/dali_image_classifier.py b/pl_examples/basic_examples/dali_image_classifier.py index 597678f82a..9a0b6e002f 100644 --- a/pl_examples/basic_examples/dali_image_classifier.py +++ b/pl_examples/basic_examples/dali_image_classifier.py @@ -44,7 +44,7 @@ else: ops, Pipeline, DALIClassificationIterator, LastBatchPolicy = ..., ABC, ABC, ABC -class ExternalMNISTInputIterator(object): +class ExternalMNISTInputIterator: """ This iterator class wraps torchvision's MNIST dataset and returns the images and labels in batches """ @@ -78,7 +78,7 @@ class ExternalSourcePipeline(Pipeline): """ def __init__(self, batch_size, eii, num_threads, device_id): - super(ExternalSourcePipeline, self).__init__(batch_size, num_threads, device_id, seed=12) + super().__init__(batch_size, num_threads, device_id, seed=12) self.source = ops.ExternalSource(source=eii, num_outputs=2) self.build() diff --git a/pl_examples/domain_templates/reinforce_learn_Qnet.py b/pl_examples/domain_templates/reinforce_learn_Qnet.py index b1e172a1c0..06ddb646e1 100644 --- a/pl_examples/domain_templates/reinforce_learn_Qnet.py +++ b/pl_examples/domain_templates/reinforce_learn_Qnet.py @@ -66,7 +66,7 @@ class DQN(nn.Module): n_actions: number of discrete actions available in the environment hidden_size: size of hidden layers """ - super(DQN, self).__init__() + super().__init__() self.net = nn.Sequential(nn.Linear(obs_size, hidden_size), nn.ReLU(), nn.Linear(hidden_size, n_actions)) def forward(self, x): @@ -106,7 +106,7 @@ class ReplayBuffer: def sample(self, batch_size: int) -> Tuple: indices = np.random.choice(len(self.buffer), batch_size, replace=False) - states, actions, rewards, dones, next_states = zip(*[self.buffer[idx] for idx in indices]) + states, actions, rewards, dones, next_states = zip(*(self.buffer[idx] for idx in indices)) return ( np.array(states), diff --git a/pytorch_lightning/callbacks/pruning.py b/pytorch_lightning/callbacks/pruning.py index 3a74b6e6f5..e69c9e1765 100644 --- a/pytorch_lightning/callbacks/pruning.py +++ b/pytorch_lightning/callbacks/pruning.py @@ -52,7 +52,11 @@ _PYTORCH_PRUNING_METHOD = { _PARAM_TUPLE = Tuple[nn.Module, str] _PARAM_LIST = Sequence[_PARAM_TUPLE] _MODULE_CONTAINERS = (LightningModule, nn.Sequential, nn.ModuleList, nn.ModuleDict) -_LayerRef = TypedDict("_LayerRef", {"data": nn.Module, "names": List[Tuple[int, str]]}) + + +class _LayerRef(TypedDict): + data: nn.Module + names: List[Tuple[int, str]] class ModelPruning(Callback): diff --git a/pytorch_lightning/core/memory.py b/pytorch_lightning/core/memory.py index 76dec2013f..fcc2f54ed6 100644 --- a/pytorch_lightning/core/memory.py +++ b/pytorch_lightning/core/memory.py @@ -34,7 +34,7 @@ PARAMETER_NUM_UNITS = [" ", "K", "M", "B", "T"] UNKNOWN_SIZE = "?" 
-class LayerSummary(object): +class LayerSummary: """ Summary class for a single layer in a :class:`~pytorch_lightning.core.lightning.LightningModule`. It collects the following information: @@ -126,7 +126,7 @@ class LayerSummary(object): return sum(np.prod(p.shape) if not _is_lazy_weight_tensor(p) else 0 for p in self._module.parameters()) -class ModelSummary(object): +class ModelSummary: """ Generates a summary of all layers in a :class:`~pytorch_lightning.core.lightning.LightningModule`. diff --git a/pytorch_lightning/core/mixins/hparams_mixin.py b/pytorch_lightning/core/mixins/hparams_mixin.py index 814a3a88ca..029ecc173b 100644 --- a/pytorch_lightning/core/mixins/hparams_mixin.py +++ b/pytorch_lightning/core/mixins/hparams_mixin.py @@ -22,7 +22,7 @@ from pytorch_lightning.utilities import AttributeDict from pytorch_lightning.utilities.parsing import save_hyperparameters -class HyperparametersMixin(object): +class HyperparametersMixin: __jit_unused_properties__ = ["hparams", "hparams_initial"] diff --git a/pytorch_lightning/core/saving.py b/pytorch_lightning/core/saving.py index 568ccdf633..7501a027fd 100644 --- a/pytorch_lightning/core/saving.py +++ b/pytorch_lightning/core/saving.py @@ -44,7 +44,7 @@ if _OMEGACONF_AVAILABLE: CHECKPOINT_PAST_HPARAMS_KEYS = ("hparams", "module_arguments") # used in 0.7.6 -class ModelIO(object): +class ModelIO: CHECKPOINT_HYPER_PARAMS_KEY = "hyper_parameters" CHECKPOINT_HYPER_PARAMS_NAME = "hparams_name" CHECKPOINT_HYPER_PARAMS_TYPE = "hparams_type" diff --git a/pytorch_lightning/loggers/base.py b/pytorch_lightning/loggers/base.py index e1af18a8fb..fc232b579b 100644 --- a/pytorch_lightning/loggers/base.py +++ b/pytorch_lightning/loggers/base.py @@ -249,8 +249,7 @@ class LightningLoggerBase(ABC): key = str(key) if isinstance(value, (MutableMapping, Namespace)): value = vars(value) if isinstance(value, Namespace) else value - for d in _dict_generator(value, prefixes + [key]): - yield d + yield from _dict_generator(value, prefixes + [key]) else: yield prefixes + [key, value if value is not None else str(None)] else: @@ -425,7 +424,7 @@ class LoggerCollection(LightningLoggerBase): return "_".join([str(logger.version) for logger in self._logger_iterable]) -class DummyExperiment(object): +class DummyExperiment: """Dummy experiment""" def nop(self, *args, **kw): diff --git a/pytorch_lightning/loggers/csv_logs.py b/pytorch_lightning/loggers/csv_logs.py index eeb1650d9a..f0cef7acd6 100644 --- a/pytorch_lightning/loggers/csv_logs.py +++ b/pytorch_lightning/loggers/csv_logs.py @@ -19,7 +19,6 @@ CSV logger for basic experiment logging that does not require opening ports """ import csv -import io import logging import os from argparse import Namespace @@ -35,7 +34,7 @@ from pytorch_lightning.utilities.distributed import rank_zero_only log = logging.getLogger(__name__) -class ExperimentWriter(object): +class ExperimentWriter: r""" Experiment writer for CSVLogger. 
@@ -95,7 +94,7 @@ class ExperimentWriter(object): last_m.update(m) metrics_keys = list(last_m.keys()) - with io.open(self.metrics_file_path, "w", newline="") as f: + with open(self.metrics_file_path, "w", newline="") as f: self.writer = csv.DictWriter(f, fieldnames=metrics_keys) self.writer.writeheader() self.writer.writerows(self.metrics) diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py index 9c22be4ea3..6b1c178003 100644 --- a/pytorch_lightning/loggers/wandb.py +++ b/pytorch_lightning/loggers/wandb.py @@ -253,7 +253,7 @@ class WandbLogger(LightningLoggerBase): checkpoint_callback.best_model_path: checkpoint_callback.best_model_score, **checkpoint_callback.best_k_models, } - checkpoints = sorted([(Path(p).stat().st_mtime, p, s) for p, s in checkpoints.items() if Path(p).is_file()]) + checkpoints = sorted((Path(p).stat().st_mtime, p, s) for p, s in checkpoints.items() if Path(p).is_file()) checkpoints = [ c for c in checkpoints if c[1] not in self._logged_model_time.keys() or self._logged_model_time[c[1]] < c[0] ] diff --git a/pytorch_lightning/overrides/torch_distributed.py b/pytorch_lightning/overrides/torch_distributed.py index 8562d89a8b..3a1ceca7d9 100644 --- a/pytorch_lightning/overrides/torch_distributed.py +++ b/pytorch_lightning/overrides/torch_distributed.py @@ -48,7 +48,7 @@ def _broadcast_object_list(object_list, src=0, group=None): my_rank = get_rank() # Serialize object_list elements to tensors on src rank. if my_rank == src: - tensor_list, size_list = zip(*[_object_to_tensor(obj) for obj in object_list]) + tensor_list, size_list = zip(*(_object_to_tensor(obj) for obj in object_list)) object_sizes_tensor = torch.cat(size_list) else: object_sizes_tensor = torch.LongTensor(len(object_list)) diff --git a/pytorch_lightning/plugins/precision/precision_plugin.py b/pytorch_lightning/plugins/precision/precision_plugin.py index 19b42dbd57..1261fea87c 100644 --- a/pytorch_lightning/plugins/precision/precision_plugin.py +++ b/pytorch_lightning/plugins/precision/precision_plugin.py @@ -40,8 +40,7 @@ class PrecisionPlugin(Plugin, CheckpointHooks): Maybe different in other precision plugins. """ for group in optimizer.param_groups: - for p in group["params"]: - yield p + yield from group["params"] def connect( self, model: Module, optimizers: List[Optimizer], lr_schedulers: List[Any] diff --git a/pytorch_lightning/plugins/training_type/horovod.py b/pytorch_lightning/plugins/training_type/horovod.py index d5ecabf51b..b61c7caa6a 100644 --- a/pytorch_lightning/plugins/training_type/horovod.py +++ b/pytorch_lightning/plugins/training_type/horovod.py @@ -200,5 +200,5 @@ class HorovodPlugin(ParallelPlugin): @staticmethod def _filter_named_parameters(model: nn.Module, optimizer: Optimizer) -> List[Tuple[str, nn.Parameter]]: - opt_params = set(p for group in optimizer.param_groups for p in group.get("params", [])) + opt_params = {p for group in optimizer.param_groups for p in group.get("params", [])} return [(name, p) for name, p in model.named_parameters() if p in opt_params] diff --git a/pytorch_lightning/setup_tools.py b/pytorch_lightning/setup_tools.py index b276c66a5d..6b1a9d3bca 100644 --- a/pytorch_lightning/setup_tools.py +++ b/pytorch_lightning/setup_tools.py @@ -25,7 +25,7 @@ def _load_requirements(path_dir: str, file_name: str = "requirements.txt", comme >>> _load_requirements(_PROJECT_ROOT) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE ['numpy...', 'torch...', ...] 
""" - with open(os.path.join(path_dir, file_name), "r") as file: + with open(os.path.join(path_dir, file_name)) as file: lines = [ln.strip() for ln in file.readlines()] reqs = [] for ln in lines: diff --git a/pytorch_lightning/trainer/connectors/accelerator_connector.py b/pytorch_lightning/trainer/connectors/accelerator_connector.py index 7c54128a84..6b6c42888d 100644 --- a/pytorch_lightning/trainer/connectors/accelerator_connector.py +++ b/pytorch_lightning/trainer/connectors/accelerator_connector.py @@ -80,7 +80,7 @@ if _HOROVOD_AVAILABLE: log = logging.getLogger(__name__) -class AcceleratorConnector(object): +class AcceleratorConnector: def __init__( self, num_processes, diff --git a/pytorch_lightning/trainer/supporters.py b/pytorch_lightning/trainer/supporters.py index 5e41f1a14c..058202fea4 100644 --- a/pytorch_lightning/trainer/supporters.py +++ b/pytorch_lightning/trainer/supporters.py @@ -35,7 +35,7 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException from pytorch_lightning.utilities.imports import _fault_tolerant_enabled -class TensorRunningAccum(object): +class TensorRunningAccum: """Tracks a running accumulation values (min, max, mean) without graph references. @@ -112,7 +112,7 @@ class TensorRunningAccum(object): return getattr(self.memory[: self.current_idx], how)() -class PredictionCollection(object): +class PredictionCollection: def __init__(self, global_rank: int, world_size: int): self.global_rank = global_rank self.world_size = world_size @@ -170,7 +170,7 @@ class PredictionCollection(object): torch.save(outputs, fp) -class CycleIterator(object): +class CycleIterator: """ Iterator for restarting a dataloader if it runs out of samples """ @@ -230,7 +230,7 @@ class CycleIterator(object): return self.length -class CombinedDataset(object): +class CombinedDataset: """ Combine multiple datasets and compute their statistics """ @@ -323,7 +323,7 @@ class DataLoaderDict(Dict): pass -class CombinedLoader(object): +class CombinedLoader: """ Combines different dataloaders and allows sampling in parallel. Supported modes are 'min_size', which raises StopIteration after the shortest loader @@ -510,7 +510,7 @@ class CombinedLoader(object): return self._calc_num_batches(self.loaders) -class CombinedLoaderIterator(object): +class CombinedLoaderIterator: """ Custom Iterator returning data from multple loaders, and allows sampling in parallel """ diff --git a/pytorch_lightning/trainer/trainer.py b/pytorch_lightning/trainer/trainer.py index 5640318d75..58894a5571 100644 --- a/pytorch_lightning/trainer/trainer.py +++ b/pytorch_lightning/trainer/trainer.py @@ -875,7 +875,7 @@ class Trainer( # ---------------------------- # INSPECT THE CORE LOOPS # ---------------------------- - f""" + fr""" Lightning internal flow looks like this: {Trainer.fit} or {Trainer.test} or {Trainer.predict} || | || diff --git a/pytorch_lightning/tuner/lr_finder.py b/pytorch_lightning/tuner/lr_finder.py index 232d061944..46b44a7a26 100644 --- a/pytorch_lightning/tuner/lr_finder.py +++ b/pytorch_lightning/tuner/lr_finder.py @@ -60,7 +60,7 @@ def _determine_lr_attr_name(trainer: "pl.Trainer", model: "pl.LightningModule") ) -class _LRFinder(object): +class _LRFinder: """LR finder object. This object stores the results of lr_find(). 
Args: @@ -396,7 +396,7 @@ class _LinearLR(_LRScheduler): def __init__(self, optimizer: torch.optim.Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1): self.end_lr = end_lr self.num_iter = num_iter - super(_LinearLR, self).__init__(optimizer, last_epoch) + super().__init__(optimizer, last_epoch) def get_lr(self): curr_iter = self.last_epoch + 1 @@ -435,7 +435,7 @@ class _ExponentialLR(_LRScheduler): def __init__(self, optimizer: torch.optim.Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1): self.end_lr = end_lr self.num_iter = num_iter - super(_ExponentialLR, self).__init__(optimizer, last_epoch) + super().__init__(optimizer, last_epoch) def get_lr(self): curr_iter = self.last_epoch + 1 diff --git a/pytorch_lightning/utilities/debugging.py b/pytorch_lightning/utilities/debugging.py index 87f5b08620..edd6ef44b6 100644 --- a/pytorch_lightning/utilities/debugging.py +++ b/pytorch_lightning/utilities/debugging.py @@ -34,7 +34,7 @@ def enabled_only(fn: Callable): return wrapped_fn -class InternalDebugger(object): +class InternalDebugger: def __init__(self, trainer): self.enabled = os.environ.get("PL_DEV_DEBUG", "0") == "1" self.trainer = trainer diff --git a/pytorch_lightning/utilities/parsing.py b/pytorch_lightning/utilities/parsing.py index 889bf65237..03a748b050 100644 --- a/pytorch_lightning/utilities/parsing.py +++ b/pytorch_lightning/utilities/parsing.py @@ -280,7 +280,7 @@ class AttributeDict(Dict): def __repr__(self) -> str: if not len(self): return "" - max_key_length = max([len(str(k)) for k in self]) + max_key_length = max(len(str(k)) for k in self) tmp_name = "{:" + str(max_key_length + 3) + "s} {}" rows = [tmp_name.format(f'"{n}":', self[n]) for n in sorted(self.keys())] out = "\n".join(rows) diff --git a/requirements/adjust_versions.py b/requirements/adjust_versions.py index 523d5440c0..4d0bbb5c00 100644 --- a/requirements/adjust_versions.py +++ b/requirements/adjust_versions.py @@ -37,7 +37,7 @@ def main(path_req: str, torch_version: Optional[str] = None) -> None: torch_version = torch.__version__ assert torch_version, f"invalid torch: {torch_version}" - with open(path_req, "r") as fp: + with open(path_req) as fp: req = fp.read() # remove comments req = re.sub(rf"\s*#.*{os.linesep}", os.linesep, req) diff --git a/tests/callbacks/test_early_stopping.py b/tests/callbacks/test_early_stopping.py index 460d5d0731..cec2d78b92 100644 --- a/tests/callbacks/test_early_stopping.py +++ b/tests/callbacks/test_early_stopping.py @@ -275,7 +275,7 @@ def test_min_steps_override_early_stopping_functionality(tmpdir, step_freeze: in class Model(BoringModel): def __init__(self, step_freeze): - super(Model, self).__init__() + super().__init__() self._step_freeze = step_freeze diff --git a/tests/callbacks/test_lr_monitor.py b/tests/callbacks/test_lr_monitor.py index 5811e4ff4a..d742781599 100644 --- a/tests/callbacks/test_lr_monitor.py +++ b/tests/callbacks/test_lr_monitor.py @@ -351,7 +351,7 @@ def test_multiple_optimizers_basefinetuning(tmpdir): class Check(Callback): def on_train_epoch_start(self, trainer, pl_module) -> None: - num_param_groups = sum([len(opt.param_groups) for opt in trainer.optimizers]) + num_param_groups = sum(len(opt.param_groups) for opt in trainer.optimizers) assert lr_monitor.lr_sch_names == ["lr-Adam", "lr-Adam-1"] if trainer.current_epoch == 0: assert num_param_groups == 3 diff --git a/tests/callbacks/test_pruning.py b/tests/callbacks/test_pruning.py index 9014a9865c..110e52a78c 100644 --- a/tests/callbacks/test_pruning.py +++ 
b/tests/callbacks/test_pruning.py @@ -56,7 +56,7 @@ class TestPruningMethod(pytorch_prune.BasePruningMethod): @classmethod def apply(cls, module, name, amount): - return super(TestPruningMethod, cls).apply(module, name, amount=amount) + return super().apply(module, name, amount=amount) def train_with_pruning_callback( diff --git a/tests/checkpointing/test_model_checkpoint.py b/tests/checkpointing/test_model_checkpoint.py index 2b0b636d4c..6efb674c5a 100644 --- a/tests/checkpointing/test_model_checkpoint.py +++ b/tests/checkpointing/test_model_checkpoint.py @@ -312,7 +312,7 @@ def test_model_checkpoint_to_yaml(tmpdir, save_top_k: int): path_yaml = os.path.join(tmpdir, "best_k_models.yaml") checkpoint.to_yaml(path_yaml) - d = yaml.full_load(open(path_yaml, "r")) + d = yaml.full_load(open(path_yaml)) best_k = dict(checkpoint.best_k_models.items()) assert d == best_k @@ -1206,7 +1206,7 @@ def test_ckpt_version_after_rerun_same_trainer(tmpdir): trainer.fit(BoringModel()) ckpt_range = range(mc.STARTING_VERSION, trainer.max_epochs + mc.STARTING_VERSION) - expected = {"test.ckpt", *[f"test-v{i}.ckpt" for i in ckpt_range]} + expected = {"test.ckpt", *(f"test-v{i}.ckpt" for i in ckpt_range)} # check best_k_models state assert {Path(f).name for f in mc.best_k_models} == expected # check created ckpts diff --git a/tests/conftest.py b/tests/conftest.py index a54d766608..c8a2cbd66a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -68,7 +68,7 @@ def pytest_pyfunc_call(pyfuncitem): if pyfuncitem.get_closest_marker("spawn"): testfunction = pyfuncitem.obj funcargs = pyfuncitem.funcargs - testargs = tuple([funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames]) + testargs = tuple(funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames) mp.spawn(wraps, (testfunction, testargs)) return True diff --git a/tests/core/test_lightning_module.py b/tests/core/test_lightning_module.py index 33054c0585..e44619aca2 100644 --- a/tests/core/test_lightning_module.py +++ b/tests/core/test_lightning_module.py @@ -263,10 +263,8 @@ def test_toggle_untoggle_3_optimizers_shared_parameters(tmpdir): @staticmethod def combine_generators(gen_1, gen_2): - for p in gen_1: - yield p - for p in gen_2: - yield p + yield from gen_1 + yield from gen_2 def configure_optimizers(self): optimizer_1 = SGD(self.combine_generators(self.layer_1.parameters(), self.layer_2.parameters()), lr=0.1) diff --git a/tests/core/test_lightning_optimizer.py b/tests/core/test_lightning_optimizer.py index 5461d33d76..f8c3317b6c 100644 --- a/tests/core/test_lightning_optimizer.py +++ b/tests/core/test_lightning_optimizer.py @@ -302,7 +302,7 @@ class OptimizerWithHooks(Optimizer): d = {"params": params, "mod": mod, "layer_type": mod_class} self.params.append(d) - super(OptimizerWithHooks, self).__init__(self.params, {"lr": 0.01}) + super().__init__(self.params, {"lr": 0.01}) def _save_input(self, mod, i): """Saves input of layer""" diff --git a/tests/loggers/test_csv.py b/tests/loggers/test_csv.py index 624ce6e2e0..f28377471d 100644 --- a/tests/loggers/test_csv.py +++ b/tests/loggers/test_csv.py @@ -79,7 +79,7 @@ def test_file_logger_log_metrics(tmpdir, step_idx): logger.save() path_csv = os.path.join(logger.log_dir, ExperimentWriter.NAME_METRICS_FILE) - with open(path_csv, "r") as fp: + with open(path_csv) as fp: lines = fp.readlines() assert len(lines) == 2 assert all(n in lines[0] for n in metrics) diff --git a/tests/models/test_gpu.py b/tests/models/test_gpu.py index dcc6159bc2..b954f28023 100644 --- a/tests/models/test_gpu.py +++ 
b/tests/models/test_gpu.py @@ -365,7 +365,7 @@ def test_non_blocking(): batch = trainer.accelerator.batch_to_device(batch, torch.device("cuda:0")) mocked.assert_called_with(torch.device("cuda", 0), non_blocking=True) - class BatchObject(object): + class BatchObject: def to(self, *args, **kwargs): pass diff --git a/tests/models/test_horovod.py b/tests/models/test_horovod.py index 2a0cd5034d..909a321df1 100644 --- a/tests/models/test_horovod.py +++ b/tests/models/test_horovod.py @@ -221,11 +221,11 @@ def test_horovod_transfer_batch_to_gpu(tmpdir): class TestTrainingStepModel(BoringModel): def training_step(self, batch, *args, **kwargs): assert str(batch.device) != "cpu" - return super(TestTrainingStepModel, self).training_step(batch, *args, **kwargs) + return super().training_step(batch, *args, **kwargs) def validation_step(self, batch, *args, **kwargs): assert str(batch.device) != "cpu" - return super(TestTrainingStepModel, self).validation_step(batch, *args, **kwargs) + return super().validation_step(batch, *args, **kwargs) model = TestTrainingStepModel() @@ -269,7 +269,7 @@ def test_horovod_multi_optimizer(tmpdir): return set(list(model.parameters())) def get_optimizer_params(optimizer): - return set([p for group in optimizer.param_groups for p in group.get("params", [])]) + return {p for group in optimizer.param_groups for p in group.get("params", [])} assert get_model_params(model.generator) != get_model_params(model.discriminator) assert get_model_params(model.generator) == get_optimizer_params(trainer.optimizers[0]) diff --git a/tests/profiler/test_profiler.py b/tests/profiler/test_profiler.py index 6c38ae704e..dd1c5af904 100644 --- a/tests/profiler/test_profiler.py +++ b/tests/profiler/test_profiler.py @@ -36,7 +36,7 @@ PROFILER_OVERHEAD_MAX_TOLERANCE = 0.0005 def _get_python_cprofile_total_duration(profile): - return sum([x.inlinetime for x in profile.getstats()]) + return sum(x.inlinetime for x in profile.getstats()) def _sleep_generator(durations): @@ -322,7 +322,7 @@ def test_pytorch_profiler_trainer_test(tmpdir): assert path.read_text("utf-8") if _KINETO_AVAILABLE: - files = sorted([file for file in os.listdir(tmpdir) if file.endswith(".json")]) + files = sorted(file for file in os.listdir(tmpdir) if file.endswith(".json")) assert any(f"test-{pytorch_profiler.filename}" in f for f in files) path = pytorch_profiler.dirpath / f"test-{pytorch_profiler.filename}.txt" assert path.read_text("utf-8") diff --git a/tests/trainer/logging_/test_logger_connector.py b/tests/trainer/logging_/test_logger_connector.py index 382cc4d112..f07344397b 100644 --- a/tests/trainer/logging_/test_logger_connector.py +++ b/tests/trainer/logging_/test_logger_connector.py @@ -29,7 +29,7 @@ from tests.helpers.runif import RunIf def test_fx_validator(tmpdir): - funcs_name = sorted([f for f in dir(Callback) if not f.startswith("_")]) + funcs_name = sorted(f for f in dir(Callback) if not f.startswith("_")) callbacks_func = [ "on_before_backward", diff --git a/tests/trainer/test_supporters.py b/tests/trainer/test_supporters.py index 6d24368b60..a4bb622477 100644 --- a/tests/trainer/test_supporters.py +++ b/tests/trainer/test_supporters.py @@ -169,7 +169,7 @@ def test_combined_loader_dict_min_size(): combined_loader = CombinedLoader(loaders, "min_size") - assert len(combined_loader) == min([len(v) for v in loaders.values()]) + assert len(combined_loader) == min(len(v) for v in loaders.values()) for idx, item in enumerate(combined_loader): assert isinstance(item, dict) @@ -188,7 +188,7 @@ def 
test_combined_loader_dict_max_size_cycle(): combined_loader = CombinedLoader(loaders, "max_size_cycle") - assert len(combined_loader) == max([len(v) for v in loaders.values()]) + assert len(combined_loader) == max(len(v) for v in loaders.values()) for idx, item in enumerate(combined_loader): assert isinstance(item, dict) @@ -207,7 +207,7 @@ def test_combined_loader_sequence_min_size(): combined_loader = CombinedLoader(loaders, "min_size") - assert len(combined_loader) == min([len(v) for v in loaders]) + assert len(combined_loader) == min(len(v) for v in loaders) for idx, item in enumerate(combined_loader): assert isinstance(item, Sequence) @@ -225,7 +225,7 @@ def test_combined_loader_sequence_max_size_cycle(): combined_loader = CombinedLoader(loaders, "max_size_cycle") - assert len(combined_loader) == max([len(v) for v in loaders]) + assert len(combined_loader) == max(len(v) for v in loaders) for idx, item in enumerate(combined_loader): assert isinstance(item, Sequence) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 9b9eaa2a74..e0f909a7b8 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -879,7 +879,7 @@ def test_gradient_clipping(tmpdir): ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens) parameters = model.parameters() grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2) - assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm) + assert (grad_norm - 1.0).abs() < 0.01, f"Gradient norm != 1.0: {grad_norm}" return ret_val @@ -952,7 +952,7 @@ def test_gradient_clipping_fp16(tmpdir): ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens) parameters = model.parameters() grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2) - assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm) + assert (grad_norm - 1.0).abs() < 0.01, f"Gradient norm != 1.0: {grad_norm}" return ret_val
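
The patch above is essentially the output of running `pyupgrade --py36-plus` over the code base, plus the `.pre-commit-config.yaml` entry that registers the hook, so every hunk is one of a handful of mechanical Python 3.6+ rewrites. As a quick reference, here is a minimal, self-contained sketch of those idioms; the names (`LayerRef`, `Accumulator`, `read_requirements`, `describe`) are illustrative only and are not taken from the patch.

from typing import List, Tuple, TypedDict  # TypedDict is in the stdlib on Python 3.8+


# Before: _LayerRef = TypedDict("_LayerRef", {"data": str, "names": List[Tuple[int, str]]})
class LayerRef(TypedDict):  # class-based TypedDict syntax, as in callbacks/pruning.py
    data: str
    names: List[Tuple[int, str]]


# Before: class Accumulator(object):
class Accumulator:  # the explicit `object` base is redundant on Python 3
    def __init__(self, values) -> None:
        # Before: super(Accumulator, self).__init__()
        super().__init__()  # zero-argument super()
        self.values = list(values)

    def __iter__(self):
        # Before: for v in self.values: yield v
        yield from self.values

    def total(self) -> float:
        # Before: sum([abs(v) for v in self.values])
        return sum(abs(v) for v in self.values)  # generator expression, no temporary list

    def unique(self) -> set:
        # Before: set(v for v in self.values)
        return {v for v in self.values}  # set comprehension


def read_requirements(path: str) -> List[str]:
    # Before: with open(path, "r") as fp:   (or io.open(path, ...))
    with open(path) as fp:  # "r" is the default mode; io.open is an alias of open on Python 3
        return [ln.strip() for ln in fp.readlines()]


def describe(norm: float) -> str:
    # Before: "Gradient norm != 1.0: {norm}".format(norm=norm)
    return f"Gradient norm != 1.0: {norm}"  # f-string

Running `pre-commit run pyupgrade --all-files` (with `pre-commit` installed) should reproduce these rewrites locally, since the hook is pinned to `v2.23.0` with `args: [--py36-plus]` in the configuration added by this patch.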