Add `pyupgrade` to `pre-commit` (#8557)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
parent a64cc37394
commit e63968ab88
@@ -11,7 +11,7 @@ now = datetime.datetime.now()
 now_date = now.strftime("%Y%m%d")
 
 print(f"prepare init '{_PATH_INFO}' - replace version by {now_date}")
-with open(_PATH_INFO, "r") as fp:
+with open(_PATH_INFO) as fp:
     init = fp.read()
 init = re.sub(r'__version__ = [\d\.\w\'"]+', f'__version__ = "{now_date}"', init)
 with open(_PATH_INFO, "w") as fp:

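The recurring rewrite in this commit is pyupgrade's redundant-open-mode rule: `open(..., "r")` becomes `open(...)`, since read-text is already the default mode. A minimal, self-contained sketch of the equivalence (the file contents are illustrative, not from this repo):

```python
import os
import tempfile

# "r" (read, text mode) is already the default for open(), so pyupgrade drops it.
fd, path = tempfile.mkstemp()
os.close(fd)
with open(path, "w") as fp:
    fp.write("__version__ = '20210721'\n")

with open(path, "r") as fp:  # redundant mode argument
    before = fp.read()
with open(path) as fp:  # pyupgrade's rewrite, identical behavior
    after = fp.read()

assert before == after
os.remove(path)
```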
@@ -3,7 +3,7 @@ from pprint import pprint
 
 
 def main(req_file: str, *pkgs):
-    with open(req_file, "r") as fp:
+    with open(req_file) as fp:
         lines = fp.readlines()
 
     for pkg in pkgs:

@@ -45,6 +45,13 @@ repos:
           )$
       - id: detect-private-key
 
+  - repo: https://github.com/asottile/pyupgrade
+    rev: v2.23.0
+    hooks:
+      - id: pyupgrade
+        args: [--py36-plus]
+        name: Upgrade code
+
   - repo: https://github.com/PyCQA/isort
     rev: 5.9.2
     hooks:

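With this hook registered, the rewrite runs on staged files at every commit; `pre-commit install` wires it into git once, and `pre-commit run pyupgrade --all-files` applies it across the whole repository — which is presumably what produced the bulk of the Python changes in this diff.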
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 #
 # Configuration file for the Sphinx documentation builder.
 #

@@ -48,7 +47,7 @@ HelperCLI.copy_notebooks(PATH_RAW_NB, PATH_IPYNB)
 
 
 def _transform_changelog(path_in: str, path_out: str) -> None:
-    with open(path_in, "r") as fp:
+    with open(path_in) as fp:
         chlog_lines = fp.readlines()
     # enrich short subsub-titles to be unique
     chlog_ver = ""

@@ -291,7 +290,7 @@ def setup(app):
 def package_list_from_file(file):
     """List up package name (not containing version and extras) from a package list file"""
     mocked_packages = []
-    with open(file, "r") as fp:
+    with open(file) as fp:
         for ln in fp.readlines():
             # Example: `tqdm>=4.41.0` => `tqdm`
             # `[` is for package with extras

@@ -44,7 +44,7 @@ else:
     ops, Pipeline, DALIClassificationIterator, LastBatchPolicy = ..., ABC, ABC, ABC
 
 
-class ExternalMNISTInputIterator(object):
+class ExternalMNISTInputIterator:
     """
     This iterator class wraps torchvision's MNIST dataset and returns the images and labels in batches
     """

@@ -78,7 +78,7 @@ class ExternalSourcePipeline(Pipeline):
     """
 
     def __init__(self, batch_size, eii, num_threads, device_id):
-        super(ExternalSourcePipeline, self).__init__(batch_size, num_threads, device_id, seed=12)
+        super().__init__(batch_size, num_threads, device_id, seed=12)
         self.source = ops.ExternalSource(source=eii, num_outputs=2)
         self.build()
 
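Two Python-3-only simplifications recur throughout the diff: inheriting from `object` explicitly is redundant (every Python 3 class is new-style), and `super()` no longer needs the class/instance arguments inside a method. A minimal sketch with hypothetical classes:

```python
class Base:  # identical to `class Base(object):` in Python 3
    def __init__(self, size):
        self.size = size


class Child(Base):
    def __init__(self, size):
        # zero-argument super() replaces super(Child, self); Python 3
        # fills in the enclosing class and instance automatically
        super().__init__(size)


assert Child(4).size == 4
```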
@@ -66,7 +66,7 @@ class DQN(nn.Module):
             n_actions: number of discrete actions available in the environment
             hidden_size: size of hidden layers
         """
-        super(DQN, self).__init__()
+        super().__init__()
         self.net = nn.Sequential(nn.Linear(obs_size, hidden_size), nn.ReLU(), nn.Linear(hidden_size, n_actions))
 
     def forward(self, x):

@@ -106,7 +106,7 @@ class ReplayBuffer:
 
     def sample(self, batch_size: int) -> Tuple:
         indices = np.random.choice(len(self.buffer), batch_size, replace=False)
-        states, actions, rewards, dones, next_states = zip(*[self.buffer[idx] for idx in indices])
+        states, actions, rewards, dones, next_states = zip(*(self.buffer[idx] for idx in indices))
 
         return (
            np.array(states),

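Passing a generator expression instead of a list comprehension into `zip(*...)` avoids building an intermediate list; the star-unpacking consumes the iterable either way. A small standalone sketch of the pattern used in `sample` (toy data, not from the repo):

```python
buffer = [("s0", 0), ("s1", 1), ("s2", 2)]
indices = [2, 0]

# List comprehension materializes a temporary list before unpacking:
states_l, actions_l = zip(*[buffer[i] for i in indices])
# Generator expression feeds zip(*) lazily — same result, no extra list:
states_g, actions_g = zip(*(buffer[i] for i in indices))

assert states_l == states_g == ("s2", "s0")
assert actions_l == actions_g == (2, 0)
```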
@@ -52,7 +52,11 @@ _PYTORCH_PRUNING_METHOD = {
 _PARAM_TUPLE = Tuple[nn.Module, str]
 _PARAM_LIST = Sequence[_PARAM_TUPLE]
 _MODULE_CONTAINERS = (LightningModule, nn.Sequential, nn.ModuleList, nn.ModuleDict)
-_LayerRef = TypedDict("_LayerRef", {"data": nn.Module, "names": List[Tuple[int, str]]})
+
+
+class _LayerRef(TypedDict):
+    data: nn.Module
+    names: List[Tuple[int, str]]
 
 
 class ModelPruning(Callback):

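Here the functional `TypedDict` form is rewritten to the class-based syntax, which reads like an ordinary class body and is friendlier to type checkers. A standalone sketch assuming Python 3.8+ for `typing.TypedDict` (plain types stand in for the `nn.Module` fields):

```python
from typing import List, Tuple, TypedDict

# Functional form — what the code had:
_PointOld = TypedDict("_PointOld", {"x": int, "tags": List[Tuple[int, str]]})

# Class-based form — what this commit switches to:
class _Point(TypedDict):
    x: int
    tags: List[Tuple[int, str]]

# Both describe the same dict shape:
p: _Point = {"x": 1, "tags": [(0, "a")]}
```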
@@ -34,7 +34,7 @@ PARAMETER_NUM_UNITS = [" ", "K", "M", "B", "T"]
 UNKNOWN_SIZE = "?"
 
 
-class LayerSummary(object):
+class LayerSummary:
     """
     Summary class for a single layer in a :class:`~pytorch_lightning.core.lightning.LightningModule`.
     It collects the following information:

@@ -126,7 +126,7 @@ class LayerSummary(object):
         return sum(np.prod(p.shape) if not _is_lazy_weight_tensor(p) else 0 for p in self._module.parameters())
 
 
-class ModelSummary(object):
+class ModelSummary:
     """
     Generates a summary of all layers in a :class:`~pytorch_lightning.core.lightning.LightningModule`.
 
@@ -22,7 +22,7 @@ from pytorch_lightning.utilities import AttributeDict
 from pytorch_lightning.utilities.parsing import save_hyperparameters
 
 
-class HyperparametersMixin(object):
+class HyperparametersMixin:
 
     __jit_unused_properties__ = ["hparams", "hparams_initial"]
 
@@ -44,7 +44,7 @@ if _OMEGACONF_AVAILABLE:
 CHECKPOINT_PAST_HPARAMS_KEYS = ("hparams", "module_arguments")  # used in 0.7.6
 
 
-class ModelIO(object):
+class ModelIO:
     CHECKPOINT_HYPER_PARAMS_KEY = "hyper_parameters"
     CHECKPOINT_HYPER_PARAMS_NAME = "hparams_name"
     CHECKPOINT_HYPER_PARAMS_TYPE = "hparams_type"

@@ -249,8 +249,7 @@ class LightningLoggerBase(ABC):
                 key = str(key)
                 if isinstance(value, (MutableMapping, Namespace)):
                     value = vars(value) if isinstance(value, Namespace) else value
-                    for d in _dict_generator(value, prefixes + [key]):
-                        yield d
+                    yield from _dict_generator(value, prefixes + [key])
                 else:
                     yield prefixes + [key, value if value is not None else str(None)]
             else:

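`yield from` replaces the manual loop-and-yield pattern wherever a generator merely forwards another iterable; it is shorter and, when delegating to a sub-generator, also propagates `.send()`/`.throw()` and return values correctly. A sketch:

```python
def _manual(items):
    for item in items:
        yield item


def _delegating(items):
    # Equivalent for plain iteration; also a true delegation
    # when `items` is itself a generator.
    yield from items


assert list(_manual([1, 2])) == list(_delegating([1, 2])) == [1, 2]
```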
@@ -425,7 +424,7 @@ class LoggerCollection(LightningLoggerBase):
         return "_".join([str(logger.version) for logger in self._logger_iterable])
 
 
-class DummyExperiment(object):
+class DummyExperiment:
     """Dummy experiment"""
 
     def nop(self, *args, **kw):

@@ -19,7 +19,6 @@ CSV logger for basic experiment logging that does not require opening ports
 
 """
 import csv
-import io
 import logging
 import os
 from argparse import Namespace

@@ -35,7 +34,7 @@ from pytorch_lightning.utilities.distributed import rank_zero_only
 log = logging.getLogger(__name__)
 
 
-class ExperimentWriter(object):
+class ExperimentWriter:
     r"""
     Experiment writer for CSVLogger.
 
@@ -95,7 +94,7 @@ class ExperimentWriter(object):
             last_m.update(m)
         metrics_keys = list(last_m.keys())
 
-        with io.open(self.metrics_file_path, "w", newline="") as f:
+        with open(self.metrics_file_path, "w", newline="") as f:
             self.writer = csv.DictWriter(f, fieldnames=metrics_keys)
             self.writer.writeheader()
             self.writer.writerows(self.metrics)

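In Python 3, `io.open` is literally the same function as the builtin `open`, so the alias — and the `import io` removed in the earlier hunk of this file — buys nothing. A one-liner confirms it:

```python
import io

# io.open is an alias of the builtin in Python 3.
assert io.open is open
```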
@@ -253,7 +253,7 @@ class WandbLogger(LightningLoggerBase):
             checkpoint_callback.best_model_path: checkpoint_callback.best_model_score,
             **checkpoint_callback.best_k_models,
         }
-        checkpoints = sorted([(Path(p).stat().st_mtime, p, s) for p, s in checkpoints.items() if Path(p).is_file()])
+        checkpoints = sorted((Path(p).stat().st_mtime, p, s) for p, s in checkpoints.items() if Path(p).is_file())
         checkpoints = [
             c for c in checkpoints if c[1] not in self._logged_model_time.keys() or self._logged_model_time[c[1]] < c[0]
         ]

@@ -48,7 +48,7 @@ def _broadcast_object_list(object_list, src=0, group=None):
     my_rank = get_rank()
     # Serialize object_list elements to tensors on src rank.
     if my_rank == src:
-        tensor_list, size_list = zip(*[_object_to_tensor(obj) for obj in object_list])
+        tensor_list, size_list = zip(*(_object_to_tensor(obj) for obj in object_list))
         object_sizes_tensor = torch.cat(size_list)
     else:
         object_sizes_tensor = torch.LongTensor(len(object_list))

@@ -40,8 +40,7 @@ class PrecisionPlugin(Plugin, CheckpointHooks):
         Maybe different in other precision plugins.
         """
         for group in optimizer.param_groups:
-            for p in group["params"]:
-                yield p
+            yield from group["params"]
 
     def connect(
         self, model: Module, optimizers: List[Optimizer], lr_schedulers: List[Any]

@@ -200,5 +200,5 @@ class HorovodPlugin(ParallelPlugin):
 
     @staticmethod
     def _filter_named_parameters(model: nn.Module, optimizer: Optimizer) -> List[Tuple[str, nn.Parameter]]:
-        opt_params = set(p for group in optimizer.param_groups for p in group.get("params", []))
+        opt_params = {p for group in optimizer.param_groups for p in group.get("params", [])}
        return [(name, p) for name, p in model.named_parameters() if p in opt_params]

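`set(generator)` becomes a set comprehension: the result is identical, with one fewer name lookup and arguably clearer intent. A sketch with hypothetical parameter groups (plain ints stand in for tensors):

```python
param_groups = [{"params": [1, 2]}, {"params": [2, 3]}, {}]

via_call = set(p for group in param_groups for p in group.get("params", []))
via_comp = {p for group in param_groups for p in group.get("params", [])}

assert via_call == via_comp == {1, 2, 3}
```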
@@ -25,7 +25,7 @@ def _load_requirements(path_dir: str, file_name: str = "requirements.txt", comme
     >>> _load_requirements(_PROJECT_ROOT)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
     ['numpy...', 'torch...', ...]
     """
-    with open(os.path.join(path_dir, file_name), "r") as file:
+    with open(os.path.join(path_dir, file_name)) as file:
         lines = [ln.strip() for ln in file.readlines()]
     reqs = []
     for ln in lines:

@@ -80,7 +80,7 @@ if _HOROVOD_AVAILABLE:
 log = logging.getLogger(__name__)
 
 
-class AcceleratorConnector(object):
+class AcceleratorConnector:
     def __init__(
         self,
         num_processes,

@@ -35,7 +35,7 @@ from pytorch_lightning.utilities.exceptions import MisconfigurationException
 from pytorch_lightning.utilities.imports import _fault_tolerant_enabled
 
 
-class TensorRunningAccum(object):
+class TensorRunningAccum:
     """Tracks a running accumulation values (min, max, mean) without graph
     references.
 
@@ -112,7 +112,7 @@ class TensorRunningAccum(object):
         return getattr(self.memory[: self.current_idx], how)()
 
 
-class PredictionCollection(object):
+class PredictionCollection:
     def __init__(self, global_rank: int, world_size: int):
         self.global_rank = global_rank
         self.world_size = world_size

@@ -170,7 +170,7 @@ class PredictionCollection(object):
         torch.save(outputs, fp)
 
 
-class CycleIterator(object):
+class CycleIterator:
     """
     Iterator for restarting a dataloader if it runs out of samples
     """

@@ -230,7 +230,7 @@ class CycleIterator(object):
         return self.length
 
 
-class CombinedDataset(object):
+class CombinedDataset:
     """
     Combine multiple datasets and compute their statistics
     """

@@ -323,7 +323,7 @@ class DataLoaderDict(Dict):
         pass
 
 
-class CombinedLoader(object):
+class CombinedLoader:
     """
     Combines different dataloaders and allows sampling in parallel.
     Supported modes are 'min_size', which raises StopIteration after the shortest loader

@@ -510,7 +510,7 @@ class CombinedLoader(object):
         return self._calc_num_batches(self.loaders)
 
 
-class CombinedLoaderIterator(object):
+class CombinedLoaderIterator:
     """
     Custom Iterator returning data from multple loaders, and allows sampling in parallel
     """

@@ -875,7 +875,7 @@ class Trainer(
         # ----------------------------
         # INSPECT THE CORE LOOPS
         # ----------------------------
-        f"""
+        fr"""
         Lightning internal flow looks like this:
         {Trainer.fit} or {Trainer.test} or {Trainer.predict}  ||
                                 |                              ||

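The docstring above interpolates `{Trainer.fit}` and friends, so it must remain an f-string; the added `r` prefix presumably protects the backslashes in the ASCII flow diagram from being read as escape sequences. The prefixes compose like this (illustrative strings, not the trainer docstring):

```python
name = "fit"

interpolated = f"calls {name} \today"  # \t is interpreted as a TAB character
raw_interpolated = fr"calls {name} \today"  # backslash survives; {name} is still filled in

assert "\t" in interpolated
assert "\\today" in raw_interpolated and "fit" in raw_interpolated
```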
@@ -60,7 +60,7 @@ def _determine_lr_attr_name(trainer: "pl.Trainer", model: "pl.LightningModule")
     )
 
 
-class _LRFinder(object):
+class _LRFinder:
     """LR finder object. This object stores the results of lr_find().
 
     Args:

@@ -396,7 +396,7 @@ class _LinearLR(_LRScheduler):
     def __init__(self, optimizer: torch.optim.Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1):
         self.end_lr = end_lr
         self.num_iter = num_iter
-        super(_LinearLR, self).__init__(optimizer, last_epoch)
+        super().__init__(optimizer, last_epoch)
 
     def get_lr(self):
         curr_iter = self.last_epoch + 1

@@ -435,7 +435,7 @@ class _ExponentialLR(_LRScheduler):
     def __init__(self, optimizer: torch.optim.Optimizer, end_lr: float, num_iter: int, last_epoch: int = -1):
         self.end_lr = end_lr
         self.num_iter = num_iter
-        super(_ExponentialLR, self).__init__(optimizer, last_epoch)
+        super().__init__(optimizer, last_epoch)
 
     def get_lr(self):
         curr_iter = self.last_epoch + 1

@@ -34,7 +34,7 @@ def enabled_only(fn: Callable):
     return wrapped_fn
 
 
-class InternalDebugger(object):
+class InternalDebugger:
     def __init__(self, trainer):
         self.enabled = os.environ.get("PL_DEV_DEBUG", "0") == "1"
         self.trainer = trainer

@@ -280,7 +280,7 @@ class AttributeDict(Dict):
     def __repr__(self) -> str:
         if not len(self):
             return ""
-        max_key_length = max([len(str(k)) for k in self])
+        max_key_length = max(len(str(k)) for k in self)
         tmp_name = "{:" + str(max_key_length + 3) + "s} {}"
         rows = [tmp_name.format(f'"{n}":', self[n]) for n in sorted(self.keys())]
         out = "\n".join(rows)

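`max`, `min`, `sum`, and `sorted` accept any iterable, so the wrapping list comprehension (dropped here and in several test files below) is a needless allocation. A sketch mirroring the `__repr__` change:

```python
data = {"lr": 0.01, "batch_size": 32}

with_list = max([len(str(k)) for k in data])  # builds a throwaway list
with_gen = max(len(str(k)) for k in data)  # consumes the keys lazily

assert with_list == with_gen == len("batch_size")
```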
@@ -37,7 +37,7 @@ def main(path_req: str, torch_version: Optional[str] = None) -> None:
         torch_version = torch.__version__
     assert torch_version, f"invalid torch: {torch_version}"
 
-    with open(path_req, "r") as fp:
+    with open(path_req) as fp:
         req = fp.read()
     # remove comments
     req = re.sub(rf"\s*#.*{os.linesep}", os.linesep, req)

@@ -275,7 +275,7 @@ def test_min_steps_override_early_stopping_functionality(tmpdir, step_freeze: in
 
     class Model(BoringModel):
         def __init__(self, step_freeze):
-            super(Model, self).__init__()
+            super().__init__()
 
             self._step_freeze = step_freeze
 
@@ -351,7 +351,7 @@ def test_multiple_optimizers_basefinetuning(tmpdir):
 
     class Check(Callback):
         def on_train_epoch_start(self, trainer, pl_module) -> None:
-            num_param_groups = sum([len(opt.param_groups) for opt in trainer.optimizers])
+            num_param_groups = sum(len(opt.param_groups) for opt in trainer.optimizers)
             assert lr_monitor.lr_sch_names == ["lr-Adam", "lr-Adam-1"]
             if trainer.current_epoch == 0:
                 assert num_param_groups == 3

@@ -56,7 +56,7 @@ class TestPruningMethod(pytorch_prune.BasePruningMethod):
 
     @classmethod
     def apply(cls, module, name, amount):
-        return super(TestPruningMethod, cls).apply(module, name, amount=amount)
+        return super().apply(module, name, amount=amount)
 
 
 def train_with_pruning_callback(

@@ -312,7 +312,7 @@ def test_model_checkpoint_to_yaml(tmpdir, save_top_k: int):
 
     path_yaml = os.path.join(tmpdir, "best_k_models.yaml")
     checkpoint.to_yaml(path_yaml)
-    d = yaml.full_load(open(path_yaml, "r"))
+    d = yaml.full_load(open(path_yaml))
     best_k = dict(checkpoint.best_k_models.items())
     assert d == best_k

@@ -1206,7 +1206,7 @@ def test_ckpt_version_after_rerun_same_trainer(tmpdir):
     trainer.fit(BoringModel())
 
     ckpt_range = range(mc.STARTING_VERSION, trainer.max_epochs + mc.STARTING_VERSION)
-    expected = {"test.ckpt", *[f"test-v{i}.ckpt" for i in ckpt_range]}
+    expected = {"test.ckpt", *(f"test-v{i}.ckpt" for i in ckpt_range)}
     # check best_k_models state
     assert {Path(f).name for f in mc.best_k_models} == expected
     # check created ckpts

@@ -68,7 +68,7 @@ def pytest_pyfunc_call(pyfuncitem):
     if pyfuncitem.get_closest_marker("spawn"):
         testfunction = pyfuncitem.obj
         funcargs = pyfuncitem.funcargs
-        testargs = tuple([funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames])
+        testargs = tuple(funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames)
 
         mp.spawn(wraps, (testfunction, testargs))
         return True

@@ -263,10 +263,8 @@ def test_toggle_untoggle_3_optimizers_shared_parameters(tmpdir):
 
         @staticmethod
         def combine_generators(gen_1, gen_2):
-            for p in gen_1:
-                yield p
-            for p in gen_2:
-                yield p
+            yield from gen_1
+            yield from gen_2
 
         def configure_optimizers(self):
             optimizer_1 = SGD(self.combine_generators(self.layer_1.parameters(), self.layer_2.parameters()), lr=0.1)

@@ -302,7 +302,7 @@ class OptimizerWithHooks(Optimizer):
             d = {"params": params, "mod": mod, "layer_type": mod_class}
             self.params.append(d)
 
-        super(OptimizerWithHooks, self).__init__(self.params, {"lr": 0.01})
+        super().__init__(self.params, {"lr": 0.01})
 
     def _save_input(self, mod, i):
         """Saves input of layer"""

@@ -79,7 +79,7 @@ def test_file_logger_log_metrics(tmpdir, step_idx):
     logger.save()
 
     path_csv = os.path.join(logger.log_dir, ExperimentWriter.NAME_METRICS_FILE)
-    with open(path_csv, "r") as fp:
+    with open(path_csv) as fp:
         lines = fp.readlines()
     assert len(lines) == 2
     assert all(n in lines[0] for n in metrics)

@@ -365,7 +365,7 @@ def test_non_blocking():
     batch = trainer.accelerator.batch_to_device(batch, torch.device("cuda:0"))
     mocked.assert_called_with(torch.device("cuda", 0), non_blocking=True)
 
-    class BatchObject(object):
+    class BatchObject:
         def to(self, *args, **kwargs):
             pass
 
@@ -221,11 +221,11 @@ def test_horovod_transfer_batch_to_gpu(tmpdir):
     class TestTrainingStepModel(BoringModel):
         def training_step(self, batch, *args, **kwargs):
             assert str(batch.device) != "cpu"
-            return super(TestTrainingStepModel, self).training_step(batch, *args, **kwargs)
+            return super().training_step(batch, *args, **kwargs)
 
         def validation_step(self, batch, *args, **kwargs):
             assert str(batch.device) != "cpu"
-            return super(TestTrainingStepModel, self).validation_step(batch, *args, **kwargs)
+            return super().validation_step(batch, *args, **kwargs)
 
     model = TestTrainingStepModel()
 
@@ -269,7 +269,7 @@ def test_horovod_multi_optimizer(tmpdir):
         return set(list(model.parameters()))
 
     def get_optimizer_params(optimizer):
-        return set([p for group in optimizer.param_groups for p in group.get("params", [])])
+        return {p for group in optimizer.param_groups for p in group.get("params", [])}
 
     assert get_model_params(model.generator) != get_model_params(model.discriminator)
     assert get_model_params(model.generator) == get_optimizer_params(trainer.optimizers[0])

@@ -36,7 +36,7 @@ PROFILER_OVERHEAD_MAX_TOLERANCE = 0.0005
 
 
 def _get_python_cprofile_total_duration(profile):
-    return sum([x.inlinetime for x in profile.getstats()])
+    return sum(x.inlinetime for x in profile.getstats())
 
 
 def _sleep_generator(durations):

@@ -322,7 +322,7 @@ def test_pytorch_profiler_trainer_test(tmpdir):
     assert path.read_text("utf-8")
 
     if _KINETO_AVAILABLE:
-        files = sorted([file for file in os.listdir(tmpdir) if file.endswith(".json")])
+        files = sorted(file for file in os.listdir(tmpdir) if file.endswith(".json"))
         assert any(f"test-{pytorch_profiler.filename}" in f for f in files)
         path = pytorch_profiler.dirpath / f"test-{pytorch_profiler.filename}.txt"
         assert path.read_text("utf-8")

@@ -29,7 +29,7 @@ from tests.helpers.runif import RunIf
 
 
 def test_fx_validator(tmpdir):
-    funcs_name = sorted([f for f in dir(Callback) if not f.startswith("_")])
+    funcs_name = sorted(f for f in dir(Callback) if not f.startswith("_"))
 
     callbacks_func = [
         "on_before_backward",

@@ -169,7 +169,7 @@ def test_combined_loader_dict_min_size():
 
     combined_loader = CombinedLoader(loaders, "min_size")
 
-    assert len(combined_loader) == min([len(v) for v in loaders.values()])
+    assert len(combined_loader) == min(len(v) for v in loaders.values())
 
     for idx, item in enumerate(combined_loader):
         assert isinstance(item, dict)

@@ -188,7 +188,7 @@ def test_combined_loader_dict_max_size_cycle():
 
     combined_loader = CombinedLoader(loaders, "max_size_cycle")
 
-    assert len(combined_loader) == max([len(v) for v in loaders.values()])
+    assert len(combined_loader) == max(len(v) for v in loaders.values())
 
     for idx, item in enumerate(combined_loader):
         assert isinstance(item, dict)

@@ -207,7 +207,7 @@ def test_combined_loader_sequence_min_size():
 
     combined_loader = CombinedLoader(loaders, "min_size")
 
-    assert len(combined_loader) == min([len(v) for v in loaders])
+    assert len(combined_loader) == min(len(v) for v in loaders)
 
     for idx, item in enumerate(combined_loader):
         assert isinstance(item, Sequence)

@@ -225,7 +225,7 @@ def test_combined_loader_sequence_max_size_cycle():
 
     combined_loader = CombinedLoader(loaders, "max_size_cycle")
 
-    assert len(combined_loader) == max([len(v) for v in loaders])
+    assert len(combined_loader) == max(len(v) for v in loaders)
 
     for idx, item in enumerate(combined_loader):
         assert isinstance(item, Sequence)

@@ -879,7 +879,7 @@ def test_gradient_clipping(tmpdir):
         ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
         parameters = model.parameters()
         grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
-        assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
+        assert (grad_norm - 1.0).abs() < 0.01, f"Gradient norm != 1.0: {grad_norm}"
 
         return ret_val

@@ -952,7 +952,7 @@ def test_gradient_clipping_fp16(tmpdir):
         ret_val = old_training_step_and_backward(split_batch, batch_idx, opt_idx, optimizer, hiddens)
         parameters = model.parameters()
         grad_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), 2) for p in parameters]), 2)
-        assert (grad_norm - 1.0).abs() < 0.01, "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
+        assert (grad_norm - 1.0).abs() < 0.01, f"Gradient norm != 1.0: {grad_norm}"
 
         return ret_val

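`str.format` calls with simple substitutions collapse into f-strings, which evaluate the expression inline. A sketch of the assertion-message change in the two hunks above:

```python
grad_norm = 1.004

old_msg = "Gradient norm != 1.0: {grad_norm}".format(grad_norm=grad_norm)
new_msg = f"Gradient norm != 1.0: {grad_norm}"

assert old_msg == new_msg == "Gradient norm != 1.0: 1.004"
```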