annotate unused vars (#5017)

* annotate all unused vars

* rank_zero_warn

* Apply suggestions from code review

Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>

* f1 fixed

Co-authored-by: Carlos Mocholí <carlossmocholi@gmail.com>
Jirka Borovec 2020-12-19 13:53:06 +01:00 committed by GitHub
parent f3748ba808
commit 2d54116baa
25 changed files with 39 additions and 15 deletions
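Taken together, the diff applies two recurring patterns: arguments that an interface requires but the implementation never reads get a `# Todo: required argument ... is not used` comment, and parameters that are accepted but silently ignored now emit a `rank_zero_warn`. A minimal sketch of both patterns (`configure_metric` is an illustrative name, not one from the diff; `rank_zero_warn` is imported exactly as the changed modules do):

    from pytorch_lightning.utilities import rank_zero_warn

    def model_to_device(model, process_idx):
        # Todo: required argument `process_idx` is not used
        model.cpu()

    def configure_metric(multilabel: bool = False):
        # ignored options are surfaced to the user instead of being dropped silently
        if multilabel is not False:
            rank_zero_warn(f'The `multilabel={multilabel}` parameter is unused and will not have any effect.')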


@@ -383,6 +383,7 @@ class AcceleratorConnector:
return 'OMPI_COMM_WORLD_RANK' in os.environ or 'HOROVOD_RANK' in os.environ
def set_nvidia_flags(self, is_slurm_managing_tasks, data_parallel_device_ids):
# Todo: required argument `is_slurm_managing_tasks` is not used
if data_parallel_device_ids is None:
return


@@ -100,6 +100,7 @@ class DDP2Accelerator(Accelerator):
return output
def set_world_ranks(self, process_idx):
# Todo: required argument `process_idx` is not used
self.trainer.local_rank = self.trainer.node_rank
self.trainer.global_rank = self.trainer.node_rank
self.trainer.world_size = self.trainer.num_nodes
@@ -131,6 +132,7 @@ class DDP2Accelerator(Accelerator):
Dict with evaluation results
"""
# Todo: required argument `mp_queue` is not used
# show progressbar only on progress_rank 0
if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
self.trainer.progress_bar_callback.disable()


@@ -100,7 +100,7 @@ class DDPAccelerator(Accelerator):
command = sys.argv
try:
full_path = path_lib(command[0])
except Exception as e:
except Exception:
full_path = abspath(command[0])
command[0] = full_path
@@ -192,6 +192,7 @@ class DDPAccelerator(Accelerator):
self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes
def init_device(self, process_idx):
# Todo: required argument `process_idx` is not used
self.trainer.root_gpu = self.trainer.data_parallel_device_ids[self.trainer.local_rank]
torch.cuda.set_device(self.trainer.root_gpu)


@@ -43,6 +43,7 @@ class DDPCPUHPCAccelerator(DDPHPCAccelerator):
self.nickname = 'ddp_cpu'
def model_to_device(self, model, process_idx):
# Todo: required argument `process_idx` is not used
model.cpu()
def get_device_ids(self):


@@ -213,6 +213,7 @@ class DDPCPUSpawnAccelerator(Accelerator):
self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes
def model_to_device(self, model, process_idx):
# Todo: required argument `process_idx` is not used
model.cpu()
def get_device_ids(self):
@@ -227,6 +228,7 @@ class DDPCPUSpawnAccelerator(Accelerator):
self.trainer.model = model
def transfer_distrib_spawn_state_on_fit_end(self, model, mp_queue, results):
# Todo: required argument `model` is not used
# track the best model path
best_model_path = None
if self.trainer.checkpoint_callback is not None:


@@ -196,6 +196,8 @@ class DDPSpawnAccelerator(Accelerator):
self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes
def init_device(self, process_idx, is_master):
# Todo: required argument `process_idx` is not used
# Todo: required argument `is_master` is not used
gpu_idx = self.trainer.data_parallel_device_ids[self.trainer.local_rank]
self.trainer.root_gpu = gpu_idx
torch.cuda.set_device(self.trainer.root_gpu)


@@ -126,6 +126,7 @@ class TPUAccelerator(Accelerator):
"""
Here we are inside each individual process
"""
# Todo: required argument `tpu_core_idx` is not used
if not trainer:
trainer = self.trainer
@@ -281,6 +282,7 @@ class TPUAccelerator(Accelerator):
"""
Dump a temporary checkpoint after ddp ends to get weights out of the process
"""
# Todo: required argument `model` is not used
if self.trainer.is_global_zero:
path = os.path.join(self.trainer.default_root_dir, '__temp_weight_distributed_end.ckpt')
self.trainer.save_checkpoint(path)


@@ -350,6 +350,7 @@ class ModelCheckpoint(Callback):
log.debug(f"Removed checkpoint: {filepath}")
def _save_model(self, filepath: str, trainer, pl_module):
# Todo: required argument `pl_module` is not used
# in debugging, track when we save checkpoints
trainer.dev_debugger.track_checkpointing_history(filepath)
@@ -461,6 +462,7 @@ class ModelCheckpoint(Callback):
The base path gets extended with logger name and version (if these are available)
and subfolder "checkpoints".
"""
# Todo: required argument `pl_module` is not used
if self.dirpath is not None:
return # short circuit


@@ -1163,6 +1163,7 @@ class LightningModule(
optimizer:
optimizer_idx:
"""
# Todo: required argument `optimizer_idx` is not used
for param in self.parameters():
param.requires_grad = False


@@ -267,7 +267,6 @@ class LightningOptimizer:
if closure is None:
closure = do_nothing_closure
profile_name = f"optimizer_step_{self._optimizer_idx}"
else:
if not isinstance(closure, types.FunctionType):
raise MisconfigurationException("When closure is provided, it should be a function")


@@ -377,7 +377,7 @@ def save_hparams_to_yaml(config_yaml, hparams: Union[dict, Namespace]) -> None:
for k, v in hparams.items():
try:
yaml.dump(v)
except TypeError as err:
except TypeError:
warn(f"Skipping '{k}' parameter because it is not possible to safely dump to YAML.")
hparams[k] = type(v).__name__
else:


@@ -248,7 +248,7 @@ class Result(Dict):
def extract_batch_size(batch):
try:
batch_size = Result.unpack_batch_size(batch)
except RecursionError as re:
except RecursionError:
batch_size = 1
return batch_size
@@ -527,7 +527,7 @@ class Result(Dict):
result[k] = torch.tensor(result[k]).float()
try:
reduced_val = weighted_mean(result[k], batch_sizes)
except Exception as e:
except Exception:
reduced_val = torch.mean(result[k])
else:
reduced_val = fx(result[k])


@@ -48,9 +48,9 @@ class LightningDistributed:
torch.save(obj, buffer)
data = bytearray(buffer.getbuffer())
length_tensor = torch.tensor([len(data)]).long().to(self.device)
length_tensor = self._broadcast(length_tensor, src=0, group=group)
self._broadcast(length_tensor, src=0, group=group)
data_tensor = torch.ByteTensor(data).to(self.device)
data_tensor = self._broadcast(data_tensor, src=0, group=group)
self._broadcast(data_tensor, src=0, group=group)
def _receive(self, group=WORLD):
length_tensor = torch.tensor([0]).long().to(self.device)

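For context, the underlying `torch.distributed.broadcast` writes into the tensor it is given on every rank, which is presumably why the re-assignments above could be dropped without changing behaviour. A minimal stand-alone sketch of the same idea, assuming a process group has already been set up with `torch.distributed.init_process_group`:

    import torch
    import torch.distributed as dist

    def broadcast_payload(data: bytearray, device: torch.device, src: int = 0) -> None:
        # broadcast() fills the given tensor in place on every rank, so there is
        # nothing useful to capture from it on the source rank
        length_tensor = torch.tensor([len(data)]).long().to(device)
        dist.broadcast(length_tensor, src=src)
        data_tensor = torch.ByteTensor(data).to(device)
        dist.broadcast(data_tensor, src=src)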

@@ -219,6 +219,9 @@ class F1(FBeta):
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
if multilabel is not False:
rank_zero_warn(f'The `multilabel={multilabel}` parameter is unused and will not have any effect.')
super().__init__(
num_classes=num_classes,
beta=1.0,

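With this change, passing `multilabel` to `F1` is reported rather than silently ignored. A hedged usage sketch, assuming the metric is importable as it was in the 1.1.x releases:

    from pytorch_lightning.metrics import F1

    # emits "The `multilabel=True` parameter is unused and will not have any effect."
    # through rank_zero_warn, then behaves like a plain F1 metric (FBeta with beta=1.0)
    metric = F1(num_classes=3, multilabel=True)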

@@ -188,7 +188,6 @@ def stat_scores_multiple_classes(
tps = torch.zeros((num_classes + 1,), device=pred.device)
fps = torch.zeros((num_classes + 1,), device=pred.device)
tns = torch.zeros((num_classes + 1,), device=pred.device)
fns = torch.zeros((num_classes + 1,), device=pred.device)
sups = torch.zeros((num_classes + 1,), device=pred.device)


@@ -16,6 +16,7 @@ from typing import Tuple
import torch
from pytorch_lightning.metrics.utils import _input_format_classification_one_hot, class_reduce
from pytorch_lightning.utilities import rank_zero_warn
def _fbeta_update(


@@ -74,7 +74,6 @@ def bleu_score(
assert len(translate_corpus) == len(reference_corpus)
numerator = torch.zeros(n_gram)
denominator = torch.zeros(n_gram)
precision_scores = torch.zeros(n_gram)
c = 0.0
r = 0.0


@@ -3,6 +3,8 @@ from typing import Tuple, Optional
import torch
from pytorch_lightning.utilities import rank_zero_warn
def _psnr_compute(
sum_squared_error: torch.Tensor,
@@ -11,6 +13,8 @@ def _psnr_compute(
base: float = 10.0,
reduction: str = 'elementwise_mean',
) -> torch.Tensor:
if reduction != 'elementwise_mean':
rank_zero_warn(f'The `reduction={reduction}` parameter is unused and will not have any effect.')
psnr_base_e = 2 * torch.log(data_range) - torch.log(sum_squared_error / n_obs)
psnr = psnr_base_e * (10 / torch.log(torch.tensor(base)))
return psnr
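For reference, the computation in `_psnr_compute` is the standard peak signal-to-noise ratio, evaluated here through natural logarithms (2 ln(data_range) - ln(MSE), scaled by 10 / ln 10). In the usual base-10 form:

    \mathrm{PSNR} = 10 \, \log_{10}\!\left( \frac{\mathrm{data\_range}^{2}}{\mathrm{MSE}} \right),
    \qquad
    \mathrm{MSE} = \frac{\mathrm{sum\_squared\_error}}{n_{\mathrm{obs}}}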


@@ -257,7 +257,6 @@ def parallel_apply(modules, inputs, kwargs_tup=None, devices=None): # pragma: n
def _worker(i, module, input, kwargs, device=None):
torch.set_grad_enabled(grad_enabled)
fx_called: str = ''
if device is None:
device = get_a_var(input).get_device()
try:


@@ -78,6 +78,7 @@ class DDPPlugin(LightningPlugin):
world_size: int,
is_slurm_managing_tasks: bool = True,
) -> None:
# Todo: required argument `is_slurm_managing_tasks` is not used
os.environ["MASTER_ADDR"] = str(cluster_environment.master_address())
os.environ["MASTER_PORT"] = str(cluster_environment.master_port())
os.environ["WORLD_SIZE"] = str(cluster_environment.world_size())


@@ -102,6 +102,7 @@ class DataConnector(object):
model.test_dataloader = _PatchDataLoader(test_dataloaders)
def attach_datamodule(self, model, datamodule: Optional[LightningDataModule], stage: str) -> None:
# Todo: required argument `stage` is not used
# We use datamodule if it's been provided on .fit or .test, otherwise we check model for it
datamodule = datamodule or getattr(model, 'datamodule', None)


@@ -55,6 +55,7 @@ class LoggerConnector:
)
def on_evaluation_batch_start(self, testing, batch, dataloader_idx, num_dataloaders):
# Todo: required argument `testing` is not used
model = self.trainer.get_model()
# set dataloader_idx only if multiple ones
model._current_dataloader_idx = dataloader_idx if num_dataloaders > 1 else None
@@ -216,6 +217,7 @@ class LoggerConnector:
self.__process_eval_epoch_end_results_and_log_legacy(deprecated_eval_results, test_mode)
def evaluation_epoch_end(self, testing):
# Todo: required argument `testing` is not used
# reset dataloader idx
model_ref = self.trainer.get_model()
model_ref._current_dataloader_idx = None
@@ -397,7 +399,7 @@ class LoggerConnector:
sample_obj = opt_idx_outputs[0][0] if isinstance(opt_idx_outputs[0], list) else opt_idx_outputs[0]
is_result_obj = len(epoch_output) > 0 and isinstance(sample_obj, Result)
is_1_0_result = is_result_obj and 'extra' in sample_obj
except IndexError as e:
except IndexError:
is_result_obj = False
is_1_0_result = False


@@ -83,6 +83,8 @@ class SLURMConnector:
signal.signal(signal.SIGTERM, self.term_handler)
def sig_handler(self, signum, frame): # pragma: no-cover
# Todo: required argument `signum` is not used
# Todo: required argument `frame` is not used
if self.trainer.is_global_zero:
# save weights
log.info('handling SIGUSR1')
@@ -106,7 +108,8 @@ class SLURMConnector:
self.trainer.logger.close()
def term_handler(self, signum, frame):
# save
# Todo: required argument `signum` is not used
# Todo: required argument `frame` is not used
log.info("bypassing sigterm")
# todo: this is the same func as slurm_environment.py `master_port`


@@ -90,14 +90,14 @@ class EvaluationLoop(object):
else:
self.trainer.call_hook('on_validation_start', *args, **kwargs)
def on_evaluation_model_eval(self, *args, **kwargs):
def on_evaluation_model_eval(self, *_, **__):
model_ref = self.trainer.get_model()
if self.testing:
model_ref.on_test_model_eval()
else:
model_ref.on_validation_model_eval()
def on_evaluation_model_train(self, *args, **kwargs):
def on_evaluation_model_train(self, *_, **__):
model_ref = self.trainer.get_model()
if self.testing:
model_ref.on_test_model_train()

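The renamed signatures above follow the common Python convention of binding intentionally ignored positional and keyword arguments to `_` and `__`. A minimal sketch (the class name here is illustrative):

    class EvaluationHooks:
        def on_evaluation_model_eval(self, *_, **__):
            # whatever the caller passes is accepted and deliberately ignored
            print("switching model to eval mode")

    EvaluationHooks().on_evaluation_model_eval(1, 2, stage="validate")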

@@ -647,7 +647,6 @@ class TrainLoop:
grad_norm_dic = {}
# bookkeeping
using_results_obj = False
self.trainer.hiddens = None
# track all outputs across time and num of optimizers