flake8 & isort (#5647)

Jirka Borovec 2021-01-25 20:31:38 +01:00 committed by GitHub
parent 304f9c5bca
commit 7b30133a82
3 changed files with 7 additions and 5 deletions

View File

@@ -182,7 +182,7 @@ class ModelSummary(object):
         self._model = model
         self._mode = mode
         self._layer_summary = self.summarize()
-        self._precision_megabytes = (self._model.precision / 8.0) * 1e-6 # 1 byte -> 8 bits
+        self._precision_megabytes = (self._model.precision / 8.0) * 1e-6  # 1 byte -> 8 bits

     @property
     def named_modules(self) -> List[Tuple[str, nn.Module]]:
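
The line changed above sets a per-parameter size factor: precision is given in bits, dividing by 8 gives bytes, and multiplying by 1e-6 converts bytes to megabytes. A standalone sketch of that arithmetic (illustrative only, not Lightning's API):

```python
# Illustrative arithmetic behind `_precision_megabytes`; not Lightning code.
def precision_megabytes(precision: int) -> float:
    return (precision / 8.0) * 1e-6  # bits -> bytes -> megabytes

print(precision_megabytes(32))  # 4e-06 MB per float32 parameter
print(precision_megabytes(16))  # 2e-06 MB per float16 parameter
```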
@@ -389,9 +389,11 @@ def get_gpu_memory_map() -> Dict[str, int]:
     }
     return gpu_memory_map
+
+
 def get_formatted_model_size(total_model_size: float) -> str:
     return f"{total_model_size:,.3f}"


 def get_human_readable_count(number: int) -> str:
     """
     Abbreviates an integer number with K, M, B, T for thousands, millions,
View File

@@ -29,9 +29,8 @@ from pytorch_lightning.utilities.warning_utils import WarningCache
 _WANDB_AVAILABLE = _module_available("wandb")

 try:
-    from wandb.wandb_run import Run
-
     import wandb
+    from wandb.wandb_run import Run
 except ImportError:
     # needed for test mocks, these tests shall be updated
     wandb, Run = None, None
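
The reordering above is isort's work: the plain `import wandb` sorts ahead of the `from wandb.wandb_run import Run` form. The surrounding try/except is the usual guard for an optional dependency; a self-contained sketch of the pattern, where `require_wandb` is a hypothetical helper added for illustration only:

```python
# Optional-dependency guard, as in the diff above.
try:
    import wandb
    from wandb.wandb_run import Run
except ImportError:
    wandb, Run = None, None  # keeps test mocks importable


def require_wandb() -> None:
    # Hypothetical helper, not part of Lightning: fail loudly at point of use.
    if wandb is None:
        raise ImportError("wandb is required; install it with `pip install wandb`.")
```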

View File

@@ -40,8 +40,8 @@ class PreCalculatedModel(BoringModel):
     def __init__(self, precision: int = 32):
         super().__init__()
-        self.layer = nn.Linear(32, 1000, bias=False) # 32K params
-        self.layer1 = nn.Linear(1000, 218, bias=False) # 218K params
+        self.layer = nn.Linear(32, 1000, bias=False)  # 32K params
+        self.layer1 = nn.Linear(1000, 218, bias=False)  # 218K params

         # calculate model size based on precision.
         self.pre_calculated_model_size = 1.0 / (32 / precision)
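
The precomputed size follows directly from the layer shapes: 32*1000 + 1000*218 = 250,000 weights, which at 32-bit (4-byte) precision is exactly 1.0 MB, and `1.0 / (32 / precision)` rescales that baseline for other precisions. A quick standalone check (illustrative, not part of the test file):

```python
import math

# Standalone check of the expected size; not part of the test file.
n_params = 32 * 1000 + 1000 * 218  # 250,000 weights in total
for precision in (32, 16):
    size_mb = n_params * (precision / 8.0) * 1e-6  # bits -> bytes -> MB
    assert math.isclose(size_mb, 1.0 / (32 / precision))  # 1.0 MB and 0.5 MB
```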
@@ -50,6 +50,7 @@ class PreCalculatedModel(BoringModel):
         x = self.layer(x)
         return self.layer1(x)
+

 class UnorderedModel(LightningModule):
     """ A model in which the layers are not defined in order of execution """