Remove NaNs from loss in LRFinder (#1862)
* Remove NaNs from loss in LRFinder
* np.isfinite
* chlog
* add test
* chlog

Co-authored-by: Jirka <jirka@pytorchlightning.ai>
parent a153fe4c2a
commit ac76dfcf62
@@ -14,6 +14,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).

 - Allow user to select individual TPU core to train on ([#1729](https://github.com/PyTorchLightning/pytorch-lightning/pull/1729))

+- Removed non-finite values from loss in `LRFinder` ([#1862](https://github.com/PyTorchLightning/pytorch-lightning/pull/1862))
+
 ### Deprecated

 ### Removed
@@ -321,8 +321,9 @@ class _LRFinder(object):
         """
         try:
-            loss = self.results["loss"][skip_begin:-skip_end]
-            min_grad = (np.gradient(np.array(loss))).argmin()
+            loss = np.array(self.results["loss"][skip_begin:-skip_end])
+            loss = loss[np.isfinite(loss)]
+            min_grad = np.gradient(loss).argmin()
             self._optimal_idx = min_grad + skip_begin
             return self.results["lr"][self._optimal_idx]
         except Exception:
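For reference, a minimal standalone sketch of the masking idea above, using plain NumPy with made-up loss values (not the library code itself):

```python
# Standalone sketch of the non-finite masking used in suggestion().
# The learning rates and loss values below are made up for illustration.
import numpy as np

lrs = np.logspace(-5, -1, 8)                            # hypothetical candidate learning rates
loss = np.array([1.0, 0.9, 0.7, 0.4, 0.5, 0.8, float('nan'), float('inf')])

finite = np.isfinite(loss)                              # drop NaN/inf points before the gradient
min_grad = np.gradient(loss[finite]).argmin()           # index of the steepest loss decrease
print(lrs[finite][min_grad])                            # suggested learning rate
```

Filtering with `np.isfinite` drops both NaN and ±inf entries, so a few diverged points at the end of the range test no longer dominate the gradient-based suggestion.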
@@ -124,7 +124,7 @@ def test_call_to_trainer_method(tmpdir):
     # logger file to get meta
     trainer = Trainer(
         default_save_path=tmpdir,
-        max_epochs=5,
+        max_epochs=5
     )

     lrfinder = trainer.lr_find(model, mode='linear')
@@ -170,7 +170,7 @@ def test_suggestion_parameters_work(tmpdir):
     # logger file to get meta
     trainer = Trainer(
         default_save_path=tmpdir,
-        max_epochs=10,
+        max_epochs=10
     )

     lrfinder = trainer.lr_find(model)
@@ -179,3 +179,24 @@ def test_suggestion_parameters_work(tmpdir):

     assert lr1 != lr2, \
         'Skipping parameter did not influence learning rate'
+
+
+def test_suggestion_with_non_finite_values(tmpdir):
+    """ Test that non-finite values does not alter results """
+
+    hparams = EvalModelTemplate.get_default_hparams()
+    model = EvalModelTemplate(hparams)
+
+    # logger file to get meta
+    trainer = Trainer(
+        default_save_path=tmpdir,
+        max_epochs=10
+    )
+
+    lrfinder = trainer.lr_find(model)
+    before_lr = lrfinder.suggestion()
+    lrfinder.results['loss'][-1] = float('nan')
+    after_lr = lrfinder.suggestion()
+
+    assert before_lr == after_lr, \
+        'Learning rate was altered because of non-finite loss values'
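For context, a rough usage sketch of the behaviour the new test exercises; `model` is assumed to be an already-defined LightningModule, and the snippet is not part of the commit:

```python
# Rough usage sketch (not part of the commit). Assumes `model` is an
# already-defined LightningModule instance.
from pytorch_lightning import Trainer

trainer = Trainer(max_epochs=10)
lrfinder = trainer.lr_find(model)   # run the learning-rate range test
lr = lrfinder.suggestion()          # with this fix, NaN/inf entries in
                                    # lrfinder.results['loss'] no longer skew the result
print(lr)
```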