Remove NaNs from loss in LRFinder (#1862)

* Remove NaNs from loss in LRFinder

* np.isfinite

* chlog

* add test

* chlog

Co-authored-by: Jirka <jirka@pytorchlightning.ai>
Author: Rohit Gupta, 2020-05-19 12:09:19 +05:30 (committed by GitHub)
Parent: a153fe4c2a
Commit: ac76dfcf62
3 changed files with 28 additions and 4 deletions


@@ -14,6 +14,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 - Allow user to select individual TPU core to train on ([#1729](https://github.com/PyTorchLightning/pytorch-lightning/pull/1729))
+- Removed non-finite values from loss in `LRFinder` ([#1862](https://github.com/PyTorchLightning/pytorch-lightning/pull/1862))
 ### Deprecated
 ### Removed


@@ -321,8 +321,9 @@ class _LRFinder(object):
         """
         try:
-            loss = self.results["loss"][skip_begin:-skip_end]
-            min_grad = (np.gradient(np.array(loss))).argmin()
+            loss = np.array(self.results["loss"][skip_begin:-skip_end])
+            loss = loss[np.isfinite(loss)]
+            min_grad = np.gradient(loss).argmin()
             self._optimal_idx = min_grad + skip_begin
             return self.results["lr"][self._optimal_idx]
         except Exception:
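A minimal, self-contained sketch of what the changed suggestion() logic does: non-finite entries are dropped with np.isfinite before np.gradient is taken, so the suggested index is derived from valid losses only. The names results, skip_begin and skip_end mirror the diff above; the concrete loss/lr values are made up for illustration.

import numpy as np

# Illustrative values only; the real ones come from the LR finder's recorded results.
results = {"loss": [2.0, 1.2, 0.8, float('nan'), 0.5, 0.9],
           "lr": [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1.0]}
skip_begin, skip_end = 1, 1

loss = np.array(results["loss"][skip_begin:-skip_end])
loss = loss[np.isfinite(loss)]          # drop NaN/inf losses before differentiating
min_grad = np.gradient(loss).argmin()   # index of the steepest loss decrease
optimal_idx = min_grad + skip_begin
print(results["lr"][optimal_idx])       # suggested learning rate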


@@ -124,7 +124,7 @@ def test_call_to_trainer_method(tmpdir):
     # logger file to get meta
     trainer = Trainer(
         default_save_path=tmpdir,
-        max_epochs=5,
+        max_epochs=5
     )

     lrfinder = trainer.lr_find(model, mode='linear')
@@ -170,7 +170,7 @@ def test_suggestion_parameters_work(tmpdir):
     # logger file to get meta
     trainer = Trainer(
         default_save_path=tmpdir,
-        max_epochs=10,
+        max_epochs=10
     )

     lrfinder = trainer.lr_find(model)
@@ -179,3 +179,24 @@ def test_suggestion_parameters_work(tmpdir):
     assert lr1 != lr2, \
         'Skipping parameter did not influence learning rate'
+
+
+def test_suggestion_with_non_finite_values(tmpdir):
+    """ Test that non-finite values does not alter results """
+
+    hparams = EvalModelTemplate.get_default_hparams()
+    model = EvalModelTemplate(hparams)
+
+    # logger file to get meta
+    trainer = Trainer(
+        default_save_path=tmpdir,
+        max_epochs=10
+    )
+
+    lrfinder = trainer.lr_find(model)
+    before_lr = lrfinder.suggestion()
+    lrfinder.results['loss'][-1] = float('nan')
+    after_lr = lrfinder.suggestion()
+
+    assert before_lr == after_lr, \
+        'Learning rate was altered because of non-finite loss values'