nb steps in early stop (#3909)
* nb steps
* if
* skip
* rev
* seed
* seed
parent 39b3704285
commit 064ae53d63
@@ -31,6 +31,7 @@ jobs:
        pip list
    - name: Cache datasets
      # todo this probably does not work with docker images, rather cache dockers
      uses: actions/cache@v2
      with:
        path: Datasets  # This path is specific to Ubuntu
@@ -35,6 +35,7 @@
    "pytorch_lightning/trainer/connectors/checkpoint_connector.py",
    "pytorch_lightning/trainer/connectors/data_connector.py",
    "pytorch_lightning/trainer/connectors/logger_connector.py",
    "pytorch_lightning/trainer/connectors/slurm_connector.py",
    "pytorch_lightning/distributed/dist.py",
    "pytorch_lightning/tuner",
    "pytorch_lightning/plugins"
@@ -205,6 +205,7 @@ class EarlyStopping(Callback):

    def on_train_end(self, trainer, pl_module):
        if self.stopped_epoch > 0 and self.verbose > 0:
            # todo: remove this old warning
            rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
                           ' but will start from "0" in v0.8.0.', DeprecationWarning)
            log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
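The on_train_end hook above only emits its messages when the callback actually stopped training (stopped_epoch > 0) and verbose is enabled. A minimal usage sketch, assuming the Trainer(early_stop_callback=...) argument and the EvalModelTemplate helper that appear in the test hunks below; the monitor key and patience value are illustrative:

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from tests.base import EvalModelTemplate

# verbose=True enables the rank_zero_warn / log.info path in on_train_end
early_stop = EarlyStopping(monitor='early_stop_on', patience=3, verbose=True)

trainer = Trainer(
    max_epochs=100,                    # upper bound; early stopping normally ends sooner
    early_stop_callback=early_stop,    # argument name as used in the test diff below
)
trainer.fit(EvalModelTemplate())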
@@ -149,7 +149,7 @@ class CheckpointConnector:
        self.trainer.global_step = checkpoint['global_step']
        self.trainer.current_epoch = checkpoint['epoch']

        # crash if max_epochs is lower than the current epoch from the checkpoint
        # crash if max_epochs is lower then the current epoch from the checkpoint
        if self.trainer.current_epoch > self.trainer.max_epochs:
            m = f"""
            you restored a checkpoint with current_epoch={self.trainer.current_epoch}
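The guard in this hunk refuses to resume when the epoch restored from the checkpoint already exceeds max_epochs. A standalone sketch of that check, assuming MisconfigurationException (imported by the test file below) is the exception raised; the message string is truncated in the hunk, so the wording here is illustrative:

from pytorch_lightning.utilities.exceptions import MisconfigurationException


def validate_resumed_epoch(current_epoch: int, max_epochs: int) -> None:
    # crash if max_epochs is lower than the current epoch from the checkpoint
    if current_epoch > max_epochs:
        raise MisconfigurationException(
            f"you restored a checkpoint with current_epoch={current_epoch}, "
            f"but Trainer was configured with max_epochs={max_epochs}"  # illustrative wording
        )


# e.g. an epoch-3 checkpoint cannot be resumed into a trainer capped at 2 epochs
try:
    validate_resumed_epoch(current_epoch=3, max_epochs=2)
except MisconfigurationException as err:
    print(err)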
@@ -6,7 +6,7 @@ import cloudpickle
import pytest
import torch

from pytorch_lightning import Trainer
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from tests.base import EvalModelTemplate
from pytorch_lightning.utilities.exceptions import MisconfigurationException
@@ -35,7 +35,7 @@ def test_resume_early_stopping_from_checkpoint(tmpdir):
    https://github.com/PyTorchLightning/pytorch-lightning/issues/1464
    https://github.com/PyTorchLightning/pytorch-lightning/issues/1463
    """
    seed_everything(42)
    model = EvalModelTemplate()
    checkpoint_callback = ModelCheckpoint(monitor="early_stop_on", save_top_k=1)
    early_stop_callback = EarlyStoppingTestRestore()
@@ -60,7 +60,7 @@ def test_resume_early_stopping_from_checkpoint(tmpdir):
    early_stop_callback = EarlyStoppingTestRestore(early_stop_callback_state)
    new_trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
        max_epochs=1,
        resume_from_checkpoint=checkpoint_filepath,
        early_stop_callback=early_stop_callback,
    )
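Taken together, the test hunks exercise this flow: seed everything, train once with an EarlyStopping/ModelCheckpoint pair, then build a second Trainer that resumes from the saved checkpoint so the early-stopping state is restored. A hedged sketch of that flow using plain EarlyStopping (EarlyStoppingTestRestore is a test-local subclass not shown in this diff); the epoch counts and checkpoint path handling are illustrative:

from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from tests.base import EvalModelTemplate

seed_everything(42)                      # makes the initial and resumed runs comparable
model = EvalModelTemplate()
checkpoint_callback = ModelCheckpoint(monitor="early_stop_on", save_top_k=1)
early_stop_callback = EarlyStopping(monitor="early_stop_on")

trainer = Trainer(
    max_epochs=4,
    checkpoint_callback=checkpoint_callback,
    early_stop_callback=early_stop_callback,
)
trainer.fit(model)

# resume from the best checkpoint; the callback state stored with it is restored first
new_trainer = Trainer(
    max_epochs=1,
    resume_from_checkpoint=checkpoint_callback.best_model_path,
    early_stop_callback=EarlyStopping(monitor="early_stop_on"),
)
new_trainer.fit(model)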