diff --git a/.github/workflows/ci_test-conda.yml b/.github/workflows/ci_test-conda.yml index 2076976bac..a718269058 100644 --- a/.github/workflows/ci_test-conda.yml +++ b/.github/workflows/ci_test-conda.yml @@ -31,6 +31,7 @@ jobs: pip list - name: Cache datasets + # todo: this probably does not work with docker images; rather cache the docker images instead uses: actions/cache@v2 with: path: Datasets # This path is specific to Ubuntu diff --git a/.pyrightconfig.json b/.pyrightconfig.json index cb14993e2c..3f00d9a3e4 100644 --- a/.pyrightconfig.json +++ b/.pyrightconfig.json @@ -35,6 +35,7 @@ "pytorch_lightning/trainer/connectors/checkpoint_connector.py", "pytorch_lightning/trainer/connectors/data_connector.py", "pytorch_lightning/trainer/connectors/logger_connector.py", + "pytorch_lightning/trainer/connectors/slurm_connector.py", "pytorch_lightning/distributed/dist.py", "pytorch_lightning/tuner", "pytorch_lightning/plugins" diff --git a/pytorch_lightning/callbacks/early_stopping.py b/pytorch_lightning/callbacks/early_stopping.py index 866a4471bb..58fc9ec781 100644 --- a/pytorch_lightning/callbacks/early_stopping.py +++ b/pytorch_lightning/callbacks/early_stopping.py @@ -205,6 +205,7 @@ class EarlyStopping(Callback): def on_train_end(self, trainer, pl_module): if self.stopped_epoch > 0 and self.verbose > 0: + # todo: remove this old warning rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,' ' but will start from "0" in v0.8.0.', DeprecationWarning) log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.') diff --git a/pytorch_lightning/trainer/connectors/checkpoint_connector.py b/pytorch_lightning/trainer/connectors/checkpoint_connector.py index caf20a888a..044039f708 100644 --- a/pytorch_lightning/trainer/connectors/checkpoint_connector.py +++ b/pytorch_lightning/trainer/connectors/checkpoint_connector.py @@ -149,7 +149,7 @@ class CheckpointConnector: self.trainer.global_step = checkpoint['global_step'] self.trainer.current_epoch 
= checkpoint['epoch'] - # crash if max_epochs is lower than the current epoch from the checkpoint + # crash if max_epochs is lower than the current epoch from the checkpoint if self.trainer.current_epoch > self.trainer.max_epochs: m = f""" you restored a checkpoint with current_epoch={self.trainer.current_epoch} diff --git a/tests/callbacks/test_early_stopping.py b/tests/callbacks/test_early_stopping.py index 8a1daaf695..0e01fee7a8 100644 --- a/tests/callbacks/test_early_stopping.py +++ b/tests/callbacks/test_early_stopping.py @@ -6,7 +6,7 @@ import cloudpickle import pytest import torch -from pytorch_lightning import Trainer +from pytorch_lightning import Trainer, seed_everything from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from tests.base import EvalModelTemplate from pytorch_lightning.utilities.exceptions import MisconfigurationException @@ -35,7 +35,7 @@ def test_resume_early_stopping_from_checkpoint(tmpdir): https://github.com/PyTorchLightning/pytorch-lightning/issues/1464 https://github.com/PyTorchLightning/pytorch-lightning/issues/1463 """ - + seed_everything(42) model = EvalModelTemplate() checkpoint_callback = ModelCheckpoint(monitor="early_stop_on", save_top_k=1) early_stop_callback = EarlyStoppingTestRestore() @@ -60,7 +60,7 @@ def test_resume_early_stopping_from_checkpoint(tmpdir): early_stop_callback = EarlyStoppingTestRestore(early_stop_callback_state) new_trainer = Trainer( default_root_dir=tmpdir, - max_epochs=1, + max_epochs=1, resume_from_checkpoint=checkpoint_filepath, early_stop_callback=early_stop_callback, )