# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging as log
import os
import pickle
from copy import deepcopy
from typing import Generic, Mapping, TypeVar

import cloudpickle
import pytest
import torch
import torch.nn.functional as F

import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.trainer.states import RunningStage, TrainerFn
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
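

# Callback that asserts the Trainer and the LightningModule always report the same
# progress counters; used by the restore tests below.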
class ModelTrainerPropertyParity(Callback):
    def _check_properties(self, trainer, pl_module):
        assert trainer.global_step == pl_module.global_step
        assert trainer.current_epoch == pl_module.current_epoch

    def on_train_start(self, trainer, pl_module):
        self._check_properties(trainer, pl_module)

    def on_train_batch_start(self, trainer, pl_module, *args, **kwargs):
        self._check_properties(trainer, pl_module)

    def on_train_batch_end(self, trainer, pl_module, *args, **kwargs):
        self._check_properties(trainer, pl_module)

    def on_epoch_end(self, trainer, pl_module):
        self._check_properties(trainer, pl_module)

    def on_train_end(self, trainer, pl_module):
        self._check_properties(trainer, pl_module)


class ValTestLossBoringModel(BoringModel):
    def __init__(self, batch_size=4):
        super().__init__()
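        # save_hyperparameters() records the __init__ arguments (here `batch_size`),
        # so they are written to the checkpoint under "hyper_parameters"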
        self.save_hyperparameters()

    def validation_step(self, batch, batch_idx):
        out = super().validation_step(batch, batch_idx)
        self.log("val_loss", out["x"])
        return out

    def test_step(self, batch, batch_idx):
        out = super().test_step(batch, batch_idx)
        self.log("test_loss", out["y"])
        return out


T = TypeVar("T")
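

# The generic subclasses below make sure checkpoint loading also works for models
# with `typing.Generic` bases.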
class GenericParentValTestLossBoringModel(Generic[T], ValTestLossBoringModel):
    def __init__(self, batch_size: int = 4):
        super().__init__(batch_size=batch_size)


class GenericValTestLossBoringModel(GenericParentValTestLossBoringModel[int]):
    pass


class CustomClassificationModelDP(ClassificationModel):
    def _step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        return {"logits": logits, "y": y}

    def training_step(self, batch, batch_idx):
        out = self._step(batch, batch_idx)
        loss = F.cross_entropy(out["logits"], out["y"])
        return loss

    def validation_step(self, batch, batch_idx):
        return self._step(batch, batch_idx)

    def test_step(self, batch, batch_idx):
        return self._step(batch, batch_idx)

    def validation_step_end(self, outputs):
        self.log("val_acc", self.valid_acc(outputs["logits"], outputs["y"]))


def test_model_properties_fit_ckpt_path(tmpdir):
    """Test that properties like `current_epoch` and `global_step` in model and trainer are always the same."""
    model = BoringModel()
    checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, monitor="val_loss", save_last=True)
    trainer_args = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=2,
        logger=False,
        callbacks=[checkpoint_callback, ModelTrainerPropertyParity()],  # this performs the assertions
    )
    trainer = Trainer(**trainer_args)
    trainer.fit(model)
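
    # resume from the checkpoint written above and train one more epoch;
    # the parity callback re-checks the counters after the restore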
    trainer_args.update(max_epochs=2)
    trainer = Trainer(**trainer_args)
    trainer.fit(model, ckpt_path=str(tmpdir / "last.ckpt"))


def test_trainer_properties_restore_ckpt_path(tmpdir):
    """Test that required trainer properties are set correctly when resuming from checkpoint in different
    phases."""

    class CustomClassifModel(ClassificationModel):
        def configure_optimizers(self):
            optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
            return [optimizer], [lr_scheduler]

    model = CustomClassifModel()
    dm = ClassifDataModule()
    checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, save_last=True)
    trainer_args = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=2,
        logger=False,
        callbacks=[checkpoint_callback],
        num_sanity_val_steps=0,
    )
    trainer = Trainer(**trainer_args)
    trainer.fit(model, datamodule=dm)

    resume_ckpt = str(tmpdir / "last.ckpt")
    state_dict = torch.load(resume_ckpt)
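    # the hooks on the subclass below compare the live trainer/module state against this raw checkpoint dict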

    trainer_args.update({"max_epochs": 3, "enable_checkpointing": False, "callbacks": []})

    class CustomClassifModel(CustomClassifModel):
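        # recursively compare tensors, mappings, and plain values against the loaded checkpoint contents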
        def _is_equal(self, a, b):
            if isinstance(a, torch.Tensor):
                return torch.all(torch.eq(a, b))

            if isinstance(a, Mapping):
                return all(self._is_equal(a.get(k, None), b.get(k, None)) for k in b.keys())

            return a == b

        def _check_optimizers(self):
            return all(
                self._is_equal(self.trainer.optimizers[i].state_dict(), state_dict["optimizer_states"][i])
                for i in range(len(self.trainer.optimizers))
            )

        def _check_schedulers(self):
            return all(
                self._is_equal(self.trainer.lr_schedulers[i]["scheduler"].state_dict(), state_dict["lr_schedulers"][i])
                for i in range(len(self.trainer.lr_schedulers))
            )

        def _check_model_state_dict(self):
            for k in self.state_dict():
                yield self._is_equal(self.state_dict()[k], state_dict["state_dict"][k])

        def _test_on_val_test_predict_tune_start(self):
            assert self.trainer.current_epoch == state_dict["epoch"]
            assert self.trainer.global_step == state_dict["global_step"]
            assert all(self._check_model_state_dict())

            # no optimizers and schedulers are loaded otherwise
            if self.trainer.state.fn != TrainerFn.TUNING:
                return

            assert not self._check_optimizers()
            assert not self._check_schedulers()

        def on_train_start(self):
            if self.trainer.state.fn == TrainerFn.TUNING:
                self._test_on_val_test_predict_tune_start()
            else:
                assert self.trainer.current_epoch == state_dict["epoch"]
                assert self.trainer.global_step == state_dict["global_step"]
                assert all(self._check_model_state_dict())
                assert self._check_optimizers()
                assert self._check_schedulers()

        def on_validation_start(self):
            if self.trainer.state.fn == TrainerFn.VALIDATING:
                self._test_on_val_test_predict_tune_start()

        def on_test_start(self):
            self._test_on_val_test_predict_tune_start()

    for fn in ("fit", "validate", "test", "predict"):
        model = CustomClassifModel()
        dm = ClassifDataModule()
        trainer_args["auto_scale_batch_size"] = fn == "tune"  # a boolean, so the flag is only set for `tune`
        trainer = Trainer(**trainer_args)
        trainer_fn = getattr(trainer, fn)
        trainer_fn(model, datamodule=dm, ckpt_path=resume_ckpt)


def test_try_resume_from_non_existing_checkpoint(tmpdir):
    """Test that trying to resume from non-existing `ckpt_path` fails with an error."""
    model = BoringModel()
    trainer = Trainer()

    with pytest.raises(FileNotFoundError, match="Aborting training"):
        trainer.fit(model, ckpt_path=str(tmpdir / "non_existing.ckpt"))


class CaptureCallbacksBeforeTraining(Callback):
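    # snapshot of `trainer.callbacks`, taken right before training starts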
    callbacks = []

    def on_pretrain_routine_end(self, trainer, pl_module):
        self.callbacks = deepcopy(trainer.callbacks)


def test_callbacks_state_fit_ckpt_path(tmpdir):
    """Test that resuming from a checkpoint restores callbacks that persist state."""
    dm = ClassifDataModule()
    model = ClassificationModel()
    callback_capture = CaptureCallbacksBeforeTraining()

    def get_trainer_args():
        checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="val_loss", save_last=True)
        trainer_args = dict(
            default_root_dir=tmpdir,
            max_steps=1,
            logger=False,
            callbacks=[checkpoint, callback_capture],
            limit_val_batches=2,
        )
        assert checkpoint.best_model_path == ""
        assert checkpoint.best_model_score is None
        return trainer_args

    # initial training
    trainer = Trainer(**get_trainer_args())
    trainer.fit(model, datamodule=dm)
    callbacks_before_resume = deepcopy(trainer.callbacks)

    # resumed training
    trainer = Trainer(**get_trainer_args())
    trainer.fit(model, datamodule=dm, ckpt_path=str(tmpdir / "last.ckpt"))

    assert len(callbacks_before_resume) == len(callback_capture.callbacks)

    for before, after in zip(callbacks_before_resume, callback_capture.callbacks):
        if isinstance(before, ModelCheckpoint):
            assert before.best_model_path == after.best_model_path
            assert before.best_model_score == after.best_model_score


def test_callbacks_references_fit_ckpt_path(tmpdir):
    """Test that resuming from a checkpoint sets references as expected."""
    dm = ClassifDataModule()
    model = ClassificationModel()
    args = {
        "default_root_dir": tmpdir,
        "max_steps": 1,
        "logger": False,
        "limit_val_batches": 2,
        "num_sanity_val_steps": 0,
    }

    # initial training
    checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="val_loss", save_last=True)
    trainer = Trainer(**args, callbacks=[checkpoint])
    assert checkpoint is trainer.callbacks[-1] is trainer.checkpoint_callback
    trainer.fit(model, datamodule=dm)

    # resumed training
    new_checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="val_loss", save_last=True)
    # pass in a new checkpoint object, which should take
    # precedence over the one in the last.ckpt file
    trainer = Trainer(**args, callbacks=[new_checkpoint])
    assert checkpoint is not new_checkpoint
    assert new_checkpoint is trainer.callbacks[-1] is trainer.checkpoint_callback
    trainer.fit(model, datamodule=dm, ckpt_path=str(tmpdir / "last.ckpt"))


@RunIf(min_gpus=2)
def test_running_test_pretrained_model_distrib_dp(tmpdir):
    """Verify `test()` on pretrained model."""

    tutils.set_random_main_port()

    dm = ClassifDataModule()
    model = CustomClassificationModelDP(lr=0.1)

    # exp file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        enable_progress_bar=False,
        max_epochs=2,
        limit_train_batches=5,
        limit_val_batches=5,
        callbacks=[checkpoint],
        logger=logger,
        gpus=[0, 1],
        strategy="dp",
        default_root_dir=tmpdir,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.fit(model, datamodule=dm)

    # correct result and ok accuracy
    assert trainer.state.finished, f"Training failed with {trainer.state}"
    pretrained_model = ClassificationModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)
    pretrained_model.cpu()

    dataloaders = dm.test_dataloader()
    if not isinstance(dataloaders, list):
        dataloaders = [dataloaders]

    for dataloader in dataloaders:
        tpipes.run_prediction_eval_model_template(pretrained_model, dataloader)


@RunIf(min_gpus=2)
def test_running_test_pretrained_model_distrib_ddp_spawn(tmpdir):
    """Verify `test()` on pretrained model."""
    tutils.set_random_main_port()
    dm = ClassifDataModule()
    model = ClassificationModel()

    # exp file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # exp file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        enable_progress_bar=False,
        max_epochs=2,
        limit_train_batches=2,
        limit_val_batches=2,
        callbacks=[checkpoint],
        logger=logger,
        gpus=[0, 1],
        strategy="ddp_spawn",
        default_root_dir=tmpdir,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.fit(model, datamodule=dm)

    log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))

    # correct result and ok accuracy
    assert trainer.state.finished, f"Training failed with {trainer.state}"
    pretrained_model = ClassificationModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)

    # run test set
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)
    pretrained_model.cpu()

    dataloaders = dm.test_dataloader()
    if not isinstance(dataloaders, list):
        dataloaders = [dataloaders]

    for dataloader in dataloaders:
        tpipes.run_prediction_eval_model_template(pretrained_model, dataloader, min_acc=0.1)


def test_running_test_pretrained_model_cpu(tmpdir):
    """Verify test() on pretrained model."""
    tutils.reset_seed()
    dm = ClassifDataModule()
    model = ClassificationModel()

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    trainer_options = dict(
        enable_progress_bar=False,
        max_epochs=2,
        limit_train_batches=2,
        limit_val_batches=2,
        limit_test_batches=2,
        callbacks=[checkpoint],
        logger=logger,
        default_root_dir=tmpdir,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.fit(model, datamodule=dm)

    # correct result and ok accuracy
    assert trainer.state.finished, f"Training failed with {trainer.state}"
    pretrained_model = ClassificationModel.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)

    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model, datamodule=dm)

    # test we have good test accuracy
    tutils.assert_ok_model_acc(new_trainer, key="test_acc", thr=0.45)


@pytest.mark.parametrize("model_template", [ValTestLossBoringModel, GenericValTestLossBoringModel])
def test_load_model_from_checkpoint(tmpdir, model_template):
    """Verify test() on pretrained model."""
    tutils.reset_seed()
    model = model_template()

    trainer_options = dict(
        enable_progress_bar=False,
        max_epochs=2,
        limit_train_batches=2,
        limit_val_batches=2,
        limit_test_batches=2,
        callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor="val_loss", save_top_k=-1)],
        default_root_dir=tmpdir,
    )

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.fit(model)
    trainer.test(model)

    # correct result and ok accuracy
    assert trainer.state.finished, f"Training failed with {trainer.state}"

    # load last checkpoint
    last_checkpoint = sorted(glob.glob(os.path.join(trainer.checkpoint_callback.dirpath, "*.ckpt")))[-1]

    # Since `BoringModel` has `_save_hparams = True` by default, check that ckpt has hparams
    ckpt = torch.load(last_checkpoint)
    assert model_template.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), "hyper_parameters missing from checkpoints"

    # Ensure that model can be correctly restored from checkpoint
    pretrained_model = model_template.load_from_checkpoint(last_checkpoint)

    # test that hparams loaded correctly
    for k, v in model.hparams.items():
        assert getattr(pretrained_model.hparams, k) == v

    # assert weights are the same
    for (old_name, old_p), (new_name, new_p) in zip(model.named_parameters(), pretrained_model.named_parameters()):
        assert torch.all(torch.eq(old_p, new_p)), "loaded weights are not the same as the saved weights"

    # Check `test` on pretrained model:
    new_trainer = Trainer(**trainer_options)
    new_trainer.test(pretrained_model)


@RunIf(min_gpus=2)
def test_dp_resume(tmpdir):
    """Make sure DP continues training correctly."""
    model = CustomClassificationModelDP(lr=0.1)
    dm = ClassifDataModule()

    trainer_options = dict(max_epochs=1, gpus=2, strategy="dp", default_root_dir=tmpdir)

    # get logger
    logger = tutils.get_default_logger(tmpdir)

    # logger file to get weights
    checkpoint = tutils.init_checkpoint_callback(logger)

    # add these to the trainer options
    trainer_options["logger"] = logger
    trainer_options["callbacks"] = [checkpoint]

    # fit model
    trainer = Trainer(**trainer_options)
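    # mark the run as SLURM-managed so it behaves like an HPC cluster job for the save/restore below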
    trainer._is_slurm_managing_tasks = True
    trainer.fit(model, datamodule=dm)

    # track epoch before saving. Increment since we finished the current epoch, don't want to rerun
    real_global_epoch = trainer.current_epoch + 1

    # correct result and ok accuracy
    assert trainer.state.finished, f"Training failed with {trainer.state}"

    # ---------------------------
    # HPC LOAD/SAVE
    # ---------------------------
    # save
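    # hpc_save writes an hpc_ckpt_*.ckpt file into tmpdir, which the next fit() below
    # is expected to pick up automatically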
    trainer.checkpoint_connector.hpc_save(tmpdir, logger)

    # init new trainer
    new_logger = tutils.get_default_logger(tmpdir, version=logger.version)
    trainer_options["logger"] = new_logger
    trainer_options["callbacks"] = [ModelCheckpoint(dirpath=tmpdir)]
    trainer_options["limit_train_batches"] = 0.5
    trainer_options["limit_val_batches"] = 0.2
    trainer_options["max_epochs"] = 1
    new_trainer = Trainer(**trainer_options)

    class CustomModel(CustomClassificationModelDP):
        def __init__(self):
            super().__init__()
            self.on_pretrain_routine_end_called = False

        # use the pretrain-routine-end hook so we can predict before the model does any further training
        def on_pretrain_routine_end(self):
            assert self.trainer.current_epoch == real_global_epoch and self.trainer.current_epoch > 0

            # if model and state loaded correctly, predictions will be good even though we
            # haven't trained with the new loaded model
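            # put the restored trainer in an eval stage so the prediction pass below runs without training state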
            new_trainer.state.stage = RunningStage.VALIDATING

            dataloader = dm.train_dataloader()
            tpipes.run_prediction_eval_model_template(self.trainer.lightning_module, dataloader=dataloader)
            self.on_pretrain_routine_end_called = True

    # new model
    model = CustomModel()

    # fit new model which should load hpc weights
    new_trainer.fit(model, datamodule=dm)
    assert model.on_pretrain_routine_end_called

    # test freeze on gpu
    model.freeze()
    model.unfreeze()


def test_model_saving_loading(tmpdir):
    """Tests use case where trainer saves the model, and user loads it from tags independently."""
    model = BoringModel()

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # fit model
    trainer = Trainer(
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=2,
        logger=logger,
        callbacks=[ModelCheckpoint(dirpath=tmpdir)],
        default_root_dir=tmpdir,
    )
    trainer.fit(model)

    # training complete
    assert trainer.state.finished, f"Training failed with {trainer.state}"

    # make a prediction
    dataloaders = model.test_dataloader()
    if not isinstance(dataloaders, list):
        dataloaders = [dataloaders]

    batch = next(iter(dataloaders[0]))

    # generate preds before saving model
    model.eval()
    pred_before_saving = model(batch)

    # save model
    new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
    trainer.save_checkpoint(new_weights_path)

    # load new model
    hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
    hparams_path = os.path.join(hparams_path, "hparams.yaml")
    model_2 = BoringModel.load_from_checkpoint(checkpoint_path=new_weights_path, hparams_file=hparams_path)
    model_2.eval()

    # make prediction
    # assert that both predictions are the same
    new_pred = model_2(batch)
    assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1


@pytest.mark.parametrize("url_ckpt", [True, False])
def test_strict_model_load_more_params(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
    """Tests use case where trainer saves the model, and user loads it from tags independently."""
    # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
    monkeypatch.setenv("TORCH_HOME", tmpdir)

    model = BoringModel()
    # Extra layer
    model.c_d3 = torch.nn.Linear(32, 32)
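    # c_d3 does not exist on plain BoringModel, so strict loading below must flag it as an unexpected key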

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # fit model
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=2,
        logger=logger,
        callbacks=[ModelCheckpoint(dirpath=tmpdir)],
    )
    trainer.fit(model)

    # training complete
    assert trainer.state.finished, f"Training failed with {trainer.state}"

    # save model
    new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
    trainer.save_checkpoint(new_weights_path)

    # load new model
    hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmpdir), "hparams.yaml")
    ckpt_url = f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
    ckpt_path = ckpt_url if url_ckpt else new_weights_path

    BoringModel.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False)

    with pytest.raises(RuntimeError, match=r'Unexpected key\(s\) in state_dict: "c_d3.weight", "c_d3.bias"'):
        BoringModel.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True)


@pytest.mark.parametrize("url_ckpt", [True, False])
def test_strict_model_load_less_params(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
    """Tests use case where trainer saves the model, and user loads it from tags independently."""
    # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
    monkeypatch.setenv("TORCH_HOME", tmpdir)

    model = BoringModel()

    # logger file to get meta
    logger = tutils.get_default_logger(tmpdir)

    # fit model
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=2,
        limit_val_batches=2,
        logger=logger,
        callbacks=[ModelCheckpoint(dirpath=tmpdir)],
    )
    trainer.fit(model)

    # training complete
    assert trainer.state.finished, f"Training failed with {trainer.state}"

    # save model
    new_weights_path = os.path.join(tmpdir, "save_test.ckpt")
    trainer.save_checkpoint(new_weights_path)

    # load new model
    hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmpdir), "hparams.yaml")
    ckpt_url = f"http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}"
    ckpt_path = ckpt_url if url_ckpt else new_weights_path

    class CurrentModel(BoringModel):
        def __init__(self):
            super().__init__()
            self.c_d3 = torch.nn.Linear(7, 7)
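            # this layer is absent from the saved BoringModel checkpoint, so strict loading must report missing keys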

    CurrentModel.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False)

    with pytest.raises(RuntimeError, match=r'Missing key\(s\) in state_dict: "c_d3.weight", "c_d3.bias"'):
        CurrentModel.load_from_checkpoint(checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True)


def test_model_pickle(tmpdir):
    model = BoringModel()
    pickle.dumps(model)
    cloudpickle.dumps(model)