# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
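"""Shared test pipelines that fit a model, reload it from the best checkpoint and sanity-check its
test-set accuracy (and, optionally, HPC save/restore)."""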
import torch
from torchmetrics.functional import accuracy

from pytorch_lightning import LightningDataModule, LightningModule, Trainer
from pytorch_lightning.utilities import _StrategyType
from tests.helpers import BoringModel
from tests.helpers.utils import get_default_logger, load_model_from_checkpoint, reset_seed


def run_model_test_without_loggers(
    trainer_options: dict, model: LightningModule, data: LightningDataModule = None, min_acc: float = 0.50
):
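    """Fit ``model`` without a logger attached, then reload it from the best checkpoint and check that the
    reloaded model reaches at least ``min_acc`` on every test dataloader."""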
    reset_seed()

    # fit model
    trainer = Trainer(**trainer_options)
    trainer.fit(model, datamodule=data)

    # correct result and ok accuracy
    assert trainer.state.finished, f"Training failed with {trainer.state}"

    model2 = load_model_from_checkpoint(trainer.logger, trainer.checkpoint_callback.best_model_path, type(model))

    # test new model accuracy
    test_loaders = model2.test_dataloader() if not data else data.test_dataloader()
    if not isinstance(test_loaders, list):
        test_loaders = [test_loaders]

    # BoringModel trains on random data, so an accuracy check is only meaningful for real models
    if not isinstance(model2, BoringModel):
        for dataloader in test_loaders:
            run_model_prediction(model2, dataloader, min_acc=min_acc)


def run_model_test(
    trainer_options,
    model: LightningModule,
    data: LightningDataModule = None,
    on_gpu: bool = True,
    version=None,
    with_hpc: bool = True,
    min_acc: float = 0.25,
):
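    """Fit ``model`` with a default logger, assert that the weights actually changed during training, reload
    the best checkpoint and check its test accuracy; optionally exercise HPC save/restore on top."""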
    reset_seed()
    save_dir = trainer_options["default_root_dir"]

    # logger file to get meta
    logger = get_default_logger(save_dir, version=version)
    trainer_options.update(logger=logger)
    trainer = Trainer(**trainer_options)
    initial_values = torch.tensor([torch.sum(torch.abs(x)) for x in model.parameters()])
    trainer.fit(model, datamodule=data)
    post_train_values = torch.tensor([torch.sum(torch.abs(x)) for x in model.parameters()])

    assert trainer.state.finished, f"Training failed with {trainer.state}"

    # check that the model is actually changed post-training
    change_ratio = torch.norm(initial_values - post_train_values)
    assert change_ratio > 0.03, f"the model changed by only {change_ratio}; training seems to have had no effect"

    # test model loading
    pretrained_model = load_model_from_checkpoint(logger, trainer.checkpoint_callback.best_model_path, type(model))

    # test new model accuracy
    test_loaders = model.test_dataloader() if not data else data.test_dataloader()
    if not isinstance(test_loaders, list):
        test_loaders = [test_loaders]

    if not isinstance(model, BoringModel):
        for dataloader in test_loaders:
            run_model_prediction(model, dataloader, min_acc=min_acc)

    if with_hpc:
        if trainer._distrib_type in (_StrategyType.DDP, _StrategyType.DDP_SPAWN, _StrategyType.DDP2):
            # on hpc this would work fine... but need to hack it for the purpose of the test
            trainer.optimizers, trainer.lr_schedulers, trainer.optimizer_frequencies = trainer.init_optimizers(
                pretrained_model
            )

        # test HPC saving
        trainer.checkpoint_connector.hpc_save(save_dir, logger)
        # test HPC loading
        checkpoint_path = trainer.checkpoint_connector.get_max_ckpt_path_from_folder(save_dir)
        trainer.checkpoint_connector.restore(checkpoint_path)


@torch.no_grad()
def run_model_prediction(trained_model, dataloader, min_acc=0.50):
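    """Run a single batch from ``dataloader`` through ``trained_model`` on CPU and assert that top-2
    accuracy reaches at least ``min_acc``."""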
    orig_device = trained_model.device
    # run prediction on 1 batch
    trained_model.cpu()
    trained_model.eval()

    batch = next(iter(dataloader))
    x, y = batch
    x = x.flatten(1)

    y_hat = trained_model(x)
    acc = accuracy(y_hat.cpu(), y.cpu(), top_k=2).item()
    assert acc >= min_acc, f"This model is expected to get at least {min_acc} on the test set (it got {acc})"
    trained_model.to(orig_device)
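

# A minimal usage sketch from a hypothetical test. ``ClassificationModel`` and
# ``ClassifDataModule`` are illustrative stand-ins for whatever model/datamodule
# the calling test supplies:
#
#     def test_cpu_model(tmpdir):
#         trainer_options = dict(default_root_dir=tmpdir, max_epochs=1)
#         run_model_test(trainer_options, ClassificationModel(), data=ClassifDataModule(), on_gpu=False)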