# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import torch
from lightning.pytorch import LightningModule, Trainer, seed_everything
from tests_pytorch.helpers.advanced_models import ParityModuleMNIST, ParityModuleRNN
from parity_pytorch.measure import measure_loops
from parity_pytorch.models import ParityModuleCIFAR
_EXTEND_BENCHMARKS = os.getenv("PL_RUNNING_BENCHMARKS", "0") == "1"
_SHORT_BENCHMARKS = not _EXTEND_BENCHMARKS
_MARK_SHORT_BM = pytest.mark.skipif(_SHORT_BENCHMARKS, reason="Only run during Benchmarking")
_MARK_XFAIL_LOSS = pytest.mark.xfail(strict=False, reason="bad loss")
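# `_MARK_SHORT_BM` gates the slow CIFAR case behind PL_RUNNING_BENCHMARKS=1, while
# `_MARK_XFAIL_LOSS` (strict=False) tolerates the known loss mismatch: a failing case is
# reported as XFAIL and an unexpectedly passing one as XPASS, without failing the suite.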


def assert_parity_relative(pl_values, pt_values, norm_by: float = 1, max_diff: float = 0.1):
    """Assert that the mean Lightning-vs-vanilla difference, relative to the vanilla mean, is below ``max_diff``."""
    # assert speeds
    diffs = np.asarray(pl_values) - np.mean(pt_values)
    # norm by vanilla time
    diffs = diffs / norm_by
    # relative to mean reference value
    diffs = diffs / np.mean(pt_values)
    assert np.mean(diffs) < max_diff, f"Lightning diff {diffs} was worse than vanilla PT (threshold {max_diff})"
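
# Worked example (illustrative values, not from a real run): with pl_values=[1.05, 1.10]
# and pt_values=[1.0, 1.0], the diffs relative to the vanilla mean are [0.05, 0.10];
# their mean 0.075 is under the default max_diff=0.1, so the check passes.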


def assert_parity_absolute(pl_values, pt_values, norm_by: float = 1, max_diff: float = 0.55):
    """Assert that the mean Lightning-vs-vanilla difference, normalized by ``norm_by``, is below ``max_diff``."""
    # assert speeds
    diffs = np.asarray(pl_values) - np.mean(pt_values)
    # norm by event count
    diffs = diffs / norm_by
    assert np.mean(diffs) < max_diff, f"Lightning {diffs} was worse than vanilla PT (threshold {max_diff})"
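
# Worked example (illustrative values, not from a real run): with pl_values=[2.5, 2.6],
# pt_values=[2.0, 2.0], and norm_by=4 epochs, the per-epoch diffs are [0.125, 0.15];
# their mean 0.1375 is under the default max_diff=0.55, so the check passes.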


# ParityModuleMNIST runs with num_workers=1
@pytest.mark.parametrize(
    ("cls_model", "max_diff_speed", "max_diff_memory", "num_epochs", "num_runs"),
    [
        (ParityModuleRNN, 0.05, 0.001, 4, 3),
        pytest.param(ParityModuleMNIST, 0.3, 0.001, 4, 3, marks=_MARK_XFAIL_LOSS),  # FIXME: investigate!
        pytest.param(  # FIXME: investigate!
            ParityModuleCIFAR, 4.0, 0.0002, 2, 2, marks=[_MARK_SHORT_BM, _MARK_XFAIL_LOSS]
        ),
    ],
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_pytorch_parity(
    cls_model: LightningModule, max_diff_speed: float, max_diff_memory: float, num_epochs: int, num_runs: int
):
    """Verify that the same PyTorch and Lightning models achieve the same results."""
    lightning = measure_loops(
        cls_model, kind="PT Lightning", loop=lightning_loop, num_epochs=num_epochs, num_runs=num_runs
    )
    vanilla = measure_loops(cls_model, kind="Vanilla PT", loop=vanilla_loop, num_epochs=num_epochs, num_runs=num_runs)

    # make sure the losses match exactly to 5 decimal places
    print(f"Losses are for... \n vanilla: {vanilla['losses']} \n lightning: {lightning['losses']}")
    for pl_out, pt_out in zip(lightning["losses"], vanilla["losses"]):
        np.testing.assert_almost_equal(pl_out, pt_out, 5)

    # drop the first run, which initializes the dataset (download & filter)
    assert_parity_absolute(
        lightning["durations"][1:], vanilla["durations"][1:], norm_by=num_epochs, max_diff=max_diff_speed
    )

    assert_parity_relative(lightning["memory"], vanilla["memory"], max_diff=max_diff_memory)


def _hook_memory():
    """Return the peak CUDA memory allocated (in bytes), or NaN when no GPU is available."""
    if torch.cuda.is_available():
        torch.cuda.synchronize()
        used_memory = torch.cuda.max_memory_allocated()
    else:
        used_memory = np.nan
    return used_memory
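
# Note: `torch.cuda.max_memory_allocated()` reports the peak since program start (or since
# the last `torch.cuda.reset_peak_memory_stats()` call), so successive runs in one process
# share the same high-water mark unless the peak stats are reset between measurements.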


def vanilla_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
    """Train ``cls_model`` with a hand-written PyTorch loop; return the last epoch loss and peak memory."""
    device = torch.device(device_type)
    # set seed
    seed_everything(idx)

    # init model parts
    model = cls_model()
    dl = model.train_dataloader()
    optimizer = model.configure_optimizers()

    # model to GPU
    model = model.to(device)

    epoch_losses = []
    # as the first run is skipped, no need to run it long
    for epoch in range(num_epochs if idx > 0 else 1):
        # run through full training set
        for j, batch in enumerate(dl):
            batch = [x.to(device) for x in batch]
            loss_dict = model.training_step(batch, j)
            loss = loss_dict["loss"]
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        # track last epoch loss
        epoch_losses.append(loss.item())

    return epoch_losses[-1], _hook_memory()
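
# This hand-rolled loop mirrors what the Lightning `Trainer` below automates: device
# placement, the epoch and batch iteration, backward, optimizer step, and zeroing grads.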


def lightning_loop(cls_model, idx, device_type: str = "cuda", num_epochs=10):
    """Train ``cls_model`` with the Lightning ``Trainer``; return the last recorded loss and peak memory."""
    seed_everything(idx)

    model = cls_model()
    # init the trainer with everything non-essential disabled
    trainer = Trainer(
        # as the first run is skipped, no need to run it long
        max_epochs=num_epochs if idx > 0 else 1,
        enable_progress_bar=False,
        enable_model_summary=False,
        enable_checkpointing=False,
        accelerator="gpu" if device_type == "cuda" else "cpu",
        devices=1,
        logger=False,
        use_distributed_sampler=False,
        benchmark=False,
    )
    trainer.fit(model)

    # `model._loss` is maintained by the Parity* modules, which record each training-step loss
    return model._loss[-1], _hook_memory()
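

if __name__ == "__main__":
    # Minimal manual sketch (not part of the test suite): run one short parity comparison
    # directly. Assumes a CUDA-capable machine, since both loops default to device_type="cuda".
    pt_loss, pt_mem = vanilla_loop(ParityModuleRNN, idx=1, num_epochs=2)
    pl_loss, pl_mem = lightning_loop(ParityModuleRNN, idx=1, num_epochs=2)
    print(f"vanilla:   loss={pt_loss:.5f}  peak_mem={pt_mem}")
    print(f"lightning: loss={pl_loss:.5f}  peak_mem={pl_mem}")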