# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
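"""Parity test: a single-device training run driven by Lightning Fabric should match an equivalent raw PyTorch
loop in final weights, per-iteration timing, and CUDA memory usage."""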
import time
from copy import deepcopy
from typing import Callable

import pytest
import torch
import torch.distributed
import torch.nn.functional
from lightning.fabric.fabric import Fabric
from tests_fabric.helpers.runif import RunIf

from parity_fabric.models import ConvNet
from parity_fabric.utils import (
    cuda_reset,
    get_model_input_dtype,
    is_cuda_memory_close,
    is_state_dict_equal,
    is_timing_close,
    make_deterministic,
)


def train_torch(
    move_to_device: Callable,
    precision_context,
    input_dtype=torch.float32,
):
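    """Train the ConvNet with a plain PyTorch loop to produce reference results.

    Returns the final state dict, the per-iteration timings, and CUDA memory stats for comparison against the
    Fabric run.
    """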
    # Make the run deterministic so it is comparable with the Fabric run
    make_deterministic(warn_only=True)
    memory_stats = {}

    model = ConvNet()
    model = move_to_device(model)
    dataloader = model.get_dataloader()
    optimizer = model.get_optimizer()
    loss_fn = model.get_loss_function()

    memory_stats["start"] = torch.cuda.memory_stats()

    model.train()
    iteration_timings = []
    iterator = iter(dataloader)
    for _ in range(model.num_steps):
        t0 = time.perf_counter()

        # Raw PyTorch has no automatic device placement, so each batch is moved manually
        inputs, labels = next(iterator)
        inputs, labels = move_to_device(inputs), move_to_device(labels)
        optimizer.zero_grad()
        with precision_context():
            outputs = model(inputs.to(input_dtype))
            # Cast the (possibly lower-precision) outputs back to float32 before computing the loss
            loss = loss_fn(outputs.float(), labels)
        loss.backward()
        optimizer.step()

        t1 = time.perf_counter()
        iteration_timings.append(t1 - t0)

    memory_stats["end"] = torch.cuda.memory_stats()

    return model.state_dict(), torch.tensor(iteration_timings), memory_stats


def train_fabric(fabric):
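    """Train the same ConvNet through Lightning Fabric, collecting the same metrics as ``train_torch``."""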
    make_deterministic(warn_only=True)
    memory_stats = {}

    model = ConvNet()
    initial_state_dict = deepcopy(model.state_dict())

    optimizer = model.get_optimizer()
    # fabric.setup() moves the model to the target device and applies the precision plugin
    model, optimizer = fabric.setup(model, optimizer)

    dataloader = model.get_dataloader()
    # batches from a dataloader wrapped by setup_dataloaders() already arrive on the target device
    dataloader = fabric.setup_dataloaders(dataloader)
    loss_fn = model.get_loss_function()

    memory_stats["start"] = torch.cuda.memory_stats()

    model.train()
    iteration_timings = []
    iterator = iter(dataloader)
    for _ in range(model.num_steps):
        t0 = time.perf_counter()

        inputs, labels = next(iterator)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_fn(outputs, labels)
        # fabric.backward() takes the place of loss.backward() and applies any precision-related scaling
        fabric.backward(loss)
        optimizer.step()

        t1 = time.perf_counter()
        iteration_timings.append(t1 - t0)

    memory_stats["end"] = torch.cuda.memory_stats()

    # check that the model has changed
    assert not is_state_dict_equal(initial_state_dict, model.state_dict())

    return model.state_dict(), torch.tensor(iteration_timings), memory_stats


@pytest.mark.flaky(reruns=3)
@pytest.mark.usefixtures("reset_deterministic_algorithm", "reset_cudnn_benchmark")
@pytest.mark.parametrize(
    ("precision", "accelerator"),
    [
        (32, "cpu"),
        pytest.param(32, "cuda", marks=RunIf(min_cuda_gpus=1)),
        # pytest.param(16, "cuda", marks=RunIf(min_cuda_gpus=1)),  # TODO: requires GradScaler
        pytest.param("bf16", "cpu", marks=RunIf(skip_windows=True)),
        pytest.param("bf16", "cuda", marks=RunIf(min_cuda_gpus=1, bf16_cuda=True)),
        pytest.param(32, "mps", marks=RunIf(mps=True)),
    ],
)
def test_parity_single_device(precision, accelerator):
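    """Check that Fabric matches raw PyTorch on a single device: same final weights, similar per-iteration
    timings, and (on CUDA) similar memory usage."""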
    input_dtype = get_model_input_dtype(precision)

    cuda_reset()

    # Train with Fabric
    fabric = Fabric(precision=precision, accelerator=accelerator, devices=1)
    state_dict_fabric, timings_fabric, memory_fabric = train_fabric(fabric)

    cuda_reset()

    # Train with raw PyTorch
    state_dict_torch, timings_torch, memory_torch = train_torch(
        fabric.to_device, precision_context=fabric.autocast, input_dtype=input_dtype
    )

    # Compare the final weights
    assert is_state_dict_equal(state_dict_torch, state_dict_fabric)

    # Compare the time per iteration
    assert is_timing_close(timings_torch, timings_fabric, rtol=1e-2, atol=0.1)

    # Compare memory usage
    if accelerator == "cuda":
        assert is_cuda_memory_close(memory_torch["start"], memory_fabric["start"])
        assert is_cuda_memory_close(memory_torch["end"], memory_fabric["end"])
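

# A plausible way to run just this parity test locally (the path below is an assumption and may differ
# per checkout):
#
#   pytest tests/parity_fabric/test_parity.py -k test_parity_single_device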