# app.py
# ! pip install torch
import torch
from torch.nn.parallel.distributed import DistributedDataParallel

import lightning as L
from lightning.app.components import MultiNode


def distributed_train(local_rank: int, main_address: str, main_port: int, num_nodes: int, node_rank: int, nprocs: int):
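    """Run a toy DDP training loop on one worker; called once per process by torch.multiprocessing.spawn."""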
    # 1. SET UP DISTRIBUTED ENVIRONMENT
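    # Each node runs `nprocs` worker processes, so the global rank is the local rank
    # offset by node_rank * nprocs and the world size is num_nodes * nprocs.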
    global_rank = local_rank + node_rank * nprocs
    world_size = num_nodes * nprocs
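
    # The process group uses NCCL when CUDA is available and Gloo otherwise; every worker
    # joins the same TCP rendezvous on the main node.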
    if torch.distributed.is_available() and not torch.distributed.is_initialized():
        torch.distributed.init_process_group(
            "nccl" if torch.cuda.is_available() else "gloo",
            rank=global_rank,
            world_size=world_size,
            init_method=f"tcp://{main_address}:{main_port}",
        )

    # 2. PREPARE DISTRIBUTED MODEL
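    # A toy single-layer model; DistributedDataParallel keeps gradients synchronized across
    # workers and pins each replica to its local GPU when CUDA is available.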
    model = torch.nn.Linear(32, 2)
    device = torch.device(f"cuda:{local_rank}") if torch.cuda.is_available() else torch.device("cpu")
    model = DistributedDataParallel(model.to(device), device_ids=[local_rank] if torch.cuda.is_available() else None)

    # 3. SETUP LOSS AND OPTIMIZER
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    # 4. TRAIN THE MODEL FOR 50 STEPS
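    # Random inputs against a constant target; DDP averages gradients across all workers
    # during backward(), so every replica applies the same update.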
    for step in range(50):
        model.zero_grad()
        x = torch.randn(64, 32).to(device)
        output = model(x)
        loss = criterion(output, torch.ones_like(output))
        print(f"global_rank: {global_rank} step: {step} loss: {loss}")
        loss.backward()
        optimizer.step()

    # 5. VERIFY ALL COPIES OF THE MODEL HAVE THE SAME WEIGHTS AT END OF TRAINING
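    # all_reduce sums the weight across all workers, so the sum divided by world_size
    # matches the local weight only if every replica ended up identical.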
    weight = model.module.weight.clone()
    torch.distributed.all_reduce(weight)
    assert torch.equal(model.module.weight, weight / world_size)

    print("Multi Node Distributed Training Done!")


class PyTorchDistributed(L.LightningWork):
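    """Spawns one training process per local GPU (or a single CPU process) on this node."""
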
    def run(self, main_address: str, main_port: int, num_nodes: int, node_rank: int):
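        # torch.multiprocessing.spawn passes the process index as the first argument,
        # which distributed_train receives as local_rank.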
        nprocs = torch.cuda.device_count() if torch.cuda.is_available() else 1
        torch.multiprocessing.spawn(
            distributed_train, args=(main_address, main_port, num_nodes, node_rank, nprocs), nprocs=nprocs
        )


# 8 GPUs: (2 nodes x 4 V100)
compute = L.CloudCompute("gpu-fast-multi") # 4xV100
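# MultiNode runs one PyTorchDistributed work per node, passing the main node's address
# and port, the node count, and this node's rank to run().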
component = MultiNode(PyTorchDistributed, num_nodes=2, cloud_compute=compute)
app = L.LightningApp(component)