On Ubuntu, PyTorch's parallel computing capabilities can be exploited in several ways, chiefly data parallelism, model parallelism, and distributed training. The sections below walk through each approach with concrete steps and code.
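Before choosing an approach, it is worth confirming what PyTorch can actually see on the machine. A minimal check, assuming an NVIDIA driver and a CUDA-enabled PyTorch build are already installed:

import torch

# Report whether CUDA is usable and which GPUs are visible.
if torch.cuda.is_available():
    print("CUDA available, GPU count:", torch.cuda.device_count())
    for i in range(torch.cuda.device_count()):
        print(f"  cuda:{i} -> {torch.cuda.get_device_name(i)}")
else:
    print("No usable GPU found; training will fall back to the CPU.")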
Data parallelism is the most common approach. It replicates the model onto each GPU, lets every GPU process a different slice of each batch, and then gathers the results. PyTorch implements this with the torch.nn.DataParallel class.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

# Define a simple model
class SimpleModel(nn.Module):
    def __init__(self):
        super(SimpleModel, self).__init__()
        self.fc1 = nn.Linear(10, 5)
        self.fc2 = nn.Linear(5, 2)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x

# Instantiate the model
model = SimpleModel()

# Wrap the model with DataParallel when more than one GPU is available
if torch.cuda.device_count() > 1:
    print("Using", torch.cuda.device_count(), "GPUs")
    model = nn.DataParallel(model)

# Move the model to the GPU
model.cuda()

# Loss function and optimizer
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# Data loader: pair random inputs with random targets so the loop below has both
dataset = TensorDataset(torch.randn(32, 10), torch.randn(32, 2))
data_loader = DataLoader(dataset, batch_size=4, num_workers=4)

# Training loop
for epoch in range(10):
    for data, target in data_loader:
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
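A small follow-up worth knowing, sketched here under the assumption that the model trained above is still in scope (the file name simple_model.pt is just an example): nn.DataParallel keeps the original network under its .module attribute, so saving that state dict keeps the checkpoint loadable by a plain, unwrapped model later.

# Save the underlying model's weights, not the DataParallel wrapper's
state_dict = model.module.state_dict() if isinstance(model, nn.DataParallel) else model.state_dict()
torch.save(state_dict, "simple_model.pt")

# Loading into a fresh, unwrapped model
restored = SimpleModel()
restored.load_state_dict(torch.load("simple_model.pt"))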
For very large models, model parallelism becomes necessary: different parts of the model are placed on different GPUs so that no single GPU has to hold the whole network in memory. In PyTorch this is typically done by assigning sub-modules to devices with .to() and moving activations between GPUs inside forward(). Note that torch.nn.parallel.DistributedDataParallel, used in the next section, implements multi-process data parallelism rather than model parallelism.
import torch
import torch.nn as nn
import torch.optim as optim

# Split the model across two GPUs: each sub-module lives on its own device
# and the forward pass moves activations between them.
# (Assumes at least two visible GPUs: cuda:0 and cuda:1.)
class ModelParallelModel(nn.Module):
    def __init__(self):
        super(ModelParallelModel, self).__init__()
        self.fc1 = nn.Linear(10, 5).to('cuda:0')
        self.fc2 = nn.Linear(5, 2).to('cuda:1')

    def forward(self, x):
        x = torch.relu(self.fc1(x.to('cuda:0')))
        x = self.fc2(x.to('cuda:1'))
        return x

model = ModelParallelModel()
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# Targets live on the same device as the model's output (cuda:1)
inputs = torch.randn(32, 10)
targets = torch.randn(32, 2).to('cuda:1')

for epoch in range(10):
    optimizer.zero_grad()
    output = model(inputs)
    loss = criterion(output, targets)
    loss.backward()
    optimizer.step()
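With a naive split like this, only one GPU is busy at any moment while activations shuttle between devices, so model parallelism trades throughput for memory headroom; in practice it is often combined with the data-parallel and distributed techniques in the surrounding sections to win some of that throughput back.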
Distributed training coordinates multiple processes, and potentially multiple machines (each with one or more GPUs), to train the model together, extending parallelism further. PyTorch's torch.distributed package provides the building blocks.
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader, TensorDataset

def train(rank, world_size):
    # Each spawned process joins the same process group (single-node example)
    os.environ.setdefault('MASTER_ADDR', 'localhost')
    os.environ.setdefault('MASTER_PORT', '29500')
    dist.init_process_group("nccl", rank=rank, world_size=world_size)

    # SimpleModel as defined in the data-parallel example above
    model = SimpleModel().to(rank)
    ddp_model = DDP(model, device_ids=[rank])
    criterion = nn.MSELoss()
    optimizer = optim.SGD(ddp_model.parameters(), lr=0.01)

    # Each process builds its own data loader
    dataset = TensorDataset(torch.randn(32, 10), torch.randn(32, 2))
    data_loader = DataLoader(dataset, batch_size=4)

    for epoch in range(10):
        for data, target in data_loader:
            data, target = data.to(rank), target.to(rank)
            optimizer.zero_grad()
            output = ddp_model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

    dist.destroy_process_group()

if __name__ == '__main__':
    world_size = 4  # number of GPUs/processes on this node
    mp.spawn(train, args=(world_size,), nprocs=world_size, join=True)
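In a real run each process should also read a different shard of the dataset. A minimal sketch using torch.utils.data.distributed.DistributedSampler, assuming the dataset plus the rank and world_size values from the train() function above:

from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

# Each rank gets a disjoint shard of the dataset
sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=True)
data_loader = DataLoader(dataset, batch_size=4, sampler=sampler)

for epoch in range(10):
    # Re-seed the sampler so shuffling differs across epochs
    sampler.set_epoch(epoch)
    for data, target in data_loader:
        ...  # forward/backward as in the loop above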
Data loading and preprocessing are often the bottleneck during training. Loading batches with multiple worker processes can speed this up noticeably; torch.utils.data.DataLoader supports it through the num_workers argument.
import torch
from torch.utils.data import DataLoader, Dataset

class CustomDataset(Dataset):
    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

dataset = CustomDataset(torch.randn(1000, 10))

# num_workers=4 loads batches in four background worker processes
dataloader = DataLoader(dataset, batch_size=32, num_workers=4)

for batch in dataloader:
    print(batch)
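Beyond num_workers, a few other DataLoader arguments commonly help on a GPU machine; the values below are reasonable starting points rather than prescriptions:

# pin_memory speeds up host-to-GPU copies; persistent_workers keeps the
# worker processes alive between epochs; prefetch_factor controls how many
# batches each worker prepares ahead of time.
dataloader = DataLoader(
    dataset,
    batch_size=32,
    num_workers=4,
    pin_memory=True,
    persistent_workers=True,
    prefetch_factor=2,
)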
Synchronized batch normalization shares batch statistics across all GPUs, which can improve accuracy when the per-GPU batch size is small, at the cost of extra communication. PyTorch provides the torch.nn.SyncBatchNorm class for this; it works under DistributedDataParallel, not under DataParallel.
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
class SimpleModel(nn.Module):
    def __init__(self):
        super(SimpleModel, self).__init__()
        self.fc1 = nn.Linear(10, 5)
        self.bn1 = nn.BatchNorm1d(5)
        self.fc2 = nn.Linear(5, 2)

    def forward(self, x):
        x = self.fc1(x)
        x = self.bn1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x
# Inside a per-process train(rank, world_size) function, as in the DDP example above:
model = SimpleModel()
# Replace every BatchNorm layer with its synchronized counterpart,
# then wrap with DDP; SyncBatchNorm does not work with nn.DataParallel.
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
ddp_model = DDP(model.to(rank), device_ids=[rank])
With the steps and techniques above, you can make full use of PyTorch's parallel computing capabilities on Ubuntu and let one or more GPUs accelerate the training process.