PyTorch is a Python-based scientific computing library used mainly for deep learning research, especially neural networks. In PyTorch, you can use the torch.nn module to build a convolutional neural network (CNN). Here is a simple CNN example:
import torch
import torch.nn as nn
import torch.optim as optim

class SimpleCNN(nn.Module):
    def __init__(self, num_classes=10):
        super(SimpleCNN, self).__init__()
        # Convolutional layer 1
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1)
        # Activation function
        self.relu1 = nn.ReLU()
        # Pooling layer
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Convolutional layer 2
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1)
        # Activation function
        self.relu2 = nn.ReLU()
        # Pooling layer
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Fully connected layer (assumes 100x100 input images, so the feature maps
        # are 32 x 25 x 25 after two rounds of 2x2 max pooling)
        self.fc1 = nn.Linear(in_features=32 * 25 * 25, out_features=1024)
        self.relu3 = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        # Output layer
        self.fc2 = nn.Linear(in_features=1024, out_features=num_classes)
    def forward(self, x):
        # First convolution, activation, and pooling block
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        # Second convolution, activation, and pooling block
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        # Flatten the feature maps
        x = x.view(x.size(0), -1)
        # Fully connected layer, activation, and dropout
        x = self.fc1(x)
        x = self.relu3(x)
        x = self.dropout(x)
        # Output logits
        x = self.fc2(x)
        return x
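The in_features=32 * 25 * 25 of fc1 is tied to the input resolution, which the snippet never states. Each convolution here preserves the spatial size (kernel_size=3, padding=1, stride=1) and each MaxPool2d halves it, so the number only works out if the input images are 100x100 (an assumption made for this example). A minimal sketch of that arithmetic:

# Sketch: deriving fc1's in_features, assuming 100x100 RGB input (not stated in the original code)
h = w = 100            # assumed input height/width
h, w = h // 2, w // 2  # after pool1 -> 50 x 50
h, w = h // 2, w // 2  # after pool2 -> 25 x 25
channels = 32          # out_channels of conv2
print(channels * h * w)  # 32 * 25 * 25 = 20000, matching fc1's in_features

If your images have a different size, this product (and therefore fc1) must be adjusted accordingly.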
# Instantiate the network
num_classes = 10
model = SimpleCNN(num_classes)

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
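The training loop below uses num_epochs and train_loader, neither of which is defined in the original snippet. Here is a minimal, self-contained sketch with random tensors standing in for a real dataset (in practice you would build the DataLoader from your own data, e.g. via torchvision):

# Placeholder training data so the loop below can run; replace with your real dataset.
from torch.utils.data import DataLoader, TensorDataset

num_epochs = 5                                   # example value, not from the original snippet
images = torch.randn(64, 3, 100, 100)            # 64 random RGB images at the assumed 100x100 resolution
labels = torch.randint(0, num_classes, (64,))    # random class labels in [0, num_classes)
train_loader = DataLoader(TensorDataset(images, labels), batch_size=16, shuffle=True)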
# Train the network
for epoch in range(num_epochs):
    for images, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
In this example, we defined a simple CNN with two convolutional layers, two pooling layers, and two fully connected layers. You can adjust the network structure to fit your own task and dataset.
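If you change the input resolution or the convolution and pooling settings, remember that fc1's in_features must change with them. A quick way to check the shapes is a forward pass on a dummy batch (again assuming the 100x100 input used above):

# Sanity check: run a dummy batch through the model and inspect the output shape.
model.eval()                         # disable dropout for the check
dummy = torch.randn(2, 3, 100, 100)  # batch of 2 images at the assumed resolution
with torch.no_grad():
    out = model(dummy)
print(out.shape)                     # expected: torch.Size([2, 10])

A shape mismatch error here usually means the flattened feature size no longer matches fc1's in_features.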