steps
1 使用 torchvision 載入和歸一化 CIFAR-10 訓練集和測試集
2 定義一個卷積網路
3 定義損失函式
4 在訓練集上訓練
5 在測試集上測試
# Convert images to tensors and normalize each RGB channel from [0, 1]
# to [-1, 1] (mean 0.5, std 0.5 per channel).
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# Training set: shuffled, 4 images per mini-batch.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)

# Test set: same transform, no shuffling.
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)

# Human-readable names for the 10 CIFAR-10 class indices.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
顯示訓練集中的一些影象
import matplotlib.pyplot as plt
import numpy as np


def imshow(img):
    """Display a (C, H, W) image tensor that was normalized to [-1, 1]."""
    img = img / 2 + 0.5  # undo the Normalize((0.5,...), (0.5,...)) transform
    npimg = img.numpy()
    # matplotlib expects channel-last (H, W, C) ordering.
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
# Fetch one (shuffled) mini-batch from the training loader —
# with batch_size=4 this yields four images and their labels.
dataiter = iter(trainloader)
images, labels = next(dataiter)  # next(it), not the removed it.next()
# Render the four images side by side as a single grid.
imshow(torchvision.utils.make_grid(images))
# Print the class name for each of the four labels.
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
定義一個卷積網路
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """LeNet-style CNN for 32x32 RGB input (CIFAR-10): 2 conv + 3 FC layers."""

    def __init__(self):
        super().__init__()
        # 3 input channels -> 6 feature maps, 5x5 kernel.
        self.conv1 = nn.Conv2d(3, 6, 5)
        # 2x2 max pooling with stride 2, reused after each conv.
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Two conv+pool stages turn a 32x32 image into 16 maps of 5x5.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)  # raw logits for the 10 classes

    def forward(self, x):
        """Map a (batch, 3, 32, 32) tensor to (batch, 10) class logits."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)  # flatten to (batch, 400)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


net = Net()
定義損失函式和優化器
使用交叉熵損失函式,以及帶動量的隨機梯度下降
import torch.optim as optim

# Cross-entropy loss over raw logits; SGD with momentum for optimization.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# Train the network: two full passes over the training set.
for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # Unpack one mini-batch of inputs and labels.
        inputs, labels = data
        # Zero the parameter gradients (PyTorch accumulates them by default).
        optimizer.zero_grad()
        # Forward pass, backward pass, parameter update.
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # Report the running average loss every 2000 mini-batches.
        running_loss += loss.item()
        if i % 2000 == 1999:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0

print('finished training')
測試網路
# Grab one mini-batch from the test set and show its ground-truth labels.
dataiter = iter(testloader)
images, labels = next(dataiter)  # next(it), not the removed it.next()
# Display the four test images as a grid.
imshow(torchvision.utils.make_grid(images))
print('groundtruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
測試結果
# Measure overall accuracy on the full test set.
correct = 0
total = 0
with torch.no_grad():  # inference only — no gradients needed
    for data in testloader:
        images, labels = data
        outputs = net(images)
        # Predicted class = index of the largest logit per sample.
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
對每個類的識別準確率
# Per-class accuracy counters, one slot per CIFAR-10 class.
class_correct = [0.0] * 10
class_total = [0.0] * 10
with torch.no_grad():
    for data in testloader:
        images, labels = data
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        # Boolean vector: which of the batch's predictions were correct.
        c = (predicted == labels).squeeze()
        # NOTE(review): iterates exactly 4 items — assumes testloader's
        # batch_size is 4 and the set size is divisible by 4.
        for i in range(4):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1

for i in range(10):
    print('accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
利用 PyTorch 做一個簡單的回歸網路
最近整理了一下pytorch的筆記,把很簡單的pytorch搭建乙個回歸網路的code貼上來 import torch import torch.nn.functional as f x torch.unsqueeze torch.linspace 2,2,100 dim 1 在 1,1 這個區間內...
一個經典的 PyTorch 神經網路分類模型訓練框架
import pandas as pd import torch from torch.autograd import variable import torch.nn as nn import torch.nn.functional as f import torch.optim as optim...
第一個 PyTorch 示例
注 僅供記錄所用 pytorch實現第乙個兩層神經網路 1.普通pytorch實現 import torch n,d in,h,d out 64,1000,100,10 64個訓練資料,輸入1000維,隱藏層100維,輸出層10維 產生訓練資料 x torch.randn n,d in y torc...