import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms

# set the parameters
num_workers = 0
batch_size = 20

# convert the images to tensors using transforms
transform = transforms.ToTensor()

train_data = datasets.MNIST(root='data', train=True,
                            download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
                           download=True, transform=transform)

# loading the data
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
                                          num_workers=num_workers)

# plot with matplotlib
import matplotlib.pyplot as plt
%matplotlib inline

# iter() turns the loader into an iterator; next() pulls one batch of batch_size images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()

# figsize sets the width and height of the figure
fig = plt.figure(figsize=(25, 4))
for image in np.arange(20):
    # add_subplot(a, b, c): a is the number of subplot rows, b the number of
    # columns, c the position of the current subplot
    ax = fig.add_subplot(2, 20 // 2, image + 1, xticks=[], yticks=[])
    # np.squeeze removes single-dimensional entries from the shape,
    # i.e. drops every axis whose size is 1
    ax.imshow(np.squeeze(images[image]), cmap='gray')
    ax.set_title(str(labels[image].item()))
Convolution Visualization
Applying convolutions to images comes with no rigorous mathematical derivation, i.e. there is no derivation that tells us what each layer actually represents. To understand how each layer of a convolutional neural network corresponds to the original image, the paper Visualizing and Understanding Convolutional Networks implements this mapping through deconvolution (transposed convolution). How does that work concretely? The resulting h1 and the true...
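The paper's method runs a full deconvnet (unpooling plus transposed filters) back to pixel space, which is beyond the scope of this post. As a simpler, hedged illustration of inspecting what an individual layer produces, the sketch below registers a forward hook on an assumed toy model; the model, the layer choice, and the input size are placeholders, not the paper's setup, and the captured activations can then be plotted just like the MNIST images above.

import torch
import torch.nn as nn

# a tiny assumed CNN; the model in the paper (ZFNet) is far larger
model = nn.Sequential(
    nn.Conv2d(1, 8, kernel_size=3, padding=1),  # the conv layer we want to inspect
    nn.ReLU(),
    nn.MaxPool2d(2),
)

activations = {}

def save_activation(name):
    # forward hook: stash the layer's output so it can be visualized later
    def hook(module, inp, out):
        activations[name] = out.detach()
    return hook

model[0].register_forward_hook(save_activation('conv1'))

x = torch.randn(1, 1, 28, 28)      # stand-in for one MNIST image
_ = model(x)
print(activations['conv1'].shape)  # torch.Size([1, 8, 28, 28])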
Feature Visualization
A colormap pairs a visual representation with numerical values: the color pattern at the left end stands for lower gray values, and the one at the right end for higher gray values. After obtaining the feature map fea, compute heatmap = np.mean(fea, axis=1) (average over the channels), then heatmap = np.maximum(heatmap, 0) (compare each element with 0 and keep the larger), then heatma...
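The original text cuts off here, so the final normalization step below is an assumption (the usual scaling to [0, 1]). A minimal end-to-end sketch of this heatmap follows; fea is random stand-in data of shape (C, H, W), so the channel average is taken over axis 0 rather than axis 1.

import numpy as np
import matplotlib.pyplot as plt

# stand-in feature map of shape (C, H, W); in practice take it from a real
# layer, e.g. activations['conv1'][0] captured by the hook sketch above
fea = np.random.randn(8, 28, 28)

heatmap = np.mean(fea, axis=0)    # average over the channel axis
heatmap = np.maximum(heatmap, 0)  # keep only the positive responses
if heatmap.max() > 0:
    heatmap /= heatmap.max()      # assumed normalization to [0, 1] for display

plt.imshow(heatmap, cmap='jet')   # low values map to one end of the colormap, high to the other
plt.colorbar()
plt.show()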
Convolution Visualization, Improvised
First read a 127×127×3 RGB image, then convert it to a tensor of shape 1×3×127×127, run a 1×1 convolution, convert the resulting tensor back to NumPy, and finally reshape the NumPy array into a displayable 1×127×127 image; the sketch below walks through these steps. Before the convolution: crop_z = tensor.size(...), conv2 = torch.nn.Conv...
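A minimal sketch of those steps, assuming a placeholder image path 'photo.jpg' and a randomly initialized 1×1 convolution; the original snippet's crop_z and conv2 definitions are cut off, so the names and weights here are illustrative only.

import torch
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

# 'photo.jpg' is a placeholder path; any RGB image works (resized to 127x127 here)
img = Image.open('photo.jpg').convert('RGB').resize((127, 127))

to_tensor = transforms.ToTensor()
x = to_tensor(img).unsqueeze(0)          # (3, 127, 127) -> (1, 3, 127, 127)

# 1x1 convolution collapsing the 3 RGB channels into a single channel
conv2 = torch.nn.Conv2d(in_channels=3, out_channels=1, kernel_size=1)
with torch.no_grad():
    y = conv2(x)                         # (1, 1, 127, 127)

# back to NumPy, dropping the size-1 dimensions for plotting
out = np.squeeze(y.numpy())              # (127, 127)
plt.imshow(out, cmap='gray')
plt.show()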