import pandas as pd
import torch
from torch.autograd import Variable  # not used below
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
url = ''  # dataset URL was blank in the original
df = pd.read_csv(url)

vowels = df['vowel'].unique()
print(vowels)

sample_num = df['vowel'].shape[0]
vowels_dict = dict(zip(vowels, range(len(vowels))))  # vowel -> class index
print(vowels_dict)

features = np.zeros((sample_num, 2))
labels = np.zeros((sample_num, 1))
print(features.shape)
# build labels
for i, vowel in enumerate(df['vowel']):
    labels[i] = vowels_dict[vowel]
print(labels)
#build features and normalize
defnormalize(x):
return (x-min(x))/(max(x)-min(x))
features[:, 0] = normalize(df['f1'
])features[:, 1] = normalize(df['
f2'])
1. Encode the labels as integers; since nn.CrossEntropyLoss() is used later, there is no need to convert them to one-hot vectors (see the sketch below).
2. Normalize the training data.
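As a quick check of point 1, here is a minimal sketch (reusing the imports above; the batch size of 4 and class count of 12 are illustrative, not from the dataset) showing that nn.CrossEntropyLoss() consumes raw logits and integer class indices directly:

loss_func = nn.CrossEntropyLoss()
logits = torch.randn(4, 12)            # batch of 4, 12 classes (illustrative)
targets = torch.tensor([0, 3, 7, 11])  # integer class indices, no one-hot needed
print(loss_func(logits, targets))      # a scalar loss tensor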
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
import torch.utils.data as data

batch_size = 32
train_data, test_data, train_targets, test_targets = train_test_split(features, labels)

torch_dataset = data.TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_targets))
training_data = DataLoader(torch_dataset, batch_size=batch_size, shuffle=True)

torch_dataset = data.TensorDataset(torch.from_numpy(test_data), torch.from_numpy(test_targets))
testing_data = DataLoader(torch_dataset, batch_size=batch_size)
1. With features and labels ready, do the train/valid/test split (the code above only does a train/test split; a three-way sketch follows below).
2. Feed the tensors into PyTorch's built-in DataLoader, setting the batch_size.
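Since the note mentions a train/valid/test split but the code above only produces train and test sets, here is a minimal sketch of a full three-way split; the 0.2/0.25 ratios are assumptions, not from the original:

train_data, test_data, train_targets, test_targets = train_test_split(
    features, labels, test_size=0.2)
train_data, valid_data, train_targets, valid_targets = train_test_split(
    train_data, train_targets, test_size=0.25)  # 0.25 of the remaining 80% = 20% overall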
class MLPNet(nn.Module):
    def __init__(self, input_dim=2, hid_dim=100, output_dim=vowels.shape[0]):
        super(MLPNet, self).__init__()
        self.nn1 = nn.Linear(input_dim, hid_dim)
        self.nn2 = nn.Linear(hid_dim, output_dim)

    def forward(self, x):
        # print(x)
        x = self.nn1(x)
        x = F.relu(x)
        x = self.nn2(x)
        return x
The two key parts are __init__() and forward(): __init__() mainly defines layers from torch.nn, while forward() mainly calls functions from torch.nn.functional. An equivalent module-only variant is sketched below.
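For comparison, a sketch of the same network written purely with torch.nn modules (nn.ReLU as a layer instead of F.relu in forward()); the class name MLPNetSequential is mine, not the author's:

class MLPNetSequential(nn.Module):
    def __init__(self, input_dim=2, hid_dim=100, output_dim=vowels.shape[0]):
        super().__init__()
        # activation defined as a module in __init__ instead of F.relu in forward()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hid_dim),
            nn.ReLU(),
            nn.Linear(hid_dim, output_dim),
        )

    def forward(self, x):
        return self.net(x)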
model = MLPNet()
loss_func = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.02, momentum=0.9)
print(model)
For classification problems, nn.CrossEntropyLoss() is the simplest choice; it combines LogSoftmax and NLLLoss in a single step, as the sketch below verifies.
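A quick sketch (random logits with illustrative shapes, reusing the imports above) confirming that CrossEntropyLoss equals LogSoftmax followed by NLLLoss:

logits = torch.randn(4, 12)            # batch of 4, 12 classes (illustrative)
targets = torch.tensor([0, 3, 7, 11])
a = nn.CrossEntropyLoss()(logits, targets)
b = F.nll_loss(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(a, b))            # True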
def train_func(training_data):
    train_loss = 0
    train_acc = 0
    for i, (x, y) in enumerate(training_data):
        y = y.squeeze(1).long()   # (batch, 1) -> (batch,), int64 as the loss expects
        optimizer.zero_grad()
        output = model(x.float()) # inputs come from numpy as float64
        loss = loss_func(output, y)
        train_loss += loss.item()
        loss.backward()
        optimizer.step()
        train_acc += (output.argmax(1) == y).sum().item()
    return train_loss / len(train_targets), train_acc / len(train_targets)
def test_func(testing_data):
    test_loss = 0
    test_acc = 0
    for i, (x, y) in enumerate(testing_data):
        y = y.squeeze(1).long()
        with torch.no_grad():
            output = model(x.float())
            loss = loss_func(output, y)
            test_loss += loss.item()
            test_acc += (output.argmax(1) == y).sum().item()
    return test_loss / len(test_targets), test_acc / len(test_targets)
Watch for a few details: tensor dtypes need casting to float/long, and mismatched shapes need squeeze/unsqueeze, and so on. One way to avoid the per-batch casts is sketched below.
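Instead of casting inside the loop, the conversions could be done once when building the dataset; a sketch reusing the arrays from the split above (not how the original does it):

torch_dataset = data.TensorDataset(
    torch.from_numpy(train_data).float(),               # float32 features
    torch.from_numpy(train_targets).squeeze(1).long())  # 1-D int64 labels
training_data = DataLoader(torch_dataset, batch_size=batch_size, shuffle=True)

With this, the x.float() and y.squeeze(1).long() calls in train_func/test_func could be dropped.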
n_epochs = 2001
import time

best_test_acc = 0
for epoch in range(n_epochs):
    start_time = time.time()
    train_loss, train_acc = train_func(training_data)
    valid_loss, valid_acc = test_func(testing_data)
    best_test_acc = max(valid_acc, best_test_acc)

    secs = int(time.time() - start_time)
    mins = secs // 60
    secs = secs % 60

    if epoch % 50 == 0:
        print('Epoch: %d' % (epoch + 1), '| time in %d minutes, %d seconds' % (mins, secs))
        print(f'\tLoss: {train_loss:.4f}(train)\t|\tAcc: {train_acc * 100:.1f}%(train)')
        print(f'\tLoss: {valid_loss:.4f}(test)\t|\tAcc: {valid_acc * 100:.1f}%(test)')
        print(f'Best testing acc: {best_test_acc * 100:.1f}%')
Reference: "How to build a simple neural network with PyTorch?" (如何用pytorch搭建乙個簡單的神經網路?)