import torch
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as data
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import pandas as pd
import numpy as np
from torch.autograd import Variable
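# thirdtest.csv is not included with the post. Assuming the layout the code
# below expects (column 0: integer class label in {0, 1, 2}; columns 1~16:
# 16 float features), a synthetic stand-in can be generated so the script runs:
import os
if not os.path.exists('thirdtest.csv'):
    rng = np.random.default_rng(0)
    labels = rng.integers(0, 3, size=(1000, 1))   # 3 classes, matches the 3 output units
    features = rng.normal(size=(1000, 16))        # 16 features, matches the 16 input units
    pd.DataFrame(np.hstack([labels, features])).to_csv('thirdtest.csv', index=False)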
# dataset
train = pd.read_csv('thirdtest.csv')
# take column 0 as the label
train_label = train.iloc[:, [0]]  # read a single column
# train_label = train.iloc[:, 0:3]
# take columns 1~16 as the features
train_data = train.iloc[:, 1:]
# convert to numpy
train_label_np = train_label.values
train_data_np = train_data.values
# convert to tensors
train_label_ts = torch.from_numpy(train_label_np)
train_data_ts = torch.from_numpy(train_data_np)
train_label_ts = train_label_ts.type(torch.LongTensor)
train_data_ts = train_data_ts.type(torch.FloatTensor)
print(train_label_ts.shape)
print(type(train_label_ts))
train_dataset = data.TensorDataset(train_data_ts, train_label_ts)
train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
# build a network
import torch.nn.functional as F  # the activation functions live here

class Net(torch.nn.Module):  # inherit from torch's Module
    def __init__(self):
        super(Net, self).__init__()  # call the parent __init__
        self.hidden1 = torch.nn.Linear(16, 30)  # hidden layer, linear output
        self.out = torch.nn.Linear(30, 3)       # output layer, linear output

    def forward(self, x):
        # forward pass: the network maps the input to an output
        x = F.relu(self.hidden1(x))  # activation(linear value of the hidden layer)
        x = self.out(x)  # raw logits; predicted probabilities need a further softmax
        return x
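# A quick shape sanity check (an addition, not in the original post): feed one
# dummy batch through the net to confirm the 16-in / 3-out wiring before training.
demo_net = Net()
dummy = torch.randn(64, 16)   # a fake batch: 64 samples, 16 features
print(demo_net(dummy).shape)  # expected: torch.Size([64, 3]), one logit per class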
# net = Net()
# optimizer = torch.optim.SGD(net.parameters(), lr=0.0001, momentum=0.001)
# loss_func = torch.nn.CrossEntropyLoss()  # the target label is not one-hotted
# loss_list = []
# for epoch in range(500):
#     for step, (b_x, b_y) in enumerate(train_loader):
#         b_x, b_y = Variable(b_x), Variable(b_y)
#         b_y = b_y.squeeze(1)
#         output = net(b_x)
#         loss = loss_func(output, b_y)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         if epoch % 1 == 0:
#             print("epoch: ", epoch, "step ", step, "loss: ", float(loss))
# create one net for each optimizer
net_sgd = Net()
net_momentum = Net()
net_rmsprop = Net()
net_adam = Net()
nets = [net_sgd, net_momentum, net_rmsprop, net_adam]
# define the optimizers
lr = 0.0001
opt_sgd = torch.optim.SGD(net_sgd.parameters(), lr=lr, momentum=0.001)
opt_momentum = torch.optim.SGD(net_momentum.parameters(), lr=lr, momentum=0.8)
opt_rmsprop = torch.optim.RMSprop(net_rmsprop.parameters(), lr=lr, alpha=0.9)
opt_adam = torch.optim.Adam(net_adam.parameters(), lr=lr, betas=(0.9, 0.99))
optimizers = [opt_sgd, opt_momentum, opt_rmsprop, opt_adam]
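# Optional (an addition, not in the original post): each Net() above starts from
# its own random initialization, so the comparison mixes optimizer effects with
# initialization luck. Copying the first net's weights into the others makes all
# four start identically, so the loss curves differ only because of the optimizer.
for n in nets[1:]:
    n.load_state_dict(net_sgd.state_dict())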
loss_func = torch.nn.CrossEntropyLoss()
losses_his = [[], [], [], []]  # record the loss history of each net
for net, opt, l_his in zip(nets, optimizers, losses_his):
    for epoch in range(500):
        for step, (b_x, b_y) in enumerate(train_loader):
            b_x, b_y = Variable(b_x), Variable(b_y)
            b_y = b_y.squeeze(1)  # targets must be a 1-D vector of class indices, not one-hot
            # train each net with its own optimizer
            output = net(b_x)              # get output for every net
            loss = loss_func(output, b_y)  # compute loss for every net
            opt.zero_grad()                # clear gradients for the next step
            loss.backward()                # backpropagation, compute gradients
            opt.step()                     # apply the gradients
            l_his.append(float(loss))      # record the loss for plotting
            if epoch % 1 == 0:
                print("optimizer: ", opt, "epoch: ", epoch, "step ", step, "loss: ", float(loss))
labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
for i, l_his in enumerate(losses_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('steps')
plt.ylabel('loss')
plt.xlim((0, 1000))
plt.ylim((0, 4))
plt.show()
# for epoch in range(5):
#     for step, (b_x, b_y) in enumerate(train_loader):
#         b_x, b_y = Variable(b_x), Variable(b_y)
#         b_y = b_y.squeeze(1)
#         output = net(b_x)
#         loss = loss_func(output, b_y)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
#         print(loss)