# -*- coding: utf-8 -*-
"""Created on Sat Mar 19 19:44:17 2022

@author: Administrator
"""
import math
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
y = [0.14, 0.64, 0.28, 0.33, 0.12, 0.03, 0.02, 0.11, 0.08]
x1 = [0.29, 0.50, 0.00, 0.21, 0.10, 0.06, 0.13, 0.24, 0.28]
x2 = [0.23, 0.62, 0.53, 0.53, 0.33, 0.15, 0.03, 0.23, 0.03]
theata = [-1, -1, -1, -1, -1, -1, -1, -1, -1]
x = np.array([x1, x2, theata])
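# x stacks the two input rows plus the bias row into shape (3, 9):
# column j holds one training sample [x1[j], x2[j], -1].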
# hidden-layer weights: one row per input (plus bias), one column per hidden unit
w_mid = DataFrame(0.5, index=['input1', 'input2', 'theata'],
                  columns=['mid1', 'mid2', 'mid3', 'mid4'])
# output-layer weights: one row per hidden unit (plus bias), a single output 'a'
w_out = DataFrame(0.5, index=['input1', 'input2', 'input3', 'input4', 'theata'],
                  columns=['a'])

def sigmoid(x):  # mapping (activation) function
    return 1 / (1 + math.exp(-x))
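# Note: the weight updates in train() rely on the sigmoid derivative
#   f'(x) = f(x) * (1 - f(x)),
# which is why factors like res*(1-res) and out*(1-out) appear below.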
# train the network on one sample
def train(w_out, w_mid, data, real):
    # inputs to the hidden-layer neurons and to the output-layer neurons
    net_in = DataFrame(data, index=['input1', 'input2', 'theata'], columns=['a'])
    out_in = DataFrame(0.0, index=['input1', 'input2', 'input3', 'input4', 'theata'],
                       columns=['a'])
    out_in.loc['theata'] = -1  # fixed bias input for the output layer
    # weight increments for the hidden-layer and output-layer neurons
    w_mid_delta = DataFrame(0.0, index=['input1', 'input2', 'theata'],
                            columns=['mid1', 'mid2', 'mid3', 'mid4'])
    w_out_delta = DataFrame(0.0, index=['input1', 'input2', 'input3', 'input4', 'theata'],
                            columns=['a'])
    # hidden-layer outputs
    for i in range(0, 4):
        out_in.iloc[i] = sigmoid(sum(w_mid.iloc[:, i] * net_in.iloc[:, 0]))
    # output-layer output / network output
    res = sigmoid(sum(out_in.iloc[:, 0] * w_out.iloc[:, 0]))
    error = abs(res - real)  # error
    # output-layer weight increments
    yita = 0.86  # yita = learning rate
    w_out_delta.iloc[:, 0] = yita * res * (1 - res) * (real - res) * out_in.iloc[:, 0]
    w_out_delta.iloc[4, 0] = -(yita * res * (1 - res) * (real - res))  # bias ('theata') row: input is -1
    w_out = w_out + w_out_delta  # update output-layer weights
    # hidden-layer weight increments
    for i in range(0, 4):
        w_mid_delta.iloc[:, i] = (yita * out_in.iloc[i, 0] * (1 - out_in.iloc[i, 0])
                                  * w_out.iloc[i, 0] * res * (1 - res) * (real - res)
                                  * net_in.iloc[:, 0])
        w_mid_delta.iloc[2, i] = -(yita * out_in.iloc[i, 0] * (1 - out_in.iloc[i, 0])
                                   * w_out.iloc[i, 0] * res * (1 - res) * (real - res))  # bias row
    w_mid = w_mid + w_mid_delta  # update hidden-layer weights
    return w_out, w_mid, res, error
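# The updates above implement the online delta rule for a single sigmoid output:
#   delta_v_j = yita * (real - res) * res * (1 - res) * out_j
# back-propagated to the hidden layer as
#   delta_w_ij = yita * out_j * (1 - out_j) * v_j * (real - res) * res * (1 - res) * x_i
# The 'theata' rows carry the negated term because the bias input is fixed at -1.
# (As written, the hidden-layer pass reads w_out after it has already been
# updated; textbook backprop would use the pre-update output weights.)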
# query the trained network (forward pass only)
def result(data, w_out, w_mid):
    net_in = DataFrame(data, index=['input1', 'input2', 'theata'], columns=['a'])
    out_in = DataFrame(0.0, index=['input1', 'input2', 'input3', 'input4', 'theata'],
                       columns=['a'])
    out_in.loc['theata'] = -1  # fixed bias input for the output layer
    # hidden-layer outputs
    for i in range(0, 4):
        out_in.iloc[i] = sigmoid(sum(w_mid.iloc[:, i] * net_in.iloc[:, 0]))
    # output-layer output / network output
    res = sigmoid(sum(out_in.iloc[:, 0] * w_out.iloc[:, 0]))
    return res
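# Minimal usage sketch (not in the original post; the epoch count is an
# arbitrary assumption): train on the 9 samples, then query the network.
if __name__ == '__main__':
    for epoch in range(1000):  # assumed number of passes over the data
        for j in range(len(y)):
            w_out, w_mid, res, error = train(w_out, w_mid, x[:, j], y[j])
    print('sample 0 ->', result(x[:, 0], w_out, w_mid))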