This version looks even better!
Following what the boss said, every node is its own separate output node (this slows down training with sigmoid: once there are many sigmoid layers, the gradient shrinks rapidly on the way back and effectively vanishes).
Switching to a different activation function fixes it.
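As a minimal sketch of why that switch helps (names and signatures here are my own, not the listing's): sigmoid's derivative is at most 0.25, so a deep chain of sigmoid nodes multiplies small factors together, while ReLU passes a gradient of exactly 1 for positive inputs.

#include <cmath>

// Sigmoid squashes into (0,1); its derivative y*(1-y) peaks at 0.25,
// so deep chains of sigmoid nodes shrink the gradient at every step.
double sigmoid(double x)            { return 1.0 / (1.0 + std::exp(-x)); }
double sigmoid_derivative(double y) { return y * (1.0 - y); } // y = sigmoid(x)

// ReLU keeps the gradient at exactly 1 for positive inputs,
// which avoids the vanishing-gradient problem described above.
double relu(double x)            { return x > 0 ? x : 0; }
double relu_derivative(double x) { return x > 0 ? 1 : 0; }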
net_t<10> means the network has 10 nodes.
The file format of input.txt is roughly:
n
0 1
1 2
3 4
The first line n says there are n following lines; each line has 2 numbers, meaning there is an edge between those 2 nodes (indices are 0-based and must not exceed the node count net_t was initialized with). Any topology can be run.
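For illustration, a sketch of how that format could be read; the function name and the add_edge callback are assumptions of mine, not the listing's actual API. The (repaired) original listing follows after it.

#include <cstdio>
#include <functional>

// Reads "n" followed by n "from to" pairs and hands each edge to a callback.
void read_edges(const char *path, const std::function<void(int, int)> &add_edge) {
    FILE *fp = std::fopen(path, "r");
    if (!fp) return;
    int n;
    if (std::fscanf(fp, "%d", &n) == 1)
        for (int i = 0; i < n; ++i) {
            int from, to;
            if (std::fscanf(fp, "%d %d", &from, &to) != 2) break;
            add_edge(from, to); // node ids are 0-based
        }
    std::fclose(fp);
}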
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
#include <queue>
#include <memory>
#include <string>
#include "recordlog.h"
using std::cin;
using std::endl;
using std::cout;
#define pr(x) cout<<#x<<" = "<<(x)<<" "
#define prln(x) cout<<#x<<" = "<<(x)<<endl
typedef std::vector< /* element type lost */ > neuron_array_t;
typedef std::vector< /* element type lost */ > vector_map_t;
typedef std::unique_ptr<class neuron_t> neuron_ptr_t;
class neuron_t { /* fields lost */ };
template<int neuron_size>
class net_t {
public:
    // members lost: rate, activation_way, height[], topology[], input_number, output_number, userful_neuron_size
    static double line(double x) { return x; }
    static double relu(double x) { return x > 0 ? x : 0; }
    double derivative(double x) {
        if (activation_way == "relu") return x > 0 ? 1 : 0;
        if (activation_way == "line") return 1;
        cout << "no activation function!" << endl; // sigmoid branch lost
        return 0;
    }
    net_t() {
        this->rate = 0.1; // learning rate
        for (int i = 0; i < neuron_size; ++i) { /* per-node init lost */ }
        this->output_number.clear();
        this->input_number.clear();
        prln(neuron_size);
        int n; cin >> n; // edge count, per the input.txt format above
        while (n--) { /* read one "from to" edge into the adjacency list; lost */ }
    }
    void setio(std::vector<double> &input, std::vector<double> &output,
               std::vector<int> *input_num = nullptr, std::vector<int> *output_num = nullptr) {
        if (output.size() == 0) { /* lost */ }
        if (input_num && output_num) { /* use the given node ids; lost */ }
        else { /* default io-node assignment; lost */ }
        printf("\n");
        printf("input nodes are:");
        for (int i = output.size(); i < input.size() + output.size(); ++i) { /* print node id; lost */ }
        printf("\n");
        initinputneuron(*input_num);
        for (int i = 0; i < output.size(); ++i) { /* lost */ }
        std::vector<std::queue<int>> q(output.size() + input.size());
        memset(height, -1, sizeof(height));
        int painted = output.size();
        int cnt = 0;
        for (auto curnode : output_number) { /* seed a BFS queue at each output node; lost */ }
        for (auto curnode : input_number) { /* seed a BFS queue at each input node; lost */ }
        bool flag = true;
        while (flag) { /* advance the BFS waves until every node's height is painted; lost */ }
        for (auto curnode : input_number) { /* lost */ }
        auto build_map = [=](int from, int to) { /* record the edge in the adjacency structure; lost */ };
        for (int i = 0; i < neuron_size; ++i) { /* lost */ }
        for (int i = 0; i < neuron_size; ++i) { /* lost */ }
        gettopology();
        // at this point the network topology is fully constructed
    }
    void gettopology() {
        std::queue<int> q;
        for (auto curnode : input_number) { /* seed the queue with the input nodes; lost */ }
        int pos = 0;
        while (!q.empty()) { /* pop a node into topology[pos++], push its successors; lost */ }
        userful_neuron_size = pos;
        //debug
        //for (int i = 0; i < neuron_size; ++i)
        //    pr(i), prln(topology[i]);
    }
    bool bfs(std::queue<int> &q, int delta) {
        int h = height[q.front()];
        while (!q.empty() && height[q.front()] == h) {
            // pop q.front() and, for each unpainted neighbour nextnode: (loop body partly lost)
            height[nextnode] = h + delta;
            q.push(nextnode);
        }
        return true;
    }
    void cal_propagate(int node) { /* weighted sum of inputs, then activation; lost */ }
    void propagate(std::vector<double> &input) {
        for (int i = 0; i < neuron_size; ++i) { /* reset node state; lost */ }
        for (int i = 0; i != input.size(); ++i) { /* load input[i] into the i-th input node; lost */ }
        for (int i = 0; i < userful_neuron_size; ++i) { /* cal_propagate in topological order; lost */ }
    }
    void cal_back(int node) {
        for (auto &nextnode : node.neuron_array) { /* pull back the error contribution from nextnode; lost */ }
        node_theta -= node_pe * d_node * rate; // gradient step on this node's bias
    }
    void back(std::vector<double> &input, std::vector<double> &output) {
        for (int i = 0; i != output.size(); ++i) { /* seed the output-node errors; lost */ }
        for (int i = userful_neuron_size - 1; i >= 0; --i) { /* cal_back in reverse topological order; an if/else here was lost */ }
        for (int i = 0; i < input.size(); ++i) { /* lost */ }
    }
    double train(std::vector<double> &input, std::vector<double> &output) {
        double error = 0;
        /* propagate(input), accumulate the squared error, back(input, output); lost */
        return error;
    }
    void outputnetwork() {
        printf("---------------other nodes------------\n");
        printf("other nodes\n");
        for (int i = 0; i < neuron_size; ++i) { /* print node i's weights; lost */ }
        printf("**********===end********************\n");
    }
    void testoutput(std::vector<double> &input) { /* propagate(input) and print the output nodes; lost */ }
};

void doit251() {
    std::vector<int> in  = { /* input node ids lost */ };
    std::vector<int> out = { /* output node ids lost */ };
    net_t<8> net; //("input.txt");
    srand(0);
    std::vector<double> input1  = { /* values lost */ };
    std::vector<double> input2  = { /* values lost */ };
    std::vector<double> input3  = { /* values lost */ };
    std::vector<double> input4  = { /* values lost */ };
    std::vector<double> output1 = { /* values lost */ };
    std::vector<double> output2 = { /* values lost */ };
    std::vector<double> output3 = { /* values lost */ };
    std::vector<double> output4 = { /* values lost */ };
net.setio(input1, output1, &in, &out);
net.activation_way = "sigmoid";
net.rate = 20;
    /* net.propagate(input3);
    net.outputnetwork();
    net.back(input3, output3);
    net.outputnetwork();
    return;
    */
    double error = 0;
    for (int i = 1; i <= 20000; ++i) {
        error = 0;
        error += net.train(input1, output1);
        error += net.train(input2, output2);
        error += net.train(input3, output3);
        error += net.train(input4, output4);
        error /= 4;
        cout << error << endl;
    }
}
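Since most of the listing's function bodies were lost, here is a self-contained sketch of the same idea: backpropagation over an arbitrary DAG, processing nodes in topological order forward and in reverse order backward. It uses Kahn's algorithm for the ordering rather than the height-painting BFS above, sigmoid only, and a made-up 2-hidden-node-layer XOR task; every name and constant in it is my own assumption, not recovered code.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <queue>
#include <utility>
#include <vector>

// BP over an arbitrary DAG: every node has a bias and weighted in-edges.
struct Edge { int from; double w; };

struct Net {
    int n;
    std::vector<std::vector<Edge>> in_edges; // per-node weighted inputs
    std::vector<double> bias, value, delta;
    std::vector<int> order;                  // topological order of all nodes
    std::vector<int> inputs, outputs;        // node ids
    double rate = 0.5;

    Net(int n_, const std::vector<std::pair<int, int>> &edges,
        std::vector<int> in_ids, std::vector<int> out_ids)
        : n(n_), in_edges(n_), bias(n_), value(n_), delta(n_),
          inputs(std::move(in_ids)), outputs(std::move(out_ids)) {
        auto rnd = [] { return 2.0 * std::rand() / RAND_MAX - 1.0; };
        std::vector<std::vector<int>> succ(n);
        std::vector<int> indeg(n, 0);
        for (auto e : edges) {
            in_edges[e.second].push_back({e.first, rnd()});
            succ[e.first].push_back(e.second);
            ++indeg[e.second];
        }
        for (int i = 0; i < n; ++i) bias[i] = rnd();
        // Kahn's algorithm: repeatedly remove nodes with no unprocessed inputs.
        std::queue<int> q;
        for (int i = 0; i < n; ++i) if (indeg[i] == 0) q.push(i);
        while (!q.empty()) {
            int u = q.front(); q.pop();
            order.push_back(u);
            for (int v : succ[u]) if (--indeg[v] == 0) q.push(v);
        }
    }

    static double sigmoid(double x) { return 1.0 / (1.0 + std::exp(-x)); }

    void propagate(const std::vector<double> &x) {
        for (size_t i = 0; i < inputs.size(); ++i) value[inputs[i]] = x[i];
        for (int u : order) {
            if (in_edges[u].empty()) continue;   // input nodes keep x as-is
            double s = bias[u];
            for (auto &e : in_edges[u]) s += e.w * value[e.from];
            value[u] = sigmoid(s);
        }
    }

    double train(const std::vector<double> &x, const std::vector<double> &t) {
        propagate(x);
        double error = 0;
        std::fill(delta.begin(), delta.end(), 0.0);
        for (size_t i = 0; i < outputs.size(); ++i) {
            double y = value[outputs[i]];
            delta[outputs[i]] = y - t[i];        // dE/dy for E = 0.5*(y-t)^2
            error += 0.5 * (y - t[i]) * (y - t[i]);
        }
        // Reverse topological order: a node's delta is complete before use.
        for (int k = n - 1; k >= 0; --k) {
            int u = order[k];
            if (in_edges[u].empty()) continue;
            double d = delta[u] * value[u] * (1 - value[u]); // sigmoid'
            for (auto &e : in_edges[u]) {
                delta[e.from] += d * e.w;        // push error to predecessors
                e.w -= rate * d * value[e.from]; // then update the weight
            }
            bias[u] -= rate * d;
        }
        return error;
    }
};

int main() {
    std::srand(0);
    // Nodes 0,1 inputs; 2,3,4 hidden; 5 output; any other DAG works too.
    Net net(6, {{0,2},{0,3},{0,4},{1,2},{1,3},{1,4},{2,5},{3,5},{4,5}}, {0,1}, {5});
    const double X[4][2] = {{0,0},{0,1},{1,0},{1,1}}, T[4] = {0,1,1,0}; // XOR
    for (int i = 1; i <= 20000; ++i) {
        double e = 0;
        for (int s = 0; s < 4; ++s)
            e += net.train({X[s][0], X[s][1]}, {T[s]});
        if (i % 5000 == 0) std::printf("iter %d  mean error %f\n", i, e / 4);
    }
}

The reverse-order loop is the key invariant: each node's delta is fully accumulated before the node is processed, which is exactly what gettopology()'s ordering guarantees in the original listing.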