Ten Lines of Code -- Playing with a Single-Layer Perceptron in C++
From the column 深度學堂
I've been working with deep learning for almost a year and a half. I can more or less follow the theory, but in practice I just use a framework and tweak other people's code. I just came across a blog post that implements a "neural network" in C++ (actually a single-layer perceptron, with no hidden layer). I found it genuinely helpful for understanding backpropagation, so I'm posting it here.
Here is the original post.
The structure of a single-layer perceptron (diagram in the original post):
For details on what exactly a single-layer perceptron is, see here.
This version does not include the bias parameter b.
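(A side note of my own, not from the original post: the usual trick for adding a bias without touching any of the code below is to append a constant 1 to every input row and one extra entry to W, since $\sigma(x \cdot W + b) = \sigma([x,\,1] \cdot [W;\,b])$, so b is learned exactly like the other weights.)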
Also, about the "ten lines of code" -- ahem, only the main program is ten lines. Doing the whole thing in ten lines of plain C++ would be a dream...
First, let's look at the main function:
#include "functions.hpp"   // assumed include; the helper declarations are listed further down

vector<double> X { 5.1, 3.5, 1.4, 0.2,
                   4.9, 3.0, 1.4, 0.2,
                   6.2, 3.4, 5.4, 2.3,
                   5.9, 3.0, 5.1, 1.8 };   // 4 samples x 4 features, stored row-major
vector<double> y { 0, 0, 1, 1 };           // one label per sample
vector<double> W { 0.5, 0.5, 0.5, 0.5 };   // 4 weights, one per feature

int main() {
    for (int i = 0; i != 50; ++i) {
        vector<double> pred = sigmoid(dot(X, W, 4, 4, 1));
        vector<double> pred_error = y - pred;
        vector<double> pred_delta = pred_error * sigmoid_d(pred);
        vector<double> W_delta = dot(transpose(&X[0], 4, 4), pred_delta, 4, 4, 1);
        W = W + W_delta;
        if (i == 49) {
            print(pred, 4, 1);
        }
    }
    return 0;
}
First, look at the first sample; its corresponding y is 0. (Note that the first row of X is the first sample, not the first column.)
Each weight starts at 0.5.
The original post shows this step as a diagram.
vector<double> pred = sigmoid(dot(X, W, 4, 4, 1 ));
This is one forward pass.
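As a quick sanity check (my own arithmetic, not from the original post), plug the first sample into this line with the initial weights:

$z_1 = 5.1 \cdot 0.5 + 3.5 \cdot 0.5 + 1.4 \cdot 0.5 + 0.2 \cdot 0.5 = 5.1, \qquad \mathrm{pred}_1 = \sigma(5.1) = \frac{1}{1 + e^{-5.1}} \approx 0.994.$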
Step two: compute the error. Note that the "-" operator has been overloaded, so two vectors can be subtracted directly.
vector<double> pred_error = y - pred;
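Continuing the sanity check for the first sample, whose label is $y_1 = 0$:

$\mathrm{pred\_error}_1 = y_1 - \mathrm{pred}_1 \approx 0 - 0.994 = -0.994.$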
Let $\mathrm{pred} = \sigma(XW)$ and $F = \frac{1}{2}\,\lVert y - \mathrm{pred} \rVert^2$; then $\frac{\partial F}{\partial \mathrm{pred}} = -(y - \mathrm{pred}) = -\mathrm{pred\_error}$.
Our x and y are the input data; what we need to change is the parameter W, so that pred_error becomes as small as possible.
That is, we treat W as the independent variable and differentiate F with respect to it.
To find the minimum of F, we walk downhill along the derivative; here pred_error determines both the direction and the step size. (That's how I understand it -- you can also pick your own learning rate.)
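Spelling out the chain rule behind the next few code lines (my own derivation of what the code computes, using the F defined above, with $\odot$ denoting element-wise multiplication):

$\frac{\partial F}{\partial W} = -X^{T}\big[(y - \mathrm{pred}) \odot \mathrm{pred} \odot (1 - \mathrm{pred})\big] = -X^{T}\,\mathrm{pred\_delta},$

so the update W = W + W_delta below is exactly one step of gradient descent with an implicit learning rate of 1.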
vector<double> pred_delta = pred_error * sigmoid_d(pred);
sigmoid_d returns the derivative of the sigmoid.
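The reason it can take pred (the sigmoid output) rather than the pre-activation is the standard identity

$\sigma'(z) = \sigma(z)\,(1 - \sigma(z)) = \mathrm{pred}\,(1 - \mathrm{pred}),$

which is exactly what sigmoid_d computes element-wise.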
vector<double> W_delta = dot(transpose( &X[0], 4, 4 ), pred_delta, 4, 4, 1);
W = W + W_delta;
Finally, the W parameters are updated.
The outer loop runs for 50 epochs.
Output:
0.06296
0.0841725
0.947551
0.920766
[Finished in 0.8s]
These are already very close to y.
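If you want to convince yourself that W_delta really is the negative gradient of F, here is a small finite-difference check I added myself. It is only a sketch: it assumes the helper declarations below live in a functions.hpp that also pulls in <vector>, <iostream>, <cmath> and using namespace std, as the listing suggests.

// gradient_check.cpp -- my own sanity check, not part of the original post.
// Compares the analytic update direction W_delta against a numerical
// estimate of -dF/dW obtained by central finite differences.
#include "functions.hpp"   // assumed to provide the helpers listed below

// F = 0.5 * sum((y - pred)^2), the loss used in the derivation above.
double loss (const vector<double>& X, const vector<double>& y, const vector<double>& W) {
    vector<double> err = y - sigmoid(dot(X, W, 4, 4, 1));
    double f = 0.0;
    for (unsigned i = 0; i != err.size(); ++i) f += 0.5 * err[i] * err[i];
    return f;
}

int main() {
    vector<double> X { 5.1, 3.5, 1.4, 0.2,  4.9, 3.0, 1.4, 0.2,
                       6.2, 3.4, 5.4, 2.3,  5.9, 3.0, 5.1, 1.8 };
    vector<double> y { 0, 0, 1, 1 };
    vector<double> W { 0.5, 0.5, 0.5, 0.5 };

    // Analytic direction, exactly as in the main loop above.
    vector<double> pred       = sigmoid(dot(X, W, 4, 4, 1));
    vector<double> pred_delta = (y - pred) * sigmoid_d(pred);
    vector<double> W_delta    = dot(transpose(&X[0], 4, 4), pred_delta, 4, 4, 1);

    // Numerical direction: -dF/dW[j] by central differences; should match W_delta[j].
    const double h = 1e-6;
    for (int j = 0; j != 4; ++j) {
        vector<double> Wp = W, Wm = W;
        Wp[j] += h;
        Wm[j] -= h;
        double grad = (loss(X, y, Wp) - loss(X, y, Wm)) / (2 * h);
        cout << "W[" << j << "]  analytic " << W_delta[j] << "  numeric " << -grad << "\n";
    }
    return 0;
}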
There are also a few helper functions:
vector<double> sigmoid   (const vector<double>& m1);
vector<double> sigmoid_d (const vector<double>& m1);
vector<double> operator+ (const vector<double>& m1, const vector<double>& m2);
vector<double> operator- (const vector<double>& m1, const vector<double>& m2);
vector<double> operator* (const vector<double>& m1, const vector<double>& m2);
vector<double> transpose (double *m, const int C, const int R);
vector<double> dot       (const vector<double>& m1, const vector<double>& m2,
                          const int m1_rows, const int m1_columns, const int m2_columns);
void print (const vector<double>& m, int n_rows, int n_columns);
If you're interested, here are their definitions as well:
#include "functions.hpp"vector <double> sigmoid_d (const vector <double>& m1) { const unsigned long VECTOR_SIZE = m1.size(); vector <double> output (VECTOR_SIZE); for( unsigned i = 0; i != VECTOR_SIZE; ++i ) output[ i ] = m1[ i ] * (1 - m1[ i ]); return output;}vector <double> sigmoid (const vector <double>& m1) { const unsigned long VECTOR_SIZE = m1.size(); vector <double> output (VECTOR_SIZE); for( unsigned i = 0; i != VECTOR_SIZE; ++i ) output[ i ] = 1 / (1 + exp(-m1[ i ])); return output;}vector <double> operator+(const vector <double>& m1, const vector <double>& m2){ const unsigned long VECTOR_SIZE = m1.size(); vector <double> sum (VECTOR_SIZE); for (unsigned i = 0; i != VECTOR_SIZE; ++i) sum[i] = m1[i] + m2[i]; return sum;}vector <double> operator-(const vector <double>& m1, const vector <double>& m2){ const unsigned long VECTOR_SIZE = m1.size(); vector <double> difference (VECTOR_SIZE); for (unsigned i = 0; i != VECTOR_SIZE; ++i) difference[i] = m1[i] - m2[i]; return difference;}vector <double> operator*(const vector <double>& m1, const vector <double>& m2){ const unsigned long VECTOR_SIZE = m1.size(); vector <double> product (VECTOR_SIZE); for (unsigned i = 0; i != VECTOR_SIZE; ++i) product[i] = m1[i] * m2[i]; return product;}vector <double> transpose (double *m, const int C, const int R) { vector <double> mT (C*R); for(unsigned n = 0; n != C*R; n++) { unsigned i = n/C; unsigned j = n%C; mT[n] = m[R*j + i]; } return mT;}vector <double> dot (const vector <double>& m1, const vector <double>& m2, const int m1_rows, const int m1_columns, const int m2_columns) { vector <double> output (m1_rows*m2_columns); for( int row = 0; row != m1_rows; ++row ) { for( int col = 0; col != m2_columns; ++col ) { output[ row * m2_columns + col ] = 0; for( int k = 0; k != m1_columns; ++k ) output[ row * m2_columns + col ] += m1[ row * m1_columns + k ] * m2[ k * m2_columns + col ]; } } return output;}void print ( const vector <double>& m, int n_rows, int n_columns ) { for( int i = 0; i != n_rows; ++i ) { for( int j = 0; j != n_columns; ++j ) { cout << m[ i * n_columns + j ] << " "; } cout <<
; } cout << endl;}
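And a tiny usage sketch of my own (not from the post) showing the row-major flat-vector convention these helpers assume, multiplying a 2x3 matrix by a 3x1 vector:

#include "functions.hpp"

int main() {
    vector<double> A { 1, 2, 3,
                       4, 5, 6 };              // 2 rows x 3 columns, stored row by row
    vector<double> v { 1, 0, 1 };              // 3 rows x 1 column
    vector<double> Av = dot(A, v, 2, 3, 1);    // 2x1 result: {4, 10}
    print(Av, 2, 1);                           // prints 4 and 10 on separate lines
    return 0;
}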
That's all.