
Commemorating my first shot with VS2017

Gradient descent:

a. Gradient descent for linear regression:

Link function: g(x) = x (the identity, i.e. plain linear regression)

The function to be minimized (squared error; the 1/2 keeps the gradient clean):

E(w;b) = (1/2) Σ_i ( w·x_i + b - y_i )^2

Partial derivative with respect to b:

∂E/∂b = Σ_i ( w·x_i + b - y_i )

Partial derivative with respect to any component w_j of w:

∂E/∂w_j = Σ_i x_ij ( w·x_i + b - y_i )

Gradient vector:

p = ∇E(w;b) = ( ∂E/∂b, ∂E/∂w_1, …, ∂E/∂w_d )

Update (w;b) = (w;b) - a*p (move (w;b) in the direction in which E decreases fastest, with step size a, until E converges to an extremum).
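To make the update rule concrete, here is a minimal self-contained sketch (my illustrative example, not from the original post: the data and the names xs, ys, step are made up) that fits a one-variable line y ≈ w*x + b by exactly this rule:

#include <stdio.h>

/* Sketch of gradient descent for a one-variable linear fit.
 * Hypothetical data lying on y = 2x + 1; w and b should converge there. */
int main(void)
{
    float xs[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
    float ys[4] = { 1.0f, 3.0f, 5.0f, 7.0f };
    float w = 0.0f, b = 0.0f;
    float step = 0.01f;                      /* the step size "a" */

    for (int iter = 0; iter < 20000; iter++) {
        float gw = 0.0f, gb = 0.0f;
        for (int i = 0; i < 4; i++) {        /* accumulate the gradient p */
            float err = w * xs[i] + b - ys[i];
            gb += err;                       /* ∂E/∂b   */
            gw += err * xs[i];               /* ∂E/∂w   */
        }
        w -= step * gw;                      /* (w;b) = (w;b) - a*p */
        b -= step * gb;
    }
    printf("w=%f b=%f (expect roughly 2 and 1)\n", w, b);
    return 0;
}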

b. Gradient descent for logistic regression

That is, linear regression through the link function g(z) = 1/(1 + e^(-z)).

The function to be minimized (the negative log-likelihood):

ℓ(w;b) = Σ_i ( -y_i (w·x_i + b) + ln(1 + e^(w·x_i + b)) )

Partial derivatives:

∂ℓ/∂b = Σ_i ( g(w·x_i + b) - y_i )

∂ℓ/∂w_j = Σ_i x_ij ( g(w·x_i + b) - y_i )

Gradient vector:

p = ∇ℓ(w;b), assembled from the partial derivatives above; these are exactly the three gra[] accumulations in the code below.

As before, iterate from any initial (w;b) until convergence at an extremum.
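The gradient formulas are easy to sanity-check numerically. The sketch below (again mine, not from the original post; the one-feature data set and the name loss are made up for illustration) compares the analytic ∂ℓ/∂b = Σ(g(z_i) - y_i) against a central finite difference of ℓ; the two printed numbers should agree to several decimal places.

#include <stdio.h>
#include <math.h>

/* ℓ(w;b) = Σ( -y_i*z_i + ln(1 + e^{z_i}) ), z_i = w*x_i + b */
static double loss(double w, double b, const double *x, const int *y, int n)
{
    double s = 0.0;
    for (int i = 0; i < n; i++) {
        double z = w * x[i] + b;
        s += -y[i] * z + log(1.0 + exp(z));
    }
    return s;
}

int main(void)
{
    double x[4] = { 0.2, 0.5, 0.7, 0.9 };    /* hypothetical data */
    int    y[4] = { 0, 0, 1, 1 };
    double w = 0.3, b = -0.1, h = 1e-6;

    double analytic = 0.0;
    for (int i = 0; i < 4; i++) {
        double z = w * x[i] + b;
        analytic += 1.0 / (1.0 + exp(-z)) - y[i];   /* g(z_i) - y_i */
    }
    double numeric = (loss(w, b + h, x, y, 4) - loss(w, b - h, x, y, 4)) / (2 * h);

    printf("analytic dl/db = %f, finite difference = %f\n", analytic, numeric);
    return 0;
}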

#include "stdafx.h"#include "math.h"#define e 2.7182818284590float dot(float *x, float *y, int n)//向量積,n指定向量維度{ float sum = 0; for (int i = 0; i < n; i++) sum += x[i] * y[i]; return sum;}struct watermelon{ int number; float density; float sugar; int good;};struct watermelon W[17]={ 1,0.697,0.460,1, 2,0.774,0.376,1, 3,0.634,0.264,1, 4,0.608,0.318,1, 5,0.556,0.215,1, 6,0.403,0.237,1, 7,0.481,0.149,1, 8,0.437,0.211,1, 9,0.666,0.091,0, 10,0.243,0.267,0, 11,0.245,0.057,0, 12,0.343,0.099,0, 13,0.639,0.161,0, 14,0.657,0.198,0, 15,0.360,0.370,0, 16,0.593,0.042,0, 17,0.719,0.103,0};float logistic(float x)//聯繫函數{ return (1 / (1 + powf(e, -x)));}float *gradient(struct watermelon *W)//愚蠢的西瓜分類器{ int temp = 100000; int i, j; float wb[3] = { 1,1,1 };//隨便找個點 float a = 0.001; float gra[3];//用來存梯度向量 for (i = 0; i < temp; i++)//步長0.001 先跑100000輪 { for (j = 0; j < 3; j++) gra[j] = 0; for (j = 0; j < 17; j++) { gra[0] +=a* (logistic(wb[1] * W[j].density + wb[2] * W[j].sugar + wb[0]) - W[j].good);//對應三個梯度公式 gra[1] += a*W[j].density*(logistic(wb[1] * W[j].density + wb[2] * W[j].sugar + wb[0]) - W[j].good); gra[2] += a*W[j].sugar*(logistic(wb[1] * W[j].density + wb[2] * W[j].sugar + wb[0]) - W[j].good); } for (j = 0; j < 3; j++) wb[j] -=gra[j];//根據梯度更新wb } return wb;}float *linear(struct watermelon *W)//更蠢的西瓜分類器{ int temp = 100000; int i, j; float wb[3] = { 1,1,1 };//隨便找個點 float a = 0.001; float gra[3];//用來存梯度向量 for (i = 0; i < temp; i++)//步長0.001 先跑100000輪 { for (j = 0; j < 3; j++) gra[j] = 0; for (j = 0; j < 17; j++) { gra[0] += a* (wb[1] * W[j].density + wb[2] * W[j].sugar + wb[0] - W[j].good);//對應三個梯度公式 gra[1] += a*W[j].density*(wb[1] * W[j].density + wb[2] * W[j].sugar + wb[0] - W[j].good); gra[2] += a*W[j].sugar*(wb[1] * W[j].density + wb[2] * W[j].sugar + wb[0] - W[j].good); } for (j = 0; j < 3; j++) wb[j] -= gra[j];//根據梯度更新wb } return wb;}int main(){ float *g = gradient(W); printf("根據愚蠢的西瓜分類器:
好瓜=%f+密度*%f+糖*%f (大於0是好瓜)
", g[0], g[1], g[2]); float *l = linear(W); printf("根據更蠢的西瓜分類器:
好瓜=%f+密度*%f+糖*%f (靠近1是好瓜,靠近0是壞瓜)
", l[0], l[1], l[2]); return 0;}

The results don't seem that great; even on the training set a few melons are misclassified...
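One way to make "a few melons wrong" concrete is to count the training-set errors. The fragment below is a sketch meant to be pasted into the program above (count_errors is a name I made up; it reuses the global W and the decision rule w·x + b > 0 that the first printf describes):

int count_errors(const float *wb) // hypothetical helper: training-set error count
{
    int errors = 0;
    for (int j = 0; j < 17; j++)
    {
        // predict good iff w·x + b > 0, i.e. logistic(w·x + b) > 0.5
        int pred = (wb[1] * W[j].density + wb[2] * W[j].sugar + wb[0]) > 0;
        if (pred != W[j].good)
            errors++;
    }
    return errors;
}

Calling printf("misclassified: %d of 17\n", count_errors(g)); at the end of main then reports the error count directly.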
