A small neural network, particularly well suited to understanding how neural networks work under the hood, with no dependency on any neural-network library.

# This code implements a small DNN with several activation functions and graphical display;
# it is particularly suitable for getting an intuitive feel for how a neural network fits a function.
# The code mainly tests the sigmoid and relu functions; we also tested sin and the normal (Gaussian)
# function. All of these can fit functions well, but the initial weights must be handled carefully,
# otherwise training becomes very difficult.
# Original author: 易瑜   Email: 296721135@qq.com
# Corrections are welcome. If you repost this, please credit the author and the source.
# The code was tested on Python 3 and depends on only two libraries, numpy and matplotlib,
# which can be installed simply with `pip install numpy` and `pip install matplotlib`.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
import random
import os


class Activation:
    ## Subclasses must implement the functions below
    def __init__(self, wRange=1, bRange=1):
        self.wRange = wRange
        self.bRange = bRange

    # Weight initialisation: wx + b = w(x + b/w) = w(x + h) -> h = b/w.
    # w controls the scaling of the function along x, h controls the shift along x after scaling.
    # Initialising the weights is not a purely random process. In our tests we found that when
    # fitting with s-shaped functions, the function must be scaled appropriately and the offsets
    # initialised so that the cells are spread evenly over the whole input space.
    # For relu-type functions, setting w to +1/-1 and only varying the initial offset is enough.
    def initWeight(self, cell):
        for i in range(len(cell.w)):
            cell.w[i] = self.wRange * random.choice([1., -1.])
        cell.b = (self.bRange * self.wRange) * random.uniform(-1, 1)
        if (cell.specialCellType):
            for i in range(len(cell.w)):
                cell.h[i] = (self.bRange) * random.uniform(-1, 1)

    def activation_fun(self, x):  # activation function
        raise NotImplementedError("")

    def activation_deri_fun(self, cell):  # partial derivative
        raise NotImplementedError("")

    # Weight update. The computed partial derivative is
    #     Δloss/Δw = deri                                   (1)
    # If we let
    #     Δw = -speed * deri                                (2)
    # then substituting (2) into (1) gives
    #     Δloss = deri * Δw = -speed * deri * deri,
    # so the loss always moves in the decreasing direction.
    # This update rule is not the only possible one; in theory any rule that actually makes Δloss
    # decrease is acceptable. For example, provided deri is non-zero, we could let
    #     Δw = -speed / deri                                (3)
    # Substituting into (1) gives Δloss = -speed, i.e. each step decreases the loss at a fixed rate.
    # But rule (3) has problems of its own: the derivative deri only holds in a small neighbourhood
    # of the current w, so the range of Δw must be limited.
    # This is only meant to spark ideas; there are many gradient-descent strategies, see e.g.:
    # http://www.360doc.com/content/16/1121/12/22755525_608221032.shtml
    def updateDeltaWeight(self, deri, speed, cell, loss, coefficient):
        return -speed * deri


############################################################### x^2: the gradient explodes very
# easily, but with a modified weight-update strategy it can still fit some functions
class ActivationXX(Activation):
    def activation_fun(self, x):  # activation function
        if (abs(x) > 1):  # limit the range of x
            x = 1
        return x * x

    def activation_deri_fun(self, cell):  # partial derivative
        if (abs(cell.sum) > 1):
            return 0
        return 2 * cell.sum


############################################################### V-shaped function
class ActivationAbsolute(Activation):
    def activation_fun(self, x):  # activation function
        return abs(x)

    def activation_deri_fun(self, cell):  # partial derivative of |x|: -1 on the left, +1 on the right
        return -1.0 if cell.sum < 0.0 else 1.0


############################################################### Sinc function
class ActivationSinc(Activation):
    def activation_fun(self, x):  # activation function
        return 1.0 if x == 0.0 else math.sin(x) / x

    def activation_deri_fun(self, cell):  # partial derivative (sinc'(0) = 0)
        x = cell.sum
        return 0.0 if x == 0.0 else math.cos(x) / x - math.sin(x) / (x * x)


class ActivationTanh(Activation):
    def activation_fun(self, x):  # activation function
        return math.tanh(x)

    def activation_deri_fun(self, cell):  # partial derivative
        return 1 - cell.out * cell.out


class ActivationRelu(Activation):
    def activation_fun(self, x):  # activation function
        return max(0.0, x)

    def activation_deri_fun(self, cell):  # partial derivative
        return 0.0 if cell.sum <= 0. else 1.0


class ActivationMyRelu(Activation):  # ____/~~~~~~~ , shifted a little to the right
    def activation_fun(self, x):  # activation function
        return max(0.0, x - 0.5)

    def activation_deri_fun(self, cell):  # partial derivative (zero until the kink at 0.5)
        return 0.0 if cell.sum <= 0.5 else 1.0


class ActivationLeakyRelu(Activation):
    def activation_fun(self, x):  # activation function
        return x if x > 0.0 else 0.01 * x

    def activation_deri_fun(self, cell):  # partial derivative
        return 0.01 if cell.sum <= 0 else 1.0


class ActivationStep(Activation):  # ___|~~~~~~ , 0 - 1
    def activation_fun(self, x):  # activation function
        return 1.0 if x >= 0 else 0

    def activation_deri_fun(self, cell):  # partial derivative
        return 0


class ActivationSignum(Activation):  # ___|~~~~~~ , -1 - 1
    def activation_fun(self, x):  # activation function
        return 1.0 if x >= 0 else -1.0

    def activation_deri_fun(self, cell):  # partial derivative
        return 0.0


class ActivationSoftPlus(Activation):  # ln(1 + e^x)
    def activation_fun(self, x):  # activation function
        return math.log(1 + math.exp(x))

    def activation_deri_fun(self, cell):  # partial derivative
        return 1 / (1 + math.exp(-cell.sum))


class ActivationLecunTanh(Activation):  # LeCun Tanh
    def activation_fun(self, x):  # activation function
        return 1.7519 * math.tanh(2 * x / 3)

    def activation_deri_fun(self, cell):  # partial derivative
        return 1.7519 * 2 * (1 - cell.out * cell.out / (1.7519 * 1.7519)) / 3


class ActivationHardTanh(Activation):  # ____/~~~~~~~~~ ,
    def activation_fun(self, x):  # activation function
        return 1 if x > 1.0 else (-1 if x < -1.0 else x)

    def activation_deri_fun(self, cell):  # partial derivative
        return 1 if abs(cell.sum) < 1.0 else 0


class ActivationArcTan(Activation):  # ArcTan
    def activation_fun(self, x):  # activation function
        return math.atan(x)

    def activation_deri_fun(self, cell):  # partial derivative
        return 1 / (cell.sum * cell.sum + 1)


class ActivationSoftsign(Activation):  # x/(1 + |x|)
    def activation_fun(self, x):  # activation function
        return x / (1 + abs(x))

    def activation_deri_fun(self, cell):  # partial derivative
        return 1 / ((1 + abs(cell.sum)) * (1 + abs(cell.sum)))


############################################################### sigmoid
class ActivationSigmoid(Activation):
    def activation_fun(self, x):  # activation function
        try:
            return 1 / (1 + math.exp(-x))
        except OverflowError:
            if x < 0.0:
                return 0
            else:
                return 1

    def activation_deri_fun(self, cell):  # partial derivative
        return cell.out * (1 - cell.out)

    # def updateDeltaWeight(self, deri, speed, cell, loss, coefficient):  ## weight update; this strategy seems to converge a little faster
    #     sigmoidDri = abs(cell.out * (1 - cell.out))
    #     if ((sigmoidDri) < 0.1):  # gradient too small, skip
    #         return 0.0
    #     coefficient = abs(coefficient)
    #     coefficient = max(coefficient, 0.1)
    #     maxDelta = (0.3 / coefficient) * sigmoidDri  # a single step in x must not be too large
    #
    #     if abs(deri) > 0.000001:
    #         delta = (speed / deri) * loss
    #     else:
    #         return 0.0
    #     if abs(delta) > maxDelta:
    #         delta = maxDelta if delta > 0 else -maxDelta
    #     return -delta


############################################################### normal (Gaussian) distribution
class ActivationNormal(Activation):
    def activation_fun(self, x):  # activation function
        return math.exp(-x * x)

    def activation_deri_fun(self, cell):  # partial derivative
        return -cell.out * 2 * cell.sum


############################################################### tanh(x/2) function
class ActivationTanhHalf(Activation):  # renamed from a second ActivationTanh so it no longer shadows the class above
    def activation_fun(self, x):  # activation function
        return (1 - math.exp(-x)) / (1 + math.exp(-x))

    def activation_deri_fun(self, cell):  # partial derivative
        return 0.5 * (1 - cell.out * cell.out)


############################################################### loglog function
class ActivationLogLog(Activation):
    def activation_fun(self, x):  # activation function
        return 1 - math.exp(-math.exp(x))

    def activation_deri_fun(self, cell):  # partial derivative: exp(x) * exp(-exp(x)) = exp(x) * (1 - out)
        return math.exp(cell.sum) * (1 - cell.out)


############################################################### cos function
class ActivationCos(Activation):
    def activation_fun(self, x):  # activation function
        return math.cos(x)

    def activation_deri_fun(self, cell):  # partial derivative
        return -math.sin(cell.sum)


############################################################### sin function
class ActivationSin(Activation):
    def initWeight(self, cell):
        for i in range(len(cell.w)):
            cell.w[i] = self.wRange * random.choice([1., -1.]) * random.uniform(0.01, 1)
        cell.b = (self.bRange * self.wRange) * random.uniform(-1, 1)

    def activation_fun(self, x):  # activation function
        return math.sin(x)

    def activation_deri_fun(self, cell):  # partial derivative
        return math.cos(cell.sum)


############################################################### linear function
class ActivationLiner(Activation):
    def activation_fun(self, x):  # activation function
        return x

    def activation_deri_fun(self, cell):  # partial derivative
        return 1

    # def updateDeltaWeight(self, deri, speed, cell, loss, coefficient):
    #     return 0.  # temporarily forced to 0, for testing


######################## There are two kinds of Cell: one outputs ∑wi*xi + b,
# the special one outputs ∑abs(wi*(xi + hi))
class Cell:
    def __init__(self, activation, specialCellType):
        self._activation = activation
        self.inputCell = None
        self.sum = 0.0
        self.out = 0.0
        self.error = 0.0
        self.specialCellType = specialCellType

    def setInputCells(self, inputCell):
        self.inputCell = inputCell
        self.w = [0 for i in range(len(inputCell))]
        self.delta_w = [0 for i in range(len(inputCell))]
        if (self.specialCellType):
            self.h = [0 for i in range(len(inputCell))]
            self.delta_h = [0 for i in range(len(inputCell))]
        self.b = 0.0
        self.delta_b = 0.0
        if (self._activation):
            self._activation.initWeight(self)

    def caculateOut(self):  # compute the output
        sum = 0.0
        i = 0
        for cell in self.inputCell:
            if self.specialCellType:
                sum += abs(self.w[i] * (cell.out + self.h[i]))
            else:
                sum += self.w[i] * cell.out
            i += 1
        if not self.specialCellType:
            sum += self.b
        self.sum = sum
        self.out = self._activation.activation_fun(sum)

    def updateWeight(self, speed, loss):
        if self.inputCell:
            i = 0
            outDeri = self.error * self._activation.activation_deri_fun(self)
            for cell in self.inputCell:
                if self.specialCellType:
                    deri = (cell.out + self.h[i]) * outDeri
                    if self.delta_w[i] * (cell.out + self.h[i]) < 0.:
                        deri = -deri
                else:
                    deri = cell.out * outDeri
                self.delta_w[i] = self._activation.updateDeltaWeight(deri, speed, self, loss, cell.out)
                self.w[i] += self.delta_w[i]
                if self.specialCellType:
                    hDeri = outDeri if self.w[i] > 0 else -outDeri  # self.w[i]*outDeri
                    if (cell.out + self.h[i]) < 0.:  # absolute value, handle the sign specially
                        hDeri = -hDeri
                    self.delta_h[i] = self._activation.updateDeltaWeight(hDeri, speed, self, loss, cell.out)
                    self.h[i] += self.delta_h[i]
                i += 1
            if not self.specialCellType:
                deri = outDeri
                self.delta_b = self._activation.updateDeltaWeight(deri, speed, self, loss, 1)
                self.b += self.delta_b


class Layer:
    def __init__(self, lastLayer=None, cellNum=1, activation=None, specialCellType=False):
        self._lastLayer = lastLayer
        self._cellNum = cellNum
        self.cells = [Cell(activation, specialCellType) for i in range(cellNum)]
        self._nextLayer = None
        if lastLayer:
            lastLayer._nextLayer = self
            for cell in self.cells:
                cell.setInputCells(lastLayer.cells)

    def _forward(self):  # called from the first layer
        nextLayer = self._nextLayer
        while nextLayer:
            for cell in nextLayer.cells:
                cell.caculateOut()
            nextLayer = nextLayer._nextLayer

    def setInputAndForward(self, x):  # only the first layer calls this
        for i in range(len(self.cells)):
            self.cells[i].out = x[i]
        self._forward()

    def backPropagation(self, speed, loss):  # called from the last layer, runs towards the front
        currLayer = self
        lastLayer = self._lastLayer
        while lastLayer:  # compute all the errors
            for lastLayerCell in lastLayer.cells:
                lastLayerCell.error = 0.0
            for currLayercell in currLayer.cells:
                deri = currLayercell._activation.activation_deri_fun(currLayercell) * currLayercell.error
                for j in range(len(lastLayer.cells)):
                    lastLayerCell = lastLayer.cells[j]
                    lastLayerCell.error += currLayercell.w[j] * deri
            currLayer = lastLayer
            lastLayer = lastLayer._lastLayer
        while currLayer:  # update the weights
            for currLayercell in currLayer.cells:
                currLayercell.updateWeight(speed, loss)
            currLayer = currLayer._nextLayer


class Loss:
    def __init__(self, layer):
        self._layer = layer

    def minimize(self, expect, speed):
        raise NotImplementedError("")


class LossL2(Loss):
    def __init__(self, layer):
        super().__init__(layer)
        if (len(layer.cells) != 1):
            raise (Exception("last layer should have only one cell!"))

    def minimize(self, expect, speed):
        # The L2 distance is (out - expect)^2, and its derivative is 2*(out - expect)
        loss = (self._layer.cells[0].out - expect) * (self._layer.cells[0].out - expect)
        self._layer.cells[0].error = 2 * (self._layer.cells[0].out - expect)
        self._layer.backPropagation(speed, loss)


class LossEntropy(Loss):  # usually only meaningful when the previous stage is a sigmoid
    def __init__(self, layer):
        super().__init__(layer)
        if (len(layer.cells) != 1):
            raise (Exception("last layer should have only one cell!"))

    def minimize(self, expect, speed):
        # The distance is -(expect*ln(out) + (1 - expect)*ln(1 - out)); its derivative is
        # -(expect/out - (1 - expect)/(1 - out)) = (out - expect)/((1 - out)*out).
        # Because the error involves a division, the values can easily overflow the floating-point range.
        loss = -(expect * math.log(self._layer.cells[0].out) + (1 - expect) * math.log(1 - self._layer.cells[0].out))
        self._layer.cells[0].error = (self._layer.cells[0].out - expect) / (
                self._layer.cells[0].out * (1 - self._layer.cells[0].out))
        self._layer.backPropagation(speed, loss)


def run3DDraw():
    fig = plt.figure()
    ax = Axes3D(fig)
    X = np.arange(-8, 8, 0.25)
    Y = np.arange(-8, 8, 0.25)
    X, Y = np.meshgrid(X, Y)
    R = 1 / (1 + np.exp(abs(X) + abs(Y) - 5))
    Z = R
    # Use help(function) to see the details of a method, e.g. help(ax.plot_surface)
    ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='rainbow')
    plt.show()


def run2DDraw():
    x = np.linspace(-7, 7, 70)
    y = 1 / (1 + np.exp((abs(x) - 5)))
    ax1 = plt.subplot(111)
    ax1.clear()
    # ax1.set_title("y = sigmoid(-x)")
    ax1.plot(x, y)
    ax1.grid(True)
    plt.pause(10)


def run2D_DNN():
    # run2DDraw()
    hideCellNum = 120  # number of hidden-layer neurons
    speed = 0.0001  # do not underestimate speed: if it is too large the gradients explode very easily; try speed = 1 with Relu training, for example
    inputLayer = Layer(None, 1, None)  # first layer: no previous layer, no activation function, one input unit
    ############## The single-hidden-layer topology is shown below: one input unit, hideCellNum hidden
    # neurons and one output unit. The output unit is a linear neuron and the loss is the L2 distance.
    #       /-- 0 --
    # (x) 0 --- 0 -- 0 (y)
    #       -- 0 --/
    #
    # hideLayer1 = Layer(inputLayer, hideCellNum, ActivationXX(15, 1))
    # hideLayer1 = Layer(inputLayer, hideCellNum, ActivationNormal(15, 1))
    # hideLayer1 = Layer(inputLayer, hideCellNum, ActivationSin(35, 1))
    # hideLayer1 = Layer(inputLayer, hideCellNum, ActivationSigmoid(35, 1))
    hideLayer1 = Layer(inputLayer, hideCellNum, ActivationRelu(1, 1))
    # hideLayer2 = Layer(hideLayer1, hideCellNum, ActivationRelu())  # we can just as well build a deeper network
    # hideLayer3 = Layer(hideLayer2, hideCellNum, ActivationRelu())
    outputLayer = Layer(hideLayer1, 1, ActivationLiner(1, 0))
    loss = LossL2(outputLayer)
    x = np.linspace(-1, 1, 20)  # the input range must be matched to the weight initialisation of the chosen activation
    orig_y = 2 * np.sin(3 * x) + 1 * (x - 3) * x + 2  # tune the coefficient inside sin() to control the period and amplitude of the target
    # (x, orig_y) = walk_dir("./PIC", ".bmp")
    y = orig_y  # 1/(1 + np.exp(-orig_y))  # if the last layer is a sigmoid, also pass the target through a sigmoid here; with a Liner output use the raw values directly
    _z = np.array([0.0 for i in range(len(y))])  # never write _y = y: _y and y would then share the same storage, so changing _y also changes y; write _y = np.array(y) instead, which gives _y independent storage
    hideOutZ = [np.array(_z) for i in range(hideCellNum + 1)]
    hideDeltaWeightZ = [np.array(_z) for i in range(hideCellNum)]
    hideDeltaBiasZ = [np.array(_z) for i in range(hideCellNum)]
    outWeightZ = [np.array(_z) for i in range(hideCellNum)]
    outDeltaWeightZ = [np.array(_z) for i in range(hideCellNum)]
    plt.close()  # clf() clears the figure, cla() clears the axes, close() closes the window
    plt.grid(True)  # add a grid
    plt.ion()  # interactive mode on
    plt.figure(1)  # create figure 1
    ax1 = plt.subplot(221)  # subplot 1 of the 2x2 grid
    ax2 = plt.subplot(222)  # subplot 2 of the 2x2 grid
    ax3 = plt.subplot(223)  # subplot 3 of the 2x2 grid
    ax4 = plt.subplot(224)  # subplot 4 of the 2x2 grid
    # ax.axis("equal")  # set the X/Y aspect ratio of the displayed image
    for t in range(len(x)):  # initialise the recorded values
        inputLayer.setInputAndForward([x[t]])
        loss.minimize(y[t], speed)
        for j in range(len(hideLayer1.cells)):
            hideOutZ[j][t] = hideLayer1.cells[j].out * outputLayer.cells[0].w[j]
            hideDeltaWeightZ[j][t] = hideLayer1.cells[j].delta_w[0]
            hideDeltaBiasZ[j][t] = hideLayer1.cells[j].delta_b
            outDeltaWeightZ[j][t] = outputLayer.cells[0].delta_w[j]
            outWeightZ[j][t] = outputLayer.cells[0].w[j]
        hideOutZ[hideCellNum][t] = outputLayer.cells[0].b
        _z[t] = outputLayer.cells[0].out
    for loop in range(10000):
        for epoch in range(30):
            # t = int(random.uniform(0,1)*10000000) % len(x)
            for t in range(len(x)):
                inputLayer.setInputAndForward([x[t]])
                loss.minimize(y[t], speed)
                if (epoch == 1):  # True:#True:#
                    inputLayer.setInputAndForward([x[t]])
                    for j in range(len(hideLayer1.cells)):
                        hideDeltaWeightZ[j][t] = hideLayer1.cells[j].delta_w[0]
                        hideDeltaBiasZ[j][t] = hideLayer1.cells[j].delta_b
                        outDeltaWeightZ[j][t] = outputLayer.cells[0].delta_w[j]
                        outWeightZ[j][t] = outputLayer.cells[0].w[j]
                    for n in range(len(x)):
                        inputLayer.setInputAndForward([x[n]])
                        for j in range(len(hideLayer1.cells)):
                            hideOutZ[j][n] = hideLayer1.cells[j].out * outputLayer.cells[0].w[j]
                        hideOutZ[hideCellNum][n] = outputLayer.cells[0].b
                        _z[n] = outputLayer.cells[0].sum
                    if (t != len(x) - 1):  # comment this line out to watch every single training step in real time
                        continue
                    ax1.clear()
                    ax1.set_title("result loop:" + str(loop) + " Cell:" + str(hideCellNum))  # the target function, the network output, and each hidden neuron's output times its w weight
                    ax2.clear()
                    ax2.set_title("hide layer Δw")
                    ax3.clear()
                    ax3.set_title("hide layer Δb")
                    ax4.clear()
                    ax4.set_title("target layer Δw")
                    for j in range(len(hideOutZ)):
                        ax1.plot(x, hideOutZ[j])
                    ax1.plot(x, orig_y)  # , "-o"
                    ax1.plot(x, _z)
                    ax1.plot([x[t], x[t]], [np.min(_z[t]), np.max(y[t])])
                    for j in range(len(hideDeltaWeightZ)):
                        ax2.plot(x, hideDeltaWeightZ[j])
                        ax3.plot(x, hideDeltaBiasZ[j])
                        # ax4.plot(x, outWeightZ[j])
                        ax4.plot(x, outDeltaWeightZ[j])
                    ax2.plot([x[t], x[t]], [np.min(hideDeltaWeightZ), np.max(hideDeltaWeightZ)])
                    ax3.plot([x[t], x[t]], [np.min(hideDeltaBiasZ), np.max(hideDeltaBiasZ)])
                    plt.pause(0.1)


def run3D_DNN():
    hideCellNum = 5  # number of hidden-layer neurons
    speed = 0.001  # do not underestimate speed: if it is too large the gradients explode very easily
    inputLayer = Layer(None, 2, None)  # first layer: no previous layer, no activation function, two input units
    inputRange = 0.5
    ############## Same idea as the 2D case: hideCellNum hidden neurons per hidden layer, a single
    # linear output neuron, and the L2 distance as the loss function.
    #       /-- 0 --
    # (x) 0 --- 0 -- 0 (y)
    #       -- 0 --/
    #
    # hideLayer1 = Layer(inputLayer, hideCellNum, ActivationXX(15, 1))
    # hideLayer1 = Layer(inputLayer, hideCellNum, ActivationNormal(15, 1))
    # hideLayer1 = Layer(inputLayer, hideCellNum, ActivationSin(35, 1))
    # hideLayer1 = Layer(inputLayer, hideCellNum, ActivationNormal(2, 0.5), True)
    _hideLayer = Layer(inputLayer, hideCellNum, ActivationRelu(1, 0.2), True)
    hideLayer = Layer(_hideLayer, hideCellNum, ActivationRelu(1, 0.2), True)
    hideLayer1 = Layer(hideLayer, hideCellNum, ActivationRelu(1, 0.2))
    # hideLayer2 = Layer(hideLayer1, hideCellNum, ActivationRelu())  # we can just as well build a deeper network
    # hideLayer3 = Layer(hideLayer2, hideCellNum, ActivationRelu())
    outputLayer = Layer(hideLayer1, 1, ActivationLiner(1, 0))
    loss = LossL2(outputLayer)
    # X = np.arange(-1, 1, 0.4)
    # Y = np.arange(-1, 1, 0.4)
    X = np.arange(-inputRange, inputRange + 0.000001, inputRange / 2)  # training grid points
    Y = np.arange(-inputRange, inputRange + 0.000001, inputRange / 2)
    x, y = np.meshgrid(X, Y)
    subX = np.arange(-inputRange, inputRange, 0.1)  # mainly used for plotting
    subY = np.arange(-inputRange, inputRange, 0.1)
    subx, suby = np.meshgrid(subX, subY)
    subz = subx + suby
    orig_z = 2 * np.sin(7 * x) + 1 * (y - 3) * x + 2  # tune the coefficient inside sin() to control the period and amplitude of the target
    orig_z = [[1, 0, 0, 1, 1],
              [1, 1, 0, 1, 0],
              [1, 0, 0, 1, 0],
              [1, 1, 0, 0, 1],
              [1, 0, 0, 1, 0]]
    z = orig_z  # 1/(1 + np.exp(-orig_y))  # if the last layer is a sigmoid, also pass the target through a sigmoid here; with a Liner output use the raw values directly
    # print(x)
    # print(z)
    _z = np.array(subz)  # never write _y = y: _y and y would then share the same storage; write _y = np.array(y) instead, which gives _y independent storage
    hideOutZ = [np.array(_z) for i in range(hideCellNum + 1)]
    hideDeltaWeightZ = [np.array(_z) for i in range(hideCellNum)]
    hideDeltaBiasZ = [np.array(_z) for i in range(hideCellNum)]
    outWeightZ = [np.array(_z) for i in range(hideCellNum)]
    outDeltaWeightZ = [np.array(_z) for i in range(hideCellNum)]
    plt.close()  # clf() clears the figure, cla() clears the axes, close() closes the window
    plt.grid(True)  # add a grid
    plt.ion()  # interactive mode on
    fig = plt.figure(1)
    # ax = Axes3D(fig)
    ax1 = plt.axes(projection='3d')
    fig = plt.figure(2)
    # ax = Axes3D(fig)
    ax2 = plt.axes(projection='3d')
    # plt.figure(1)  # create figure 1
    # ax1 = plt.subplot(221)  # subplot 1 of the 2x2 grid
    # ax2 = plt.subplot(222)  # subplot 2 of the 2x2 grid
    # ax3 = plt.subplot(223)  # subplot 3 of the 2x2 grid
    # ax4 = plt.subplot(224)  # subplot 4 of the 2x2 grid
    # # ax.axis("equal")  # set the X/Y aspect ratio of the displayed image
    for loop in range(10000):
        for epoch in range(30):
            # t = int(random.uniform(0,1)*10000000) % len(x)
            for t in range(len(X)):
                for u in range(len(Y)):
                    inputLayer.setInputAndForward([X[t], Y[u]])
                    loss.minimize(z[t][u], speed)
            if (epoch == 1):  # True:#True:#
                for t in range(len(subX)):
                    for u in range(len(subY)):
                        inputLayer.setInputAndForward([subX[t], subY[u]])
                        for j in range(len(hideLayer1.cells)):
                            hideDeltaWeightZ[j][t] = hideLayer1.cells[j].delta_w[0]
                            hideDeltaBiasZ[j][t] = hideLayer1.cells[j].delta_b
                            outDeltaWeightZ[j][t] = outputLayer.cells[0].delta_w[j]
                            outWeightZ[j][t] = outputLayer.cells[0].w[j]
                            n, m = t, u
                            hideOutZ[j][n][m] = hideLayer1.cells[j].out * outputLayer.cells[0].w[j]
                        _z[n][m] = outputLayer.cells[0].sum
                        hideOutZ[hideCellNum][n][m] = outputLayer.cells[0].b
                ax1.clear()
                ax2.clear()
                ax1.set_title("sub loop:" + str(loop) + " Cell:" + str(hideCellNum))  # the target function, the network output, and each hidden neuron's output times its w weight
                ax2.plot_surface(x, y, np.array(orig_z))
                ax2.set_title("result loop:" + str(loop) + " Cell:" + str(hideCellNum))
                ax2.plot_surface(subx, suby, _z)  # , rstride=1, cstride=1, cmap='rainbow'
                for j in range(len(hideOutZ)):
                    ax1.plot_surface(subx, suby, hideOutZ[j])
                # ax1.clear()
                # ax1.set_title("result loop:" + str(loop) + " Cell:" + str(hideCellNum))  # the target function, the network output, and each hidden neuron's output times its w weight
                # ax2.clear()
                # ax2.set_title("hide layer Δw")
                # ax3.clear()
                # ax3.set_title("hide layer Δb")
                # ax4.clear()
                # ax4.set_title("target layer Δw")
                # for j in range(len(hideOutZ)):
                #     ax1.plot(x, hideOutZ[j])
                #
                # ax1.plot(x, orig_y)
                # ax1.plot(x, _z)
                # ax1.plot([x[t], x[t]], [np.min(_z[t]), np.max(y[t])])
                # for j in range(len(hideDeltaWeightZ)):
                #     ax2.plot(x, hideDeltaWeightZ[j])
                #     ax3.plot(x, hideDeltaBiasZ[j])
                #     # ax4.plot(x, outWeightZ[j])
                #     ax4.plot(x, outDeltaWeightZ[j])
                #
                # ax2.plot([x[t], x[t]], [np.min(hideDeltaWeightZ), np.max(hideDeltaWeightZ)])
                # ax3.plot([x[t], x[t]], [np.min(hideDeltaBiasZ), np.max(hideDeltaBiasZ)])
                plt.pause(0.1)


if __name__ == "__main__":
    # run3DDraw()
    # run3D_DNN()
    # run2D_DNN()
    run2D_DNN()
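
For readers who just want to exercise the classes above without the plotting loops, here is a minimal usage sketch. It is not part of the original script: the 1-16-1 topology, the ActivationSigmoid(4, 1) initialisation ranges, the learning speed of 0.01 and the epoch count are illustrative assumptions, not the author's settings. It builds a one-hidden-layer network and fits y = x*x with plain per-sample gradient descent, exactly the same call pattern that run2D_DNN() uses.

# --- Minimal usage sketch (illustrative, not from the original script) ---
# Assumes the classes above (Layer, ActivationSigmoid, ActivationLiner, LossL2) are already defined.
import numpy as np

def tiny_fit_demo():
    inputLayer = Layer(None, 1, None)                        # input layer: 1 unit, no activation
    hidden = Layer(inputLayer, 16, ActivationSigmoid(4, 1))  # 16 sigmoid cells; wRange=4, bRange=1 are guesses
    output = Layer(hidden, 1, ActivationLiner(1, 0))         # single linear output cell
    loss = LossL2(output)                                    # L2 loss on the single output

    xs = np.linspace(-1, 1, 21)
    ys = xs * xs                                             # target: y = x^2
    for epoch in range(2000):                                # plain per-sample gradient descent
        for x_val, y_val in zip(xs, ys):
            inputLayer.setInputAndForward([x_val])           # forward pass
            loss.minimize(y_val, 0.01)                       # backprop + weight update, speed = 0.01

    for x_val, y_val in zip(xs, ys):                         # inspect the fit
        inputLayer.setInputAndForward([x_val])
        print("x=%+.2f  target=%.3f  net=%.3f" % (x_val, y_val, output.cells[0].out))

Calling tiny_fit_demo() in place of run2D_DNN() in the __main__ block trains the small network and prints the target values next to the network's outputs.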
