A Complete Example of a Neural Network (NN) Algorithm in Python

Requirements

  • Python 3
  • numpy (scientific computing package)
  • matplotlib (needed only for plotting; optional otherwise)
  • sklearn (machine learning package; used here to generate the dataset)
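
These can usually be installed with pip (note that on PyPI the sklearn package is named scikit-learn):

  pip install numpy matplotlib scikit-learn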

Computation Process

(Figure: the original post shows a diagram of the computation process here; the image is not preserved.)
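
The network the code implements is a single hidden layer trained with batch gradient descent. As a sketch, in notation matching the code's variable names (with \(Y\) the one-hot form of the labels \(y\) and \(N\) the number of training examples):

\[ z_1 = X W_1 + b_1, \qquad a_1 = \tanh(z_1), \qquad z_2 = a_1 W_2 + b_2, \qquad p = \operatorname{softmax}(z_2) \]
\[ L = \frac{1}{N}\Big( -\sum_{i=1}^{N} \log p_{i,\,y_i} + \frac{\lambda}{2}\big( \lVert W_1 \rVert_F^2 + \lVert W_2 \rVert_F^2 \big) \Big) \]
\[ \delta_3 = p - Y, \qquad \nabla_{W_2} = a_1^{\top} \delta_3 + \lambda W_2, \qquad \delta_2 = \delta_3 W_2^{\top} \odot (1 - a_1^{2}), \qquad \nabla_{W_1} = X^{\top} \delta_2 + \lambda W_1 \]

Each pass updates every parameter \(\theta\) as \(\theta \leftarrow \theta - \epsilon \nabla_{\theta}\); the bias gradients are the column sums of \(\delta_3\) and \(\delta_2\).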

Sample Input

none (the dataset is generated inside the program)

Code

# -*- coding:utf-8 -*-
#!python3
__author__ = 'Wsine'
import numpy as np
import sklearn.datasets
import matplotlib.pyplot as plt
import time

def createData(dim=200, cnoise=0.20):
    """
    Output: dataset and the corresponding class labels
    Description: generate a two-class "moons" dataset and its labels
    """
    np.random.seed(0)
    X, y = sklearn.datasets.make_moons(dim, noise=cnoise)  # dim = number of samples
    plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
    #plt.show()
    return X, y

def plot_decision_boundary(pred_func, X, y):
    """
    Input: prediction function, dataset, class labels
    Description: plot the decision boundary
    """
    # Pad the min/max values with a small margin
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Build a grid over the whole range with step h
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the class of every grid point (np.c_ stacks the grid into (N, 2) points)
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Draw the boundary and the data points
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

def calculate_loss(model, X, y):
    """
    Input: trained model, dataset, class labels
    Output: average cross-entropy loss (lower is better)
    Description: evaluate the performance of the whole model
    """
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation to compute the predicted class probabilities
    z1 = X.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)  # softmax
    # Cross-entropy loss of the correct classes
    correct_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(correct_logprobs)
    # Add the L2 regularization term (optional)
    data_loss += reg_lambda/2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
    return 1./num_examples * data_loss

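# Note (added): np.exp can overflow for large scores. A numerically stable
# softmax subtracts the per-row maximum before exponentiating, which leaves
# the probabilities unchanged, e.g.:
#   exp_scores = np.exp(z2 - np.max(z2, axis=1, keepdims=True))
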
def predict(model, x):
    """
    Input: trained model, sample(s) to classify
    Output: predicted class (0 or 1)
    """
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation
    z1 = x.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    return np.argmax(probs, axis=1)

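# Note (added): softmax is strictly increasing per row, so
# np.argmax(probs, axis=1) equals np.argmax(z2, axis=1); the explicit
# normalization above is kept only for clarity.
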
def initParameter(X):
    """
    Input: dataset
    Description: initialize the hyperparameters of the network.
    They are stored as module-level globals and must be set before training!
    Adjust them by hand here if needed.
    """
    global num_examples
    num_examples = len(X)  # size of the training set
    global nn_input_dim
    nn_input_dim = 2  # dimension of the input layer
    global nn_output_dim
    nn_output_dim = 2  # dimension of the output layer
    # Gradient descent parameters
    global epsilon
    epsilon = 0.01  # learning rate for gradient descent
    global reg_lambda
    reg_lambda = 0.01  # regularization strength

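# Note (added): module-level globals keep this example short; in larger
# programs these hyperparameters would more commonly be passed as function
# arguments or grouped in a config object.
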
def build_model(X, y, nn_hdim, num_passes=20000, print_loss=False):
    """
    Input: dataset, class labels, hidden layer size, number of iterations,
           whether to print the loss during training
    Output: neural network model
    Description: train a network with nn_hdim nodes in its hidden layer
    """
    # Randomly initialize the parameters according to the dimensions
    np.random.seed(0)
    W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
    b1 = np.zeros((1, nn_hdim))
    W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
    b2 = np.zeros((1, nn_output_dim))
    model = {}
    # Batch gradient descent
    for i in range(0, num_passes):
        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = np.tanh(z1)  # activation: tanh(x) = (exp(x) - exp(-x)) / (exp(x) + exp(-x))
        z2 = a1.dot(W2) + b2
        exp_scores = np.exp(z2)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)  # softmax
        # Backpropagation
        delta3 = probs
        delta3[range(num_examples), y] -= 1  # gradient of softmax + cross-entropy
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))  # tanh'(z) = 1 - tanh(z)^2
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0)
        # Add the regularization terms
        dW2 += reg_lambda * W2
        dW1 += reg_lambda * W1
        # Gradient descent parameter update
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2
        # Update the model
        model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
        # Print the current loss every 1000 iterations
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss(model, X, y)))
    plot_decision_boundary(lambda x: predict(model, x), X, y)
    plt.title("Decision Boundary for hidden layer size %d" % nn_hdim)
    #plt.show()
    return model

def main():
    dataSet, labels = createData(200, 0.20)
    initParameter(dataSet)
    nnModel = build_model(dataSet, labels, 3, print_loss=False)
    print("Loss is %f" % calculate_loss(nnModel, dataSet, labels))

if __name__ == '__main__':
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    main()
    end = time.perf_counter()
    print('finish all in %s' % str(end - start))
    plt.show()
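
Once trained, the returned model dict can be reused directly with predict. A minimal sketch, assuming the functions above are in scope (the query coordinates are arbitrary illustration values, not from the original post):

import numpy as np
dataSet, labels = createData(200, 0.20)
initParameter(dataSet)
model = build_model(dataSet, labels, 3)
new_points = np.array([[0.5, -0.4], [1.5, 0.8]])  # hypothetical query points
print(predict(model, new_points))  # prints an array of 0/1 class labels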

Sample Output

Loss is 0.071316
finish all in 7.221354361552228

(Figure: the decision boundary plot displayed by plt.show(); image not preserved.)

