import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import ui
from scipy.io import loadmat

print('Working with the handwritten digits dataset again, this time with a feed-forward neural network trained by backpropagation')
print('The data is the same as in 4_multi_classification.py')
print("The classic MNIST handwritten digit recognition task")
ui.split_line1()

print("Load the data")
data = loadmat('data/ex3data1.mat')
print("Preview of the loaded data")
print(data)

print("Shapes of X and y")
X = data['X']
y = data['y']
print(X.shape, y.shape)

print("One-hot encode the y labels")
from sklearn.preprocessing import OneHotEncoder
# scikit-learn >= 1.2 uses sparse_output; on older versions use OneHotEncoder(sparse=False)
encoder = OneHotEncoder(sparse_output=False)
y_onehot = encoder.fit_transform(y)
print('Shape of the encoded y')
print(y_onehot.shape)
print('Compare the original y with the encoded y_onehot')
print(y[0], y_onehot[0, :])

print("Define the sigmoid, forward propagation and cost functions")

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def forward_propagate(X, theta1, theta2):
    m = X.shape[0]
    a1 = np.insert(X, 0, values=np.ones(m), axis=1)  # add the bias column
    z2 = a1 * theta1.T
    a2 = np.insert(sigmoid(z2), 0, values=np.ones(m), axis=1)
    z3 = a2 * theta2.T
    h = sigmoid(z3)
    return a1, z2, a2, z3, h

def cost0(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)

    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))

    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)

    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i, :], np.log(h[i, :]))
        second_term = np.multiply((1 - y[i, :]), np.log(1 - h[i, :]))
        J += np.sum(first_term - second_term)
    J = J / m

    return J

print('Initial settings')
input_size = 400
hidden_size = 25
num_labels = 10
learning_rate = 1  # used as the regularization strength (lambda), despite the name

print('Randomly initialize a parameter array sized for the full network')
params = (np.random.random(size=hidden_size * (input_size + 1) + num_labels * (hidden_size + 1)) - 0.5) * 0.25

m = X.shape[0]
X = np.matrix(X)
y = np.matrix(y)

print('Unroll the parameter array into per-layer parameter matrices, with shapes')
theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
print(theta1.shape, theta2.shape)

print('Run forward propagation; the resulting shapes are')
a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
print(a1.shape, z2.shape, a2.shape, z3.shape, h.shape)

print('Compute the total error between y and h')
print(cost0(params, input_size, hidden_size, num_labels, X, y_onehot, learning_rate))

print('Define the regularized cost function')

def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)

    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))

    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)

    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i, :], np.log(h[i, :]))
        second_term = np.multiply((1 - y[i, :]), np.log(1 - h[i, :]))
        J += np.sum(first_term - second_term)
    J = J / m

    # add the cost regularization term (the bias columns are not regularized)
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:, 1:], 2)) + np.sum(np.power(theta2[:, 1:], 2)))

    return J

print('Compute the total error once more')
print(cost(params, input_size, hidden_size, num_labels, X, y_onehot, learning_rate))

print('Define the functions needed for backpropagation')

def sigmoid_gradient(z):
    return np.multiply(sigmoid(z), (1 - sigmoid(z)))
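# A minimal sanity-check sketch: compare sigmoid_gradient against a centered
# finite-difference approximation of sigmoid. The values eps and z_check below
# are illustrative choices, not part of the exercise itself.
eps = 1e-4
z_check = np.array([-1.0, 0.0, 2.5])
numeric_grad = (sigmoid(z_check + eps) - sigmoid(z_check - eps)) / (2 * eps)
analytic_grad = sigmoid_gradient(z_check)
print('finite-difference check of sigmoid_gradient (numeric vs analytic)')
print(numeric_grad, analytic_grad)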
def backprop(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)

    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))

    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)

    # initializations
    J = 0
    delta1 = np.zeros(theta1.shape)  # (25, 401)
    delta2 = np.zeros(theta2.shape)  # (10, 26)

    # compute the cost
    for i in range(m):
        first_term = np.multiply(-y[i, :], np.log(h[i, :]))
        second_term = np.multiply((1 - y[i, :]), np.log(1 - h[i, :]))
        J += np.sum(first_term - second_term)
    J = J / m

    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:, 1:], 2)) + np.sum(np.power(theta2[:, 1:], 2)))

    # perform backpropagation, one training example at a time
    for t in range(m):
        a1t = a1[t, :]  # (1, 401)
        z2t = z2[t, :]  # (1, 25)
        a2t = a2[t, :]  # (1, 26)
        ht = h[t, :]    # (1, 10)
        yt = y[t, :]    # (1, 10)

        d3t = ht - yt  # (1, 10)

        z2t = np.insert(z2t, 0, values=np.ones(1))  # (1, 26)
        d2t = np.multiply((theta2.T * d3t.T).T, sigmoid_gradient(z2t))  # (1, 26)

        delta1 = delta1 + (d2t[:, 1:]).T * a1t
        delta2 = delta2 + d3t.T * a2t

    delta1 = delta1 / m
    delta2 = delta2 / m

    # add the gradient regularization term (bias columns are not regularized)
    delta1[:, 1:] = delta1[:, 1:] + (theta1[:, 1:] * learning_rate) / m
    delta2[:, 1:] = delta2[:, 1:] + (theta2[:, 1:] * learning_rate) / m

    # unravel the gradient matrices into a single array
    grad = np.concatenate((np.ravel(delta1), np.ravel(delta2)))

    return J, grad

print('Try a single backpropagation step')
J, grad = backprop(params, input_size, hidden_size, num_labels, X, y_onehot, learning_rate)
print(J, grad.shape)

print('Train the model')
from scipy.optimize import minimize

# minimize the objective function
fmin = minimize(fun=backprop, x0=params,
                args=(input_size, hidden_size, num_labels, X, y_onehot, learning_rate),
                method='TNC', jac=True, options={'maxiter': 250})
print('The result is', fmin)

print('Make predictions with the optimized parameters')
X = np.matrix(X)
theta1 = np.matrix(np.reshape(fmin.x[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
theta2 = np.matrix(np.reshape(fmin.x[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
y_pred = np.array(np.argmax(h, axis=1) + 1)  # labels run 1..10 (10 stands for the digit 0)
print('Predicted values of y')
print(y_pred)

print('Evaluate the accuracy')
correct = [1 if a == b else 0 for (a, b) in zip(y_pred, y)]
accuracy = (sum(map(int, correct)) / float(len(correct)))
print('accuracy = {0}%'.format(accuracy * 100))
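# A minimal follow-up sketch: show a few sample digits next to the network's
# predictions, using the matplotlib import from the top of the file. It assumes
# each row of X is a 20x20 grayscale image stored column-major (order='F');
# drop order='F' if the images appear transposed.
fig, axes = plt.subplots(1, 5, figsize=(10, 2))
sample_idx = np.random.choice(X.shape[0], 5, replace=False)
for ax, idx in zip(axes, sample_idx):
    ax.imshow(np.asarray(X[idx]).reshape((20, 20), order='F'), cmap='gray')
    ax.set_title('pred: {0}'.format(int(y_pred[idx, 0])))
    ax.axis('off')
plt.show()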