import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import ui
from scipy.io import loadmat
from scipy.optimize import minimize

print("Classic MNIST handwritten digit recognition")
ui.split_line1()

print("Loading the data")
data = loadmat('data/ex3data1.mat')

print("Preview of the loaded data")
print(data)

print("Shapes of X and y")
print(data['X'].shape, data['y'].shape)

print("Define the sigmoid function, the cost function and the vectorized gradient function")


def sigmoid(z):
    return 1 / (1 + np.exp(-z))


def cost(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    # the regularization term skips the intercept parameter theta[0]
    reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))
    return np.sum(first - second) / len(X) + reg


def gradient(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    error = sigmoid(X * theta.T) - y

    grad = ((X.T * error) / len(X)).T + ((learningRate / len(X)) * theta)

    # intercept gradient is not regularized
    grad[0, 0] = np.sum(np.multiply(error, X[:, 0])) / len(X)

    return np.array(grad).ravel()


ui.split_line2()
print("Implement one-vs-all classification: with k distinct class labels we train k classifiers,")
print("each of which decides between 'class i' and 'not class i'. The classifier training is wrapped")
print("in a single function that computes the final weights for each of the 10 classifiers and")
print("returns them as a k x (n + 1) array, where n is the number of parameters.")


def one_vs_all(X, y, num_labels, learning_rate):
    rows = X.shape[0]
    params = X.shape[1]

    # k X (n + 1) array for the parameters of each of the k classifiers
    all_theta = np.zeros((num_labels, params + 1))

    # insert a column of ones at the beginning for the intercept term
    X = np.insert(X, 0, values=np.ones(rows), axis=1)

    # labels are 1-indexed instead of 0-indexed (the digit 0 is stored as label 10 in this dataset)
    for i in range(1, num_labels + 1):
        theta = np.zeros(params + 1)
        y_i = np.array([1 if label == i else 0 for label in y])
        y_i = np.reshape(y_i, (rows, 1))

        # minimize the objective function
        fmin = minimize(fun=cost, x0=theta, args=(X, y_i, learning_rate), method='TNC', jac=gradient)
        all_theta[i - 1, :] = fmin.x

    return all_theta


print("Preparing the training data")
rows = data['X'].shape[0]
params = data['X'].shape[1]

all_theta = np.zeros((10, params + 1))

X = np.insert(data['X'], 0, values=np.ones(rows), axis=1)

theta = np.zeros(params + 1)

y_0 = np.array([1 if label == 0 else 0 for label in data['y']])
y_0 = np.reshape(y_0, (rows, 1))

print("Shapes of X, y, theta and all_theta")
print(X.shape, y_0.shape, theta.shape, all_theta.shape)

print("Distinct labels in y")
print(np.unique(data['y']))

print("Training")
all_theta = one_vs_all(data['X'], data['y'], 10, 1)
print(all_theta)

print("Predict the label of each image")


def predict_all(X, all_theta):
    rows = X.shape[0]

    # same as before, insert ones to match the shape
    X = np.insert(X, 0, values=np.ones(rows), axis=1)

    # convert to matrices
    X = np.matrix(X)
    all_theta = np.matrix(all_theta)

    # compute the class probability for each class on each training instance
    h = sigmoid(X * all_theta.T)

    # take the index of the maximum probability for each row
    h_argmax = np.argmax(h, axis=1)

    # because our array was zero-indexed we need to add one for the true label prediction
    h_argmax = h_argmax + 1

    return h_argmax


print("Compute the training accuracy")
y_pred = predict_all(data['X'], all_theta)
correct = [1 if a == b else 0 for (a, b) in zip(y_pred, data['y'])]
accuracy = sum(map(int, correct)) / float(len(correct))
print('accuracy = {0}%'.format(accuracy * 100))
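
# --- Optional sanity checks (a minimal sketch, not part of the original exercise) ---

# 1) Numerically verify the analytic gradient against the regularized cost on a tiny
#    random problem. The problem size, seed and epsilon below are arbitrary choices.
rng = np.random.RandomState(0)
X_check = np.insert(rng.randn(5, 3), 0, values=np.ones(5), axis=1)
y_check = rng.randint(0, 2, (5, 1))
theta_check = rng.randn(4)
eps = 1e-5
numeric_grad = np.array([
    (cost(theta_check + eps * np.eye(4)[j], X_check, y_check, 1.0)
     - cost(theta_check - eps * np.eye(4)[j], X_check, y_check, 1.0)) / (2 * eps)
    for j in range(4)
])
analytic_grad = gradient(theta_check, X_check, y_check, 1.0)
print('max |numeric - analytic| gradient difference:', np.max(np.abs(numeric_grad - analytic_grad)))

# 2) Show a few random digits next to their predicted labels, assuming each row of
#    data['X'] is a 20x20 grayscale image stored column-major (order='F'); drop the
#    order argument if the digits come out transposed. Label 10 stands for the digit 0.
sample_idx = np.random.choice(rows, 4, replace=False)
fig, axes = plt.subplots(1, 4, figsize=(8, 2))
for ax, idx in zip(axes, sample_idx):
    ax.imshow(data['X'][idx].reshape(20, 20, order='F'), cmap='gray')
    ax.set_title('pred: {0}'.format(int(y_pred[idx])))
    ax.axis('off')
plt.show()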