Machine Learning (5): Principal Component Analysis (PCA)



PCA is mainly used for dimensionality reduction.

The crudest way to reduce dimensionality is simply to drop one of the dimensions outright.

A better reduction projects the points onto a direction that keeps them as spread out as possible, i.e. preserves the largest variance:

Step 1: demean the data, so that every feature has mean 0:

Then find the direction w that maximizes the variance of the projected samples:
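In symbols, this is the objective that f(w, X) in the code below computes: after demeaning, find the unit vector w that maximizes

$$\mathrm{Var}\big(X_{\text{project}}\big) = \frac{1}{m}\sum_{i=1}^{m}\left(X^{(i)} \cdot w\right)^{2}, \qquad \|w\| = 1$$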

Use gradient ascent to find this maximum variance.
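The gradient of the objective with respect to w, which df_math in the code below implements in vectorized form, is

$$\nabla f(w) = \frac{2}{m}\, X^{T}(X w)$$

Gradient ascent repeatedly steps w along this gradient (w ← w + η∇f) and re-normalizes it to a unit vector.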

Difference from linear regression: linear regression fits a line that predicts a label y from the features and measures error as the vertical distance to that line; in PCA every axis is a feature, and we look for a direction w onto which the (demeaned) points are projected so that the projected variance is largest.

Implementing PCA with gradient ascent:

# principal component analysis with gradient ascent
import numpy as np
import matplotlib.pyplot as plt

# generate the sample: 100 samples, 2 features
X = np.empty((100, 2))
X[:, 0] = np.random.uniform(0., 100., size=100)
X[:, 1] = 0.75 * X[:, 0] + 3. + np.random.normal(0, 10, size=100)
# plt.scatter(X[:,0], X[:,1])
# plt.show()

# demean: shift every feature to mean 0
def demean(X):
    return X - np.mean(X, axis=0)

X_demean = demean(X)
# plt.scatter(X_demean[:,0], X_demean[:,1])
# plt.show()

# gradient ascent
def f(w, X):  # variance of the projection
    return np.sum((X.dot(w)) ** 2) / len(X)

def df_math(w, X):  # gradient (derivative) of the variance
    return X.T.dot(X.dot(w)) * 2. / len(X)

def df_debug(w, X, epsilon=0.0001):  # numerical gradient, used to verify df_math
    res = np.empty(len(w))
    for i in range(len(w)):
        w_1 = w.copy()
        w_1[i] += epsilon
        w_2 = w.copy()
        w_2[i] -= epsilon
        res[i] = (f(w_1, X) - f(w_2, X)) / (2 * epsilon)
    return res

def direction(w):  # turn w into a unit vector; only the direction matters
    return w / np.linalg.norm(w)

def gradient_ascent(df, X, initial_w, eta, n_iters=1e4, epsilon=1e-8):
    w = direction(initial_w)  # turn the initial w into a unit vector
    cur_iter = 0  # current iteration count
    while cur_iter < n_iters:
        gradient = df(w, X)
        last_w = w
        w = w + eta * gradient  # step size shrinks as the gradient approaches 0
        w = direction(w)  # note 1: re-normalize to a unit vector every step
        if abs(f(w, X) - f(last_w, X)) < epsilon:
            break
        cur_iter += 1
    return w

initial_w = np.random.random(X.shape[1])  # note 2: the initial w must not be the zero vector
eta = 0.001
# note 3: do not standardize the data with StandardScaler
# (it would force every feature to unit variance and destroy the variance structure PCA looks for)

w = gradient_ascent(df_debug, X_demean, initial_w, eta)
print(w)  # [0.78660631 0.61745487]
w = gradient_ascent(df_math, X_demean, initial_w, eta)
print(w)  # [0.78660631 0.61745487]

plt.scatter(X_demean[:, 0], X_demean[:, 1])
plt.plot([0, w[0] * 100], [0, w[1] * 100], color='r')
plt.show()

The red line is the first principal component direction found for the two features.

Obtaining the first n principal components (remove from X its projection onto the first component, keeping only the part perpendicular to it, then repeat):
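In symbols, this is the removal step the code performs with X2 = X - X.dot(w).reshape(-1, 1) * w: subtract from every sample its projection onto the first (unit) component w,

$$X'^{(i)} = X^{(i)} - \left(X^{(i)} \cdot w\right) w$$

then run the same gradient ascent on X' to obtain the next component.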

# obtain the first n principal components
import numpy as np
import matplotlib.pyplot as plt

X = np.empty((100, 2))
X[:, 0] = np.random.uniform(0., 100., size=100)
X[:, 1] = 0.75 * X[:, 0] + 3. + np.random.normal(0, 10., size=100)

def demean(X):
    return X - np.mean(X, axis=0)

X = demean(X)

def f(w, X):
    return np.sum((X.dot(w) ** 2)) / len(X)

def df(w, X):
    return X.T.dot(X.dot(w)) * 2. / len(X)

def direction(w):
    return w / np.linalg.norm(w)

def first_component(X, initial_w, eta, n_iters=1e4, epsilon=1e-8):
    w = direction(initial_w)
    cur_iter = 0
    while cur_iter < n_iters:
        gradient = df(w, X)
        last_w = w
        w = w + eta * gradient
        w = direction(w)
        if abs(f(w, X) - f(last_w, X)) < epsilon:
            break
        cur_iter += 1
    return w

initial_w = np.random.random(X.shape[1])
eta = 0.01
w = first_component(X, initial_w, eta)  # unit vector of the first principal component

X2 = X - X.dot(w).reshape(-1, 1) * w  # what remains of X after removing the first component
w2 = first_component(X2, initial_w, eta)  # unit vector of the second principal component
print(w.dot(w2))  # the two directions are perpendicular: 2.387548196425282e-06

# unit vectors of the first n principal components
def first_n_components(n, X, eta=0.01, n_iters=1e4, epsilon=1e-8):
    X_pca = X.copy()
    X_pca = demean(X_pca)
    res = []
    for i in range(n):
        initial_w = np.random.random(X_pca.shape[1])
        w = first_component(X_pca, initial_w, eta)
        res.append(w)
        X_pca = X_pca - X_pca.dot(w).reshape(-1, 1) * w
    return res

w = first_n_components(2, X)
print(w)  # [array([0.77252383, 0.63498578]), array([-0.63498371, 0.77252553])]

Mapping from high-dimensional data to low-dimensional data
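Writing the first k unit components as the rows of a matrix W_k (the components_ attribute of the class below), the reduction and the approximate reconstruction are just matrix products:

$$X_{k} = X \cdot W_{k}^{T}, \qquad X_{\text{restore}} = X_{k} \cdot W_{k}$$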

We can write our own PCA.py, with a transform that maps samples onto the principal components and an inverse_transform that maps them back to the original feature space:

import numpy as np

class PCA:

    def __init__(self, n_components):
        """Initialize PCA"""
        assert n_components >= 1, "n_components must be valid"
        self.n_components = n_components
        self.components_ = None

    def fit(self, X, eta=0.01, n_iters=1e4):
        """Obtain the first n principal components of the dataset X"""
        assert self.n_components <= X.shape[1], \
            "n_components must not be greater than the feature number of X"

        def demean(X):
            return X - np.mean(X, axis=0)

        def f(w, X):
            return np.sum((X.dot(w) ** 2)) / len(X)

        def df(w, X):
            return X.T.dot(X.dot(w)) * 2. / len(X)

        def direction(w):
            return w / np.linalg.norm(w)

        def first_component(X, initial_w, eta=0.01, n_iters=1e4, epsilon=1e-8):
            w = direction(initial_w)
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = df(w, X)
                last_w = w
                w = w + eta * gradient
                w = direction(w)
                if abs(f(w, X) - f(last_w, X)) < epsilon:
                    break
                cur_iter += 1
            return w

        X_pca = demean(X)
        self.components_ = np.empty(shape=(self.n_components, X.shape[1]))
        for i in range(self.n_components):
            initial_w = np.random.random(X_pca.shape[1])
            w = first_component(X_pca, initial_w, eta, n_iters)
            self.components_[i, :] = w
            X_pca = X_pca - X_pca.dot(w).reshape(-1, 1) * w
        return self

    def transform(self, X):
        """Map the given X onto the principal components"""
        assert X.shape[1] == self.components_.shape[1]
        return X.dot(self.components_.T)

    def inverse_transform(self, X):
        """Map the given X back to the original feature space"""
        assert X.shape[1] == self.components_.shape[0]
        return X.dot(self.components_)

    def __repr__(self):
        return "PCA(n_components=%d)" % self.n_components

Usage:

import numpy as np
import matplotlib.pyplot as plt

X = np.empty((100, 2))
X[:, 0] = np.random.uniform(0., 100., size=100)
X[:, 1] = 0.75 * X[:, 0] + 3. + np.random.normal(0, 10., size=100)

from PCA import PCA

pca = PCA(n_components=2)
pca.fit(X)
print(pca.components_)  # [[ 0.76018413  0.64970769] [-0.64970246  0.76018861]]

pca = PCA(n_components=1)
pca.fit(X)
X_reduction = pca.transform(X)  # position of each sample along the first principal component
X_restore = pca.inverse_transform(X_reduction)  # map the 1-d positions back to the two features

plt.scatter(X[:, 0], X[:, 1], color='b', alpha=0.5)
plt.scatter(X_restore[:, 0], X_restore[:, 1], color='r', alpha=0.5)
plt.show()

Using the PCA from the sklearn package:

import numpy as np
import matplotlib.pyplot as plt

X = np.empty((100, 2))
X[:, 0] = np.random.uniform(0., 100., size=100)
X[:, 1] = 0.75 * X[:, 0] + 3. + np.random.normal(0, 10., size=100)

from sklearn.decomposition import PCA

pca = PCA(n_components=1)
pca.fit(X)
print(pca.components_)
X_reduction = pca.transform(X)
X_restore = pca.inverse_transform(X_reduction)

plt.scatter(X[:, 0], X[:, 1], color='b', alpha=0.5)
plt.scatter(X_restore[:, 0], X_restore[:, 1], color='r', alpha=0.5)
plt.show()

PCA in scikit-learn

# use the handwritten-digits dataset: reduce dimensionality, then classify
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
import time

digits = datasets.load_digits()
X = digits.data
y = digits.target

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
# print(X_train.shape)  # (1347, 64): 64-dimensional data

# classify with kNN without dimensionality reduction
time1 = time.time()
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_train)
print(knn_clf.score(X_test, y_test))  # 0.9866666666666667
time2 = time.time()
print(time2 - time1)  # 0.10456466674804688

# reduce dimensionality first, then classify
from sklearn.decomposition import PCA
time3 = time.time()
pca = PCA(n_components=2)  # reduce to 2 dimensions
pca.fit(X_train)
X_train_rediction = pca.transform(X_train)
X_test_rediction = pca.transform(X_test)
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train_rediction, y_train)
print(knn_clf.score(X_test_rediction, y_test))  # 0.6066666666666667
time4 = time.time()
print(time4 - time3)  # 0.007133007049560547
# reducing first is much faster but far less accurate, so the number of dimensions must be chosen again

print(pca.explained_variance_ratio_)
# with 2 dimensions they explain only [0.14566817 0.13735469] of the variance, so too much information is lost

# keep all 64 dimensions and look at the explained variance ratio of each one
from sklearn.decomposition import PCA
pca = PCA(n_components=X_train.shape[1])
pca.fit(X_train)
# print(pca.explained_variance_ratio_)
# [1.45668166e-01 1.37354688e-01 1.17777287e-01 8.49968861e-02
#  5.86018996e-02 5.11542945e-02 4.26605279e-02 3.60119663e-02
#  3.41105814e-02 3.05407804e-02 2.42337671e-02 2.28700570e-02
#  1.80304649e-02 1.79346003e-02 1.45798298e-02 1.42044841e-02
#  1.29961033e-02 1.26617002e-02 1.01728635e-02 9.09314698e-03
#  8.85220461e-03 7.73828332e-03 7.60516219e-03 7.11864860e-03
#  6.85977267e-03 5.76411920e-03 5.71688020e-03 5.08255707e-03
#  4.89020776e-03 4.34888085e-03 3.72917505e-03 3.57755036e-03
#  3.26989470e-03 3.14917937e-03 3.09269839e-03 2.87619649e-03
#  2.50362666e-03 2.25417403e-03 2.20030857e-03 1.98028746e-03
#  1.88195578e-03 1.52769283e-03 1.42823692e-03 1.38003340e-03
#  1.17572392e-03 1.07377463e-03 9.55152460e-04 9.00017642e-04
#  5.79162563e-04 3.82793717e-04 2.38328586e-04 8.40132221e-05
#  5.60545588e-05 5.48538930e-05 1.08077650e-05 4.01354717e-06
#  1.23186515e-06 1.05783059e-06 6.06659094e-07 5.86686040e-07
#  1.71368535e-33 7.44075955e-34 7.44075955e-34 7.15189459e-34]

# accumulate the ratios and see how many dimensions are needed before the information loss is acceptable
plt.plot([i for i in range(X_train.shape[1])],
         [np.sum(pca.explained_variance_ratio_[:i + 1]) for i in range(X_train.shape[1])])
plt.show()

# pass the explained variance ratio we want to keep and let PCA choose the number of components
pca = PCA(0.95)
pca.fit(X_train)
print(pca.n_components_)  # 28

X_train_rediction = pca.transform(X_train)
X_test_rediction = pca.transform(X_test)
time5 = time.time()
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train_rediction, y_train)
print(knn_clf.score(X_test_rediction, y_test))  # 0.98
time6 = time.time()
print(time6 - time5)  # 0.02398967742919922
# the accuracy stays high while the time drops sharply

MNIST: classification with and without dimensionality reduction:

# classifying the MNIST dataset
import numpy as np
from sklearn.datasets import fetch_mldata
import time

mnist = fetch_mldata('MNIST original')
X, y = mnist['data'], mnist['target']
X_train = np.array(X[:60000], dtype=float)
X_test = np.array(X[60000:], dtype=float)
y_train = np.array(y[:60000], dtype=float)
y_test = np.array(y[60000:], dtype=float)
# print(X_train.shape)  # (60000, 784): 784-dimensional data

# classify with kNN
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier()
time1 = time.time()
knn_clf.fit(X_train, y_train)
print(knn_clf.score(X_test, y_test))  # 0.9688
time2 = time.time()
print(time2 - time1)  # 902.0233969688416

# reduce dimensionality first, then classify
from sklearn.decomposition import PCA
pca = PCA(0.9)  # keep 90% of the explained variance
pca.fit(X_train)
X_train_reduction = pca.transform(X_train)
X_test_reduction = pca.transform(X_test)
knn_clf = KNeighborsClassifier()
time3 = time.time()
knn_clf.fit(X_train_reduction, y_train)
print(knn_clf.score(X_test_reduction, y_test))  # 0.9728
time4 = time.time()
print(time4 - time3)  # 116.10433602333069
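Note: fetch_mldata has been removed from newer scikit-learn releases. A minimal sketch of loading the same data through fetch_openml instead, assuming the 'mnist_784' dataset on OpenML (the rest of the code above stays unchanged):

from sklearn.datasets import fetch_openml

# fetch_openml returns the 70000 x 784 MNIST data; as_frame=False keeps numpy arrays
mnist = fetch_openml('mnist_784', version=1, as_frame=False)
X, y = mnist.data, mnist.target.astype(float)  # targets come back as strings
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]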

After dimensionality reduction the accuracy is actually slightly higher, and the time spent drops dramatically.

Keeping 90% of the explained variance also discards quite a lot of noise, which is why the accuracy even improves.

Eigenfaces:

# eigenfaces
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people

faces = fetch_lfw_people()
example_faces = faces.data[:36, :]

def plot_faces(faces):
    fig, axes = plt.subplots(6, 6, figsize=(10, 10),
                             subplot_kw={'xticks': [], 'yticks': []},
                             gridspec_kw=dict(hspace=0.1, wspace=0.1))
    for i, ax in enumerate(axes.flat):
        ax.imshow(faces[i].reshape(62, 47), cmap='bone')
    plt.show()

plot_faces(example_faces)

Using PCA to extract the eigenfaces:

# eigenfaces
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people

faces = fetch_lfw_people()
random_indexes = np.random.permutation(len(faces.data))
X = faces.data[random_indexes]
example_faces = X[:36, :]

def plot_faces(faces):
    fig, axes = plt.subplots(6, 6, figsize=(10, 10),
                             subplot_kw={'xticks': [], 'yticks': []},
                             gridspec_kw=dict(hspace=0.1, wspace=0.1))
    for i, ax in enumerate(axes.flat):
        ax.imshow(faces[i].reshape(62, 47), cmap='bone')
    plt.show()

# plot_faces(example_faces)

from sklearn.decomposition import PCA
pca = PCA(svd_solver='randomized')
pca.fit(example_faces)
plot_faces(pca.components_)  # each row of components_ is one "eigenface"

