
Recognizing handwritten digits (MNIST) is an important problem in machine learning. Here we use a dataset of non-handwritten glyphs (notMNIST) together with an SVM to recognize letters.

  1. Download the letter image dataset here

  2. Load and preprocess the data

    • The images are grouped into folders A-J by letter; each image is 28*28 pixels, 18724 images in total
    • Split the data into training and test sets
  3. Dimensionality reduction (Principal Component Analysis)

    • Use pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train) to reduce each 28*28-pixel image to a 150-dimensional vector
  4. Machine learning (ML) - SVM

  5. Error analysis


    With a linear kernel, C=1, and gamma=auto, the accuracy is about 87% (see the parameter-tuning sketch after this list)

  6. Plotting
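The full script below fits the SVM with kernel='linear' and the default C=1. As a rough, untested sketch (not part of the original script), the GridSearchCV class that the script already imports from sklearn.grid_search could be used to search over C and gamma with an RBF kernel; X_train_pca and y_train refer to the variables defined in the script, and the candidate values are only illustrative:

from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC

# candidate hyperparameters (illustrative values only)
param_grid = {'C': [1, 10, 100, 1000],
              'gamma': [0.0001, 0.001, 0.01]}

grid = GridSearchCV(SVC(kernel='rbf'), param_grid)
grid.fit(X_train_pca, y_train)   # PCA-reduced training data from the script
print grid.best_estimator_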


Full source code

SVM_notMNIST.py
# -*- coding: utf-8 -*-

"""
===================================================
notMNIST recognition example using eigenimages and SVMs
===================================================
The dataset used in this example is in the ./notMNIST_small folder

"""


from collections import defaultdict
from sklearn.cross_validation import train_test_split
from sklearn.svm import SVC
from sklearn.decomposition import RandomizedPCA
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from time import time
import numpy as np
import pylab as pl
import os

t0 = time()
##################
# preprocess the image data for SVM classification
#

### preload png image files in each folder
filenames = []
for e in os.walk('./notMNIST_small'):
    filenames.append(e)

foldername = filenames[0][1] # ['A','G','E','F',...]

labels = foldername

# map each letter folder to the list of png filenames it contains
# (os.walk visits the subfolders in the same order as foldername)
filepath_dict = {foldername[i]:filenames[i+1][2]
                     for i,_ in enumerate(foldername)}

filepaths = defaultdict(list)
for key in filepath_dict.keys():
    filepaths[key] = [ os.path.join('./notMNIST_small',key,path) for path in filepath_dict[key]]
    
def imageData(key):
    '''input:key, output list of image data '''
    imagedata = []
    for e in filepaths[key]:
        try:
            imagedata.append(pl.imread(e))
        except Exception:
            # skip the few unreadable/corrupted png files in notMNIST_small
            pass
    return np.array(imagedata)

images = {key:imageData(key) for key in filepaths.keys()} # images['A'] 

print 'time cost to read files: {}'.format(time()-t0)
size = 0
for i in labels:
    print "images[{}].shape = {}".format(i,images[i].shape)

    size += images[i].shape[0]

dataA = np.array([np.ndarray.flatten(images['A'][i]) for i in range(images['A'].shape[0])])

h = images['A'].shape[-2] # image height (28 pixels)
w = images['A'].shape[-1] # image width (28 pixels)
n_features = h*w


############################################
# prepare the data matrix X for machine learning:
# data.shape = (18724, 28*28)
# y -> labels, e.g. (A,A,A,...,G,G,H,H,H)
#      shape (18724,)
############################################


data = np.zeros([size,n_features])
temp=0
y = np.chararray(size) # string numpy array


for j,label in enumerate(labels):
    for e in range(images[label].shape[0]):        
        data[temp] = np.ndarray.flatten(images[label][e])        
        y[temp]= np.array(label)
        temp += 1

X = data
n_samples = data.shape[0]
n_classes = len(labels)

print "Total datasets size"
print "==================="
print "n_samples: {}".format(n_samples)
print "n_features: {}".format(n_features)
print "n_classes: {}".format(n_classes)
print "==================="
###########################################
#
# split train/test for our original data
#
#######################


X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,random_state=42)

###############
#
#  PCA
#
###############


n_components = 150

print "Extracting the %d eigenpixel from %d pixels" % (n_components, X_train.shape[1])
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print "done in %0.3fs" % (time() - t0)

eigenimage = pca.components_.reshape((n_components, h, w))
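# each row of pca.components_ can be reshaped back to a 28x28 "eigenimage",
# analogous to the eigenfaces in the scikit-learn face recognition example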

print "Projecting the input data on the eigenimage orthonormal basis"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print "done in %0.3fs" % (time() - t0)


################################
#
#  SVC and error analysis
#
################################


print "Fitting the classifier to the training set"
t0 = time()

clf = SVC(kernel='linear')
clf.fit(X_train_pca, y_train)
print "done in %0.3fs" % (time() - t0)

y_pred = clf.predict(X_test_pca)
print classification_report(y_test,y_pred)
print confusion_matrix(y_test,y_pred)
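# in the confusion matrix, rows correspond to the true letters and columns
# to the predicted letters; large off-diagonal entries reveal which letter
# pairs the classifier tends to confuse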

#################################
#
# plot
#
################################


def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Helper function to plot a gallery of images"""
    pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for i in range(n_row * n_col):
        pl.subplot(n_row, n_col, i + 1)
        pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
        pl.title(titles[i], size=12)
        pl.xticks(())
        pl.yticks(())

def title(y_pred,y_test,i):
    return 'predicted:{},real:{} '.format(y_pred[i],y_test[i])

prediction_titles = [title(y_pred, y_test,i) for i in range(12)]

plot_gallery(X_test[:12],prediction_titles,h,w)
pl.show()
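Note: the imports above target older scikit-learn releases. From scikit-learn 0.18 onward, sklearn.cross_validation and sklearn.grid_search were merged into sklearn.model_selection, and RandomizedPCA was replaced by PCA with svd_solver='randomized'. A rough, untested equivalent for newer versions:

from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.decomposition import PCA

# randomized PCA as in the script, using the newer API
pca = PCA(n_components=n_components, whiten=True,
          svd_solver='randomized').fit(X_train)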

References

  • scikit-learn SVM face recognition example