Keras Learning Notes (1): The Keras Module Architecture
阿新 · Published: 2019-02-11
1. Introduction to Keras
Keras is a high-level neural network API that runs on top of a TensorFlow, Theano, or CNTK backend. It is built for simple, fast prototyping, so you can quickly turn an idea into a working result, and the same code runs on CPU or GPU without changes.
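The active backend and image data format are read from ~/.keras/keras.json. A minimal check from Python (a sketch, assuming Keras 2.x with a TensorFlow backend installed) looks like this:

# a minimal sketch, assuming Keras 2.x; the printed values depend on your keras.json
from keras import backend as K

print(K.backend())            # e.g. 'tensorflow', 'theano' or 'cntk'
print(K.image_data_format())  # 'channels_last' is the TensorFlow default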
2. Keras module mind map
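In place of the mind map image, the main modules can be sketched as import statements with a one-line note on each (assuming Keras 2.x; the list is not exhaustive):

# Keras 2.x top-level modules touched in this note (not exhaustive)
from keras import models      # Sequential and functional Model containers
from keras import layers      # Dense, Conv2D, MaxPool2D, Dropout, Flatten, Reshape, ...
from keras import optimizers  # Adam, SGD, RMSprop, ...
from keras import losses      # categorical_crossentropy, mse, ...
from keras import metrics     # accuracy, top_k_categorical_accuracy, ...
from keras import datasets    # mnist, cifar10, ... (small built-in datasets)
from keras import utils       # to_categorical, plot_model, ...
from keras import callbacks   # ModelCheckpoint, EarlyStopping, TensorBoard, ...
from keras import backend     # low-level ops delegated to TensorFlow/Theano/CNTK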
3. Quick start: MNIST handwritten digit recognition
# -*- coding: utf-8 -*-
from keras.models import Sequential
from keras.datasets import mnist
from keras.optimizers import Adam
from keras.losses import categorical_crossentropy
from keras.layers import Dense, Reshape, Dropout, Flatten, Conv2D, MaxPool2D
from keras.utils import to_categorical


def loadData():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    return x_train, y_train, x_test, y_test


def shuffle():
    pass


def createModel():
    model = Sequential()
    # reshape the (28, 28) images to the channels_last shape (rows, cols, channels)
    model.add(Reshape(input_shape=(28, 28), target_shape=(28, 28, 1)))
    # layer1: conv, 32 filters of size (5, 5), relu activation
    # (the first layer of a Sequential model must declare its input shape;
    #  the Reshape above already does, so input_shape here is redundant)
    model.add(Conv2D(32, kernel_size=(5, 5), input_shape=(28, 28, 1), activation='relu'))
    # 2x2 max pooling
    model.add(MaxPool2D(pool_size=(2, 2)))
    # layer2: conv, 64 filters of size (5, 5), relu activation
    model.add(Conv2D(64, kernel_size=(5, 5), activation='relu'))
    # 2x2 max pooling
    model.add(MaxPool2D(pool_size=(2, 2)))
    # flatten the feature maps into a 1-D vector
    model.add(Flatten())
    # layer3: fully connected, 1024 units
    model.add(Dense(1024, activation='relu'))
    # layer4: dropout, drop 50% of the units during training
    model.add(Dropout(0.5))
    # output: 10 classes, softmax activation
    model.add(Dense(10, activation='softmax'))
    return model


def train():
    model = createModel()
    # loss: categorical cross-entropy, optimizer: Adam, metric: accuracy
    model.compile(loss=categorical_crossentropy, optimizer=Adam(), metrics=['accuracy'])
    x_train, y_train, x_test, y_test = loadData()
    # normalize pixel values to [0, 1]
    x_train = x_train.astype('float32') / 255
    x_test = x_test.astype('float32') / 255
    # convert labels to one-hot vectors, e.g. [1, 2] -> [[0,1,0,...], [0,0,1,...]]
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    # epochs=1: one pass over the data, batch size 64, verbose=1: print progress logs
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=1, shuffle=True, verbose=1,
              validation_data=(x_test, y_test))
    score = model.evaluate(x=x_test, y=y_test, batch_size=128, verbose=1)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])


if __name__ == "__main__":
    train()
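To reuse the trained network later, a model.save('mnist_cnn.h5') call can be added at the end of train() (the file name is just an example). A minimal sketch of the reload-and-predict side, assuming that save call has been added:

# a minimal sketch: reload a previously saved model and classify one test digit
# assumes train() was extended with model.save('mnist_cnn.h5') after fitting
import numpy as np
from keras.datasets import mnist
from keras.models import load_model

restored = load_model('mnist_cnn.h5')       # restores architecture, weights and optimizer state

(_, _), (x_test, y_test) = mnist.load_data()
x_test = x_test.astype('float32') / 255     # same preprocessing as in train()

probs = restored.predict(x_test[:1])        # shape (1, 10): one softmax score per class
print('predicted digit:', np.argmax(probs, axis=1)[0], 'true label:', y_test[0])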