An example of combining a CNN with an LSTM for classification in Keras
I won't waste words; let's go straight to the code.
import keras
from keras.models import Model
from keras.layers import (Input, Reshape, Conv2D, ZeroPadding2D, LeakyReLU,
                          MaxPooling2D, GlobalMaxPooling2D, Dropout, Dense,
                          LSTM, concatenate)

# NOTE: the old Keras 1.x names (Convolution2D, border_mode, init, output_dim,
# input=/lr=) have been updated to the current API. The conv kernel sizes were
# dropped from the original listing in transit; (3, 3) is restored throughout,
# matching the first conv layer and the ZeroPadding2D(1, 1) pattern.
def get_model():
    n_classes = 6
    inp = Input(shape=(40, 80))
    # CNN branch: add a channel axis (the original used channels_first (1, 40, 80);
    # channels_last is the default in current Keras)
    reshape = Reshape((40, 80, 1))(inp)

    # conv block 1
    conv1 = Conv2D(32, (3, 3), padding='same', kernel_initializer='glorot_uniform')(reshape)
    l1 = LeakyReLU(alpha=0.33)(conv1)
    conv2 = ZeroPadding2D(padding=(1, 1))(l1)
    conv2 = Conv2D(32, (3, 3), kernel_initializer='glorot_uniform')(conv2)
    l2 = LeakyReLU(alpha=0.33)(conv2)
    m2 = MaxPooling2D((3, 3), strides=(3, 3))(l2)
    d2 = Dropout(0.25)(m2)

    # conv block 2
    conv3 = ZeroPadding2D(padding=(1, 1))(d2)
    conv3 = Conv2D(64, (3, 3), kernel_initializer='glorot_uniform')(conv3)
    l3 = LeakyReLU(alpha=0.33)(conv3)
    conv4 = ZeroPadding2D(padding=(1, 1))(l3)
    conv4 = Conv2D(64, (3, 3), kernel_initializer='glorot_uniform')(conv4)
    l4 = LeakyReLU(alpha=0.33)(conv4)
    m4 = MaxPooling2D((3, 3))(l4)
    d4 = Dropout(0.25)(m4)

    # conv block 3
    conv5 = ZeroPadding2D(padding=(1, 1))(d4)
    conv5 = Conv2D(128, (3, 3), kernel_initializer='glorot_uniform')(conv5)
    l5 = LeakyReLU(alpha=0.33)(conv5)
    conv6 = ZeroPadding2D(padding=(1, 1))(l5)
    conv6 = Conv2D(128, (3, 3), kernel_initializer='glorot_uniform')(conv6)
    l6 = LeakyReLU(alpha=0.33)(conv6)
    m6 = MaxPooling2D((3, 3))(l6)
    d6 = Dropout(0.25)(m6)

    # conv block 4
    conv7 = ZeroPadding2D(padding=(1, 1))(d6)
    conv7 = Conv2D(256, (3, 3), kernel_initializer='glorot_uniform')(conv7)
    l7 = LeakyReLU(alpha=0.33)(conv7)
    conv8 = ZeroPadding2D(padding=(1, 1))(l7)
    conv8 = Conv2D(256, (3, 3), kernel_initializer='glorot_uniform')(conv8)
    l8 = LeakyReLU(alpha=0.33)(conv8)
    g = GlobalMaxPooling2D()(l8)   # CNN branch output: a 256-dim vector

    # LSTM branch: reads the raw (40, 80) input as a sequence of 40 timesteps
    lstm1 = LSTM(256, activation='tanh', return_sequences=False)(inp)
    dl1 = Dropout(0.3)(lstm1)
    den1 = Dense(200, activation='relu')(dl1)
    dl2 = Dropout(0.3)(den1)

    # merge the two branches, then classify
    g2 = concatenate([g, dl2], axis=1)
    d10 = Dense(1024)(g2)
    l10 = LeakyReLU(alpha=0.33)(d10)
    l10 = Dropout(0.5)(l10)
    l11 = Dense(n_classes, activation='softmax')(l10)

    model = Model(inputs=inp, outputs=l11)
    model.summary()

    # compile the model
    adam = keras.optimizers.Adam(learning_rate=0.0005, beta_1=0.95, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
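To sanity-check the wiring, here is a minimal usage sketch. The input shape (40, 80) and the six classes come from get_model() above, but the random arrays are placeholder data invented purely for illustration:

import numpy as np

# a throwaway batch just to confirm the two branches compile and merge
x = np.random.rand(8, 40, 80).astype('float32')   # 8 samples of shape (40, 80)
y = keras.utils.to_categorical(np.random.randint(0, 6, 8), num_classes=6)

model = get_model()
model.fit(x, y, epochs=1, batch_size=4)           # one throwaway epoch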
Supplementary note: how to combine different models in Keras (CNN/LSTM as an example)
You may run into cases where several kinds of model need to be fused together, such as a CNN and an LSTM, whereas in Keras my usual opening move is the single line
model = Sequential()
followed by model.add, model.add, ... and finally
model.compile(loss=["mae"],optimizer='adam',metrics=[mape])
So when the models suddenly have to be joined together, what do you do?
The example code below joins a CNN and an LSTM in series: the CNN's convolution and pooling layers extract features first, and those features are then fed into the LSTM to produce the final output.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'   # silence TensorFlow's info logs

import keras
from keras.models import Model
from keras.layers import Input, Reshape, Dense, Dropout, Activation, Conv2D, MaxPooling2D, LSTM

def design_model():
    # design the network
    inp = Input(shape=(11, 5))
    reshape = Reshape((11, 5, 1))(inp)   # add a channel axis for the 2D convolutions
    # NOTE: the conv kernel sizes were lost from the original listing; (2, 2) is
    # assumed here, and the Reshape below (originally (10, 64)) is adjusted so
    # the element counts agree with the resulting feature map
    conv1 = Conv2D(32, (2, 2), kernel_initializer='glorot_uniform')(reshape)   # -> (10, 4, 32)
    l1 = Activation('relu')(conv1)
    conv2 = Conv2D(64, (2, 2))(l1)                                             # -> (9, 3, 64)
    l2 = Activation('relu')(conv2)
    m2 = MaxPooling2D(pool_size=(2, 2), padding='valid')(l2)                   # -> (4, 1, 64)
    # flatten the pooled map into a sequence of 4 timesteps of 64 features
    reshape1 = Reshape((4, 64))(m2)
    lstm1 = LSTM(30, return_sequences=False)(reshape1)
    dl1 = Dropout(0.3)(lstm1)
    # den1 = Dense(100, activation='relu')(dl1)
    den2 = Dense(1, activation='relu')(dl1)
    model = Model(inputs=inp, outputs=den2)
    model.summary()   # print a model overview
    adam = keras.optimizers.Adam(learning_rate=0.001, epsilon=1e-08)
    # the original forgot to pass the optimizer to compile()
    model.compile(loss=["mae"], optimizer=adam, metrics=['mape'])
    return model

model = design_model()
# train_x/train_y/test_x/test_y, epochs and batch_size are assumed to be prepared elsewhere
history = model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size,
                    validation_data=(test_x, test_y), verbose=2, shuffle=True)
# save the trained model after training
model.save('model_trained.h5')
In the example above the CNN and LSTM are in series: the CNN's output is the LSTM's input, one path from start to finish.
If you want them in parallel instead, i.e. separate branches that are merged at the end,
you can use the concatenate function to join the CNN branch's output with the LSTM branch's output, then stack further layers on top to finish the model graph. From the first example, where g is the CNN branch and dl2 the LSTM branch:
g2 = concatenate([g, dl2], axis=1)
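Stripped to its skeleton, that parallel wiring looks like the sketch below; the layer sizes are illustrative stand-ins, not the original hyperparameters:

from keras.models import Model
from keras.layers import Input, Reshape, Conv2D, GlobalMaxPooling2D, LSTM, Dense, concatenate

inp = Input(shape=(40, 80))                       # one shared input
cnn = Conv2D(32, (3, 3), padding='same')(Reshape((40, 80, 1))(inp))
g = GlobalMaxPooling2D()(cnn)                     # CNN branch  -> (None, 32)
dl2 = LSTM(64)(inp)                               # LSTM branch -> (None, 64)
g2 = concatenate([g, dl2], axis=1)                # merged      -> (None, 96)
out = Dense(6, activation='softmax')(g2)
model = Model(inputs=inp, outputs=out)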
To summarize:
Besides Sequential, this is Keras's other way of building models, the functional API, and it is the more flexible of the two. The key is that at the very end the whole model's inputs and outputs are pinned down with model = Model(inputs=inp, outputs=den2).
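As a further illustration of that flexibility, the functional API also expresses graphs that Sequential cannot, such as a model with two separate inputs (a toy sketch, not from the original post):

from keras.models import Model
from keras.layers import Input, Dense, concatenate

a = Input(shape=(16,))                            # hypothetical first input
b = Input(shape=(8,))                             # hypothetical second input
h = concatenate([Dense(32, activation='relu')(a),
                 Dense(32, activation='relu')(b)])
out = Dense(1)(h)
model = Model(inputs=[a, b], outputs=out)         # a list of inputs, one output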
That is everything in this example of combining a CNN with an LSTM for classification in Keras. I hope it gives you a useful reference, and I hope you will keep supporting us.