
TensorFlow model building and usage: common tips

For deep-learning experiments based on TensorFlow, this post records common tips and techniques for customizing models: defining custom loss functions, activation functions, attention layers, and so on.


1. Custom activation functions

Define a custom gelu activation function

# GELU activation (tanh approximation)
import numpy as np
import tensorflow as tf

def gelu(x):
    return 0.5 * x * (1 + tf.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))
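A quick sanity check of the activation (this example is my own, assuming TensorFlow 2.x eager execution):

print(gelu(tf.constant([-1.0, 0.0, 1.0])))
# roughly [-0.159, 0.0, 0.841]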
Use the custom activation function in a model
from keras.layers import Input, Dense, Dropout, Embedding, LSTM

# image-feature branch
inputs1 = Input(shape=(2048,))
fe1 = Dropout(0.5)(inputs1)
fe2 = Dense(256, activation=gelu)(fe1)          # custom gelu as the Dense activation

# text branch
inputs2 = Input(shape=(max_length1,))
se1 = Embedding(vocab_size, embedding_dim, mask_zero=True)(inputs2)
se2 = Dropout(0.5)(se1)
x = LSTM(128, return_sequences=True, activation=gelu)(se2)   # custom gelu inside the LSTM
x = Dropout(0.5)(x)
x = LSTM(256)(x)
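The snippet above only builds the two branches. Below is a minimal sketch of one way to join them into a trainable model; the Add merge, decoder width, and softmax head are my assumptions, not necessarily the original architecture:

from keras.layers import Add
from keras.models import Model

decoder = Add()([fe2, x])                       # both branches are 256-dimensional here
decoder = Dense(256, activation=gelu)(decoder)
outputs = Dense(vocab_size, activation='softmax')(decoder)
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam')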
Load a model that uses the custom activation function
from keras.utils import get_custom_objects
from keras.models import load_model
from keras.layers import Activation

# register gelu globally so load_model can resolve it by name
# (note: dict.update() returns None, so its result should not be passed as custom_objects)
get_custom_objects().update({'gelu': Activation(gelu)})
model = load_model('saved_model/inceptionV3_LSTM2_200.h5')
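Equivalently, the mapping can be passed directly instead of being registered globally (same effect, just scoped to this one call):

model = load_model('saved_model/inceptionV3_LSTM2_200.h5',
                   custom_objects={'gelu': Activation(gelu)})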
2. Building a model with a custom layer class
Define the custom class
# TPA attention: score-matrix layer
from keras.layers import Layer
from keras import backend as K

class CalculateScoreMatrix(Layer):
    def __init__(self, output_dim=None, **kwargs):
        self.output_dim = output_dim
        super(CalculateScoreMatrix, self).__init__(**kwargs)

    def get_config(self):
        config = super().get_config().copy()
        config.update({'output_dim': self.output_dim})
        return config

    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[-1], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(CalculateScoreMatrix, self).build(input_shape)

    def call(self, x):
        res = K.dot(x, self.kernel)
        return res
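A quick shape check of the layer on a dummy input (the dimensions here are illustrative only, chosen to match the permuted hidden states used below):

from keras.layers import Input

inp = Input(shape=(64, 30))          # (units, timesteps)
out = CalculateScoreMatrix(64)(inp)  # kernel shape (30, 64)
print(out.shape)                     # expected (None, 64, 64)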
Build the model
# TPA-LSTM (temporal pattern attention over the LSTM hidden states)
from keras.layers import (Input, Dense, Dropout, Embedding, LSTM, Lambda,
                          Reshape, Activation, Multiply, Add, Flatten)
from keras import backend as K

# image-feature branch (tf_swish is another custom activation, defined like gelu above)
inputs1 = Input(shape=(2048,))
fe1 = Dropout(0.5)(inputs1)
fe2 = Dense(256, activation=tf_swish)(fe1)

# text branch
inputs2 = Input(shape=(max_length1,))
se1 = Embedding(vocab_size, embedding_dim, mask_zero=True)(inputs2)
se2 = Dropout(0.5)(se1)
x = LSTM(64, return_sequences=True)(se2)

# split the hidden states into H (steps 1..t-1) and ht (step t)
H = Lambda(lambda x: x[:, :-1, :])(x)
ht = Lambda(lambda x: x[:, -1, :])(x)
ht = Reshape((64, 1))(ht)

# transpose H to (batch, units, timesteps) and compute the score matrix
HC = Lambda(lambda x: K.permute_dimensions(x, [0, 2, 1]))(H)
score_mat = CalculateScoreMatrix(64)(HC)
score_mat = Lambda(lambda x: K.batch_dot(x[0], x[1]))([score_mat, ht])

# attention weights and context vector
score_mat = Activation("sigmoid")(score_mat)
attn_mat = Multiply()([HC, score_mat])
attn_vec = Lambda(lambda x: K.sum(x, axis=-1))(attn_mat)

# combine the context vector with the last hidden state
wvt = Dense(units=64 * 4, activation=None)(attn_vec)
wht = Dense(units=64 * 4, activation=None)(Flatten()(ht))
yht = Add()([wht, wvt])
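
yht can then be fed into an output head and the model saved, so that the next step can reload it. The head below is a sketch under my own assumptions (the original post does not show it); only the file name matches the load call that follows:

from keras.models import Model

decoder = Add()([fe2, yht])                     # both are 256-dimensional
outputs = Dense(vocab_size, activation='softmax')(decoder)
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.save('saved_model/model_inception_TPA_lstm1_30.h5')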

Load a model that uses the custom class
from keras.utils import get_custom_objects
from keras.models import load_model

# register the custom layer class so load_model can deserialize it by name
get_custom_objects().update({'CalculateScoreMatrix': CalculateScoreMatrix})
model = load_model('saved_model/model_inception_TPA_lstm1_30.h5')

3. Fancy learning-rate schedules and custom loss functions will be added in a few days.
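In the meantime, a minimal sketch of both ideas (my own example, not the author's upcoming content): a label-smoothed cross-entropy loss and a step-decay learning-rate schedule, usable with the models above.

import keras.backend as K
from keras.callbacks import LearningRateScheduler

def smoothed_categorical_crossentropy(y_true, y_pred, eps=0.1):
    # label smoothing on top of the standard categorical cross-entropy
    n_classes = K.int_shape(y_pred)[-1]
    y_true = y_true * (1.0 - eps) + eps / n_classes
    return K.categorical_crossentropy(y_true, y_pred)

def step_decay(epoch):
    # halve an initial learning rate of 1e-3 every 10 epochs
    return 1e-3 * (0.5 ** (epoch // 10))

model.compile(loss=smoothed_categorical_crossentropy, optimizer='adam')
# model.fit(..., callbacks=[LearningRateScheduler(step_decay)])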