
How to print the derivative of the loss with respect to the weights in Keras

Notes

I suspected exploding gradients in a model and wanted to print the derivatives of the loss with respect to each weight to check. If you train with fit, this can be done with keras.callbacks.TensorBoard.

But this time the model was trained with train_on_batch, so I implemented it with K.gradients and K.function instead.
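If you do train with fit, a minimal sketch of the callback looks like this (assuming the Keras 2.x TensorBoard signature, where write_grads only takes effect when histogram_freq > 0 and validation data is provided):

import keras

tb = keras.callbacks.TensorBoard(log_dir='./logs',  # where the event files are written
                                 histogram_freq=1,  # write histograms every epoch
                                 write_grads=True)  # also write gradient histograms
# model.fit(x_train, y_train, validation_data=(x_val, y_val), callbacks=[tb])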

Code

Using a VAE script as the example:

# -*- coding: utf-8 -*-
import keras
from keras.models import Model
from keras.layers import Input, Lambda, Dense
from keras.losses import binary_crossentropy
from keras.datasets import fashion_mnist
import keras.backend as K
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt

BATCH = 128
N_CLASS = 10
EPOCH = 5
IN_DIM = 28 * 28
H_DIM = 128
Z_DIM = 2

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train = x_train.reshape(len(x_train), -1).astype('float32') / 255.
x_test = x_test.reshape(len(x_test), -1).astype('float32') / 255.

def sampling(args):
  """Reparameterization trick: z = mu + eps * exp(logvar / 2)."""
  mu, logvar = args
  eps = K.random_normal([K.shape(mu)[0], Z_DIM], mean=0.0, stddev=1.0)
  return mu + eps * K.exp(logvar / 2.)

# encode
x_in = Input([IN_DIM])
h = Dense(H_DIM, activation='relu')(x_in)
z_mu = Dense(Z_DIM)(h)      # mean, no activation
z_logvar = Dense(Z_DIM)(h)  # log variance, no activation
z = Lambda(sampling, output_shape=[Z_DIM])([z_mu, z_logvar])  # the Lambda function takes a single argument, so pass a list
encoder = Model(x_in, [z_mu, z_logvar, z], name='encoder')

# decode
z_in = Input([Z_DIM])
h_hat = Dense(H_DIM, activation='relu')(z_in)
x_hat = Dense(IN_DIM, activation='sigmoid')(h_hat)
decoder = Model(z_in, x_hat, name='decoder')

# VAE
x_in = Input([IN_DIM])
x = x_in
z_mu, z_logvar, z = encoder(x)  # the encoder has three outputs
x = decoder(z)
out = x
vae = Model(x_in, [out, out], name='vae')

# loss_kl = 0.5 * K.sum(K.square(z_mu) + K.exp(z_logvar) - 1. - z_logvar, axis=1)
# loss_recon = binary_crossentropy(K.reshape(vae_in, [-1, IN_DIM]), vae_out) * IN_DIM
# loss_vae = K.mean(loss_kl + loss_recon)

def loss_kl(y_true, y_pred):
  """KL term; y_true and y_pred are ignored but required by the Keras loss signature."""
  return 0.5 * K.sum(K.square(z_mu) + K.exp(z_logvar) - 1. - z_logvar, axis=1)


# vae.add_loss(loss_vae)
vae.compile(optimizer='rmsprop', loss=[loss_kl, 'binary_crossentropy'], loss_weights=[1, IN_DIM])
vae.summary()
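# Note: listing the same output twice with loss_weights=[1, IN_DIM] makes the
# total loss 1 * KL + IN_DIM * mean(binary_crossentropy), i.e. the same
# quantity as the commented-out loss_vae above.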

# get the model's trainable weight variables
w = vae.trainable_weights
print(w)

# print the derivative of the KL term with respect to the weights
# the first argument must be a tensor, not the function loss_kl above
grad = K.gradients(0.5 * K.sum(K.square(z_mu) + K.exp(z_logvar) - 1. - z_logvar, axis=1), w)
print(grad)  # some entries are None
grad = [g for g in grad if g is not None]  # drop the None entries, otherwise K.function raises an error

# a function that evaluates the gradients
# the inputs and outputs of K.function must be lists, even with a single element
show_grad = K.function([vae.input], grad)  # grad is already a list, so don't wrap it again

# vae.fit(x_train,
#         # y_train,  # y_train must not be passed here
#         batch_size=BATCH,
#         epochs=EPOCH,
#         verbose=1,
#         validation_data=(x_test, None))

''' train with train_on_batch '''
for epoch in range(EPOCH):
  for b in range(x_train.shape[0] // BATCH):
    idx = np.random.choice(x_train.shape[0], BATCH)
    x = x_train[idx]
    l = vae.train_on_batch([x], [x, x])

  # evaluate the gradients
  gd = show_grad([x])
  # print the gradients
  print(gd)
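  # A quick exploding-gradient check (an extra sketch on top of the original):
  # the largest absolute entry of each gradient array makes a blow-up easy to spot.
  for g in gd:
    print('max |grad| =', np.abs(g).max())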

# show manifold
PIXEL = 28
N_PICT = 30
grid_x = norm.ppf(np.linspace(0.05, 0.95, N_PICT))
grid_y = grid_x

figure = np.zeros([N_PICT * PIXEL, N_PICT * PIXEL])
for i, xi in enumerate(grid_x):
  for j, yj in enumerate(grid_y):
    noise = np.array([[xi, yj]])  # must be rank 2, hence the double brackets
    x_gen = decoder.predict(noise)
    # print('x_gen shape:', x_gen.shape)
    x_gen = x_gen[0].reshape([PIXEL, PIXEL])
    figure[i * PIXEL: (i + 1) * PIXEL, j * PIXEL: (j + 1) * PIXEL] = x_gen

fig = plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
fig.savefig('./variational_autoencoder.png')
plt.show()

Extra note: None gradients when auto-differentiating a custom Keras loss

A record of a problem: when auto-differentiating a custom Keras loss, some gradients came back as None. The cause turned out to be that one of the variables passed in was never used, so Keras could not compute a partial derivative with respect to it. The fix is simple: multiply the unused variable by 0 and keep it in the loss, as in the function below; its derivative is then 0 rather than None, and the error disappears.

import tensorflow as tf
import keras.backend as K
from keras.losses import mean_squared_error

def my_complex_loss_graph(y_label, emb_uid, lstm_out, y_true_1, y_true_2, y_true_3, out_1, out_2, out_3):

  mse_out_1 = mean_squared_error(y_true_1, out_1)
  mse_out_2 = mean_squared_error(y_true_2, out_2)
  mse_out_3 = mean_squared_error(y_true_3, out_3)
  # emb_uid= K.reshape(emb_uid,32])
  cosine_sim = tf.reduce_sum(0.5 * tf.square(emb_uid - lstm_out))

  # multiply the otherwise-unused cosine_sim by 0: it stays in the graph,
  # so its gradient is 0 instead of None
  cost = 0 * cosine_sim + K.sum([0.5 * mse_out_1, 0.25 * mse_out_2, 0.25 * mse_out_3], axis=1, keepdims=True)
  # print(mse_out_1)
  final_loss = cost

  return K.mean(final_loss)
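To see the phenomenon in isolation, here is a minimal sketch (standalone toy variables of my own, assuming the graph-mode TF1 backend used above): K.gradients returns None for a variable the loss does not depend on, and multiplying the unused variable by 0 re-attaches it to the graph with an all-zero gradient.

import keras.backend as K

a = K.variable(1.0)
b = K.variable(2.0)

loss_without_b = K.square(a)
print(K.gradients(loss_without_b, [a, b]))  # second entry is None: b is unused

loss_with_b = K.square(a) + 0 * b           # b now appears in the graph
print(K.gradients(loss_with_b, [a, b]))     # both entries are tensors (d/db == 0)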

This article on how to print the derivative of the loss with respect to the weights in Keras is everything I have to share; I hope it can serve as a reference, and thank you for supporting us.