(python原始碼)基於tensorflow的徑向基向量bp神經網路
阿新 • • 發佈:2019-02-06
基於tensorflow的rbf神經網路_python原始碼實現
該網路結構比較簡單,輸入層--徑向基層--輸出層,其中需要迭代優化的是:“徑向基層”--“輸出層”之間的權重(weights和biases)。
# encoding:utf-8
"""RBF network with a BP-trained linear output layer, built on TensorFlow 1.x.

Architecture: input layer -> radial-basis layer (fixed centers) -> output layer.
Only the RBF->output weights and bias are optimized by gradient descent; the
RBF centers (x_nodes) are fixed at construction time.
"""
import os

import numpy as np
import tensorflow as tf

PI = 3.1415926535898
min_, max_ = -5, 5


class rbf_bp:
    def kernel_(self, x_):
        """Map raw inputs x_ (shape (m, d)) to the RBF feature matrix (m, num_).

        Entry [i, j] is the kernel response of sample i to center j.
        """
        # Euclidean distance between two points.
        self.distant_ = lambda x1, x2: np.sqrt(np.sum(np.square(x1 - x2)))
        # Kernel function.  NOTE(review): the Gaussian kernel is commented out
        # in the original and replaced by a power kernel; kept as-is to
        # preserve behavior.
        # self.Gaussian = lambda x: np.exp(-np.power(x / self.gamma, 2))
        self.Gaussian = lambda x: x ** self.gamma
        mount_ = x_.shape[0]
        # Intermediate matrix of pairwise distances, then kernel-transformed.
        x_dis = np.zeros((mount_, self.num_))
        matrix_ = np.zeros((mount_, self.num_))
        for i in range(mount_):
            for j in range(self.num_):
                x_dis[i, j] = self.distant_(x_[i], self.x_nodes[j])
                matrix_[i, j] = self.Gaussian(x_dis[i, j])
        return matrix_

    def __init__(self, x_nodes, y_nodes, gamma):
        """Build the graph.

        x_nodes: (num_, d) coordinates of the RBF centers.
        y_nodes: (num_,) target values at the centers.
        gamma:   kernel coefficient.
        """
        self.x_nodes = x_nodes
        self.gamma = gamma
        self.num_ = len(y_nodes)  # number of RBF centers
        matrix_ = self.kernel_(x_nodes)
        # Initialize the output weights with the least-squares (pseudo-inverse)
        # solution so gradient descent starts near a good fit.
        weights_ = np.dot(np.linalg.pinv(matrix_), y_nodes.copy())
        # Two-layer graph: RBF features in (layer 1), linear regression out (layer 2).
        self.x_ = tf.placeholder(tf.float32, shape=(None, x_nodes.shape[0]), name="x_")
        self.y_ = tf.placeholder(tf.float32, shape=(None), name="y_")
        weights_ = weights_[:, np.newaxis]
        self.weights = tf.Variable(weights_, name="weights", dtype=tf.float32)
        self.biaes = tf.Variable(0.0, name="biaes", dtype=tf.float32)
        self.predict_ = tf.matmul(self.x_, self.weights) + self.biaes
        self.loss = tf.reduce_mean(tf.square(self.y_ - self.predict_))
        # Mean relative error; assumes targets are never exactly zero.
        self.err_rate = tf.reduce_mean(tf.abs((self.y_ - self.predict_) / self.y_))

    def train(self, x_train, y_train, x_test, y_test, batch_size, learn_rate, circles_):
        """Optimize weights/bias with mini-batch gradient descent, then save a checkpoint."""
        print(x_train.shape)
        x_train = self.kernel_(x_train)
        print(x_train.shape)
        x_test = self.kernel_(x_test)
        self.train_ = tf.train.GradientDescentOptimizer(learn_rate).minimize(self.loss)
        saver = tf.train.Saver()
        size_ = x_train.shape[0]  # number of training samples
        with tf.Session() as sess:
            # initialize_all_variables() is deprecated/removed in TF 1.x;
            # global_variables_initializer() is the supported equivalent.
            sess.run(tf.global_variables_initializer())
            for step_ in range(circles_):  # training iterations
                # Circular mini-batch slicing: wrap around past the end.
                start = int((step_ * batch_size) % (size_ - 1))
                end = start + batch_size
                if end < (size_ - 1):
                    in_x = x_train[start:end, :]
                    in_y = y_train[start:end]
                else:
                    end_ = end % (size_ - 1)
                    in_x = np.concatenate((x_train[start:size_ - 1, :], x_train[0:end_, :]))
                    in_y = np.concatenate((y_train[start:size_ - 1], y_train[0:end_]))
                if step_ % 50 == 0:
                    print("第", step_, "次迭代")
                    print("test_錯誤率:", sess.run(self.err_rate,
                                                  feed_dict={self.x_: x_test, self.y_: y_test}))
                    print("train_錯誤率:", sess.run(self.err_rate,
                                                   feed_dict={self.x_: in_x, self.y_: in_y}))
                sess.run(self.train_, feed_dict={self.x_: in_x, self.y_: in_y})
            # os.mkdir raises OSError when the directory already exists
            # (e.g. on a second run); guard it.
            if not os.path.isdir("./Model"):
                os.mkdir("./Model")
            saver.save(sess, "Model/model.ckpt")

    def predict(self, x_data, y_data):
        """Restore the saved checkpoint and return predictions for x_data."""
        x_data = self.kernel_(x_data)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, "Model/model.ckpt")
            prediction = sess.run(self.predict_,
                                  feed_dict={self.x_: x_data, self.y_: y_data})
        return prediction


def gen_y(x):
    """Target function: element-wise sin over the rows of x."""
    y = np.zeros((x.shape[0]))
    for i in range(x.shape[0]):
        x_ = x[i]
        y[i] = np.sin(x_)
    return y


def main():
    """Fit sin(x) on [min_, max_] with the RBF network and run a sample prediction."""
    x_input = np.random.uniform(min_, max_, size=[1000, 1])
    y_input = gen_y(x_input)[:, np.newaxis]
    # 900 training samples, 100 test samples.
    x_train, y_train = x_input[0:900, :], y_input[0:900, :]
    x_test, y_test = x_input[900:1000, :], y_input[900:1000, :]
    # 50 evenly spaced RBF centers (original comment claimed 100).
    x_nodes = np.linspace(min_, max_, 50).reshape((50, 1))
    y_nodes = gen_y(x_nodes)
    rbf_ = rbf_bp(x_nodes, y_nodes, 1.0)
    rbf_.train(x_train, y_train, x_test, y_test, 500, 0.0001, 1000)
    pp = rbf_.predict(x_test[0:20], y_test[0:20])


if __name__ == "__main__":
    main()