Using dropout to solve overfitting
阿新 • Published 2018-12-12
The idea is that on each training pass, some neurons and their connections are randomly ignored, so the network is temporarily incomplete. Repeating this pass after pass, the final result never comes to depend on any single parameter, and that is how dropout alleviates overfitting.
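To make the mechanism concrete, here is a minimal sketch (not from the original post; it assumes TensorFlow 1.x) of what tf.nn.dropout does to a tensor: each element is zeroed with probability 1 - keep_prob, and the surviving elements are scaled up by 1 / keep_prob so the expected value stays the same.

import tensorflow as tf

x = tf.ones([1, 10])                       # ten activations, all equal to 1.0
dropped = tf.nn.dropout(x, keep_prob=0.5)  # each element kept with probability 0.5

with tf.Session() as sess:
    print(sess.run(dropped))  # roughly half the entries are 0.0, the rest are 2.0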
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

# Load the digits dataset and one-hot encode the labels.
digits = load_digits()
X = digits.data
y = digits.target
y = LabelBinarizer().fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3)


# Arguments: the layer input, its size, the output size, a name for the
# summaries, and an activation function (None means linear).
def add_layer(inputs, in_size, out_size, layer_name, activation_function=None):
    # Initialise the weights as random normal values; a random start is much
    # better than all zeros. The shape is [in_size, out_size].
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases should not start at zero either; the shape is [1, out_size].
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    # matmul is matrix multiplication; this variable holds the pre-activation values.
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    # Dropout: randomly discard part of this result (keep_prob controls how much is kept).
    Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob)
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    tf.summary.histogram(layer_name + '/outputs', outputs)
    return outputs


# Define the placeholders. keep_prob is the fraction of activations to keep
# (i.e. not drop). Each digit image is 8x8 = 64 pixels, and the output has
# ten units, one for each digit 0-9.
keep_prob = tf.placeholder(tf.float32)
xs = tf.placeholder(tf.float32, [None, 64])
ys = tf.placeholder(tf.float32, [None, 10])

# Add a hidden layer and an output layer; softmax is the usual choice for classification.
l1 = add_layer(xs, 64, 50, 'l1', activation_function=tf.nn.tanh)
prediction = add_layer(l1, 50, 10, 'l2', activation_function=tf.nn.softmax)

# The error between the prediction and the real data: cross-entropy, the loss
# normally paired with softmax for classification.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(ys * tf.log(prediction),
                                              reduction_indices=[1]))  # loss
tf.summary.scalar('loss', cross_entropy)
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.Session()
merged = tf.summary.merge_all()
# Summary writers for the train and test curves.
train_writer = tf.summary.FileWriter("A://logs/train", sess.graph)
test_writer = tf.summary.FileWriter("A://logs/test", sess.graph)

# Important step: initialise all variables (older TF versions use a different name).
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
    init = tf.initialize_all_variables()
else:
    init = tf.global_variables_initializer()
sess.run(init)

for i in range(500):
    # Train with keep_prob = 0.5, i.e. roughly 50% of the activations are dropped.
    sess.run(train_step, feed_dict={xs: X_train, ys: y_train, keep_prob: 0.5})
    if i % 50 == 0:
        # Record the loss; do not drop anything while recording results.
        train_result = sess.run(merged, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})
        test_result = sess.run(merged, feed_dict={xs: X_test, ys: y_test, keep_prob: 1})
        # Write the summaries for training step i.
        train_writer.add_summary(train_result, i)
        test_writer.add_summary(test_result, i)
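As a follow-up, the train/test gap that signals overfitting can also be checked directly. The lines below are not in the original post; they are a sketch that reuses the prediction, xs, ys and keep_prob tensors defined above, and feed keep_prob as 1 because dropout must be switched off at evaluation time. The two FileWriter logs can likewise be compared by pointing TensorBoard at the log directory (tensorboard --logdir=A://logs, assuming that path is valid on your system).

# Sketch: compare train and test accuracy after the training loop above.
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

train_acc = sess.run(accuracy, feed_dict={xs: X_train, ys: y_train, keep_prob: 1})
test_acc = sess.run(accuracy, feed_dict={xs: X_test, ys: y_test, keep_prob: 1})
print('train accuracy:', train_acc, 'test accuracy:', test_acc)
# A large gap between the two numbers is the symptom of overfitting that
# dropout is meant to reduce.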