深度學習 tensorflow例子
阿新 • • 發佈:2019-02-04
問題:
1:構成的網路結構是什麼?
2:輸入資料的大小是多少?
https://yq.aliyun.com/articles/118726?spm=5176.100239.blogcont122848.14.zwhUbk
# TensorFlow 1.x time-series forecasting example: a single-layer BasicRNNCell
# learns to predict a random-walk series one step (f_horizon) ahead.
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import matplotlib
import matplotlib.pyplot as plt
import random
import shutil
import tensorflow.contrib.learn as tflearn  # fixed: original read "importtensorflow..." (SyntaxError)
import tensorflow.contrib.layers as tflayers
from tensorflow.contrib.learn.python.learn import learn_runner
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn

random.seed(111)
# NOTE(review): only `random` is seeded, but the data below comes from
# np.random, which is NOT seeded — runs are not reproducible. If that was
# the intent, add np.random.seed(111); left unchanged to preserve behavior.

# pd.date_range: `start` date, `periods` points, freq='M' = month-end frequency.
rng = pd.date_range(start='2000', periods=209, freq='M')

# Random-walk series: cumulative sum of uniform(-10, 10) noise.
ts = pd.Series(np.random.uniform(-10, 10, size=len(rng)).cumsum())
ts.plot(c='b', title='Example Time Series')  # blue line
plt.show()
ts.head(10)  # head(): first rows of the series (default 5)

TS = np.array(ts)
num_periods = 20   # length of each observation window
f_horizon = 1      # forecast horizon: predict one step ahead

# Drop the tail so the series divides evenly into windows of num_periods.
x_data = TS[:(len(TS) - len(TS) % num_periods)]    # len(TS) == 209 -> first 200 points
x_batches = x_data.reshape(-1, num_periods, 1)     # (10, 20, 1); was hard-coded 20
# Targets: the same series shifted forward by f_horizon, same windowing.
y_data = TS[1:(len(TS) - (len(TS) % num_periods)) + f_horizon]
y_batches = y_data.reshape(-1, num_periods, 1)     # (10, 20, 1); was hard-coded 20

print("x_data shape%s" % len(TS))
print("x_batchesis%s" % len(x_batches))   # 10
print(x_batches.shape)                    # (10, 20, 1)
print("x_batches[0:2]is %s" % x_batches[0:2])
print("y_batches[0:1]is%s" % y_batches[0:1])
print(y_batches.shape)                    # (10, 20, 1)


def test_data(series, forecast, num_periods):
    """Build one test window from the tail of `series`.

    X is the first `num_periods` of the last num_periods+forecast points;
    Y is the last `num_periods` points (X shifted forward by `forecast`).
    Returns (testX, testY), each shaped (1, num_periods, 1).
    """
    # fixed: the original ignored `series` and read the global TS instead
    test_x_setup = series[-(num_periods + forecast):]
    testX = test_x_setup[:num_periods].reshape(-1, num_periods, 1)
    testY = series[-num_periods:].reshape(-1, num_periods, 1)
    return testX, testY


X_test, Y_test = test_data(TS, f_horizon, num_periods)
print(X_test.shape)
print(X_test)

tf.reset_default_graph()  # clear the default graph stack and reset the default graph

inputs = 1    # feature dimension per time step
hidden = 100  # RNN state size
output = 1    # predicted values per time step

X = tf.placeholder(tf.float32, [None, num_periods, inputs])
y = tf.placeholder(tf.float32, [None, num_periods, output])

basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=hidden, activation=tf.nn.relu)
rnn_output, states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32)

learning_rate = 0.001

# tf.layers.dense: fully-connected layer, outputs = activation(inputs.kernel + bias).
# Flatten to 2-D, apply one shared dense layer per time step, restore 3-D shape.
stacked_rnn_output = tf.reshape(rnn_output, [-1, hidden])
stacked_outputs = tf.layers.dense(stacked_rnn_output, output)
outputs = tf.reshape(stacked_outputs, [-1, num_periods, output])

loss = tf.reduce_sum(tf.square(outputs - y))  # sum of squared errors
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(loss)

init = tf.global_variables_initializer()

####################################
epochs = 1000
with tf.Session() as sess:
    init.run()
    for ep in range(epochs):
        sess.run(training_op, feed_dict={X: x_batches, y: y_batches})
        if ep % 100 == 0:
            mse = loss.eval(feed_dict={X: x_batches, y: y_batches})
            print(ep, "\tMSE:", mse)  # fixed: original "\TMSE:" printed a literal backslash-T
    y_pred = sess.run(outputs, feed_dict={X: X_test})
    print(y_pred)

plt.title("Forecast vs Actual", fontsize=14)
plt.plot(pd.Series(np.ravel(Y_test)), "bo", markersize=10, label="Actual")
plt.plot(pd.Series(np.ravel(y_pred)), "r.", markersize=10, label="Forecast")
plt.legend(loc="upper left")
plt.xlabel("Time Periods")
plt.show()