
Basic Usage of TensorFlow (Part 1)

1. Session

A Session is the environment in which a TensorFlow graph is executed: operations only produce values when you run them inside a session.

# encoding: utf-8
import tensorflow as tf
import numpy as np

# Define two constant matrices and multiply them
matrix1 = tf.constant([[3, 3]])        # 1x2 matrix
matrix2 = tf.constant([[2], [2]])      # 2x1 matrix
product = tf.matmul(matrix1, matrix2)  # matrix multiplication, the equivalent of np.dot()

# A session is used to execute the graph and fetch the result
sess = tf.Session()
result = sess.run(product)
print(result)  # [[12]]
sess.close()
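
A session can also be opened as a context manager, which closes it automatically; a minimal sketch (my addition) of the same computation:

import tensorflow as tf

matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)

# The with-block closes the session automatically, even if an error occurs
with tf.Session() as sess:
    print(sess.run(product))  # [[12]]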

2. Variables

Use a variable to implement a simple counter.

# encoding: utf-8
import tensorflow as tf

state = tf.Variable(0, name='counter')  # tf.Variable() defines a variable; tf.constant() defines a constant
print(state.name)
one = tf.constant(1)                    # the constant 1
new_value = tf.add(state, one)
update = tf.assign(state, new_value)    # assign new_value to state
init = tf.initialize_all_variables()    # initialize all variables (deprecated; see the warning below)

with tf.Session() as sess:
    sess.run(init)  # run the init op to activate the variables
    for _ in range(3):
        sess.run(update)
        print(sess.run(state))

Result:

counter:0
WARNING:tensorflow:From /home/sulei/tensorflow-workspace/variable.py:10: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
2017-06-27 11:23:49.419560: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.
2017-06-27 11:23:49.419613: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
2017-06-27 11:23:49.419631: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
1
2
3
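
As the warning says, newer TensorFlow 1.x versions rename the initializer. A minimal sketch (my addition) of the same counter using tf.global_variables_initializer and tf.assign_add, which folds the add and the assign into one op:

import tensorflow as tf

state = tf.Variable(0, name='counter')
update = tf.assign_add(state, 1)          # increment state by 1 in a single op
init = tf.global_variables_initializer()  # the non-deprecated initializer

with tf.Session() as sess:
    sess.run(init)
    for _ in range(3):
        print(sess.run(update))  # prints 1, 2, 3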

3. Feeds: passing values in

tf.placeholder() creates placeholders for operations, so that values can be fed into the network from outside at run time.

A possible error:

AttributeError: module 'tensorflow' has no attribute 'mul'

Solution: replace mul with multiply.

import tensorflow as tf

input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)  # tf.multiply(): element-wise multiplication (formerly tf.mul)

with tf.Session() as sess:
    # feed_dict passes the input values in as a dictionary
    print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))
/usr/bin/python3.5 /home/sulei/tensorflow-workspace/feeds.py
2017-06-27 13:43:13.138963: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.1 instructions, but these are available on your machine and could speed up CPU computations.
2017-06-27 13:43:13.139033: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use SSE4.2 instructions, but these are available on your machine and could speed up CPU computations.
2017-06-27 13:43:13.139056: W tensorflow/core/platform/cpu_feature_guard.cc:45] The TensorFlow library wasn't compiled to use AVX instructions, but these are available on your machine and could speed up CPU computations.
[ 14.]

Process finished with exit code 0
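
Placeholders can also be given an explicit shape, which lets TensorFlow check the fed data at run time. A minimal sketch (my addition):

import tensorflow as tf

# [None, 2] means: any number of rows, exactly 2 columns
x = tf.placeholder(tf.float32, shape=[None, 2])
row_sum = tf.reduce_sum(x, axis=1)  # sum along each row

with tf.Session() as sess:
    print(sess.run(row_sum, feed_dict={x: [[1., 2.], [3., 4.]]}))  # [3. 7.]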

4. Activation Functions

An activation function applies a non-linear transformation to a layer's output, so that only part of the neurons are activated.

For how to use activation functions, see the reference link.
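
As an illustration (my addition, not from the original post), here is a minimal sketch comparing a few built-in activations on the same input:

import tensorflow as tf

x = tf.constant([-2., -1., 0., 1., 2.])

with tf.Session() as sess:
    print(sess.run(tf.nn.relu(x)))     # [0. 0. 0. 1. 2.]  zeroes out negatives
    print(sess.run(tf.nn.sigmoid(x)))  # squashes values into (0, 1)
    print(sess.run(tf.nn.tanh(x)))     # squashes values into (-1, 1)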
5. Defining an activation function, adding a layer, and building a network

import tensorflow as tf
import numpy as np

def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # weight matrix
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)           # biases; best not initialized to zero
    Wx_plus_b = tf.matmul(inputs, Weights) + biases               # the layer's raw (pre-activation) output
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)  # noise
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# input layer / hidden layer / output layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
# the training step
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.initialize_all_variables()  # deprecated; tf.global_variables_initializer() is the new name
sess = tf.Session()
sess.run(init)
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:  # print the loss every 50 steps
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))

Result:

The loss keeps decreasing as training proceeds.
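
A side note on the loss: reduction_indices is the pre-1.0 name for what is now the axis argument, so in TensorFlow 1.x the same loss can be written as below (a sketch reusing ys and prediction from the code above):

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))  # axis=1 sums over each row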

6. Visualization

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # weight matrix
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)           # biases; best not initialized to zero
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)  # noise
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# input layer / hidden layer / output layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
# the training step
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)  # a single subplot covering the whole figure
ax.scatter(x_data, y_data)     # scatter plot of the training data
plt.show()
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        # prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        # lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
        # ax.lines.remove(lines[0])

[Figure: scatter plot of the training data]

Dynamic visualization of the model fitting the data:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

def add_layer(inputs, in_size, out_size, activation_function=None):
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))  # weight matrix
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)           # biases; best not initialized to zero
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)  # noise
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# input layer / hidden layer / output layer
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
# the training step
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()   # interactive mode: keep drawing without pausing the script
plt.show()
for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        try:
            ax.lines.remove(lines[0])  # erase the previous curve
        except Exception:
            pass
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)  # red curve, line width 5
        plt.pause(0.1)  # pause for 0.1 s so the plot refreshes
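
One caveat (my addition, not in the original post): with plt.ion() the window may close as soon as the script ends. Turning interactive mode back off and calling plt.show() once more at the end keeps the final figure on screen:

plt.ioff()  # turn interactive mode back off
plt.show()  # block so the final figure stays visible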


TensorBoard: a great helper for visualization

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt  # import the plotting module

def add_layer(inputs, in_size, out_size, activation_function=None):
    # activation_function=None means no activation, i.e. a linear model
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')  # biases are best not zero, hence the +0.1
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.matmul(inputs, Weights) + biases
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b)
        return outputs

# Define the placeholders for the network inputs
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    # None means any number of rows can be fed in; x_data is a 300x1 matrix, hence [None, 1]
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

# Hidden layer l1: 1 input node, 10 output nodes
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
# Output layer: takes the hidden layer l1 as input (10 nodes) and predicts y_data (1 node)
prediction = add_layer(l1, 10, 1, activation_function=None)

# Compute the loss
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.global_variables_initializer()
# Running tf.initialize_all_variables() prints a warning that the new name is tf.global_variables_initializer()
sess = tf.Session()
writer = tf.summary.FileWriter("/home/sulei/tensorflow-workspace/logs/", sess.graph)
# writer = tf.summary.FileWriter("/*****/", sess.graph)
# The graph is written to a file first and then viewed in the browser; the string is the directory the file is stored in
# The Mofan (莫煩) video uses tf.train.SummaryWriter(); after it raised an error I found the function was renamed to tf.summary.FileWriter()
sess.run(init)

Afterwards, in the directory containing the events.out.tfevents.1499002324 file, run the following command:

sulei@sulei:~/tensorflow-workspace/logs$ tensorboard --logdir='/home/sulei/tensorflow-workspace/logs/'

The output:

sulei@sulei:~/tensorflow-workspace/logs$ tensorboard --logdir='/home/sulei/tensorflow-workspace/logs/'
Starting TensorBoard 54 at http://sulei:6006
(Press CTRL+C to quit)

Open the link: http://sulei:6006
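
TensorBoard can show more than the graph. As an extension (my sketch, not in the original post), tf.summary.scalar can log the loss during training so it appears under the SCALARS tab. This snippet reuses loss, sess, train_step, writer, xs, and ys from the code above, plus the x_data/y_data arrays from the earlier sections:

# Log the loss as a scalar summary (sketch; reuses names from the code above)
tf.summary.scalar('loss', loss)
merged = tf.summary.merge_all()  # merge all summary ops into one

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        summary = sess.run(merged, feed_dict={xs: x_data, ys: y_data})
        writer.add_summary(summary, i)  # record the summary at training step i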