
Getting Started with TensorFlow, Part One

Installing CUDA and TensorFlow

In my experience, installing TensorFlow itself is a single pip command, provided you have a way around the firewall; if not, look for a mirror address shared by others inside the wall. The real pitfalls are in GPU support, which requires installing NVIDIA's CUDA first. There are quite a few traps here; I recommend installing CUDA from the Ubuntu deb package, since the run.sh route always seems to run into problems. For the specifics of the CUDA install, see the linked guide. Note that the TensorFlow version in that link is an older one; TensorFlow's current official requirement is CUDA 7.5 + cuDNN v4, so watch the versions when you install.
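Once everything is installed, a quick sanity check (a minimal sketch, using the same TF 0.x-era API as the rest of this post) is to run a trivial op with device placement logging turned on; if the CUDA build is working, the log will show ops being assigned to /gpu:0:

import tensorflow as tf

# Log which device each op lands on; with a working CUDA install,
# the placement log should show operations assigned to /gpu:0.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
print sess.run(tf.constant('GPU check'))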

Hello World

import tensorflow as tf

hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print sess.run(hello)

First, tf.constant creates a constant; then we start a TensorFlow Session and call the session's run method to execute the whole graph. Next, let's try some simple arithmetic:

import tensorflow as tf

a = tf.constant(2)
b = tf.constant(3)

with tf.Session() as sess:
    print "a=2, b=3"
    print "Addition with constants: %i" % sess.run(a + b)
    print "Multiplication with constants: %i" % sess.run(a * b)

# output:
# a=2, b=3
# Addition with constants: 5
# Multiplication with constants: 6
import tensorflow as tf

a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

add = tf.add(a, b)
mul = tf.mul(a, b)

with tf.Session() as sess:
    # Run every operation with variable input
    print "Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3})
    print "Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3})

# output:
# Addition with variables: 5
# Multiplication with variables: 6

# Matrix multiplication: a 1x2 matrix times a 2x1 matrix gives a 1x1 result
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.], [2.]])
product = tf.matmul(matrix1, matrix2)

with tf.Session() as sess:
    result = sess.run(product)
    print result

# result: [[ 12.]]
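A caveat on versions: the code in this post targets the TF 0.x API. In TensorFlow 1.0, tf.mul was renamed to tf.multiply (and tf.initialize_all_variables, used below, became tf.global_variables_initializer), so on a 1.x install the multiplication example looks like this:

import tensorflow as tf

a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)

# tf.mul became tf.multiply in TensorFlow 1.0
mul = tf.multiply(a, b)

with tf.Session() as sess:
    print sess.run(mul, feed_dict={a: 2, b: 3})  # 6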

Linear Regression

import tensorflow as tf
import numpy
import matplotlib.pyplot as plt
rng = numpy.random

# Parameters
learning_rate = 0.01
training_epochs = 2000
display_step = 50

# Training Data
train_X = numpy.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_Y = numpy.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
n_samples = train_X.shape[0]

# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")

# Create Model
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")

# Construct a linear model
activation = tf.add(tf.mul(X, W), b)

# Minimize the squared errors (L2 loss)
cost = tf.reduce_sum(tf.pow(activation - Y, 2)) / (2 * n_samples)

# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initializing the variables
init = tf.initialize_all_variables()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Fit all training data
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Display logs per epoch step
        if epoch % display_step == 0:
            print "Epoch:", '%04d' % (epoch + 1), "cost=", \
                "{:.9f}".format(sess.run(cost, feed_dict={X: train_X, Y: train_Y})), \
                "W=", sess.run(W), "b=", sess.run(b)

    print "Optimization Finished!"
    print "cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), \
        "W=", sess.run(W), "b=", sess.run(b)

    # Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
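For reference, the cost the code minimizes is the mean squared error written in the usual 1/(2n) form (the factor of 2 simply cancels when differentiating), and each optimizer step applies the standard gradient-descent update with step size \alpha = learning_rate:

J(W, b) = \frac{1}{2n} \sum_{i=1}^{n} \left( W x_i + b - y_i \right)^2

W \leftarrow W - \alpha \frac{\partial J}{\partial W}, \qquad
b \leftarrow b - \alpha \frac{\partial J}{\partial b}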

Logistic Regression

import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784])  # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10])   # 0-9 digits recognition => 10 classes