1. 程式人生 > >tf.abs tf.add() tf.negative()

tf.abs tf.add() tf.negative()

隨心一記(沒什麼營養)

tf.abs

求絕對值的

tf.add()

相加的 支援broadcast

tf.negative()

取反的
理由有這樣的需求 求兩個tensor的相減的結果

tf.add(tensor1, tf.negative(tensor2))

機器學習中的NN 實現出處:

資料下載

# 1-nearest-neighbor (KNN with k=1) classification on MNIST, expressed as a
# TensorFlow graph. Subtraction is written as tf.add(x, tf.negative(y)) to
# illustrate the three ops discussed in the post above.
# NOTE(review): this uses the TF 1.x API (tf.placeholder, tf.Session) and the
# long-deprecated tensorflow.examples.tutorials.mnist module — it will not run
# on TensorFlow 2.x without tf.compat.v1 and a replacement data loader.
import tensorflow as tf
import numpy as np

# Import MNIST data (downloaded into ../mnist_data/ on first run).
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../mnist_data/", one_hot=True)

# In this example, we limit the amount of MNIST data used.
Xtr, Ytr = mnist.train.next_batch(5000)  # 5000 training images (NN candidates); returns numpy arrays
Xte, Yte = mnist.test.next_batch(200)    # 200 images for testing

xtr = tf.placeholder("float", [None, 784])  # the whole training set, one row per image
xte = tf.placeholder("float", [784])        # a single test image

# L1 (Manhattan) distance from the one test image to every training row.
# tf.add + tf.negative implements element-wise subtraction; xte (shape [784])
# broadcasts across the rows of xtr (shape [None, 784]).
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), axis=1)
# Row index of the closest training image — the 1-NN prediction.
nn_index = tf.argmin(distance, axis=0)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    accuracy = 0.0
    # Loop over the test data, classifying one image per session run.
    for i in range(len(Xte)):
        # Get the nearest neighbor. Note how feed_dict binds both
        # placeholders: the full training set and the i-th test image.
        a = sess.run(nn_index, feed_dict={xtr: Xtr, xte: Xte[i, :]})
        # Get the nearest neighbor's class label and compare it to the true label
        # (labels are one-hot, so argmax recovers the digit).
        print("Test", i, "Prediction:", np.argmax(Ytr[a]), \
            "True Class:", np.argmax(Yte[i]))
        # Calculate accuracy: each correct test case contributes 1/len(Xte).
        if np.argmax(Ytr[a]) == np.argmax(Yte[i]):
            accuracy += 1./len(Xte)
    print("Done!")
    print("Accuracy:", accuracy)