Gaussian Kernel SVM Implemented in TensorFlow
阿新 · Published 2018-12-31
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops

# Generate a two-class "circles" dataset and relabel the classes as +1 / -1
(x_vals, y_vals) = datasets.make_circles(n_samples=350, factor=.5, noise=.1)
y_vals = np.array([1 if y == 1 else -1 for y in y_vals])

# Collect the points of each class for plotting
class1_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == 1]
class1_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == 1]
class2_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == -1]
class2_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == -1]

# Jupyter notebook magic; remove when running as a plain script
%matplotlib inline
plt.plot(class1_x, class1_y, 'ro', label='Class 1')
plt.plot(class2_x, class2_y, 'kx', label='Class -1')
plt.title('distribution')
plt.xlabel('x')
plt.ylabel('y')
plt.ylim([-1.5, 1.5])
plt.xlim([-1.5, 1.5])
plt.legend(loc='lower right')
plt.show()

# Data processing: columns are examples
x = np.transpose(x_vals)     # shape = (features, examples)
y = y_vals.reshape((1, -1))  # shape = (targets, examples)

# Split data into train/test sets (80% / 20%)
train_indices = np.random.choice(len(x_vals), round(len(x_vals) * 0.8), replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_train = x[:, train_indices]
x_test = x[:, test_indices]
y_train = y[:, train_indices]
y_test = y[:, test_indices]

def model(x_train, y_train, x_test, y_test, learning_rate=0.01):
    ops.reset_default_graph()
    X_place = tf.placeholder(tf.float32, [2, None])
    Y_place = tf.placeholder(tf.float32, [1, None])
    # One coefficient per training example, plus a bias
    w = tf.Variable(tf.random_normal(shape=[1, x_train.shape[1]]))
    b = tf.Variable(tf.random_normal(shape=[1, 1]))

    # Compute the Gaussian (RBF) kernel matrix: K_ij = exp(-10 * ||x_i - x_j||^2)
    gamma = tf.constant(-10.0)
    dist = tf.reduce_sum(tf.square(tf.transpose(X_place)), 1)
    dist = tf.reshape(dist, [-1, 1])
    sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(tf.transpose(X_place), X_place))),
                      tf.transpose(dist))
    my_kernel = tf.exp(tf.multiply(gamma, sq_dists))

    # Decision function, and hinge loss with an L2 penalty on the coefficients
    output = tf.add(tf.matmul(w, my_kernel), b)
    loss = (tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(output, Y_place))))
            + tf.matmul(w, tf.transpose(w)))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        train_loss = []
        for epoch in range(1000):
            _, train_cost = sess.run([optimizer, loss],
                                     feed_dict={X_place: x_train, Y_place: y_train})
            train_loss.append(train_cost)
            if (epoch + 1) % 50 == 0:
                print('epoch ' + str(epoch + 1) + ' cost: ' + str(train_cost))

        w_new = sess.run(w)
        b_new = sess.run(b)

        # Training accuracy: rebuild the kernel on the training set and apply
        # the learned coefficients
        x_trainfloat = tf.to_float(x_train)
        dist = tf.reduce_sum(tf.square(tf.transpose(x_trainfloat)), 1)
        dist = tf.reshape(dist, [-1, 1])
        sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(tf.transpose(x_trainfloat), x_trainfloat))),
                          tf.transpose(dist))
        my_kernel = tf.exp(tf.multiply(gamma, sq_dists))
        predict_train = tf.add(tf.matmul(w_new, my_kernel), b_new)
        prediction = tf.sign(predict_train)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_train), tf.float32))
        train_accuracy = sess.run(accuracy)
        print('train_accuracy: ' + str(train_accuracy))

        # Test accuracy: evaluate f(x) = w K(x_train, x) + b one point at a time in NumPy
        j = 0.
        for i in range(x_test.shape[1]):
            k = np.array([[x_test[0, i]], [x_test[1, i]]])
            a = np.add(np.dot(np.exp(np.sum(np.square(x_train - k), 0) * (-10)),
                              np.transpose(w_new)), b_new)
            if a * y_test[0, i] > 0:  # correct when sign(f(x)) matches the label
                j = j + 1.
        test_accuracy = j / x_test.shape[1]
        print('test_accuracy: ' + str(test_accuracy))

    return train_loss

train_loss = model(x_train, y_train, x_test, y_test)
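A note on the kernel computation: the pairwise squared distances are obtained from the expansion

\[
\lVert x_i - x_j \rVert^2 = \lVert x_i \rVert^2 - 2\, x_i^{\top} x_j + \lVert x_j \rVert^2,
\qquad
K_{ij} = \exp\!\left(-\gamma \lVert x_i - x_j \rVert^2\right)
\]

which is exactly what the sq_dists line implements: the dist vector holds the squared norms of the columns, and the matmul supplies the cross terms. The code folds the minus sign into gamma = tf.constant(-10.0), so my_kernel is an RBF kernel with γ = 10. The model learns one coefficient per training example; the decision function is f(x) = Σ_i w_i K(x_i, x) + b, and the loss is the mean hinge loss max(0, 1 − y·f(x)) plus the penalty w wᵀ.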
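The test loop at the end scores one point at a time. The same distance expansion vectorizes that loop in NumPy; the following is a minimal sketch under the shapes used above (x_train is (2, n_train), w_new is (1, n_train), b_new is (1, 1)), assuming w_new and b_new are made available outside model(), e.g. returned alongside train_loss. The helper name rbf_predict is hypothetical, not part of the original post.

import numpy as np

def rbf_predict(x_train, w, b, x_query, gamma=10.0):
    # Hypothetical helper: evaluate f(x) = w K(x_train, x) + b for a whole
    # batch of query points, using the same squared-distance expansion.
    train_sq = np.sum(x_train ** 2, axis=0).reshape(-1, 1)  # (n_train, 1)
    query_sq = np.sum(x_query ** 2, axis=0).reshape(1, -1)  # (1, n_query)
    cross = x_train.T @ x_query                             # (n_train, n_query)
    sq_dists = train_sq - 2.0 * cross + query_sq            # pairwise ||a - b||^2
    kernel = np.exp(-gamma * sq_dists)                      # RBF kernel matrix
    return w @ kernel + b                                   # (1, n_query) scores

scores = rbf_predict(x_train, w_new, b_new, x_test)
test_accuracy = np.mean(np.sign(scores) == y_test)

With gamma=10.0 this reproduces the exp(-10·||x_train − x||²) term from the loop, so np.mean(np.sign(scores) == y_test) should match the accuracy printed there.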