
Deep Learning Framework TensorFlow: Learning and Applications (Parts 5 to 8)

5. 03-1 Regression


# coding: utf-8

# In[2]:


import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# In[3]:


# use numpy to generate 200 random points
x_data = np.linspace(-0.5,0.5,200)[:,np.newaxis]
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data) + noise


# In[9]:


# define two placeholders
x = tf.placeholder(tf.float32,[None,1])
y = tf.placeholder(tf.float32,[None,1])

# define the hidden layer of the neural network
weights_L1 = tf.Variable(tf.random_normal([1,10]))
biases_L1 = tf.Variable(tf.zeros([1,10]))
Wx_plus_b_L1 = tf.matmul(x,weights_L1) + biases_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)

# define the output layer of the neural network
weights_L2 = tf.Variable(tf.random_normal([10,1]))
biases_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_b_L2 = tf.matmul(L1,weights_L2) + biases_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)

# quadratic cost function
loss = tf.reduce_mean(tf.square(y-prediction))
# train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    # initialize variables
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step,feed_dict={x:x_data,y:y_data})
    
    # get the predicted values
    prediction_value = sess.run(prediction,feed_dict={x:x_data})
    
    # plot the results
    plt.figure()
    plt.scatter(x_data,y_data)
    plt.plot(x_data,prediction_value,'r-',lw=5)
    plt.show()
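
Note: the script above uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session), which was removed from the top-level namespace in TensorFlow 2.x. A minimal compatibility sketch, assuming TensorFlow 2.x is installed (the stand-in op here is illustrative only):

# Compatibility sketch for TF 2.x: restore graph/Session semantics,
# then the 1.x-style code above runs unchanged under compat.v1.
import tensorflow.compat.v1 as tf
import numpy as np

tf.disable_eager_execution()               # turn off eager mode
x = tf.placeholder(tf.float32, [None, 1])  # placeholders work again
y_op = tf.square(x)                        # stand-in op for the full graph
with tf.Session() as sess:
    print(sess.run(y_op, feed_dict={x: np.array([[0.5]])}))  # -> [[0.25]]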

6. 03-2

7. 03-2 MNIST Classification (Simple Version)

Baseline version


# coding: utf-8

# https://machinelearningmastery.com/handwritten-digit-recognition-using-convolutional-neural-networks-python-keras/

# Baseline Model with Multi-Layer Perceptrons

# In[2]:


from keras.datasets import mnist
import matplotlib.pyplot as plt
# load (downloading if necessary) the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
#plot 4 images as gray scale
plt.subplot(221)
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(X_train[3], cmap=plt.get_cmap('gray'))
# show the images
plt.show()


# Baseline Model with Multi-Layer Perceptrons

# In[3]:


import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.utils import np_utils


# In[4]:


# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)


# In[31]:


# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()


# In[32]:


# flatten 28*28 images to a 784 vector for each image
num_pixels = X_train.shape[1] * X_train.shape[2]
X_train = X_train.reshape(X_train.shape[0], num_pixels).astype('float32')
X_test = X_test.reshape(X_test.shape[0], num_pixels).astype('float32')


# The pixel values are gray scale, between 0 and 255. It is almost always a good idea to perform some scaling of input values when using neural network models. Because the scale is well known and well behaved, we can very quickly normalize the pixel values to the range 0 to 1 by dividing each value by the maximum of 255.

# In[33]:


# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255


# In[34]:


# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
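
For instance, to_categorical turns an integer label into an indicator vector; a small illustration (num_classes is pinned to 10 here only for the demo):

# one-hot illustration: label 3 -> [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]
from keras.utils import np_utils
print(np_utils.to_categorical([3], num_classes=10))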


# In[35]:


# define baseline model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
    model.add(Dense(num_classes, kernel_initializer='normal', activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model


# In[36]:


# build the model
model = baseline_model()
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Baseline Error: %.2f%%" % (100-scores[1]*100))

Output:

Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 7s 112us/step - loss: 0.2818 - acc: 0.9194 - val_loss: 0.1382 - val_acc: 0.9588
Epoch 2/10
60000/60000 [==============================] - 5s 88us/step - loss: 0.1107 - acc: 0.9683 - val_loss: 0.0984 - val_acc: 0.9697
Epoch 3/10
60000/60000 [==============================] - 6s 92us/step - loss: 0.0717 - acc: 0.9795 - val_loss: 0.0732 - val_acc: 0.9768
Epoch 4/10
60000/60000 [==============================] - 5s 87us/step - loss: 0.0497 - acc: 0.9855 - val_loss: 0.0711 - val_acc: 0.9779
Epoch 5/10
60000/60000 [==============================] - 5s 88us/step - loss: 0.0372 - acc: 0.9895 - val_loss: 0.0662 - val_acc: 0.9786
Epoch 6/10
60000/60000 [==============================] - 5s 92us/step - loss: 0.0278 - acc: 0.9926 - val_loss: 0.0692 - val_acc: 0.9777
Epoch 7/10
60000/60000 [==============================] - 5s 87us/step - loss: 0.0193 - acc: 0.9953 - val_loss: 0.0551 - val_acc: 0.9829
Epoch 8/10
60000/60000 [==============================] - 5s 87us/step - loss: 0.0139 - acc: 0.9971 - val_loss: 0.0644 - val_acc: 0.9808
Epoch 9/10
60000/60000 [==============================] - 6s 92us/step - loss: 0.0112 - acc: 0.9975 - val_loss: 0.0614 - val_acc: 0.9826
Epoch 10/10
60000/60000 [==============================] - 5s 87us/step - loss: 0.0086 - acc: 0.9981 - val_loss: 0.0610 - val_acc: 0.9823
Baseline Error: 1.77%
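
To inspect layer shapes and parameter counts, Keras provides model.summary() (a standard call; output omitted here):

# Optional: print the architecture (784 inputs -> 784 ReLU units -> 10 softmax).
model.summary()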


CNN


# coding: utf-8

# In[9]:


from keras.datasets import mnist
import matplotlib.pyplot as plt
# load (downloading if necessary) the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
#plot 4 images as gray scale
plt.subplot(221)
plt.imshow(X_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(X_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(X_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(X_train[3], cmap=plt.get_cmap('gray'))
# show the images
plt.show()


# In[10]:


import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('th')  # channels-first ('th') image layout: [channels][rows][cols]


# In[11]:


# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)


# Next we need to load the MNIST dataset and reshape it so that it is suitable for training a CNN. In Keras, the layers used for two-dimensional convolutions expect pixel values with the dimensions [pixels][width][height].
# In the case of RGB, the first dimension, pixels, would be 3 for the red, green and blue components, and it would be like having 3 image inputs for every color image. In the case of MNIST, where the pixel values are gray scale, the pixel dimension is set to 1.

# In[12]:


# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to be [samples][pixels][width][height]
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
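
A quick sanity check on the resulting layout (the expected values follow from MNIST's 60,000/10,000 split and 28x28 gray-scale images):

# channels-first layout: [samples][channels][rows][cols]
print(X_train.shape)  # (60000, 1, 28, 28)
print(X_test.shape)   # (10000, 1, 28, 28)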


# In[13]:


# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]


# In[14]:


def baseline_model():
    # create model
    model = Sequential()
    model.add(Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model


# In[15]:


# build the model
model = baseline_model()
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("CNN Error: %.2f%%" % (100-scores[1]*100))

Output:

Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 83s 1ms/step - loss: 0.2230 - acc: 0.9364 - val_loss: 0.0783 - val_acc: 0.9755
Epoch 2/10
60000/60000 [==============================] - 82s 1ms/step - loss: 0.0707 - acc: 0.9788 - val_loss: 0.0443 - val_acc: 0.9851
Epoch 3/10
60000/60000 [==============================] - 84s 1ms/step - loss: 0.0507 - acc: 0.9844 - val_loss: 0.0421 - val_acc: 0.9857
Epoch 4/10
60000/60000 [==============================] - 85s 1ms/step - loss: 0.0388 - acc: 0.9881 - val_loss: 0.0405 - val_acc: 0.9869
Epoch 5/10
60000/60000 [==============================] - 86s 1ms/step - loss: 0.0323 - acc: 0.9897 - val_loss: 0.0354 - val_acc: 0.9884
Epoch 6/10
60000/60000 [==============================] - 90s 1ms/step - loss: 0.0264 - acc: 0.9917 - val_loss: 0.0325 - val_acc: 0.9890
Epoch 7/10
60000/60000 [==============================] - 89s 1ms/step - loss: 0.0220 - acc: 0.9929 - val_loss: 0.0350 - val_acc: 0.9884
Epoch 8/10
60000/60000 [==============================] - 88s 1ms/step - loss: 0.0191 - acc: 0.9940 - val_loss: 0.0315 - val_acc: 0.9892
Epoch 9/10
60000/60000 [==============================] - 88s 1ms/step - loss: 0.0156 - acc: 0.9948 - val_loss: 0.0331 - val_acc: 0.9888
Epoch 10/10
60000/60000 [==============================] - 88s 1ms/step - loss: 0.0142 - acc: 0.9958 - val_loss: 0.0318 - val_acc: 0.9902
CNN Error: 0.98%
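
To classify individual images with the trained network, model.predict returns per-class probabilities; a minimal sketch using the first test image:

import numpy
# predict() expects a batch, so keep the leading sample dimension.
probs = model.predict(X_test[:1])  # shape (1, 10)
print("predicted digit:", numpy.argmax(probs, axis=1)[0])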


Larger CNN


# coding: utf-8

# In[1]:


# Larger CNN for the MNIST Dataset
import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('th')  # channels-first ('th') image layout: [channels][rows][cols]
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to be [samples][pixels][width][height]
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]


# In[2]:


# define the larger model
def larger_model():
    # create model
    model = Sequential()
    model.add(Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(15, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
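
With the default 'valid' padding, the feature maps shrink as 28x28 -> 24x24 (5x5 conv) -> 12x12 (2x2 pool) -> 10x10 (3x3 conv) -> 5x5 (2x2 pool), so Flatten emits 15 * 5 * 5 = 375 features. model.summary() (a standard Keras call) makes this explicit:

# Shape walk-through for the larger model.
m = larger_model()
m.summary()  # the Flatten layer should report 375 = 15 * 5 * 5 outputs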


# In[ ]:


# build the model
model = larger_model()
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Large CNN Error: %.2f%%" % (100-scores[1]*100))

Output:

Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 1395s 23ms/step - loss: 0.3904 - acc: 0.8805 - val_loss: 0.0896 - val_acc: 0.9715
Epoch 2/10
60000/60000 [==============================] - 1391s 23ms/step - loss: 0.0986 - acc: 0.9701 - val_loss: 0.0558 - val_acc: 0.9824
Epoch 3/10
60000/60000 [==============================] - 1392s 23ms/step - loss: 0.0727 - acc: 0.9778 - val_loss: 0.0431 - val_acc: 0.9855
Epoch 4/10
60000/60000 [==============================] - 1392s 23ms/step - loss: 0.0594 - acc: 0.9818 - val_loss: 0.0376 - val_acc: 0.9874
Epoch 5/10
60000/60000 [==============================] - 1393s 23ms/step - loss: 0.0513 - acc: 0.9841 - val_loss: 0.0336 - val_acc: 0.9892
Epoch 6/10
60000/60000 [==============================] - 1397s 23ms/step - loss: 0.0441 - acc: 0.9858 - val_loss: 0.0293 - val_acc: 0.9903
Epoch 7/10
60000/60000 [==============================] - 1395s 23ms/step - loss: 0.0387 - acc: 0.9880 - val_loss: 0.0301 - val_acc: 0.9905
Epoch 8/10
60000/60000 [==============================] - 1368s 23ms/step - loss: 0.0351 - acc: 0.9889 - val_loss: 0.0261 - val_acc: 0.9909
Epoch 9/10
60000/60000 [==============================] - 1359s 23ms/step - loss: 0.0331 - acc: 0.9895 - val_loss: 0.0236 - val_acc: 0.9920
Epoch 10/10
60000/60000 [==============================] - 1363s 23ms/step - loss: 0.0310 - acc: 0.9906 - val_loss: 0.0279 - val_acc: 0.9921
Large CNN Error: 0.79%
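
Once trained, the model can be saved for later reuse; model.save is the standard Keras call (the filename below is just an example):

# Save architecture + weights to a single HDF5 file (illustrative name).
model.save('mnist_larger_cnn.h5')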


8. 04-1