
tensorflow73: Generating Classical Chinese Poems and Acrostic Poems with an RNN

01 Environment

# Source code: https://github.com/5455945/tensorflow_demo.git
# win10 Tensorflow_gpu1.2.1 python3.6.1
# CUDA v8.0 cudnn-8.0-windows10-x64-v5.1
# Don't forget to download the data file: https://github.com/5455945/tensorflow_demo/tree/master/poetry/data/poetry.txt
# tensorflow_demo\poetry\data\poetry.txt           poem corpus
# tensorflow_demo\poetry\train_poetry_model.py     trains the poem model
# tensorflow_demo\poetry\test_poetry.py            poem generation test
# tensorflow_demo\poetry\test_acrostic_poetry.py   acrostic poem generation test
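
Before training, it helps to confirm that TensorFlow actually sees the GPU. A minimal sanity check (a sketch assuming TF 1.x; device_lib is the usual way to list devices in that line of releases):

#-*- coding: UTF-8 -*-
import tensorflow as tf
from tensorflow.python.client import device_lib

print(tf.__version__)  # expected: 1.2.1
# Lists the CPU plus, if CUDA 8.0 / cuDNN 5.1 are set up correctly, the GPU
print([d.name for d in device_lib.list_local_devices()])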

02 Training the model: train_poetry_model.py

#-*- coding: UTF-8 -*-
import collections
import numpy as np
import tensorflow as tf

'''
train_poetry_model.py: trains the poem-generation model (win10, python3.6.1, tensorflow1.2.1)
'''
#------------------------------- Data preprocessing ---------------------------#
poetry_file = 'data/poetry.txt'

# Poem collection
poetrys = []
with open(poetry_file, "r", encoding='utf-8') as f:
    for line in f:
        try:
            #line = line.decode('UTF-8')
            line = line.strip(u'\n')
            title, content = line.strip(u' ').split(u':')
            content = content.replace(u' ', u'')
            if u'_' in content or u'(' in content or u'(' in content or u'《' in content or u'[' in content:
                continue
            if len(content) < 5 or len(content) > 79:
                continue
            content = u'[' + content + u']'
            poetrys.append(content)
        except Exception as e:
            pass

# Sort the poems by length
poetrys = sorted(poetrys, key=lambda line: len(line))
print('Total number of Tang poems: ', len(poetrys))

# Count the occurrences of each character
all_words = []
for poetry in poetrys:
    all_words += [word for word in poetry]
counter = collections.Counter(all_words)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
words, _ = zip(*count_pairs)

# Keep the most common characters (here: all of them) and append a blank used for padding
words = words[:len(words)] + (' ',)
# Map each character to a numeric ID
word_num_map = dict(zip(words, range(len(words))))
# Convert each poem to a vector of IDs (cf. TensorFlow exercise 1)
to_num = lambda word: word_num_map.get(word, len(words))
poetrys_vector = [list(map(to_num, poetry)) for poetry in poetrys]
#[[314, 3199, 367, 1556, 26, 179, 680, 0, 3199, 41, 506, 40, 151, 4, 98, 1],
#[339, 3, 133, 31, 302, 653, 512, 0, 37, 148, 294, 25, 54, 833, 3, 1, 965, 1315, 377, 1700, 562, 21, 37, 0, 2, 1253, 21, 36, 264, 877, 809, 1]
#....]

# Train on 64 poems at a time
batch_size = 64
n_chunk = len(poetrys_vector) // batch_size

class DataSet(object):
    def __init__(self, data_size):
        self._data_size = data_size
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._data_index = np.arange(data_size)

    def next_batch(self, batch_size):
        start = self._index_in_epoch
        if start + batch_size > self._data_size:
            np.random.shuffle(self._data_index)
            self._epochs_completed = self._epochs_completed + 1
            self._index_in_epoch = batch_size
            full_batch_features, full_batch_labels = self.data_batch(0, batch_size)
            return full_batch_features, full_batch_labels
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            full_batch_features, full_batch_labels = self.data_batch(start, end)
            if self._index_in_epoch == self._data_size:
                self._index_in_epoch = 0
                self._epochs_completed = self._epochs_completed + 1
                np.random.shuffle(self._data_index)
            return full_batch_features, full_batch_labels

    def data_batch(self, start, end):
        batches = []
        for i in range(start, end):
            batches.append(poetrys_vector[self._data_index[i]])

        length = max(map(len, batches))

        # Pad every poem in the batch to the same length with the blank character
        xdata = np.full((end - start, length), word_num_map[' '], np.int32)
        for row in range(end - start):
            xdata[row, :len(batches[row])] = batches[row]
        # The target at position t is the input character at position t + 1
        ydata = np.copy(xdata)
        ydata[:, :-1] = xdata[:, 1:]
        return xdata, ydata

#--------------------------------------- RNN --------------------------------------#
input_data = tf.placeholder(tf.int32, [batch_size, None])
output_targets = tf.placeholder(tf.int32, [batch_size, None])

# Define the RNN
def neural_network(model='lstm', rnn_size=128, num_layers=2):
    if model == 'rnn':
        cell_fun = tf.contrib.rnn.BasicRNNCell
    elif model == 'gru':
        cell_fun = tf.contrib.rnn.GRUCell
    elif model == 'lstm':
        cell_fun = tf.contrib.rnn.BasicLSTMCell

    cell = cell_fun(rnn_size, state_is_tuple=True)
    cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers, state_is_tuple=True)

    initial_state = cell.zero_state(batch_size, tf.float32)

    with tf.variable_scope('rnnlm'):
        softmax_w = tf.get_variable("softmax_w", [rnn_size, len(words)])
        softmax_b = tf.get_variable("softmax_b", [len(words)])
        embedding = tf.get_variable("embedding", [len(words), rnn_size])
        inputs = tf.nn.embedding_lookup(embedding, input_data)

    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, scope='rnnlm')
    output = tf.reshape(outputs, [-1, rnn_size])

    logits = tf.matmul(output, softmax_w) + softmax_b
    probs = tf.nn.softmax(logits)
    return logits, last_state, probs, cell, initial_state

def load_model(sess, saver, ckpt_path):
    latest_ckpt = tf.train.latest_checkpoint(ckpt_path)
    if latest_ckpt:
        print('resume from', latest_ckpt)
        saver.restore(sess, latest_ckpt)
        return int(latest_ckpt[latest_ckpt.rindex('-') + 1:])
    else:
        print('building model from scratch')
        sess.run(tf.global_variables_initializer())
        return -1

# Training
def train_neural_network():
    logits, last_state, _, _, _ = neural_network()
    targets = tf.reshape(output_targets, [-1])
    loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example([logits], [targets],
                                                              [tf.ones_like(targets, dtype=tf.float32)], len(words))
    cost = tf.reduce_mean(loss)
    learning_rate = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    # Clip gradients to a global norm of 5 to avoid exploding gradients
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), 5)
    #optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.apply_gradients(zip(grads, tvars))

    Session_config = tf.ConfigProto(allow_soft_placement=True)
    Session_config.gpu_options.allow_growth = True

    trainds = DataSet(len(poetrys_vector))

    with tf.Session(config=Session_config) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        last_epoch = load_model(sess, saver, 'model/')

        for epoch in range(last_epoch + 1, 100):
            # Exponential learning-rate decay: 0.002 * 0.97^epoch
            sess.run(tf.assign(learning_rate, 0.002 * (0.97 ** epoch)))
            #sess.run(tf.assign(learning_rate, 0.01))
            all_loss = 0.0
            for batche in range(n_chunk):
                x, y = trainds.next_batch(batch_size)
                train_loss, _, _ = sess.run([cost, last_state, train_op],
                                            feed_dict={input_data: x, output_targets: y})
                all_loss = all_loss + train_loss
                if batche % 50 == 1:
                    print(epoch, batche, 0.002 * (0.97 ** epoch), train_loss)
            saver.save(sess, 'model/poetry.module', global_step=epoch)
            print(epoch, ' Loss: ', all_loss * 1.0 / n_chunk)

train_neural_network()
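
One detail in data_batch is worth calling out: ydata is xdata shifted left by one position, so the label for each character is simply the next character of the poem. A tiny standalone illustration of that shift:

import numpy as np

xdata = np.array([[10, 20, 30, 40]])  # one encoded poem
ydata = np.copy(xdata)
ydata[:, :-1] = xdata[:, 1:]          # the target at position t is the input at t + 1
print(xdata)                          # [[10 20 30 40]]
print(ydata)                          # [[20 30 40 40]]  (the last target repeats the final ID)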

03 Poem generation test: test_poetry.py

#-*- coding: UTF-8 -*-
import os
import collections
import numpy as np
import tensorflow as tf
'''
test_poetry.py: randomly generates a poem (win10, python3.6.1, tensorflow1.2.1)
'''
#------------------------------- Data preprocessing ---------------------------#
poetry_file = './data/poetry.txt'
# Poem collection
poetrys = []
with open(poetry_file, "r", encoding='utf-8') as f:
    for line in f:
        try:
            #line = line.decode('UTF-8')
            line = line.strip(u'\n')
            title, content = line.strip(u' ').split(u':')
            content = content.replace(u' ',u'')
            if u'_' in content or u'(' in content or u'(' in content or u'《' in content or u'[' in content:
                continue
            if len(content) < 5 or len(content) > 79:
                continue
            content = u'[' + content + u']'
            poetrys.append(content)
        except Exception as e:
            pass

# Sort the poems by length
poetrys = sorted(poetrys,key=lambda line: len(line))
print('Total number of Tang poems: ', len(poetrys))

# Count the occurrences of each character
all_words = []
for poetry in poetrys:
    all_words += [word for word in poetry]
counter = collections.Counter(all_words)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
words, _ = zip(*count_pairs)

# Keep the most common characters (here: all of them) and append a blank used for padding
words = words[:len(words)] + (' ',)
# Map each character to a numeric ID
word_num_map = dict(zip(words, range(len(words))))
# Convert each poem to a vector of IDs (cf. TensorFlow exercise 1)
to_num = lambda word: word_num_map.get(word, len(words))
poetrys_vector = [ list(map(to_num, poetry)) for poetry in poetrys]
#[[314, 3199, 367, 1556, 26, 179, 680, 0, 3199, 41, 506, 40, 151, 4, 98, 1],
#[339, 3, 133, 31, 302, 653, 512, 0, 37, 148, 294, 25, 54, 833, 3, 1, 965, 1315, 377, 1700, 562, 21, 37, 0, 2, 1253, 21, 36, 264, 877, 809, 1]
#....]

# Generation runs one character at a time, so the batch size is 1 (the training script uses 64)
batch_size = 1
n_chunk = len(poetrys_vector) // batch_size

class DataSet(object):
    def __init__(self,data_size):
        self._data_size = data_size
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._data_index = np.arange(data_size)

    def next_batch(self,batch_size):
        start = self._index_in_epoch
        if start + batch_size > self._data_size:
            np.random.shuffle(self._data_index)
            self._epochs_completed = self._epochs_completed + 1
            self._index_in_epoch = batch_size
            full_batch_features ,full_batch_labels = self.data_batch(0, batch_size)
            return full_batch_features ,full_batch_labels
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            full_batch_features ,full_batch_labels = self.data_batch(start, end)
            if self._index_in_epoch == self._data_size:
                self._index_in_epoch = 0
                self._epochs_completed = self._epochs_completed + 1
                np.random.shuffle(self._data_index)
            return full_batch_features,full_batch_labels

    def data_batch(self, start, end):
        batches = []
        for i in range(start, end):
            batches.append(poetrys_vector[self._data_index[i]])

        length = max(map(len,batches))

        xdata = np.full((end - start,length), word_num_map[' '], np.int32)
        for row in range(end - start):
            xdata[row,:len(batches[row])] = batches[row]
        ydata = np.copy(xdata)
        ydata[:, :-1] = xdata[:, 1:]
        return xdata, ydata

#---------------------------------------RNN--------------------------------------#
input_data = tf.placeholder(tf.int32, [batch_size, None])
output_targets = tf.placeholder(tf.int32, [batch_size, None])
# Define the RNN
def neural_network(model='lstm', rnn_size=128, num_layers=2):
    if model == 'rnn':
        cell_fun = tf.contrib.rnn.BasicRNNCell
    elif model == 'gru':
        cell_fun = tf.contrib.rnn.GRUCell
    elif model == 'lstm':
        cell_fun = tf.contrib.rnn.BasicLSTMCell

    cell = cell_fun(rnn_size, state_is_tuple = True)
    cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers, state_is_tuple = True)
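    # Note: reusing the same cell object for every layer happens to work on
    # TF 1.2, but later TF 1.x releases reject it; there you would build one
    # cell per layer instead, e.g.
    #   cells = [cell_fun(rnn_size, state_is_tuple=True) for _ in range(num_layers)]
    #   cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)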

    initial_state = cell.zero_state(batch_size, tf.float32)

    with tf.variable_scope('rnnlm'):
        softmax_w = tf.get_variable("softmax_w", [rnn_size, len(words)])
        softmax_b = tf.get_variable("softmax_b", [len(words)])
        embedding = tf.get_variable("embedding", [len(words), rnn_size])
        inputs = tf.nn.embedding_lookup(embedding, input_data)

    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, scope='rnnlm')
    output = tf.reshape(outputs,[-1, rnn_size])

    logits = tf.matmul(output, softmax_w) + softmax_b
    probs = tf.nn.softmax(logits)
    return logits, last_state, probs, cell, initial_state

#------------------------------- Poem generation ---------------------------------#
# Use the trained model
def gen_poetry():
    def to_word(weights):
        t = np.cumsum(weights)
        s = np.sum(weights)
        sample = int(np.searchsorted(t, np.random.rand(1)*s))
        return words[sample]

    _, last_state, probs, cell, initial_state = neural_network()
    Session_config = tf.ConfigProto(allow_soft_placement = True)
    Session_config.gpu_options.allow_growth = True

    with tf.Session(config = Session_config) as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(tf.global_variables())
        #saver.restore(sess, 'model/poetry.module-99')
        ckpt = tf.train.get_checkpoint_state('./model/')
        checkpoint_suffix = ""
        if tf.__version__ > "0.12":
            checkpoint_suffix = ".index"
        if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + checkpoint_suffix):
            #print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("Created model with fresh parameters.")
            return None

        state_ = sess.run(cell.zero_state(1, tf.float32))
        x = np.array([list(map(word_num_map.get, '['))])
        [probs_, state_] = sess.run([probs, last_state], feed_dict={input_data: x, initial_state: state_})
        word = to_word(probs_)
        #word = words[np.argmax(probs_)]
        poem = ''
        while word != ']':
            poem += word
            x = np.zeros((1,1))
            x[0,0] = word_num_map[word]
            [probs_, state_] = sess.run([probs, last_state], feed_dict={input_data: x, initial_state: state_})
            word = to_word(probs_)
            #word = words[np.argmax(probs_)]
        return poem

print(gen_poetry())
'''
test01 惟應三品,對璧在崇。臨伊或,沈山駕。玉幣坤,蕙薌冠。祗繁託,眷聿酬。穆穆天周,休以配雄。
test02 心溼夕門僧,根為匣裡書。風初擊鼓動,蟬扇對閒吟。書和魚群累,看翛落月門。一招如此意,歸去夢南方。
test03 亦獨勞身拙,吾隨鬢射霜。人心猶守指,時節又聞蟬。岸館添湘水,江雲照甑舟。山高獨更雨,僧聽與樵攜。
test04 開中嬋娟倚西風,慄殿中朝別未眠。暴芝籍寄山中處,禪石縈橫水脈寒。
test05 詩家無事客,吟切又和非。寂寞關門遠,無人知亦憎。
'''
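
Note that to_word samples a character from the softmax distribution instead of taking the argmax (the commented-out alternative); sampling is why each run produces a different poem. A standalone sketch of the cumulative-sum trick it relies on:

import numpy as np

weights = np.array([0.1, 0.6, 0.3])  # e.g. softmax probabilities over a 3-word vocabulary
t = np.cumsum(weights)               # [0.1, 0.7, 1.0]
s = np.sum(weights)
# searchsorted against the cumulative sums picks index i with probability weights[i] / s
sample = int(np.searchsorted(t, np.random.rand(1) * s))
print(sample)                        # prints 1 on roughly 60% of runs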

04 Acrostic poem generation test: test_acrostic_poetry.py

#-*- coding: UTF-8 -*-
import os
import collections
import numpy as np
import tensorflow as tf
'''
test_acrostic_poetry.py: generates acrostic poems, five- or seven-character lines (win10, python3.6.1, tensorflow1.2.1)
'''

#------------------------------- Data preprocessing ---------------------------#
poetry_file = 'data/poetry.txt'
# Poem collection
poetrys = []
with open(poetry_file, "r", encoding='utf-8') as f:
    for line in f:
        try:
            #line = line.decode('UTF-8')
            line = line.strip(u'\n')
            title, content = line.strip(u' ').split(u':')
            content = content.replace(u' ',u'')
            if u'_' in content or u'(' in content or u'(' in content or u'《' in content or u'[' in content:
                continue
            if len(content) < 5 or len(content) > 79:
                continue
            content = u'[' + content + u']'
            poetrys.append(content)
        except Exception as e:
            pass

# Sort the poems by length
poetrys = sorted(poetrys,key=lambda line: len(line))
print('Total number of Tang poems: ', len(poetrys))

# Count the occurrences of each character
all_words = []
for poetry in poetrys:
    all_words += [word for word in poetry]
counter = collections.Counter(all_words)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
words, _ = zip(*count_pairs)

# Keep the most common characters (here: all of them) and append a blank used for padding
words = words[:len(words)] + (' ',)
# Map each character to a numeric ID
word_num_map = dict(zip(words, range(len(words))))
# Convert each poem to a vector of IDs (cf. TensorFlow exercise 1)
to_num = lambda word: word_num_map.get(word, len(words))
poetrys_vector = [ list(map(to_num, poetry)) for poetry in poetrys]
#[[314, 3199, 367, 1556, 26, 179, 680, 0, 3199, 41, 506, 40, 151, 4, 98, 1],
#[339, 3, 133, 31, 302, 653, 512, 0, 37, 148, 294, 25, 54, 833, 3, 1, 965, 1315, 377, 1700, 562, 21, 37, 0, 2, 1253, 21, 36, 264, 877, 809, 1]
#....]

# Generation runs one character at a time, so the batch size is 1 (the training script uses 64)
batch_size = 1
n_chunk = len(poetrys_vector) // batch_size

class DataSet(object):
    def __init__(self, data_size):
        self._data_size = data_size
        self._epochs_completed = 0
        self._index_in_epoch = 0
        self._data_index = np.arange(data_size)

    def next_batch(self,batch_size):
        start = self._index_in_epoch
        if start + batch_size > self._data_size:
            np.random.shuffle(self._data_index)
            self._epochs_completed = self._epochs_completed + 1
            self._index_in_epoch = batch_size
            full_batch_features ,full_batch_labels = self.data_batch(0, batch_size)
            return full_batch_features , full_batch_labels
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            full_batch_features ,full_batch_labels = self.data_batch(start, end)
            if self._index_in_epoch == self._data_size:
                self._index_in_epoch = 0
                self._epochs_completed = self._epochs_completed + 1
                np.random.shuffle(self._data_index)
            return full_batch_features,full_batch_labels

    def data_batch(self, start, end):
        batches = []
        for i in range(start, end):
            batches.append(poetrys_vector[self._data_index[i]])

        length = max(map(len, batches))

        xdata = np.full((end - start,length), word_num_map[' '], np.int32)
        for row in range(end - start):
            xdata[row,:len(batches[row])] = batches[row]
        ydata = np.copy(xdata)
        ydata[:, :-1] = xdata[:, 1:]
        return xdata, ydata

#---------------------------------------RNN--------------------------------------#
input_data = tf.placeholder(tf.int32, [batch_size, None])
output_targets = tf.placeholder(tf.int32, [batch_size, None])
# Define the RNN
def neural_network(model = 'lstm', rnn_size = 128, num_layers = 2):
    if model == 'rnn':
        cell_fun = tf.contrib.rnn.BasicRNNCell
    elif model == 'gru':
        cell_fun = tf.contrib.rnn.GRUCell
    elif model == 'lstm':
        cell_fun = tf.contrib.rnn.BasicLSTMCell

    cell = cell_fun(rnn_size, state_is_tuple = True)
    cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers, state_is_tuple = True)

    initial_state = cell.zero_state(batch_size, tf.float32)

    with tf.variable_scope('rnnlm'):
        softmax_w = tf.get_variable("softmax_w", [rnn_size, len(words)])
        softmax_b = tf.get_variable("softmax_b", [len(words)])
        embedding = tf.get_variable("embedding", [len(words), rnn_size])
        inputs = tf.nn.embedding_lookup(embedding, input_data)

    outputs, last_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, scope = 'rnnlm')
    output = tf.reshape(outputs,[-1, rnn_size])

    logits = tf.matmul(output, softmax_w) + softmax_b
    probs = tf.nn.softmax(logits)
    return logits, last_state, probs, cell, initial_state

#------------------------------- Poem generation ---------------------------------#
# Use the trained model
def gen_head_poetry(heads, type):
    if type != 5 and type != 7:
        print('The second parameter has to be 5 or 7!')
        return
    def to_word(weights):
        t = np.cumsum(weights)
        s = np.sum(weights)
        sample = int(np.searchsorted(t, np.random.rand(1)*s))
        return words[sample]
    _, last_state, probs, cell, initial_state = neural_network()
    Session_config = tf.ConfigProto(allow_soft_placement = True)
    Session_config.gpu_options.allow_growth = True

    with tf.Session(config = Session_config) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        #saver.restore(sess, 'model/poetry.module-99')
        ckpt = tf.train.get_checkpoint_state('./model/')
        checkpoint_suffix = ""
        if tf.__version__ > "0.12":
            checkpoint_suffix = ".index"
        if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + checkpoint_suffix):
            #print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("Created model with fresh parameters.")
            return None

        poem = ''
        for head in heads:
            flag = True
            while flag:
                state_ = sess.run(cell.zero_state(1, tf.float32))
                x = np.array([list(map(word_num_map.get, u'['))])
                [probs_, state_] = sess.run([probs, last_state], feed_dict={input_data: x, initial_state: state_})
                sentence = head
                x = np.zeros((1, 1))
                x[0,0] = word_num_map[sentence]
                [probs_, state_] = sess.run([probs, last_state], feed_dict={input_data: x, initial_state: state_})
                word = to_word(probs_)
                sentence += word
                while word != u'。':
                    x = np.zeros((1, 1))
                    x[0,0] = word_num_map[word]
                    [probs_, state_] = sess.run([probs, last_state], feed_dict={input_data: x, initial_state: state_})
                    word = to_word(probs_)
                    sentence += word
                if len(sentence) == 2 + 2 * type:
                    sentence += u'\n'
                    poem += sentence
                    flag = False
        return poem

print(gen_head_poetry(u'物競天擇', 7))
'''
test01
物易一在是岐路,試將司卻該朱微。
競憶佳歸小紫春,此心應是說名官。
天潤爭能曲玉皇,柴門表接碧雲移。
擇宅閒冰覓四鄰,世間浮世事難欺。
test02
物色無煙繞路深,微風落日即尋鄰。
競逐飛根未解籠,誇雲可肯憶西陽。
天士由來自致高,忍教歌劍我留兵。
擇實形難寫藥奇,一身將意甚教名。
'''
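
The len(sentence) == 2 + 2 * type check in gen_head_poetry is what enforces well-formed couplets: each accepted sentence must consist of two type-character lines plus a comma and a full stop. A quick check against one of the generated lines above:

# For type = 7: 7 characters + ',' + 7 characters + '。' = 16 = 2 + 2 * 7
line = u'物色無煙繞路深,微風落日即尋鄰。'
assert len(line) == 2 + 2 * 7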