
Transformer for Text Classification

Reference: https://www.cnblogs.com/jiangxinyang/p/10210813.html

Code source: https://github.com/jiangxinyang227/textClassifier

1. Parameter configuration

import os
import csv
import time
import datetime
import random
import json

import warnings
from collections import Counter
from math import sqrt

import gensim
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.metrics import roc_auc_score, accuracy_score, precision_score, recall_score

warnings.filterwarnings("ignore")
# Configuration parameters

class TrainingConfig(object):
    epoches = 10
    evaluateEvery = 100
    checkpointEvery = 100
    learningRate = 0.001
    
class ModelConfig(object):
    embeddingSize = 200
    filters = 128  # number of filters in the inner 1-D convolution; the outer layer's filter count should match the input size so that each layer's output dimension equals its input dimension
    numHeads = 8  # number of attention heads
    numBlocks = 1  # number of transformer blocks
    epsilon = 1e-8  # small constant in the LayerNorm denominator
    keepProp = 0.9  # dropout keep probability inside multi-head attention
    dropoutKeepProb = 0.5  # dropout keep probability of the fully connected layer
    l2RegLambda = 0.0


class Config(object):
    sequenceLength = 200  # roughly the mean of all sequence lengths
    batchSize = 128
    dataSource = "../data/preProcess/labeledTrain.csv"
    stopWordSource = "../data/english"
    numClasses = 1  # set to 1 for binary classification, or to the number of classes for multi-class
    rate = 0.8  # proportion of the data used for training
    training = TrainingConfig()
    model = ModelConfig()


# Instantiate the configuration object
config = Config()

2. Generating the training data

  1) Load the data, split each sentence into a list of words, and remove low-frequency words and stop words.

  2) Map words to index representations and build a word-to-index vocabulary, saved in JSON format so it can be used later at inference time. (Note: some words may not appear in the pre-trained word2vec vectors; such words are represented directly as UNK.)

  3) Read the word vectors from the pre-trained word vector model and feed them into the model as initialization values.

  4) Split the dataset into a training set and a test set.

# Data preprocessing class that generates the training and test sets

class Dataset(object):
    def __init__(self, config):
        self.config = config
        self._dataSource = config.dataSource
        self._stopWordSource = config.stopWordSource  
        
        self._sequenceLength = config.sequenceLength  # each input sequence is processed to a fixed length
        self._embeddingSize = config.model.embeddingSize
        self._batchSize = config.batchSize
        self._rate = config.rate
        
        self._stopWordDict = {}
        
        self.trainReviews = []
        self.trainLabels = []
        
        self.evalReviews = []
        self.evalLabels = []
        
        self.wordEmbedding =None
        
        self.labelList = []
        
    def _readData(self, filePath):
        """
        Read the dataset from the csv file
        """
        
        df = pd.read_csv(filePath)
        
        if self.config.numClasses == 1:
            labels = df["sentiment"].tolist()
        elif self.config.numClasses > 1:
            labels = df["rate"].tolist()
            
        review = df["review"].tolist()
        reviews = [line.strip().split() for line in review]

        return reviews, labels
    
    def _labelToIndex(self, labels, label2idx):
        """
        Convert labels to their index representation
        """
        labelIds = [label2idx[label] for label in labels]
        return labelIds
    
    def _wordToIndex(self, reviews, word2idx):
        """
        Convert words to indices
        """
        reviewIds = [[word2idx.get(item, word2idx["UNK"]) for item in review] for review in reviews]
        return reviewIds
        
    def _genTrainEvalData(self, x, y, word2idx, rate):
        """
        Generate the training and validation sets
        """
        reviews = []
        for review in x:
            if len(review) >= self._sequenceLength:
                reviews.append(review[:self._sequenceLength])
            else:
                reviews.append(review + [word2idx["PAD"]] * (self._sequenceLength - len(review)))
            
        trainIndex = int(len(x) * rate)
        
        trainReviews = np.asarray(reviews[:trainIndex], dtype="int64")
        trainLabels = np.array(y[:trainIndex], dtype="float32")
        
        evalReviews = np.asarray(reviews[trainIndex:], dtype="int64")
        evalLabels = np.array(y[trainIndex:], dtype="float32")

        return trainReviews, trainLabels, evalReviews, evalLabels
        
    def _genVocabulary(self, reviews, labels):
        """
        Generate the word vectors and the word-to-index dictionary; the full dataset can be used here
        """
        
        allWords = [word for review in reviews for word in review]
        
        # Remove stop words
        subWords = [word for word in allWords if word not in self.stopWordDict]
        
        wordCount = Counter(subWords)  # count word frequencies
        sortWordCount = sorted(wordCount.items(), key=lambda x: x[1], reverse=True)
        
        # Remove low-frequency words
        words = [item[0] for item in sortWordCount if item[1] >= 5]
        
        vocab, wordEmbedding = self._getWordEmbedding(words)
        self.wordEmbedding = wordEmbedding
        
        word2idx = dict(zip(vocab, list(range(len(vocab)))))
        
        uniqueLabel = list(set(labels))
        label2idx = dict(zip(uniqueLabel, list(range(len(uniqueLabel)))))
        self.labelList = list(range(len(uniqueLabel)))
        
        # Save the word-to-index mapping as JSON so it can be loaded directly at inference time
        with open("../data/wordJson/word2idx.json", "w", encoding="utf-8") as f:
            json.dump(word2idx, f)
        
        with open("../data/wordJson/label2idx.json", "w", encoding="utf-8") as f:
            json.dump(label2idx, f)
        
        return word2idx, label2idx
            
    def _getWordEmbedding(self, words):
        """
        Fetch the pre-trained word2vec vectors for the words in our dataset
        """
        
        wordVec = gensim.models.KeyedVectors.load_word2vec_format("../word2vec/word2Vec.bin", binary=True)
        vocab = []
        wordEmbedding = []
        
        # Add "PAD" and "UNK"
        vocab.append("PAD")
        vocab.append("UNK")
        wordEmbedding.append(np.zeros(self._embeddingSize))
        wordEmbedding.append(np.random.randn(self._embeddingSize))
        
        for word in words:
            try:
                vector = wordVec.wv[word]
                vocab.append(word)
                wordEmbedding.append(vector)
            except:
                print(word + " is not in the pre-trained word vectors")
                
        return vocab, np.array(wordEmbedding)
    
    def _readStopWord(self, stopWordPath):
        """
        Read the stop words
        """
        
        with open(stopWordPath, "r") as f:
            stopWords = f.read()
            stopWordList = stopWords.splitlines()
            # Store the stop words in a dict so that later lookups are fast
            self.stopWordDict = dict(zip(stopWordList, list(range(len(stopWordList)))))
            
    def dataGen(self):
        """
        Initialize the training and validation sets
        """
        
        # Initialize the stop words
        self._readStopWord(self._stopWordSource)
        
        # Load the dataset
        reviews, labels = self._readData(self._dataSource)
        
        # Build the word-to-index mapping and the word embedding matrix
        word2idx, label2idx = self._genVocabulary(reviews, labels)
        
        # Convert the labels and sentences to numeric form
        labelIds = self._labelToIndex(labels, label2idx)
        reviewIds = self._wordToIndex(reviews, word2idx)
        
        # Build the training and test sets
        trainReviews, trainLabels, evalReviews, evalLabels = self._genTrainEvalData(reviewIds, labelIds, word2idx, self._rate)
        self.trainReviews = trainReviews
        self.trainLabels = trainLabels
        
        self.evalReviews = evalReviews
        self.evalLabels = evalLabels
        
        
data = Dataset(config)
data.dataGen()

Let's walk through it step by step:

A sample of the data in labeledTrain.csv:

The first column is the id, the second is the label, and the third is the review.
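Since the screenshot of the file is not reproduced here, a quick way to peek at it yourself (a sketch of my own; the path comes from Config.dataSource and the column names are an assumption based on the _readData code below) is:

import pandas as pd

df = pd.read_csv("../data/preProcess/labeledTrain.csv")
print(df.columns.tolist())  # expected to include "sentiment" and "review"
print(df.head(3))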

A sample of the stop-word file english:

Part of what the _readData(filePath) method returns: word lists and their labels, for example:

['with', 'all', 'this', 'stuff', 'going', 'down', 'at', 'the', 'moment', 'with', 'mj', 'ive', 'started', 'listening', 'to', 'his', 'music', 'watching', 'the', 'odd', 'documentary', 'here', 'and', 'there', 'watched', 'the', 'wiz', 'and', 'watched', 'moonwalker', 'again', 'maybe', 'i', 'just', 'want', 'to', 'get', 'a', 'certain', 'insight', 'into', 'this', 'guy', 'who', 'i', 'thought', 'was', 'really', 'cool', 'in', 'the', 'eighties', 'just', 'to', 'maybe', 'make', 'up', 'my', 'mind', 'whether', 'he', 'is', 'guilty', 'or', 'innocent', 'moonwalker', 'is', 'part', 'biography', 'part', 'feature', 'film', 'which', 'i', 'remember', 'going', 'to', 'see', 'at', 'the', 'cinema', 'when', 'it', 'was', 'originally', 'released', 'some', 'of', 'it', 'has', 'subtle', 'messages', 'about', 'mjs', 'feeling', 'towards', 'the', 'press', 'and', 'also', 'the', 'obvious', 'message', 'of', 'drugs', 'are', 'bad', 'mkayvisually', 'impressive', 'but', 'of', 'course', 'this', 'is', 'all', 'about', 'michael', 'jackson', 'so', 'unless', 'you', 'remotely', 'like', 'mj', 'in', 'anyway', 'then', 'you', 'are', 'going', 'to', 'hate', 'this', 'and', 'find', 'it', 'boring', 'some', 'may', 'call', 'mj', 'an', 'egotist', 'for', 'consenting', 'to', 'the', 'making', 'of', 'this', 'movie', 'but', 'mj', 'and', 'most', 'of', 'his', 'fans', 'would', 'say', 'that', 'he', 'made', 'it', 'for', 'the', 'fans', 'which', 'if', 'true', 'is', 'really', 'nice', 'of', 'himthe', 'actual', 'feature', 'film', 'bit', 'when', 'it', 'finally', 'starts', 'is', 'only', 'on', 'for', '20', 'minutes', 'or', 'so', 'excluding', 'the', 'smooth', 'criminal', 'sequence', 'and', 'joe', 'pesci', 'is', 'convincing', 'as', 'a', 'psychopathic', 'all', 'powerful', 'drug', 'lord', 'why', 'he', 'wants', 'mj', 'dead', 'so', 'bad', 'is', 'beyond', 'me', 'because', 'mj', 'overheard', 'his', 'plans', 'nah', 'joe', 'pescis', 'character', 'ranted', 'that', 'he', 'wanted', 'people', 'to', 'know', 'it', 'is', 'he', 'who', 'is', 'supplying', 'drugs', 'etc', 'so', 'i', 'dunno', 'maybe', 'he', 'just', 'hates', 'mjs', 'musiclots', 'of', 'cool', 'things', 'in', 'this', 'like', 'mj', 'turning', 'into', 'a', 'car', 'and', 'a', 'robot', 'and', 'the', 'whole', 'speed', 'demon', 'sequence', 'also', 'the', 'director', 'must', 'have', 'had', 'the', 'patience', 'of', 'a', 'saint', 'when', 'it', 'came', 'to', 'filming', 'the', 'kiddy', 'bad', 'sequence', 'as', 'usually', 'directors', 'hate', 'working', 'with', 'one', 'kid', 'let', 'alone', 'a', 'whole', 'bunch', 'of', 'them', 'performing', 'a', 'complex', 'dance', 'scenebottom', 'line', 'this', 'movie', 'is', 'for', 'people', 'who', 'like', 'mj', 'on', 'one', 'level', 'or', 'another', 'which', 'i', 'think', 'is', 'most', 'people', 'if', 'not', 'then', 'stay', 'away', 'it', 'does', 'try', 'and', 'give', 'off', 'a', 'wholesome', 'message', 'and', 'ironically', 'mjs', 'bestest', 'buddy', 'in', 'this', 'movie', 'is', 'a', 'girl!', 'michael', 'jackson', 'is', 'truly', 'one', 'of', 'the', 'most', 'talented', 'people', 'ever', 'to', 'grace', 'this', 'planet', 'but', 'is', 'he', 'guilty', 'well', 'with', 'all', 'the', 'attention', 'ive', 'gave', 'this', 'subjecthmmm', 'well', 'i', 'dont', 'know', 'because', 'people', 'can', 'be', 'different', 'behind', 'closed', 'doors', 'i', 'know', 'this', 'for', 'a', 'fact', 'he', 'is', 'either', 'an', 'extremely', 'nice', 'but', 'stupid', 'guy', 'or', 'one', 'of', 'the', 'most', 'sickest', 'liars', 'i', 'hope', 'he', 'is', 'not', 'the', 'latter'] 1

The _genVocabulary(self, reviews, labels) method:

It is passed the list of word lists along with the label for each word list.

1) Collect all the words from all reviews

2) Remove stop words

3) Remove words that appear with low frequency

4) Call _getWordEmbedding(words) with the filtered vocabulary:

  • The shapes of vocab and wordEmbedding are (31983,) and (31983, 200)

5) word2idx maps each word to a numeric index:

{'PAD': 0, 'UNK': 1, 'movie': 2, 'film': 3, ... Note that padding is represented by 'PAD' and out-of-vocabulary words by 'UNK'.

6) label2idx likewise maps each label to a numeric index:

{0: 0, 1: 1}. A self.labelList is also stored.

7) Save word2idx and label2idx to JSON files.

The _labelToIndex(labels, label2idx) and _wordToIndex(reviews, word2idx) methods:

They convert the words in reviews and the labels into their numeric representations.

_genTrainEvalData(x, y, word2idx, rate): splits the data into training and test sets.
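After data.dataGen() above has run, a quick shape check (my own addition, not from the original post) confirms the split; with rate = 0.8 on the 25,000 labeled IMDB reviews, the sizes should come out roughly as follows:

print(data.trainReviews.shape)   # e.g. (20000, 200): 80% of the reviews, padded/truncated to sequenceLength
print(data.evalReviews.shape)    # e.g. (5000, 200)
print(data.wordEmbedding.shape)  # e.g. (31983, 200), matching the vocabulary size noted above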

3. Generating batch data

Batches are fed to the model from a generator (a generator avoids loading all of the data into memory at once).

# Yield the dataset in batches

def nextBatch(x, y, batchSize):
        """
        Generate batches of data, yielded from a generator
        """
    
        perm = np.arange(len(x))
        np.random.shuffle(perm)
        x = x[perm]
        y = y[perm]
        
        numBatches = len(x) // batchSize

        for i in range(numBatches):
            start = i * batchSize
            end = start + batchSize
            batchX = np.array(x[start: end], dtype="int64")
            batchY = np.array(y[start: end], dtype="float32")
            
            yield batchX, batchY

4. Building the Transformer model

Some practical notes on using the transformer model:

  1) Here, a fixed one-hot position embedding works better than the sine/cosine position embedding proposed in the paper. A possible reason is that, as implemented here, the paper-style position embedding is passed in as trainable values, which increases model complexity and degrades performance on a small dataset (the IMDB training set has 20,000 samples).

  2) The mask may not be needed; adding or removing it makes essentially no difference to the results here. It may help on other tasks or datasets, but the paper does not require a mask in the encoder; masks are mainly used in the decoder.

  3) The number of transformer layers can be tuned to the size of your dataset; on a small dataset one layer is basically enough.

  4) Apply dropout regularization to the sub-layers, mainly the multi-head attention layer. Since feed forward is implemented with convolutions, skipping dropout there should be fine; if feed forward were implemented with fully connected layers, add dropout there as well.

  5) On small datasets the transformer is not necessarily better than Bi-LSTM + Attention; on IMDB it is actually worse.

# Generate the (one-hot) position embedding
def fixedPositionEmbedding(batchSize, sequenceLen):
    embeddedPosition = []
    for batch in range(batchSize):
        x = []
        for step in range(sequenceLen):
            a = np.zeros(sequenceLen)
            a[step] = 1
            x.append(a)
        embeddedPosition.append(x)
    
    return np.array(embeddedPosition, dtype="float32")
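Since the one-hot position matrix is identical for every sample, the double loop above can also be written in a vectorized way; this is just an equivalent rewrite of mine (with a hypothetical name), not code from the original repo:

# Equivalent, loop-free version of fixedPositionEmbedding (produces the same array)
def fixedPositionEmbeddingVectorized(batchSize, sequenceLen):
    eye = np.eye(sequenceLen, dtype="float32")  # row t is the one-hot vector for position t
    return np.tile(eye[np.newaxis, :, :], [batchSize, 1, 1])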


# Model construction

class Transformer(object):
    """
    Transformer Encoder for text classification
    """
    def __init__(self, config, wordEmbedding):

        # Define the model inputs
        self.inputX = tf.placeholder(tf.int32, [None, config.sequenceLength], name="inputX")
        self.inputY = tf.placeholder(tf.int32, [None], name="inputY")
        
        self.dropoutKeepProb = tf.placeholder(tf.float32, name="dropoutKeepProb")
        self.embeddedPosition = tf.placeholder(tf.float32, [None, config.sequenceLength, config.sequenceLength], name="embeddedPosition")
        
        self.config = config
        
        # Define the L2 loss
        l2Loss = tf.constant(0.0)
        
        # Word embedding layer. There are two ways to define the position vectors: one is to pass them in directly in fixed one-hot form and concatenate them with the word vectors, which performs better on the current dataset.
        # The other is to implement them as in the paper, which actually performs worse, possibly because it increases model complexity and does poorly on a small dataset.
        
        with tf.name_scope("embedding"):

            # Initialize the word embedding matrix with the pre-trained word vectors
            self.W = tf.Variable(tf.cast(wordEmbedding, dtype=tf.float32, name="word2vec"), name="W")
            # Use the embedding matrix to map the input words to word vectors; shape [batch_size, sequence_length, embedding_size]
            self.embedded = tf.nn.embedding_lookup(self.W, self.inputX)
            self.embeddedWords = tf.concat([self.embedded, self.embeddedPosition], -1)

        with tf.name_scope("transformer"):
            for i in range(config.model.numBlocks):
                with tf.name_scope("transformer-{}".format(i + 1)):
            
                    # shape [batch_size, sequence_length, embedding_size]
                    multiHeadAtt = self._multiheadAttention(rawKeys=self.inputX, queries=self.embeddedWords,
                                                            keys=self.embeddedWords)
                    # shape [batch_size, sequence_length, embedding_size]
                    self.embeddedWords = self._feedForward(multiHeadAtt, 
                                                           [config.model.filters, config.model.embeddingSize + config.sequenceLength])
                
            outputs = tf.reshape(self.embeddedWords, [-1, config.sequenceLength * (config.model.embeddingSize + config.sequenceLength)])

        outputSize = outputs.get_shape()[-1].value

#         with tf.name_scope("wordEmbedding"):
#             self.W = tf.Variable(tf.cast(wordEmbedding, dtype=tf.float32, name="word2vec"), name="W")
#             self.wordEmbedded = tf.nn.embedding_lookup(self.W, self.inputX)
        
#         with tf.name_scope("positionEmbedding"):
#             print(self.wordEmbedded)
#             self.positionEmbedded = self._positionEmbedding()
            
#         self.embeddedWords = self.wordEmbedded + self.positionEmbedded
            
#         with tf.name_scope("transformer"):
#             for i in range(config.model.numBlocks):
#                 with tf.name_scope("transformer-{}".format(i + 1)):
            
#                     # shape [batch_size, sequence_length, embedding_size]
#                     multiHeadAtt = self._multiheadAttention(rawKeys=self.wordEmbedded, queries=self.embeddedWords,
#                                                             keys=self.embeddedWords)
#                     # shape [batch_size, sequence_length, embedding_size]
#                     self.embeddedWords = self._feedForward(multiHeadAtt, [config.model.filters, config.model.embeddingSize])
                
#             outputs = tf.reshape(self.embeddedWords, [-1, config.sequenceLength * (config.model.embeddingSize)])

#         outputSize = outputs.get_shape()[-1].value
        
        with tf.name_scope("dropout"):
            outputs = tf.nn.dropout(outputs, keep_prob=self.dropoutKeepProb)
    
        # Output of the fully connected layer
        with tf.name_scope("output"):
            outputW = tf.get_variable(
                "outputW",
                shape=[outputSize, config.numClasses],
                initializer=tf.contrib.layers.xavier_initializer())
            
            outputB= tf.Variable(tf.constant(0.1, shape=[config.numClasses]), name="outputB")
            l2Loss += tf.nn.l2_loss(outputW)
            l2Loss += tf.nn.l2_loss(outputB)
            self.logits = tf.nn.xw_plus_b(outputs, outputW, outputB, name="logits")
            
            if config.numClasses == 1:
                self.predictions = tf.cast(tf.greater_equal(self.logits, 0.0), tf.float32, name="predictions")
            elif config.numClasses > 1:
                self.predictions = tf.argmax(self.logits, axis=-1, name="predictions")
        
        # Compute the cross-entropy loss
        with tf.name_scope("loss"):
            
            if config.numClasses == 1:
                losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(tf.reshape(self.inputY, [-1, 1]), 
                                                                                                    dtype=tf.float32))
            elif config.numClasses > 1:
                losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.inputY)
                
            self.loss = tf.reduce_mean(losses) + config.model.l2RegLambda * l2Loss
            
    def _layerNormalization(self, inputs, scope="layerNorm"):
        # A LayerNorm layer differs from a BatchNorm layer
        epsilon = self.config.model.epsilon

        inputsShape = inputs.get_shape() # [batch_size, sequence_length, embedding_size]

        paramsShape = inputsShape[-1:]

        # LayerNorm computes the mean and variance of the input over the last dimension, whereas BatchNorm takes all dimensions into account
        # mean and variance both have shape [batch_size, sequence_len, 1]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)

        beta = tf.Variable(tf.zeros(paramsShape))

        gamma = tf.Variable(tf.ones(paramsShape))
        normalized = (inputs - mean) / ((variance + epsilon) ** .5)
        
        outputs = gamma * normalized + beta

        return outputs
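    # As a formula (a note added here, not in the original code), over the last dimension
    # this method computes the standard layer normalization:
    #     LN(x) = gamma * (x - mean(x)) / sqrt(var(x) + epsilon) + beta
    # with gamma and beta learnable, each of the size of the last dimension of the input.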
            
    def _multiheadAttention(self, rawKeys, queries, keys, numUnits=None, causality=False, scope="multiheadAttention"):
        # rawKeys is only used to compute the mask, because keys already have the position embedding added and therefore no longer contain zero padding values
        
        numHeads = self.config.model.numHeads
        keepProp = self.config.model.keepProp
        
        if numUnits is None:  # if no value is passed in, simply take the last dimension of the input, i.e. the embedding size.
            numUnits = queries.get_shape().as_list()[-1]

        # tf.layers.dense can apply a non-linear mapping to multi-dimensional tensors. When computing self-attention these three values must be mapped non-linearly;
        # this is the weight-mapping step applied to the split data in the paper's Multi-Head Attention, except that here we map first and split afterwards, which is equivalent in principle.
        # Q, K, V all have shape [batch_size, sequence_length, embedding_size]
        Q = tf.layers.dense(queries, numUnits, activation=tf.nn.relu)
        K = tf.layers.dense(keys, numUnits, activation=tf.nn.relu)
        V = tf.layers.dense(keys, numUnits, activation=tf.nn.relu)

        # Split the data into num_heads pieces along the last dimension, then concatenate them along the first dimension
        # Q, K, V all have shape [batch_size * numHeads, sequence_length, embedding_size/numHeads]
        Q_ = tf.concat(tf.split(Q, numHeads, axis=-1), axis=0) 
        K_ = tf.concat(tf.split(K, numHeads, axis=-1), axis=0) 
        V_ = tf.concat(tf.split(V, numHeads, axis=-1), axis=0)

        # Compute the dot product between queries and keys; shape [batch_size * numHeads, queries_len, key_len], where the last two dimensions are the sequence lengths of queries and keys
        similary = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1]))

        # Scale the dot products by dividing by the square root of the vector length
        scaledSimilary = similary / (K_.get_shape().as_list()[-1] ** 0.5)

        # The input sequences contain padding tokens, which should contribute nothing to the final result. In principle, when the padding input is 0,
        # the computed weights would also be 0, but the transformer adds position vectors, after which those values are no longer 0. We therefore mask them
        # back to 0 based on the values before the position vectors were added (rawKeys). Padding also exists in the queries, but in principle the result only
        # depends on the inputs, and in self-attention queries = keys, so as long as one side is 0 the resulting weight is 0.
        # For more on the key mask, see: https://github.com/Kyubyong/transformer/issues/3

        # Use tf.tile to expand the tensor; shape [batch_size * numHeads, keys_len], where keys_len is the sequence length of keys
        keyMasks = tf.tile(rawKeys, [numHeads, 1]) 

        # Add a dimension and tile, giving shape [batch_size * numHeads, queries_len, keys_len]
        keyMasks = tf.tile(tf.expand_dims(keyMasks, 1), [1, tf.shape(queries)[1], 1])

        # tf.ones_like creates a tensor of ones with the same shape as scaledSimilary, which is then scaled to a very large negative value
        paddings = tf.ones_like(scaledSimilary) * (-2 ** (32 + 1))

        # tf.where(condition, x, y): condition contains bool values; True entries are replaced with elements of x and False entries with elements of y,
        # so condition, x and y must all have the same shape. Below, wherever keyMasks is 0 the value is replaced with the corresponding entry of paddings
        maskedSimilary = tf.where(tf.equal(keyMasks, 0), paddings, scaledSimilary)  # shape [batch_size * numHeads, queries_len, key_len]

        # Causality: when computing the current word, only the preceding context is considered, not the following one; this appears in the Transformer Decoder. For text classification, the Transformer Encoder alone is enough.
        # The Decoder is a generative model, mainly used for language generation
        if causality:
            diagVals = tf.ones_like(maskedSimilary[0, :, :])  # [queries_len, keys_len]
            tril = tf.contrib.linalg.LinearOperatorTriL(diagVals).to_dense()  # [queries_len, keys_len]
            masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(maskedSimilary)[0], 1, 1])  # [batch_size * numHeads, queries_len, keys_len]

            paddings = tf.ones_like(masks) * (-2 ** (32 + 1))
            maskedSimilary = tf.where(tf.equal(masks, 0), paddings, maskedSimilary)  # [batch_size * numHeads, queries_len, keys_len]

        # Compute the weights with softmax; shape [batch_size * numHeads, queries_len, keys_len]
        weights = tf.nn.softmax(maskedSimilary)

        # Weighted sum gives the output; shape [batch_size * numHeads, sequence_length, embedding_size/numHeads]
        outputs = tf.matmul(weights, V_)

        # Recombine the multi-head outputs back into the original shape [batch_size, sequence_length, embedding_size]
        outputs = tf.concat(tf.split(outputs, numHeads, axis=0), axis=2)
        
        outputs = tf.nn.dropout(outputs, keep_prob=keepProp)

        # Add a residual connection around each sub-layer, i.e. H(x) = F(x) + x
        outputs += queries
        # normalization layer
        outputs = self._layerNormalization(outputs)
        return outputs
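    # In the paper's notation (a summary comment added here, not in the original code),
    # per head the method above computes scaled dot-product attention:
    #     Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k) + M) V
    # where d_k is the per-head dimension (numUnits / numHeads) and M is the padding
    # (and, if causality is True, causal) mask of large negative values; the heads are
    # then concatenated back together.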

    def _feedForward(self, inputs, filters, scope="feedForward"):
        # The feed-forward part here is implemented with a convolutional network
        
        # Inner layer
        params = {"inputs": inputs, "filters": filters[0], "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)

        # Outer layer
        params = {"inputs": outputs, "filters": filters[1], "kernel_size": 1,
                  "activation": None, "use_bias": True}

        # A 1-D convolution is used here; the kernel is actually still two-dimensional, but only its height needs to be specified since its width matches the embedding size
        # shape [batch_size, sequence_length, embedding_size]
        outputs = tf.layers.conv1d(**params)

        # Residual connection
        outputs += inputs

        # Layer normalization
        outputs = self._layerNormalization(outputs)

        return outputs
    
    def _positionEmbedding(self, scope="positionEmbedding"):
        # Generate the paper-style (sine/cosine) position embedding
        batchSize = self.config.batchSize
        sequenceLen = self.config.sequenceLength
        embeddingSize = self.config.model.embeddingSize
        
        # Generate the position indices and tile them across every sample in the batch
        positionIndex = tf.tile(tf.expand_dims(tf.range(sequenceLen), 0), [batchSize, 1])

        # First compute the angle term of the embedding for each position
        positionEmbedding = np.array([[pos / np.power(10000, (i-i%2) / embeddingSize) for i in range(embeddingSize)] 
                                      for pos in range(sequenceLen)])

        # Then wrap even indices with sin and odd indices with cos
        positionEmbedding[:, 0::2] = np.sin(positionEmbedding[:, 0::2])
        positionEmbedding[:, 1::2] = np.cos(positionEmbedding[:, 1::2])

        # Convert positionEmbedding to a tensor
        positionEmbedding_ = tf.cast(positionEmbedding, dtype=tf.float32)

        # Obtain a 3-D tensor [batchSize, sequenceLen, embeddingSize]
        positionEmbedded = tf.nn.embedding_lookup(positionEmbedding_, positionIndex)

        return positionEmbedded
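    # For reference (a note added here, not in the original code), the formula implemented
    # above is the sinusoidal position encoding from "Attention Is All You Need":
    #     PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
    #     PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
    # where (i - i % 2) in the code equals 2i and d_model is embeddingSize.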

5. Defining the metric-computation functions

"""
Define the various performance metrics
"""

def mean(item: list) -> float:
    """
    Compute the mean of the elements in a list
    :param item: the list object
    :return:
    """
    res = sum(item) / len(item) if len(item) > 0 else 0
    return res


def accuracy(pred_y, true_y):
    """
    Compute the accuracy for binary and multi-class cases
    :param pred_y: predicted results
    :param true_y: ground-truth results
    :return:
    """
    if isinstance(pred_y[0], list):
        pred_y = [item[0] for item in pred_y]
    corr = 0
    for i in range(len(pred_y)):
        if pred_y[i] == true_y[i]:
            corr += 1
    acc = corr / len(pred_y) if len(pred_y) > 0 else 0
    return acc


def binary_precision(pred_y, true_y, positive=1):
    """
    Precision for the binary case
    :param pred_y: predicted results
    :param true_y: ground-truth results
    :param positive: index representing the positive class
    :return:
    """
    corr = 0
    pred_corr = 0
    for i in range(len(pred_y)):
        if pred_y[i] == positive:
            pred_corr += 1
            if pred_y[i] == true_y[i]:
                corr += 1

    prec = corr / pred_corr if pred_corr > 0 else 0
    return prec


def binary_recall(pred_y, true_y, positive=1):
    """
    Recall for the binary case
    :param pred_y: predicted results
    :param true_y: ground-truth results
    :param positive: index representing the positive class
    :return:
    """
    corr = 0
    true_corr = 0
    for i in range(len(pred_y)):
        if true_y[i] == positive:
            true_corr += 1
            if pred_y[i] == true_y[i]:
                corr += 1

    rec = corr / true_corr if true_corr > 0 else 0
    return rec


def binary_f_beta(pred_y, true_y, beta=1.0, positive=1):
    """
    F-beta score for the binary case
    :param pred_y: predicted results
    :param true_y: ground-truth results
    :param beta: the beta value
    :param positive: index representing the positive class
    :return:
    """
    precision = binary_precision(pred_y, true_y, positive)
    recall = binary_recall(pred_y, true_y, positive)
    try:
        f_b = (1 + beta * beta) * precision * recall / (beta * beta * precision + recall)
    except:
        f_b = 0
    return f_b


def multi_precision(pred_y, true_y, labels):
    """
    Precision for the multi-class case
    :param pred_y: predicted results
    :param true_y: ground-truth results
    :param labels: list of labels
    :return:
    """
    if isinstance(pred_y[0], list):
        pred_y = [item[0] for item in pred_y]

    precisions = [binary_precision(pred_y, true_y, label) for label in labels]
    prec = mean(precisions)
    return prec


def multi_recall(pred_y, true_y, labels):
    """
    Recall for the multi-class case
    :param pred_y: predicted results
    :param true_y: ground-truth results
    :param labels: list of labels
    :return:
    """
    if isinstance(pred_y[0], list):
        pred_y = [item[0] for item in pred_y]

    recalls = [binary_recall(pred_y, true_y, label) for label in labels]
    rec = mean(recalls)
    return rec


def multi_f_beta(pred_y, true_y, labels, beta=1.0):
    """
    F-beta score for the multi-class case
    :param pred_y: predicted results
    :param true_y: ground-truth results
    :param labels: list of labels
    :param beta: the beta value
    :return:
    """
    if isinstance(pred_y[0], list):
        pred_y = [item[0] for item in pred_y]

    f_betas = [binary_f_beta(pred_y, true_y, beta, label) for label in labels]
    f_beta = mean(f_betas)
    return f_beta


def get_binary_metrics(pred_y, true_y, f_beta=1.0):
    """
    Get the performance metrics for binary classification
    :param pred_y:
    :param true_y:
    :param f_beta:
    :return:
    """
    acc = accuracy(pred_y, true_y)
    recall = binary_recall(pred_y, true_y)
    precision = binary_precision(pred_y, true_y)
    f_beta = binary_f_beta(pred_y, true_y, f_beta)
    return acc, recall, precision, f_beta


def get_multi_metrics(pred_y, true_y, labels, f_beta=1.0):
    """
    Get the performance metrics for multi-class classification
    :param pred_y:
    :param true_y:
    :param labels:
    :param f_beta:
    :return:
    """
    acc = accuracy(pred_y, true_y)
    recall = multi_recall(pred_y, true_y, labels)
    precision = multi_precision(pred_y, true_y, labels)
    f_beta = multi_f_beta(pred_y, true_y, labels, f_beta)
    return acc, recall, precision, f_beta
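As a quick sanity check of these helpers, here is a toy example of my own with the values worked out by hand (note that with beta = 1, binary_f_beta reduces to the usual F1 = 2 * precision * recall / (precision + recall)):

pred = [1, 0, 1, 1]
true = [1, 0, 0, 1]
print(get_binary_metrics(pred, true))
# -> (0.75, 1.0, 0.6666666666666666, 0.8), i.e. acc, recall, precision, f_beta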

6. Training the model

During training we set up TensorBoard summaries and define two ways of saving the model.

# Train the model

# Get the training and validation sets
trainReviews = data.trainReviews
trainLabels = data.trainLabels
evalReviews = data.evalReviews
evalLabels = data.evalLabels

wordEmbedding = data.wordEmbedding
labelList = data.labelList

embeddedPosition = fixedPositionEmbedding(config.batchSize, config.sequenceLength)

# Define the computation graph
with tf.Graph().as_default():

    session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    session_conf.gpu_options.allow_growth=True
    session_conf.gpu_options.per_process_gpu_memory_fraction = 0.9  # configure the GPU memory fraction

    sess = tf.Session(config=session_conf)
    
    # Define the session
    with sess.as_default():
        transformer = Transformer(config, wordEmbedding)
        
        globalStep = tf.Variable(0, name="globalStep", trainable=False)
        # Define the optimizer, passing in the learning rate
        optimizer = tf.train.AdamOptimizer(config.training.learningRate)
        # Compute the gradients, obtaining (gradient, variable) pairs
        gradsAndVars = optimizer.compute_gradients(transformer.loss)
        # Apply the gradients to the variables, producing the training op
        trainOp = optimizer.apply_gradients(gradsAndVars, global_step=globalStep)
        
        # Use summaries to draw TensorBoard plots
        gradSummaries = []
        for g, v in gradsAndVars:
            if g is not None:
                tf.summary.histogram("{}/grad/hist".format(v.name), g)
                tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
        
        outDir = os.path.abspath(os.path.join(os.path.curdir, "summarys"))
        print("Writing to {}\n".format(outDir))
        
        lossSummary = tf.summary.scalar("loss", transformer.loss)
        summaryOp = tf.summary.merge_all()
        
        trainSummaryDir = os.path.join(outDir, "train")
        trainSummaryWriter = tf.summary.FileWriter(trainSummaryDir, sess.graph)
        
        evalSummaryDir = os.path.join(outDir, "eval")
        evalSummaryWriter = tf.summary.FileWriter(evalSummaryDir, sess.graph)
        
        
        # Saver for the model variables (checkpoints, keep at most 5)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
        
        # One way of saving the model: export it as a pb file (SavedModel)
        savedModelPath = "../model/transformer/savedModel"
        if os.path.exists(savedModelPath):
            os.rmdir(savedModelPath)
        builder = tf.saved_model.builder.SavedModelBuilder(savedModelPath)
            
        sess.run(tf.global_variables_initializer())

        def trainStep(batchX, batchY):
            """
            Training step
            """   
            feed_dict = {
              transformer.inputX: batchX,
              transformer.inputY: batchY,
              transformer.dropoutKeepProb: config.model.dropoutKeepProb,
              transformer.embeddedPosition: embeddedPosition
            }
            _, summary, step, loss, predictions = sess.run(
                [trainOp, summaryOp, globalStep, transformer.loss, transformer.predictions],
                feed_dict)
            
            if config.numClasses == 1:
                acc, recall, prec, f_beta = get_binary_metrics(pred_y=predictions, true_y=batchY)

                
            elif config.numClasses > 1:
                acc, recall, prec, f_beta = get_multi_metrics(pred_y=predictions, true_y=batchY,
                                                              labels=labelList)
                
            trainSummaryWriter.add_summary(summary, step)
            
            return loss, acc, prec, recall, f_beta

        def devStep(batchX, batchY):
            """
            Evaluation step
            """
            feed_dict = {
              transformer.inputX: batchX,
              transformer.inputY: batchY,
              transformer.dropoutKeepProb: 1.0,
              transformer.embeddedPosition: embeddedPosition
            }
            summary, step, loss, predictions = sess.run(
                [summaryOp, globalStep, transformer.loss, transformer.predictions],
                feed_dict)
            
            if config.numClasses == 1:
                acc, recall, prec, f_beta = get_binary_metrics(pred_y=predictions, true_y=batchY)

                
            elif config.numClasses > 1:
                acc, recall, prec, f_beta = get_multi_metrics(pred_y=predictions, true_y=batchY,
                                                              labels=labelList)
                
            evalSummaryWriter.add_summary(summary, step)
            
            return loss, acc, prec, recall, f_beta
        
        for i in range(config.training.epoches):
            # Train on the training set
            print("start training model")
            for batchTrain in nextBatch(trainReviews, trainLabels, config.batchSize):
                loss, acc, prec, recall, f_beta = trainStep(batchTrain[0], batchTrain[1])
                
                currentStep = tf.train.global_step(sess, globalStep) 
                print("train: step: {}, loss: {}, acc: {}, recall: {}, precision: {}, f_beta: {}".format(
                    currentStep, loss, acc, recall, prec, f_beta))
                if currentStep % config.training.evaluateEvery == 0:
                    print("\nEvaluation:")
                    
                    losses = []
                    accs = []
                    f_betas = []
                    precisions = []
                    recalls = []
                    
                    for batchEval in nextBatch(evalReviews, evalLabels, config.batchSize):
                        loss, acc, precision, recall, f_beta = devStep(batchEval[0], batchEval[1])
                        losses.append(loss)
                        accs.append(acc)
                        f_betas.append(f_beta)
                        precisions.append(precision)
                        recalls.append(recall)
                        
                    time_str = datetime.datetime.now().isoformat()
                    print("{}, step: {}, loss: {}, acc: {},precision: {}, recall: {}, f_beta: {}".format(time_str, currentStep, mean(losses), 
                                                                                                       mean(accs), mean(precisions),
                                                                                                       mean(recalls), mean(f_betas)))
                    
                if currentStep % config.training.checkpointEvery == 0:
                    # The other way of saving the model: checkpoint files
                    path = saver.save(sess, "../model/Transformer/model/my-model", global_step=currentStep)
                    print("Saved model checkpoint to {}\n".format(path))
                    
        inputs = {"inputX": tf.saved_model.utils.build_tensor_info(transformer.inputX),
                  "keepProb": tf.saved_model.utils.build_tensor_info(transformer.dropoutKeepProb)}

        outputs = {"predictions": tf.saved_model.utils.build_tensor_info(transformer.predictions)}

        prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(inputs=inputs, outputs=outputs,
                                                                                      method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
        legacy_init_op = tf.group(tf.tables_initializer(), name="legacy_init_op")
        builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING],
                                            signature_def_map={"predict": prediction_signature}, legacy_init_op=legacy_init_op)

        builder.save()

Note that the code above uses TensorFlow 1.x. Part of the run output:

train: step: 1453, loss: 0.0010375125566497445, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1454, loss: 0.0006778845563530922, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1455, loss: 0.007209389470517635, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1456, loss: 0.0059194001369178295, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1457, loss: 0.0002592140226624906, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1458, loss: 0.0020833390299230814, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1459, loss: 0.002238483401015401, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1460, loss: 0.0002154896064894274, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1461, loss: 0.0005287806270644069, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1462, loss: 0.0021261998917907476, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1463, loss: 0.003387441160157323, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1464, loss: 0.039518121629953384, acc: 0.9921875, recall: 1.0, precision: 0.9827586206896551, f_beta: 0.9913043478260869
train: step: 1465, loss: 0.017118675634264946, acc: 0.9921875, recall: 0.9848484848484849, precision: 1.0, f_beta: 0.9923664122137404
train: step: 1466, loss: 0.002999877789989114, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1467, loss: 0.00025316851679235697, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1468, loss: 0.0018627031240612268, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1469, loss: 0.00041201969725079834, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1470, loss: 0.0018406400922685862, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1471, loss: 0.0008799995994195342, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1472, loss: 0.004898811690509319, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1473, loss: 0.0004434642905835062, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1474, loss: 0.0011488802265375853, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1475, loss: 0.00015377056843135506, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1476, loss: 0.0103827565908432, acc: 0.9921875, recall: 0.9836065573770492, precision: 1.0, f_beta: 0.9917355371900827
train: step: 1477, loss: 0.0007278787670657039, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1478, loss: 0.0003133404243271798, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1479, loss: 0.00027148338267579675, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1480, loss: 0.0003839871205855161, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1481, loss: 0.0024200205225497484, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1482, loss: 0.0005372267332859337, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1483, loss: 0.0003639731730800122, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1484, loss: 0.0006208484992384911, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1485, loss: 0.006886144168674946, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1486, loss: 0.003239469835534692, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1487, loss: 0.0007233020151033998, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1488, loss: 0.0009364052675664425, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1489, loss: 0.0012875663815066218, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1490, loss: 0.00010077921615447849, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1491, loss: 0.0003866801271215081, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1492, loss: 0.006166142877191305, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1493, loss: 0.0022950763814151287, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1494, loss: 0.00023041354143060744, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1495, loss: 0.004388233181089163, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1496, loss: 0.00047737392014823854, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1497, loss: 0.0025870269164443016, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1498, loss: 0.002695532515645027, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1499, loss: 0.013221409171819687, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1500, loss: 0.00033678440377116203, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0

Evaluation:
2020-07-29T03:37:24.417831, step: 1500, loss: 0.8976672161848117, acc: 0.8693910256410257,precision: 0.8834109059648474, recall: 0.8552037062592709, f_beta: 0.8682522378163627
Saved model checkpoint to /content/drive/My Drive/textClassifier/model/transformer/savedModel/-1500

train: step: 1501, loss: 0.0007042207289487123, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1502, loss: 0.0014643988106399775, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1503, loss: 0.0010233941720798612, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1504, loss: 0.02987544611096382, acc: 0.9921875, recall: 0.9852941176470589, precision: 1.0, f_beta: 0.9925925925925926
train: step: 1505, loss: 0.012284890748560429, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1506, loss: 0.0018061138689517975, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1507, loss: 0.00026604655431583524, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1508, loss: 0.003529384732246399, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1509, loss: 0.02039383351802826, acc: 0.9921875, recall: 1.0, precision: 0.9857142857142858, f_beta: 0.9928057553956835
train: step: 1510, loss: 0.0014776198659092188, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1511, loss: 0.0029119777027517557, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1512, loss: 0.004221439827233553, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1513, loss: 0.0045303404331207275, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1514, loss: 0.00019781164883170277, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1515, loss: 4.5770495489705354e-05, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1516, loss: 0.00016127500566653907, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1517, loss: 0.0008834836189635098, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1518, loss: 0.0012142667546868324, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1519, loss: 0.009638940915465355, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1520, loss: 0.004639771766960621, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1521, loss: 0.0028490747790783644, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1522, loss: 0.01262330450117588, acc: 0.9921875, recall: 0.9838709677419355, precision: 1.0, f_beta: 0.991869918699187
train: step: 1523, loss: 0.0011988620972260833, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1524, loss: 0.00021289248252287507, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1525, loss: 0.00034694239730015397, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1526, loss: 0.0029277384746819735, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1527, loss: 0.02414204739034176, acc: 0.984375, recall: 1.0, precision: 0.971830985915493, f_beta: 0.9857142857142858
train: step: 1528, loss: 0.013479265384376049, acc: 0.9921875, recall: 1.0, precision: 0.9838709677419355, f_beta: 0.991869918699187
train: step: 1529, loss: 0.05391649901866913, acc: 0.9921875, recall: 1.0, precision: 0.9830508474576272, f_beta: 0.9914529914529915
train: step: 1530, loss: 0.00015737551439087838, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1531, loss: 0.0007785416673868895, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1532, loss: 0.004696957301348448, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1533, loss: 0.024878593161702156, acc: 0.984375, recall: 0.9666666666666667, precision: 1.0, f_beta: 0.983050847457627
train: step: 1534, loss: 0.04452724754810333, acc: 0.984375, recall: 0.967741935483871, precision: 1.0, f_beta: 0.9836065573770492
train: step: 1535, loss: 0.00039763524546287954, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1536, loss: 0.000782095710746944, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1537, loss: 0.006687487475574017, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1538, loss: 0.00039404688868671656, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1539, loss: 0.014523430727422237, acc: 0.9921875, recall: 1.0, precision: 0.9855072463768116, f_beta: 0.9927007299270074
train: step: 1540, loss: 0.009221377782523632, acc: 0.9921875, recall: 1.0, precision: 0.9824561403508771, f_beta: 0.9911504424778761
train: step: 1541, loss: 0.005190445575863123, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1542, loss: 0.004177240654826164, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1543, loss: 0.0017567630857229233, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1544, loss: 0.0006568725220859051, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1545, loss: 0.002588574541732669, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1546, loss: 0.0001910639984998852, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1547, loss: 0.002887105569243431, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1548, loss: 0.03052239492535591, acc: 0.984375, recall: 0.967741935483871, precision: 1.0, f_beta: 0.9836065573770492
train: step: 1549, loss: 0.0018153332639485598, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1550, loss: 0.004794051870703697, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1551, loss: 0.0012018438428640366, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1552, loss: 0.00037542261998169124, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1553, loss: 0.002573814708739519, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1554, loss: 0.011914141476154327, acc: 0.9921875, recall: 1.0, precision: 0.9846153846153847, f_beta: 0.9922480620155039
train: step: 1555, loss: 0.014577670954167843, acc: 0.9921875, recall: 1.0, precision: 0.984375, f_beta: 0.9921259842519685
train: step: 1556, loss: 0.001725346315652132, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1557, loss: 0.0006375688826665282, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1558, loss: 0.000332293682731688, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1559, loss: 0.0018419102998450398, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
train: step: 1560, loss: 0.0025918420869857073, acc: 1.0, recall: 1.0, precision: 1.0, f_beta: 1.0
WARNING:tensorflow:From <ipython-input-16-f881542e6f65>:154: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.
WARNING:tensorflow:From <ipython-input-16-f881542e6f65>:163: calling SavedModelBuilder.add_meta_graph_and_variables (from tensorflow.python.saved_model.builder_impl) with legacy_init_op is deprecated and will be removed in a future version.
Instructions for updating:
Pass your op to the equivalent parameter main_op instead.
INFO:tensorflow:No assets to save.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: /content/drive/My Drive/textClassifier//model/transformer/savedModel/saved_model.pb

Finally, run:

%load_ext tensorboard
%tensorboard --logdir "/content/summarys/"

to open the TensorBoard visualization.
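For completeness, here is a minimal sketch of loading the exported SavedModel back for inference (my own addition, assuming the paths and tensor names defined above; batchX stands for a batch of padded word-index sequences prepared with word2idx). Note that embeddedPosition must also be fed, even though only inputX and keepProb were registered in the signature:

with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], "../model/transformer/savedModel")
    graph = sess.graph
    inputX = graph.get_tensor_by_name("inputX:0")
    dropoutKeepProb = graph.get_tensor_by_name("dropoutKeepProb:0")
    embeddedPositionTensor = graph.get_tensor_by_name("embeddedPosition:0")
    predictions = graph.get_tensor_by_name("output/predictions:0")

    preds = sess.run(predictions, feed_dict={
        inputX: batchX,                     # [batchSize, sequenceLength] word indices
        dropoutKeepProb: 1.0,               # no dropout at inference time
        embeddedPositionTensor: fixedPositionEmbedding(len(batchX), config.sequenceLength),
    })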