Implementing Multi-GPU Parallelism in TensorFlow
阿新 • Published: 2020-02-04
TensorFlow's open-source multi-GPU training script for the cifar10 dataset: cifar10_multi_gpu_train.py
TensorFlow's open-source cifar10 network definition: cifar10.py
Parallelism in TensorFlow comes in two forms: model parallelism and data parallelism. Model parallelism has to be designed separately for each model; its basic idea is to place different computation nodes of the model on different hardware devices. The more general approach, and the one that scales to large setups with little effort, is data parallelism: several hardware devices compute the gradients of different batches at the same time, and the gradients are then aggregated for a single global update.
Data parallelism applies to almost every deep learning model: multiple GPUs can always be used to train on multiple batches simultaneously, with every GPU running a replica of the same network, with the same structure and shared model parameters. A minimal sketch of this "tower" pattern is shown below, before the full cifar10 script.
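The following sketch illustrates the data-parallel tower pattern on a toy linear model; the model, variable names, and random stand-in batches are hypothetical and not part of the cifar10 code. Each GPU builds the same graph on its own batch, the per-tower gradients are collected, and they can then be averaged and applied from the CPU.

import tensorflow as tf

def toy_tower_loss(x, y):
    # Every tower creates/reuses the same variable 'w', so parameters are shared.
    w = tf.get_variable('w', [10, 1], initializer=tf.zeros_initializer())
    return tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

opt = tf.train.GradientDescentOptimizer(0.1)
tower_grads = []
for i in range(2):                                 # two GPUs as an example
    with tf.device('/gpu:%d' % i):
        x = tf.random_normal([64, 10])             # stand-in for this GPU's batch
        y = tf.random_normal([64, 1])
        loss = toy_tower_loss(x, y)
        tf.get_variable_scope().reuse_variables()  # later towers reuse 'w'
        tower_grads.append(opt.compute_gradients(loss))
# tower_grads now holds one [(grad, var), ...] list per GPU, ready to be averaged.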
import os
import re
import time
import numpy as np
import tensorflow as tf
import cifar10_input
import cifar10

batch_size = 128
max_steps = 1000
num_gpus = 1  # number of GPUs

# Build the network under the given scope and return that tower's loss
def tower_loss(scope):
    # The dataset path can be set via tf.app.flags.DEFINE_string in cifar10.py
    images, labels = cifar10.distorted_inputs()
    logits = cifar10.inference(images)  # build the network
    _ = cifar10.loss(logits, labels)    # the loss is added to a collection rather than returned directly
    losses = tf.get_collection('losses', scope)  # fetch the losses of the current GPU (restricted by scope)
    total_loss = tf.add_n(losses, name='total_loss')
    return total_loss

'''
The outer list indexes the GPUs, the inner list holds that GPU's (grad, var) pairs:
tower_grads = [[(grad0_gpu0, var0_gpu0), (grad1_gpu0, var1_gpu0), ...],
               [(grad0_gpu1, var0_gpu1), (grad1_gpu1, var1_gpu1), ...]]
zip(*tower_grads) effectively transposes this, grouping the same variable across GPUs:
[((grad0_gpu0, var0_gpu0), (grad0_gpu1, var0_gpu1)),
 ((grad1_gpu0, var1_gpu0), (grad1_gpu1, var1_gpu1)), ...]
'''
def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grads = tf.concat(grads, 0)
        grad = tf.reduce_mean(grads, 0)
        grad_and_var = (grad, grad_and_vars[0][1])
        # result is [(grad0, var0), (grad1, var1), ...]
        average_grads.append(grad_and_var)
    return average_grads

def train():
    # Use the CPU as the default device
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # [] means a scalar (no dimensions)
        # trainable=False keeps it out of GraphKeys.TRAINABLE_VARIABLES, so it is not trained
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        num_batches_per_epoch = cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size
        decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
        # https://tensorflow.google.cn/api_docs/python/tf/train/exponential_decay
        # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
        # if staircase is True, global_step / decay_steps is an integer division
        lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE, global_step, decay_steps,
                                        cifar10.LEARNING_RATE_DECAY_FACTOR, staircase=True)
        opt = tf.train.GradientDescentOptimizer(lr)
        tower_grads = []
        for i in range(num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
                    loss = tower_loss(scope)
                    # Make the network variables reusable so every GPU shares exactly the same parameters;
                    # the next tower will reuse them
                    tf.get_variable_scope().reuse_variables()
                    grads = opt.compute_gradients(loss)
                    tower_grads.append(grads)
        grads = average_gradients(tower_grads)
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        init = tf.global_variables_initializer()
        # allow_soft_placement=True automatically falls back to an existing, supported device
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)
        tf.train.start_queue_runners(sess=sess)
        for step in range(max_steps):
            start_time = time.time()
            _, loss_value = sess.run([apply_gradient_op, loss])
            duration = time.time() - start_time
            if step % 10 == 0:
                num_examples_per_step = batch_size * num_gpus
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration / num_gpus
                print('step %d, loss=%.2f (%.1f examples/sec; %.3f sec/batch)'
                      % (step, loss_value, examples_per_sec, sec_per_batch))

if __name__ == '__main__':
    train()
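As a quick sanity check of average_gradients (not part of the original script; 'demo_v' and the constant gradients below are made-up values), two fake towers sharing one variable should yield the element-wise mean of their gradients:

import tensorflow as tf

v = tf.get_variable('demo_v', [2], initializer=tf.zeros_initializer())
g_gpu0 = tf.constant([1.0, 3.0])
g_gpu1 = tf.constant([3.0, 5.0])
tower_grads = [[(g_gpu0, v)], [(g_gpu1, v)]]
avg = average_gradients(tower_grads)   # [(averaged grad, v)]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(avg[0][0]))         # prints [2. 4.]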
That is everything I wanted to share about implementing multi-GPU parallelism in TensorFlow. I hope it serves as a useful reference, and thank you for your continued support.