
[Caffe Source Code Walkthrough, Part 1] The Blob Data Structure


1. The Blob Data Structure

  Caffe stores and exchanges data in Blobs, which are 4-D arrays. From the lowest (fastest-varying) dimension to the highest, the axes are (width_, height_, channels_, num_): width_ and height_ are the image width and height, channels_ is the number of color channels (e.g. 3 for RGB), and num_ indexes the image within a batch. When the network layers compute, every layer's inputs and outputs are buffered in Blob objects; the Blob is Caffe's basic storage unit.
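Because the layout is row-major with width varying fastest, the linear offset of element (n, c, h, w) is ((n * channels + c) * height + h) * width + w; Blob::offset() in blob.hpp computes exactly this. A minimal sketch of the indexing (the standalone helper here is illustrative, not part of Caffe):

// Illustrative only: Blob::offset(n, c, h, w) implements the same formula.
int offset(int n, int c, int h, int w, int channels, int height, int width) {
  return ((n * channels + c) * height + h) * width + w;
}
// For a Blob of shape (num=2, channels=3, height=4, width=5), element
// (1, 2, 3, 4) lands at ((1*3+2)*4+3)*5+4 = 119, the last of 120 cells.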

2. Blob Data Structure Description (caffe.proto)

// Wraps N-dimensional shapes: a sequence of int64 values, one per Blob axis,
// i.e. the equivalent of a vector<int64>(N). "packed = true" means the values
// are laid out contiguously in memory, with no gaps.
message BlobShape {
  repeated int64 dim = 1 [packed = true];
}

// The serialized, on-disk form of a Blob.
message BlobProto {
  optional BlobShape shape = 7;  // optional: a BlobShape object
  // Floating-point elements holding data and weights; the element count is
  // determined by shape or by (num, channels, height, width); packed tightly.
  repeated float data = 5 [packed = true];
  repeated float diff = 6 [packed = true];  // gradient values, same dimensions as data
  repeated double double_data = 8 [packed = true];  // double-precision variants
  repeated double double_diff = 9 [packed = true];

  // 4-D shape -- legacy fields, superseded by "BlobShape shape":
  optional int32 num = 1 [default = 0];
  optional int32 channels = 2 [default = 0];
  optional int32 height = 3 [default = 0];
  optional int32 width = 4 [default = 0];
}

// The BlobProtoVector is simply a way to pass multiple blobproto instances
// around.
message BlobProtoVector {
  repeated BlobProto blobs = 1;
}

Why use ProtoBuffer instead of declaring the structure directly in C/C++:
(1) Serializing and deserializing a plain struct requires extra hand-written code, which makes it hard to standardize the interface;
(2) A struct that contains variable-length data (typically a pointer into some buffer) needs careful handling to guarantee data integrity. ProtoBuffer hides the most error-prone parts of this work and handles them automatically, improving program robustness.
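As an illustration, a minimal round trip through the generated C++ API might look like this (a sketch assuming the header caffe.pb.h generated from caffe.proto is on the include path):

#include <string>
#include "caffe/proto/caffe.pb.h"

int main() {
  caffe::BlobProto blob;
  caffe::BlobShape* shape = blob.mutable_shape();  // describe a (1, 3, 2, 2) Blob
  shape->add_dim(1); shape->add_dim(3); shape->add_dim(2); shape->add_dim(2);
  for (int i = 0; i < 12; ++i) blob.add_data(0.5f);  // 1*3*2*2 = 12 elements

  std::string buffer;
  blob.SerializeToString(&buffer);   // serialization code generated by protoc

  caffe::BlobProto restored;
  restored.ParseFromString(buffer);  // deserialization, likewise generated
  return restored.data_size() == 12 ? 0 : 1;
}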

3. Analysis of the Blob Template Class

  Blob is a template class declared in include/caffe/blob.hpp. It wraps the SyncedMemory class and serves Layer, Net, Solver, and the rest of the framework as the basic computational unit. The functions in the source file are analyzed below.

3.1. The Reshape() Function

A Blob can be reshaped in the following ways, each implemented by an overload below (a usage sketch follows the overloads):
(1) via the four dimensions (num, channels, height, width);
(2) via a shape vector;
(3) via a BlobShape message defined in caffe.proto;
(4) via another Blob.

// This overload copies num, channels, height, and width into vector<int> shape and then calls the overload Blob<Dtype>::Reshape(const vector<int>& shape)
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
    const int width) {
  vector<int> shape(4);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = height;
  shape[3] = width;
  Reshape(shape); 
}                                

// The overload that actually changes the shape
template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
  CHECK_LE(shape.size(), kMaxBlobAxes);
  count_ = 1;
  shape_.resize(shape.size());  // resize the member vector shape_
  for (int i = 0; i < shape.size(); ++i) {
    CHECK_GE(shape[i], 0);  // every dimension must be non-negative
    count_ *= shape[i];
    shape_[i] = shape[i];
  }
  if (count_ > capacity_) {  // if the new count_ exceeds the current capacity, grow: reallocate data and diff
    capacity_ = count_;  
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
  }
}

template <typename Dtype> 
void Blob<Dtype>::Reshape(const BlobShape& shape) {  // BlobShape is defined in caffe.proto
  CHECK_LE(shape.dim_size(), kMaxBlobAxes);
  vector<int> shape_vec(shape.dim_size());
  for (int i = 0; i < shape.dim_size(); ++i) {
    shape_vec[i] = shape.dim(i);  // dim holds each axis extent: num, channels, height, width
  }
  Reshape(shape_vec);  // reshape shape_ using the dims passed in via protobuf
}

template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {  // reshape shape_ using another Blob's shape
  Reshape(other.shape());
}                                 
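A hypothetical usage sketch of overloads (1), (2), and (4); overload (3) is exercised when Blobs are deserialized from caffe.proto messages:

caffe::Blob<float> a, b;   // default-constructed, capacity_ == 0
a.Reshape(2, 3, 4, 5);     // (1): num=2, channels=3, height=4, width=5
std::vector<int> shape(4);
shape[0] = 2; shape[1] = 3; shape[2] = 4; shape[3] = 5;
b.Reshape(shape);          // (2): the same shape via the vector overload
b.ReshapeLike(a);          // (4): adopt another Blob's shape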

3.2. Blob Constructors

// Constructors
template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
    const int width)  // initialize from num, channels, height, width
  // capacity_ must be initialized before Reshape is called,
  // otherwise the result is unpredictable
  : capacity_(0) {
  Reshape(num, channels, height, width);
}

template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)  // initialize from a shape vector
  // capacity_ must be initialized before Reshape is called,
  // otherwise the result is unpredictable
  : capacity_(0) {
  Reshape(shape);
}

3.3. data Pointer Accessors

The accessors below come in several flavors: read-only CPU and GPU data pointers, read-write (mutable) CPU and GPU data pointers, and a setter that replaces the CPU data pointer with a caller-supplied buffer.

// Read-only access to the CPU data pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
  CHECK(data_);  // make sure data_ is not NULL
  return (const Dtype*)data_->cpu_data();
}

// Replace the CPU data pointer
template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
  CHECK(data);  // make sure the incoming pointer is not NULL
  data_->set_cpu_data(data);  // hand the caller's buffer to SyncedMemory
}

// Read-only access to the GPU data pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->gpu_data();
}

// Read-write access to the CPU data pointer
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_cpu_data());
}

// Read-write access to the GPU data pointer
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_gpu_data());  // cast to Dtype*
}
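The const accessors only synchronize the requested copy, while the mutable_ variants additionally mark that copy as the freshest one, so a later access from the other device triggers a transfer. A hypothetical sketch:

caffe::Blob<float> blob(1, 1, 2, 2);
float* out = blob.mutable_cpu_data();  // marks the CPU copy as up to date
for (int i = 0; i < blob.count(); ++i) {
  out[i] = static_cast<float>(i);      // fill the blob on the CPU
}
const float* in = blob.cpu_data();     // read-only view of the same memory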

3.4. diff Pointer Accessors

// Read-only access to the CPU diff pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_diff() const {
  CHECK(diff_);  // make sure diff_ is not NULL
  return (const Dtype*)diff_->cpu_data();
}

// Read-only access to the GPU diff pointer
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_diff() const {
  CHECK(diff_);  // make sure diff_ is not NULL
  return (const Dtype*)diff_->gpu_data();
}

// Read-write access to the CPU diff pointer
template <typename Dtype>  
Dtype* Blob<Dtype>::mutable_cpu_diff() {  
  CHECK(diff_);  
  return static_cast<Dtype*>(diff_->mutable_cpu_data());  
}  

// Read-write access to the GPU diff pointer
template <typename Dtype>  
Dtype* Blob<Dtype>::mutable_gpu_diff() {  
  CHECK(diff_);  
  return static_cast<Dtype*>(diff_->mutable_gpu_data());  
}  

3.5. Sharing Another Blob's data and diff Pointers

// Share another Blob's data pointer
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {
  CHECK_EQ(count_, other.count());  // both Blobs must hold the same number of elements
  data_ = other.data();
}  // this Blob's data_ now points at the other Blob's data

// Share another Blob's diff pointer
template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {
  CHECK_EQ(count_, other.count());
  diff_ = other.diff();
}  // this Blob's diff_ now points at the other Blob's back-propagated gradients
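Sharing copies the shared_ptr to the underlying SyncedMemory rather than the contents, so writes through either Blob are visible through the other. A hypothetical sketch:

caffe::Blob<float> a(1, 1, 2, 2);
caffe::Blob<float> b(1, 1, 2, 2);
b.ShareData(a);                   // b's data_ now refers to a's SyncedMemory
b.mutable_cpu_data()[0] = 42.0f;
// a.cpu_data()[0] now reads 42.0f as well -- nothing was copied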

3.6. The Update() Function

template <typename Dtype>
void Blob<Dtype>::Update() {
  // update the data wherever it currently resides
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:  // data lives on the CPU: update it there
    // CPU computation: data_[i] = data_[i] - diff_[i], i = 0, 1, ..., count_-1
    caffe_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->cpu_data()),
        static_cast<Dtype*>(data_->mutable_cpu_data()));  // data_ -= diff_
    break;
  case SyncedMemory::HEAD_AT_GPU:  // data lives on the GPU
  case SyncedMemory::SYNCED:       // data is synchronized between CPU and GPU
#ifndef CPU_ONLY  // CPU_ONLY not defined and the data is on the GPU: compute there
    // GPU computation: data_[i] = data_[i] - diff_[i], i = 0, 1, ..., count_-1
    caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->gpu_data()),
        static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
    NO_GPU;  // compiled with CPU_ONLY: the GPU path is disabled
#endif
    break;
  default:
    LOG(FATAL) << "Syncedmem not initialized.";
  }
}
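caffe_axpy wraps the BLAS axpy routine y = alpha * x + y; with alpha = -1, x = diff_ and y = data_ this gives the subtraction above. The solver scales diff_ by the learning rate beforehand, so Update() is only the final step of gradient descent. A hypothetical sketch:

caffe::Blob<float> w(1, 1, 1, 1);
w.mutable_cpu_data()[0] = 1.0f;  // a parameter value
w.mutable_cpu_diff()[0] = 0.1f;  // lr * gradient, filled in by the solver
w.Update();                      // w.cpu_data()[0] is now 0.9f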

3.7. asum_data(): the L1 Norm of data_

// L1 norm: returns the sum of the absolute values of all elements in data_
template <typename Dtype>
Dtype Blob<Dtype>::asum_data() const {
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:         
    return caffe_cpu_asum(count_, cpu_data());  // data on the CPU: run the CPU asum computation
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_data(), &asum);  // data on the GPU: run the GPU asum computation
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return 0;
} 
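In formula form this is the L1 norm, asum(x) = sum_i |x_i|; caffe_cpu_asum forwards to the BLAS asum routine (cblas_sasum / cblas_dasum).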

3.8. asum_diff(): the L1 Norm of diff_

// L1 norm: returns the sum of the absolute values of all elements in diff_
template <typename Dtype>
Dtype Blob<Dtype>::asum_diff() const {
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_diff());
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_diff(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return 0;
}  

3.9. sumsq_data(): the Squared L2 Norm of data_

// Squared L2 norm: returns the sum of squares of all elements in data_
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_data() const {
  Dtype sumsq;
  const Dtype* data;
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:  // data on the CPU: run the CPU dot product
    data = cpu_data();
    sumsq = caffe_cpu_dot(count_, data, data);  // sumsq = sum(data[i]^2)
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = gpu_data();  // data on the GPU: run the GPU dot product
    caffe_gpu_dot(count_, data, data, &sumsq);
#else
    NO_GPU;
#endif
    break;
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}

3.10. sumsq_diff(): the Squared L2 Norm of diff_

// Squared L2 norm: returns the sum of squares of all elements in diff_
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_diff() const {
  Dtype sumsq;
  const Dtype* diff;
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    diff = cpu_diff();
    sumsq = caffe_cpu_dot(count_, diff, diff);
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = gpu_diff();
    caffe_gpu_dot(count_, diff, diff, &sumsq);
    break;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return sumsq;
}
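A hypothetical sketch tying the four reductions together (sumsq is the squared L2 norm, computed as the dot product of the array with itself):

caffe::Blob<float> b(1, 1, 1, 3);
float* d = b.mutable_cpu_data();
d[0] = -1.0f; d[1] = 2.0f; d[2] = -2.0f;
// b.asum_data()  == 1 + 2 + 2 == 5   (L1 norm)
// b.sumsq_data() == 1 + 4 + 4 == 9   (squared L2 norm; the L2 norm itself is 3)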

3.11. scale_data(): Scaling data_

template <typename Dtype>
void Blob<Dtype>::scale_data(Dtype scale_factor) {
  Dtype* data;
  if (!data_) { return; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = mutable_cpu_data();
    caffe_scal(count_, scale_factor, data);//data[i]=data[i]*scale_factor, i=0,1,2,...,count-1
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = mutable_gpu_data();
    caffe_gpu_scal(count_, scale_factor, data);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
}

3.12. scale_diff(): Scaling diff_

template <typename Dtype>
void Blob<Dtype>::scale_diff(Dtype scale_factor) {
  Dtype* diff;
  if (!diff_) { return; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:  // on the CPU
    diff = mutable_cpu_diff();
    caffe_scal(count_, scale_factor, diff);//diff[i]=diff[i]*scale_factor, i=0,1,2,...,count-1
    return;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    diff = mutable_gpu_diff();
    caffe_gpu_scal(count_, scale_factor, diff);
    return;
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
}
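caffe_scal wraps the BLAS scal routine. One hypothetical use is averaging accumulated gradients in place, e.g. after summing over several iterations:

caffe::Blob<float> g(1, 1, 1, 4);
// ... accumulate gradients into g's diff over 4 iterations ...
g.scale_diff(0.25f);  // divide the accumulated diff by the iteration count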

3.13. ShapeEquals(): Checking Whether Two Blobs Have the Same Shape

// returns true when the two shapes are identical
template <typename Dtype>
bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
// BlobProto is a message defined in caffe.proto whose fields include data, diff, shape, num, channels, height, width
  if (other.has_num() || other.has_channels() ||
      other.has_height() || other.has_width()) {
    // if the proto carries the legacy dimension fields (num, channels, height, width),
    // compare against them via LegacyShape; the && chain relies on
    // C++ short-circuit ("lazy") evaluation
    return shape_.size() <= 4 &&
           LegacyShape(-4) == other.num() &&
           LegacyShape(-3) == other.channels() &&
           LegacyShape(-2) == other.height() &&
           LegacyShape(-1) == other.width();
  }
  // otherwise compare the shape vectors directly
  vector<int> other_shape(other.shape().dim_size());
  for (int i = 0; i < other.shape().dim_size(); ++i) {
    other_shape[i] = other.shape().dim(i);
  }
  return shape_ == other_shape;
}
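LegacyShape(index), declared in blob.hpp, accepts negative indices counted from the last axis and returns 1 for axes the Blob does not actually have, simulating the one-padding of legacy 4-D blobs. For example:

// For a Blob whose shape_ is (2, 3):
//   LegacyShape(-4) == 1, LegacyShape(-3) == 1,
//   LegacyShape(-2) == 2, LegacyShape(-1) == 3
// so it ShapeEquals a legacy proto with num=1, channels=1, height=2, width=3.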

3.14. The CopyFrom() Function

// Copy data (and optionally diff) from another Blob, reshaping this Blob if required
template <typename Dtype>
void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
  if (source.count() != count_ || source.shape() != shape_) {
    if (reshape) {
      ReshapeLike(source);  // reshaping was requested: adopt the source's shape
    } else {
      LOG(FATAL) << "Trying to copy blobs of different sizes.";
    }
  }
  switch (Caffe::mode()) {
  case Caffe::GPU:  // in GPU mode, use the GPU copy path
    if (copy_diff) {
      caffe_copy(count_, source.gpu_diff(),
          static_cast<Dtype*>(diff_->mutable_gpu_data()));
    } else {
      caffe_copy(count_, source.gpu_data(),
          static_cast<Dtype*>(data_->mutable_gpu_data()));
    }
    break;
  case Caffe::CPU:  // in CPU mode, use the CPU copy path
    if (copy_diff) {
      caffe_copy(count_, source.cpu_diff(),
          static_cast<Dtype*>(diff_->mutable_cpu_data()));
    } else {
      caffe_copy(count_, source.cpu_data(),
          static_cast<Dtype*>(data_->mutable_cpu_data()));
    }
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
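A hypothetical sketch (the second argument selects diff instead of data, the third permits reshaping):

caffe::Blob<float> src(1, 1, 2, 2);
caffe::Blob<float> dst;
dst.CopyFrom(src, false, true);   // copy data, reshaping dst to match src
dst.CopyFrom(src, true, false);   // copy diff; the shapes now already match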

3.15. The FromProto() Function

// Load a Blob from a proto, e.g. one previously exported to disk
template <typename Dtype>
void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
  if (reshape) {  // recover the dimension information from the BlobProto object
    vector<int> shape;
    if (proto.has_num() || proto.has_channels() ||
        proto.has_height() || proto.has_width()) {
      // legacy dimension fields (num, channels, height, width)
      shape.resize(4);
      shape[0] = proto.num();
      shape[1] = proto.channels();
      shape[2] = proto.height();
      shape[3] = proto.width();
    } else {
      shape.resize(proto.shape().dim_size());
      for (int i = 0; i < proto.shape().dim_size(); ++i) {
        shape[i] = proto.shape().dim(i);
      }
    }
    Reshape(shape);  // reshape the Blob to the recovered dimensions
  } else {  // if reshape is not requested, the Blob's current shape must match the proto's
    CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
  }
  // load the data
  Dtype* data_vec = mutable_cpu_data();
  for (int i = 0; i < count_; ++i) {
    data_vec[i] = proto.data(i);
  }  // copy the proto's data into the CPU buffer
  if (proto.diff_size() > 0) {
    Dtype* diff_vec = mutable_cpu_diff();
    for (int i = 0; i < count_; ++i) {
      diff_vec[i] = proto.diff(i);
    }  // copy the proto's diff into the CPU buffer
  }
}

3.16. The ToProto() Function

// Dump the Blob's data (and optionally diff) into a BlobProto, convenient for saving to a disk file
template <typename Dtype>
void Blob<Dtype>::ToProto(BlobProto* proto, bool write_diff) const {
  proto->clear_shape();  // reset the proto's shape so that it matches this Blob
  for (int i = 0; i < shape_.size(); ++i) {
    proto->mutable_shape()->add_dim(shape_[i]);
  }
  proto->clear_data();  // clear existing data
  proto->clear_diff();  // clear existing diff
  const Dtype* data_vec = cpu_data();
  for (int i = 0; i < count_; ++i) {
    proto->add_data(data_vec[i]);
  }  // write data into the proto
  if (write_diff) {  // diff export was requested
    const Dtype* diff_vec = cpu_diff();
    for (int i = 0; i < count_; ++i) {
      proto->add_diff(diff_vec[i]);
    }  // write diff into the proto
  }
}  
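A hypothetical round trip through ToProto()/FromProto(); the actual disk I/O would go through protobuf serialization such as SerializeToString(), or helpers like WriteProtoToBinaryFile() in caffe's io utilities:

caffe::Blob<float> original(1, 1, 2, 2);
original.mutable_cpu_data()[0] = 3.14f;

caffe::BlobProto proto;
original.ToProto(&proto, true);   // export data and diff

caffe::Blob<float> restored;
restored.FromProto(proto, true);  // reshape, then load data and diff
// restored.cpu_data()[0] == 3.14f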

3.17. Instantiating the Blob Template

INSTANTIATE_CLASS(Blob);  // instantiate the Blob template for float and double
template class Blob<int>;
template class Blob<unsigned int>;
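For reference, INSTANTIATE_CLASS is defined in include/caffe/common.hpp; roughly, it expands to the explicit instantiations below (the guard symbol triggers a duplicate-definition error if the macro is used twice for the same class):

#define INSTANTIATE_CLASS(classname) \
  char gInstantiationGuard##classname; \
  template class classname<float>; \
  template class classname<double>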

The discussion above has introduced, from the standpoint of function behavior, the operations a Blob supports on its data and diff. The internal implementation details do not have to be absorbed in full at this point; the later posts on Layer and Net make far heavier use of Blob, and the concrete operations can then be studied with a specific purpose in mind.