Caffe Source Code Walkthrough (4): Blob.cpp Explained
阿新 • Published: 2018-12-11
This series is my set of notes based on the book <深度學習21天實戰caffe> (Deep Learning: 21 Days of Hands-On Caffe); corrections are welcome if you spot mistakes.
Previous posts:
Caffe Source Code Walkthrough (1): Understanding protobuf
Caffe Source Code Walkthrough (2): The Blob Data Structure
Caffe Source Code Walkthrough (3): The Blob.hpp Header
===========================================================================
Let's go straight through the annotated source.
#include <climits>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
    const int width) {
  // Legacy NCHW interface: pack the four dimensions into a vector and
  // delegate to the vector-based Reshape below.
  vector<int> shape(4);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = height;
  shape[3] = width;
  Reshape(shape);
}
template <typename Dtype>
void Blob<Dtype>::Reshape(const vector<int>& shape) {
  CHECK_LE(shape.size(), kMaxBlobAxes);
  count_ = 1;  // running product used to compute the total element count
  shape_.resize(shape.size());  // resize the member so its axis count matches the input
  // If shape_data_ has never been allocated, or the existing buffer is smaller
  // than the incoming shape requires, allocate a new buffer of the needed size.
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));
  }
  // Cast the raw buffer to a pointer to int.
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
  // Check each axis against what an int can represent, and accumulate count_.
  for (int i = 0; i < shape.size(); ++i) {
    CHECK_GE(shape[i], 0);  // every axis must have a non-negative extent
    if (count_ != 0) {  // INT_MAX = 2^31 - 1; guard count_ against overflow
      CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
    }
    count_ *= shape[i];  // accumulate: n, then *c, then *h, then *w
    shape_[i] = shape[i];  // store the shape in the member vector
    shape_data[i] = shape[i];  // and in the SyncedMemory buffer obtained above
  }
  // If the new count_ exceeds the currently allocated capacity, re-allocate.
  // capacity_ is declared in the header: the number of elements the blob can hold.
  if (count_ > capacity_) {
    capacity_ = count_;
    // shared_ptr::reset releases the old buffers and allocates fresh ones.
    data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
    diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
  }
}
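To make the allocation behavior concrete, here is a minimal usage sketch (mine, not from the book; it assumes a working Caffe build and the headers above, and the helper name is just for illustration). The key point is that capacity_ only grows: shrinking a blob does not free or re-allocate memory.

#include <vector>
#include "caffe/blob.hpp"

void reshape_demo() {  // hypothetical helper, for illustration only
  caffe::Blob<float> blob;
  blob.Reshape(2, 3, 4, 5);      // count_ = 2*3*4*5 = 120, capacity_ = 120
  std::vector<int> small(2);     // a 2-axis shape is fine: Blob is N-dimensional
  small[0] = 2; small[1] = 3;
  blob.Reshape(small);           // count_ = 6 <= capacity_, so no re-allocation
  blob.Reshape(2, 3, 40, 50);    // count_ = 12000 > capacity_: data_ and diff_
                                 // are re-allocated and old contents are lost
}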
template <typename Dtype>
void Blob<Dtype>::Reshape(const BlobShape& shape) {
  CHECK_LE(shape.dim_size(), kMaxBlobAxes);
  vector<int> shape_vec(shape.dim_size());  // build a vector, then call Reshape
  for (int i = 0; i < shape.dim_size(); ++i) {
    shape_vec[i] = shape.dim(i);
  }
  Reshape(shape_vec);
}
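BlobShape is the protobuf message generated from caffe.proto (a repeated int64 dim field), so this overload can be driven directly from a deserialized proto. A small sketch, again my own:

#include "caffe/blob.hpp"
#include "caffe/proto/caffe.pb.h"

void reshape_from_proto() {  // hypothetical helper
  caffe::BlobShape shape;
  shape.add_dim(1);   // N
  shape.add_dim(3);   // C
  shape.add_dim(28);  // H
  shape.add_dim(28);  // W
  caffe::Blob<float> blob;
  blob.Reshape(shape);  // converted to vector<int> internally
}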
template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.shape());  // take the shape from another blob
}
template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
    const int width)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(num, channels, height, width);
}
template <typename Dtype>
Blob<Dtype>::Blob(const vector<int>& shape)
  // capacity_ must be initialized before calling Reshape
  : capacity_(0) {
  Reshape(shape);
}
// Get a pointer to the blob's shape data on the GPU; const means the data
// cannot be modified through this pointer.
template <typename Dtype>
const int* Blob<Dtype>::gpu_shape() const {
  CHECK(shape_data_);
  return (const int*)shape_data_->gpu_data();
}
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
  CHECK(data_);  // data_ must not be null
  return (const Dtype*)data_->cpu_data();
}
template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
  CHECK(data);
  // Make sure CPU and GPU sizes remain equal
  size_t size = count_ * sizeof(Dtype);
  if (data_->size() != size) {
    data_.reset(new SyncedMemory(size));
    diff_.reset(new SyncedMemory(size));
  }
  data_->set_cpu_data(data);  // point the member at the caller-supplied buffer
}
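This enables zero-copy input: the blob is pointed at a caller-owned buffer instead of copying into its own. As I read SyncedMemory::set_cpu_data, the blob does not take ownership, so the buffer must outlive the blob. A sketch:

#include <vector>
#include "caffe/blob.hpp"

void wrap_external_buffer() {  // hypothetical helper
  std::vector<float> pixels(1 * 3 * 32 * 32, 0.5f);  // caller-owned storage
  caffe::Blob<float> blob(1, 3, 32, 32);
  blob.set_cpu_data(pixels.data());  // blob now reads from pixels, no copy
  const float* p = blob.cpu_data();  // same address as pixels.data()
  (void)p;
}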
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {
  CHECK(data_);
  return (const Dtype*)data_->gpu_data();  // read-only pointer to the GPU data
}
template <typename Dtype>
void Blob<Dtype>::set_gpu_data(Dtype* data) {
  CHECK(data);
  // Make sure CPU and GPU sizes remain equal
  size_t size = count_ * sizeof(Dtype);
  if (data_->size() != size) {
    data_.reset(new SyncedMemory(size));
    diff_.reset(new SyncedMemory(size));
  }
  data_->set_gpu_data(data);  // same as set_cpu_data, but for a GPU buffer
}
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_diff() const {
  CHECK(diff_);
  return (const Dtype*)diff_->cpu_data();  // read-only pointer to the CPU diff
}
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_diff() const {
  CHECK(diff_);
  return (const Dtype*)diff_->gpu_data();  // read-only pointer to the GPU diff
}
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {  // read-write access to the CPU data
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_cpu_data());
}
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {  // read-write access to the GPU data
  CHECK(data_);
  return static_cast<Dtype*>(data_->mutable_gpu_data());
}
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_diff() {  // read-write access to the CPU diff
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_cpu_data());
}
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_diff() {  // read-write access to the GPU diff
  CHECK(diff_);
  return static_cast<Dtype*>(diff_->mutable_gpu_data());
}
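The const/mutable split matters because SyncedMemory tracks where the freshest copy lives: a const getter may trigger a copy to the requested side but leaves both copies valid (SYNCED), while a mutable getter moves the head to the requesting side and marks the other copy stale. A sketch of the intended usage pattern:

#include "caffe/blob.hpp"

void accessor_demo(caffe::Blob<float>& blob) {  // hypothetical helper
  // Read-only access: never invalidates the other copy.
  const float* ro = blob.cpu_data();
  float first = ro[0];
  // Read-write access: head moves to HEAD_AT_CPU; the GPU copy is now stale
  // and will be re-synchronized by the next gpu_data()/mutable_gpu_data().
  float* rw = blob.mutable_cpu_data();
  rw[0] = first + 1.0f;
}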
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {  // share another blob's data pointer
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}
template <typename Dtype>
void Blob<Dtype>::ShareDiff(const Blob& other) {  // share another blob's diff pointer
  CHECK_EQ(count_, other.count());
  diff_ = other.diff();
}
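Sharing copies the shared_ptr, not the underlying memory, so both blobs alias the same storage; this is how Caffe avoids copies in, e.g., split layers. A sketch:

#include "caffe/blob.hpp"

void share_demo() {  // hypothetical helper
  caffe::Blob<float> a(1, 1, 2, 2);
  caffe::Blob<float> b(1, 1, 2, 2);  // shapes must yield equal count_
  a.mutable_cpu_data()[0] = 7.0f;
  b.ShareData(a);                    // b's data_ now aliases a's data_
  // b.cpu_data()[0] == 7.0f, and later writes through a remain visible via b.
}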
// The "update" method is used for parameter blobs in a Net, which are stored
// as Blob<float> or Blob<double> -- hence we do not define it for
// Blob<int> or Blob<unsigned int>.
// Update is used to update network parameter blobs; only the float and double
// instantiations are implemented, not int or unsigned int.
template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }
template <typename Dtype>
void Blob<Dtype>::Update() {
  // We will perform update based on where the data is located.
  // Wherever the data lives, that is where the update runs.
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:  // data lives on the CPU
    // perform computation on CPU:
    // data_[i] = data_[i] - diff_[i] for i = 0, 1, ..., count_-1.
    // caffe_axpy is declared in util/math_functions.hpp and wraps cblas_saxpy:
    //   template <typename Dtype>
    //   void caffe_axpy(const int N, const Dtype alpha, const Dtype* X, Dtype* Y);
    //   void caffe_axpy<float>(const int N, const float alpha, const float* X, float* Y) {
    //     cblas_saxpy(N, alpha, X, 1, Y, 1);
    //   }
    // i.e. Y = alpha * X + Y; with alpha = -1 this computes data_ - diff_.
    // A blog post summarizing the cblas functions Caffe calls:
    // https://www.cnblogs.com/jianyingzhou/p/4444728.html
    caffe_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->cpu_data()),
        static_cast<Dtype*>(data_->mutable_cpu_data()));
    break;
  case SyncedMemory::HEAD_AT_GPU:  // data lives on the GPU, or CPU/GPU are in sync
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    // perform computation on GPU: data_[i] = data_[i] - diff_[i]
    caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
        static_cast<const Dtype*>(diff_->gpu_data()),
        static_cast<Dtype*>(data_->mutable_gpu_data()));
#else
    NO_GPU;
#endif
    break;
  default:
    LOG(FATAL) << "Syncedmem not initialized.";
  }
}
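So Update() is the plain gradient step data = data - diff, applied in place; the solver is expected to have already folded the learning rate into diff. A sketch:

#include "caffe/blob.hpp"

void update_demo() {  // hypothetical helper
  caffe::Blob<float> w(1, 1, 1, 3);
  float* data = w.mutable_cpu_data();
  float* diff = w.mutable_cpu_diff();
  for (int i = 0; i < w.count(); ++i) { data[i] = 1.0f; diff[i] = 0.1f; }
  w.Update();  // axpy with alpha = -1: every data[i] becomes 1.0 - 0.1 = 0.9
}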
// Compute the L1 norm of the data. The integer instantiations are stubs,
// since the norm methods are only implemented for float and double.
template <> unsigned int Blob<unsigned int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> int Blob<int>::asum_data() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <typename Dtype>
Dtype Blob<Dtype>::asum_data() const {
  if (!data_) { return 0; }  // if data_ is null, return 0
  switch (data_->head()) {  // dispatch on whether the freshest copy is on CPU or GPU
  /* caffe_cpu_asum returns the sum of the absolute values of the elements of
     vector x, i.e. its L1 norm:
       template <typename Dtype>
       Dtype caffe_cpu_asum(const int n, const Dtype* x);
       float caffe_cpu_asum<float>(const int n, const float* x) {
         return cblas_sasum(n, x, 1);
       }
  */
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_data());  // L1 norm on the CPU
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_data(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return 0;
}
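A quick sanity sketch of asum_data as the L1 norm:

#include "caffe/blob.hpp"

void asum_demo() {  // hypothetical helper
  caffe::Blob<float> b(1, 1, 1, 4);
  float* d = b.mutable_cpu_data();
  d[0] = 1.f; d[1] = -2.f; d[2] = 3.f; d[3] = -4.f;
  float l1 = b.asum_data();  // |1| + |-2| + |3| + |-4| = 10
  (void)l1;
}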
template <> unsigned int Blob<unsigned int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> int Blob<int>::asum_diff() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <typename Dtype>
Dtype Blob<Dtype>::asum_diff() const {
  if (!diff_) { return 0; }
  switch (diff_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    return caffe_cpu_asum(count_, cpu_diff());  // L1 norm of the diff
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
  {
    Dtype asum;
    caffe_gpu_asum(count_, gpu_diff(), &asum);
    return asum;
  }
#else
    NO_GPU;
#endif
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
  }
  return 0;
}
template <> unsigned int Blob<unsigned int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}
template <> int Blob<int>::sumsq_data() const {
  NOT_IMPLEMENTED;
  return 0;
}
// Compute the sum of squares (squared L2 norm) of data_.
template <typename Dtype>
Dtype Blob<Dtype>::sumsq_data() const {
  Dtype sumsq;
  const Dtype* data;
  if (!data_) { return 0; }
  switch (data_->head()) {
  case SyncedMemory::HEAD_AT_CPU:
    data = cpu_data();
    sumsq = caffe_cpu_dot(count_, data, data);  // dot(data, data) on the CPU
    break;
  case SyncedMemory::HEAD_AT_GPU:
  case SyncedMemory::SYNCED:
#ifndef CPU_ONLY
    data = gpu_data();
    caffe_gpu_dot(count_, data, data, &sumsq);  // dot(data, data) on the GPU
#else
    NO_GPU;
#endif
    break;
  case SyncedMemory::UNINITIALIZED:
    return 0;
  default:
    LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
  }
  return sumsq;
}
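sumsq_data is therefore the squared L2 norm, computed as the dot product of data with itself; take the square root to get the norm. A final sketch:

#include <cmath>
#include "caffe/blob.hpp"

void sumsq_demo() {  // hypothetical helper
  caffe::Blob<float> b(1, 1, 1, 3);
  float* d = b.mutable_cpu_data();
  d[0] = 1.f; d[1] = 2.f; d[2] = 2.f;
  float ss = b.sumsq_data();  // 1 + 4 + 4 = 9
  float l2 = std::sqrt(ss);   // L2 norm = 3
  (void)l2;
}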