Caffe Source Code Walkthrough: The Layer Class
Overview of the Layer Class
- Takes at least one input (bottom) Blob and produces at least one output (top) Blob.
- Some layers carry weight and bias parameters, while others do not (activation layers, for example, have no weights).
- During forward propagation, the layer processes the input Blobs to produce the output Blobs; during backward propagation, it processes the diffs of the output Blobs to produce the diffs of the input Blobs (see the sketch below).
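To make this convention concrete, here is a minimal, standalone sketch (plain C++, not Caffe code; the `ToyScaleLayer` class is invented purely for illustration) of a layer that scales its input by a constant: Forward fills the output data from the input data, and Backward fills the input diff from the output diff via the chain rule.

```cpp
// Hypothetical, standalone illustration of the forward/backward convention
// (no Caffe dependencies): a layer that scales its input by a constant k.
#include <cstddef>
#include <iostream>
#include <vector>

struct ToyScaleLayer {
  float k;  // the layer's single "weight"

  // Forward: fill top_data from bottom_data (top = k * bottom).
  void Forward(const std::vector<float>& bottom_data,
               std::vector<float>& top_data) const {
    top_data.resize(bottom_data.size());
    for (std::size_t i = 0; i < bottom_data.size(); ++i) {
      top_data[i] = k * bottom_data[i];
    }
  }

  // Backward: fill bottom_diff from top_diff
  // (d loss / d bottom = k * d loss / d top, by the chain rule).
  void Backward(const std::vector<float>& top_diff,
                std::vector<float>& bottom_diff) const {
    bottom_diff.resize(top_diff.size());
    for (std::size_t i = 0; i < top_diff.size(); ++i) {
      bottom_diff[i] = k * top_diff[i];
    }
  }
};

int main() {
  ToyScaleLayer layer{2.0f};
  std::vector<float> bottom = {1.0f, -3.0f, 0.5f}, top;
  std::vector<float> top_diff = {1.0f, 1.0f, 1.0f}, bottom_diff;
  layer.Forward(bottom, top);             // top         = {2, -6, 1}
  layer.Backward(top_diff, bottom_diff);  // bottom_diff = {2,  2, 2}
  std::cout << top[0] << " " << bottom_diff[0] << std::endl;
  return 0;
}
```

A real Caffe layer follows the same pattern, except that data and diffs live in `Blob` objects and the weights themselves also receive gradients.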
ProtoBuffer Description
Open the ./src/caffe/proto/caffe.proto file and find LayerParameter. Its data structure description is shown below (this description acts as a contract: from it, protoc automatically generates the corresponding header and source files):
```protobuf
// NOTE: if you define your own layer, be sure to update the next available ID.
// The next available ID is 147; most recently added: recurrent_param.
message LayerParameter {
  optional string name = 1;    // the layer name (optional parameter, string type; the fields below are analogous)
  optional string type = 2;    // the layer type
  repeated string bottom = 3;  // the names of the input (bottom) Blobs
  repeated string top = 4;     // the names of the output (top) Blobs

  // The Train/Test phase.
  optional Phase phase = 10;

  // The loss weight of each top blob, usually 0 or 1:
  // 0 means the blob does not take part in the loss computation, 1 means it does.
  repeated float loss_weight = 5;

  // Per-layer training parameters (e.g. scaling factors relative to the global
  // learning rate and weight decay, so each layer can use its own learning rate),
  // the name used for weight sharing (only needed when weights are actually
  // shared), and other settings. ParamSpec is described in this same file.
  repeated ParamSpec param = 6;

  // The numeric parameters (blobs) of this layer, of type BlobProto.
  repeated BlobProto blobs = 7;

  // Whether to backpropagate to each bottom Blob. If unspecified, Caffe infers
  // this automatically. The number of entries must equal the number of bottom
  // Blobs, i.e. one flag per bottom Blob.
  repeated bool propagate_down = 11;

  // Rules that determine, based on the current NetState, whether this layer is
  // included in the net. If no rule is specified, the layer is always included.
  repeated NetStateRule include = 8;
  repeated NetStateRule exclude = 9;

  // Parameters for data pre-processing.
  optional TransformationParameter transform_param = 100;

  // Parameters shared by all loss layers.
  optional LossParameter loss_param = 101;

  // Layer type-specific parameters.
  // Note: some layers have multiple computational engines (e.g. a Caffe
  // implementation and a cuDNN implementation); such layers carry an engine
  // type and engine parameters. The default engine is set at compile time in
  // the Makefile.
  optional AccuracyParameter accuracy_param = 102;
  optional ArgMaxParameter argmax_param = 103;
  optional BatchNormParameter batch_norm_param = 139;
  optional BiasParameter bias_param = 141;
  optional ConcatParameter concat_param = 104;
  optional ContrastiveLossParameter contrastive_loss_param = 105;
  optional ConvolutionParameter convolution_param = 106;
  optional CropParameter crop_param = 144;
  optional DataParameter data_param = 107;
  optional DropoutParameter dropout_param = 108;
  optional DummyDataParameter dummy_data_param = 109;
  optional EltwiseParameter eltwise_param = 110;
  optional ELUParameter elu_param = 140;
  optional EmbedParameter embed_param = 137;
  optional ExpParameter exp_param = 111;
  optional FlattenParameter flatten_param = 135;
  optional HDF5DataParameter hdf5_data_param = 112;
  optional HDF5OutputParameter hdf5_output_param = 113;
  optional HingeLossParameter hinge_loss_param = 114;
  optional ImageDataParameter image_data_param = 115;
  optional InfogainLossParameter infogain_loss_param = 116;
  optional InnerProductParameter inner_product_param = 117;
  optional InputParameter input_param = 143;
  optional LogParameter log_param = 134;
  optional LRNParameter lrn_param = 118;
  optional MemoryDataParameter memory_data_param = 119;
  optional MVNParameter mvn_param = 120;
  optional ParameterParameter parameter_param = 145;
  optional PoolingParameter pooling_param = 121;
  optional PowerParameter power_param = 122;
  optional PReLUParameter prelu_param = 131;
  optional PythonParameter python_param = 130;
  optional RecurrentParameter recurrent_param = 146;
  optional ReductionParameter reduction_param = 136;
  optional ReLUParameter relu_param = 123;
  optional ReshapeParameter reshape_param = 133;
  optional ScaleParameter scale_param = 142;
  optional SigmoidParameter sigmoid_param = 124;
  optional SoftmaxParameter softmax_param = 125;
  optional SPPParameter spp_param = 132;
  optional SliceParameter slice_param = 126;
  optional TanHParameter tanh_param = 127;
  optional ThresholdParameter threshold_param = 128;
  optional TileParameter tile_param = 138;
  optional WindowDataParameter window_data_param = 129;
}
```
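As an illustration of how these fields appear in practice, a layer in a .prototxt network definition is just a LayerParameter instance written in protobuf text format. The snippet below is made up (the layer names and sizes are arbitrary), but it only uses fields declared above: name, type, bottom, top, param (ParamSpec), include (NetStateRule), and a type-specific convolution_param block.

```protobuf
# Illustrative LayerParameter in prototxt form; names and sizes are invented.
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"                       # input blob name
  top: "conv1"                         # output blob name
  param { lr_mult: 1 decay_mult: 1 }   # learning-rate/decay multipliers for the weights
  param { lr_mult: 2 decay_mult: 0 }   # ... and for the bias
  include { phase: TRAIN }             # NetStateRule: include this layer only in the TRAIN net
  convolution_param {
    num_output: 32
    kernel_size: 3
    stride: 1
  }
}
```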
Inheritance Relationships Among Layers
Before going through the header file declaration, let's look at how the various Layer classes relate to each other:
[Figures: class hierarchy diagrams — the derived classes of Layer, and the inheritance relationships of the Neuron Layers, Vision Layers, Common Layers, Data Layers, and Loss Layers]
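As a concrete illustration of these hierarchies, an element-wise activation layer such as ReLU derives from the intermediate NeuronLayer class, which in turn derives from Layer. The following is a condensed sketch (simplified from the NeuronLayer and ReLULayer declarations; the real classes declare additional members):

```cpp
// Condensed sketch of how a Neuron Layer plugs into the hierarchy.
template <typename Dtype>
class NeuronLayer : public Layer<Dtype> {
 public:
  explicit NeuronLayer(const LayerParameter& param) : Layer<Dtype>(param) {}
  // Element-wise layers reshape the top blob like the bottom blob and take
  // exactly one bottom blob and one top blob.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }
};

template <typename Dtype>
class ReLULayer : public NeuronLayer<Dtype> {
 public:
  explicit ReLULayer(const LayerParameter& param) : NeuronLayer<Dtype>(param) {}
  virtual inline const char* type() const { return "ReLU"; }

 protected:
  // Implemented in the corresponding .cpp/.cu files.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom);
};
```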
Header File Declaration

Open the ./include/caffe/layer.hpp file:
```cpp
#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"

/**
 Forward declare boost::thread instead of including boost/thread.hpp
 to avoid a boost/NVCC issues (#1009, #1010) on OSX.
 */
namespace boost { class mutex; }

namespace caffe {

/**
 * @brief An interface for the units of computation which can be composed into a
 *        Net.
 *
 * Layer%s must implement a Forward function, in which they take their input
 * (bottom) Blob%s (if any) and compute their output Blob%s (if any).
 * They may also implement a Backward function, in which they compute the error
 * gradients with respect to their input Blob%s, given the error gradients with
 * their output Blob%s.
 */
template <typename Dtype>
class Layer {
 public:
  /**
   * You should not implement your own constructor. Any set up code should go
   * to SetUp(), where the dimensions of the bottom blobs are provided to the
   * layer.
   */
  explicit Layer(const LayerParameter& param)
    : layer_param_(param), is_shared_(false) {
      // Set phase and copy blobs (if there are any).
      phase_ = param.phase();
      if (layer_param_.blobs_size() > 0) {
        blobs_.resize(layer_param_.blobs_size());
        for (int i = 0; i < layer_param_.blobs_size(); ++i) {
          blobs_[i].reset(new Blob<Dtype>());
          blobs_[i]->FromProto(layer_param_.blobs(i));
        }
      }
    }
  virtual ~Layer() {}

  /**
   * @brief Implements common layer setup functionality.
   *
   * @param bottom the preshaped input blobs
   * @param top
   *     the allocated but unshaped output blobs, to be shaped by Reshape
   *
   * Checks that the number of bottom and top blobs is correct.
   * Calls LayerSetUp to do special layer setup for individual layer types,
   * followed by Reshape to set up sizes of top blobs and internal buffers.
   * Sets up the loss weight multiplier blobs for any non-zero loss weights.
   * This method may not be overridden.
   */
  void SetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    InitMutex();
    CheckBlobCounts(bottom, top);
    LayerSetUp(bottom, top);
    Reshape(bottom, top);
    SetLossWeights(top);
  }

  /**
   * @brief Does layer-specific setup: your layer should implement this function
   *        as well as Reshape.
   *
   * @param bottom
   *     the preshaped input blobs, whose data fields store the input data for
   *     this layer
   * @param top
   *     the allocated but unshaped output blobs
   *
   * This method should do one-time layer specific setup. This includes reading
   * and processing relevent parameters from the <code>layer_param_</code>.
   * Setting up the shapes of top blobs and internal buffers should be done in
   * <code>Reshape</code>, which will be called before the forward pass to
   * adjust the top blob sizes.
   */
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}

  /**
   * @brief Whether a layer should be shared by multiple nets during data
   *        parallelism. By default, all layers except for data layers should
   *        not be shared. data layers should be shared to ensure each worker
   *        solver access data sequentially during data parallelism.
   */
  virtual inline bool ShareInParallel() const { return false; }

  /** @brief Return whether this layer is actually shared by other nets.
   *         If ShareInParallel() is true and using more than one GPU and the
   *         net has TRAIN phase, then this function is expected return true.
   */
  inline bool IsShared() const { return is_shared_; }

  /** @brief Set whether this layer is actually shared by other nets
   *         If ShareInParallel() is true and using more than one GPU and the
   *         net has TRAIN phase, then is_shared should be set true.
   */
  inline void SetShared(bool is_shared) {
    CHECK(ShareInParallel() || !is_shared)
        << type() << "Layer does not support sharing.";
    is_shared_ = is_shared;
  }

  /**
   * @brief Adjust the shapes of top blobs and internal buffers to accommodate
   *        the shapes of the bottom blobs.
   *
   * @param bottom the input blobs, with the requested input shapes
   * @param top the top blobs, which should be reshaped as needed
   *
   * This method should reshape top blobs as needed according to the shapes
   * of the bottom (input) blobs, as well as reshaping any internal buffers
   * and making any other necessary adjustments so that the layer can
   * accommodate the bottom blobs.
   */
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;

  /**
   * @brief Given the bottom blobs, compute the top blobs and the loss.
   *
   * @param bottom
   *     the input blobs, whose data fields store the input data for this layer
   * @param top
   *     the preshaped output blobs, whose data fields will store this layers'
   *     outputs
   * \return The total loss from the layer.
   *
   * The Forward wrapper calls the relevant device wrapper function
   * (Forward_cpu or Forward_gpu) to compute the top blob values given the
   * bottom blobs. If the layer has any non-zero loss_weights, the wrapper
   * then computes and returns the loss.
   *
   * Your layer should implement Forward_cpu and (optionally) Forward_gpu.
   */
  inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

  /**
   * @brief Given the top blob error gradients, compute the bottom blob error
   *        gradients.
   *
   * @param top
   *     the output blobs, whose diff fields store the gradient of the error
   *     with respect to themselves
   * @param propagate_down
   *     a vector with equal length to bottom, with each index indicating
   *     whether to propagate the error gradients down to the bottom blob at
   *     the corresponding index
   * @param bottom
   *     the input blobs, whose diff fields will store the gradient of the error
   *     with respect to themselves after Backward is run
   *
   * The Backward wrapper calls the relevant device wrapper function
   * (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the
   * top blob diffs.
   *
   * Your layer should implement Backward_cpu and (optionally) Backward_gpu.
   */
  inline void Backward(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom);

  /**
   * @brief Returns the vector of learnable parameter blobs.
   */
  vector<shared_ptr<Blob<Dtype> > >& blobs() {
    return blobs_;
  }

  /**
   * @brief Returns the layer parameter.
   */
  const LayerParameter& layer_param() const { return layer_param_; }

  /**
   * @brief Writes the layer parameter to a protocol buffer
   */
  virtual void ToProto(LayerParameter* param, bool write_diff = false);

  /**
   * @brief Returns the scalar loss associated with a top blob at a given index.
   */
  inline Dtype loss(const int top_index) const {
    return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
  }

  /**
   * @brief Sets the loss associated with a top blob at a given index.
   */
  inline void set_loss(const int top_index, const Dtype value) {
    if (loss_.size() <= top_index) {
      loss_.resize(top_index + 1, Dtype(0));
    }
    loss_[top_index] = value;
  }

  /**
   * @brief Returns the layer type.
   */
  virtual inline const char* type() const { return ""; }

  /**
   * @brief Returns the exact number of bottom blobs required by the layer,
   *        or -1 if no exact number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some exact number of bottom blobs.
   */
  virtual inline int ExactNumBottomBlobs() const { return -1; }
  /**
   * @brief Returns the minimum number of bottom blobs required by the layer,
   *        or -1 if no minimum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some minimum number of bottom blobs.
   */
  virtual inline int MinBottomBlobs() const { return -1; }
  /**
   * @brief Returns the maximum number of bottom blobs required by the layer,
   *        or -1 if no maximum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some maximum number of bottom blobs.
   */
  virtual inline int MaxBottomBlobs() const { return -1; }
  /**
   * @brief Returns the exact number of top blobs required by the layer,
   *        or -1 if no exact number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some exact number of top blobs.
   */
  virtual inline int ExactNumTopBlobs() const { return -1; }
  /**
   * @brief Returns the minimum number of top blobs required by the layer,
   *        or -1 if no minimum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some minimum number of top blobs.
   */
  virtual inline int MinTopBlobs() const { return -1; }
  /**
   * @brief Returns the maximum number of top blobs required by the layer,
   *        or -1 if no maximum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some maximum number of top blobs.
   */
  virtual inline int MaxTopBlobs() const { return -1; }
  /**
   * @brief Returns true if the layer requires an equal number of bottom and
   *        top blobs.
   *
   * This method should be overridden to return true if your layer expects an
   * equal number of bottom and top blobs.
   */
  virtual inline bool EqualNumBottomTopBlobs() const { return false; }

  /**
   * @brief Return whether "anonymous" top blobs are created automatically
   *        by the layer.
   *
   * If this method returns true, Net::Init will create enough "anonymous" top
   * blobs to fulfill the requirement specified by ExactNumTopBlobs() or
   * MinTopBlobs().
   */
  virtual inline bool AutoTopBlobs() const { return false; }

  /**
   * @brief Return whether to allow force_backward for a given bottom blob
   *        index.
   *
   * If AllowForceBackward(i) == false, we will ignore the force_backward
   * setting and backpropagate to blob i only if it needs gradient information
   * (as is done when force_backward == false).
   */
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return true;
  }

  /**
   * @brief Specifies whether the layer should compute gradients w.r.t. a
   *        parameter at a particular index given by param_id.
   *
   * You can safely ignore false values and always compute gradients
   * for all parameters, but possibly with wasteful computation.
   */
  inline bool param_propagate_down(const int param_id) {
    return (param_propagate_down_.size() > param_id) ?
        param_propagate_down_[param_id] : false;
  }
  /**
   * @brief Sets whether the layer should compute gradients w.r.t. a
   *        parameter at a particular index given by param_id.
   */
  inline void set_param_propagate_down(const int param_id, const bool value) {
    if (param_propagate_down_.size() <= param_id) {
      param_propagate_down_.resize(param_id + 1, true);
    }
    param_propagate_down_[param_id] = value;
  }

 protected:
  /** The protobuf that stores the layer parameters */
  LayerParameter layer_param_;
  /** The phase: TRAIN or TEST */
  Phase phase_;
  /** The vector that stores the learnable parameters as a set of blobs. */
  vector<shared_ptr<Blob<Dtype> > > blobs_;
  /** Vector indicating whether to compute the diff of each param blob. */
  vector<bool> param_propagate_down_;

  /** The vector that indicates whether each top blob has a non-zero weight in
   *  the objective function. */
  vector<Dtype> loss_;

  /** @brief Using the CPU device, compute the layer output. */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;
  /**
   * @brief Using the GPU device, compute the layer output.
   *        Fall back to Forward_cpu() if unavailable.
   */
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    // LOG(WARNING) << "Using CPU code as backup.";
    return Forward_cpu(bottom, top);
  }

  /**
   * @brief Using the CPU device, compute the gradients for any parameters and
   *        for the bottom blobs if propagate_down is true.
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) = 0;
  /**
   * @brief Using the GPU device, compute the gradients for any parameters and
   *        for the bottom blobs if propagate_down is true.
   *        Fall back to Backward_cpu() if unavailable.
   */
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    // LOG(WARNING) << "Using CPU code as backup.";
    Backward_cpu(top, propagate_down, bottom);
  }

  /**
   * Called by the parent Layer's SetUp to check that the number of bottom
   * and top Blobs provided as input match the expected numbers specified by
   * the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions.
   */
  virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
                               const vector<Blob<Dtype>*>& top) {
    if (ExactNumBottomBlobs() >= 0) {
      CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
          << type() << " Layer takes " << ExactNumBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (MinBottomBlobs() >= 0) {
      CHECK_LE(MinBottomBlobs(), bottom.size())
          << type() << " Layer takes at least " << MinBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (MaxBottomBlobs() >= 0) {
      CHECK_GE(MaxBottomBlobs(), bottom.size())
          << type() << " Layer takes at most " << MaxBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (ExactNumTopBlobs() >= 0) {
      CHECK_EQ(ExactNumTopBlobs(), top.size())
          << type() << " Layer produces " << ExactNumTopBlobs()
          << " top blob(s) as output.";
    }
    if (MinTopBlobs() >= 0) {
      CHECK_LE(MinTopBlobs(), top.size())
          << type() << " Layer produces at least " << MinTopBlobs()
          << " top blob(s) as output.";
    }
    if (MaxTopBlobs() >= 0) {
      CHECK_GE(MaxTopBlobs(), top.size())
          << type() << " Layer produces at most " << MaxTopBlobs()
          << " top blob(s) as output.";
    }
    if (EqualNumBottomTopBlobs()) {
      CHECK_EQ(bottom.size(), top.size())
          << type() << " Layer produces one top blob as output for each "
          << "bottom blob input.";
    }
  }

  /**
   * Called by SetUp to initialize the weights associated with any top blobs in
   * the loss function. Store non-zero loss weights in the diff blob.
   */
  inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
    const int num_loss_weights = layer_param_.loss_weight_size();
    if (num_loss_weights) {
      CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
          "unspecified or specified once per top blob.";
      for (int top_id = 0; top_id < top.size(); ++top_id) {
        const Dtype loss_weight = layer_param_.loss_weight(top_id);
        if (loss_weight == Dtype(0)) { continue; }
        this->set_loss(top_id, loss_weight);
        const int count = top[top_id]->count();
        Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
        caffe_set(count, loss_weight, loss_multiplier);
      }
    }
  }

 private:
  /** Whether this layer is actually shared by other nets*/
  bool is_shared_;

  /** The mutex for sequential forward if this layer is shared */
  shared_ptr<boost::mutex> forward_mutex_;

  /** Initialize forward_mutex_ */
  void InitMutex();
  /** Lock forward_mutex_ if this layer is shared */
  void Lock();
  /** Unlock forward_mutex_ if this layer is shared */
  void Unlock();

  DISABLE_COPY_AND_ASSIGN(Layer);
};  // class Layer

// Forward and backward wrappers. You should implement the cpu and
// gpu specific implementations instead, and should not change these
// functions.
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Lock during forward to ensure sequential forward
  Lock();
  Dtype loss = 0;
  Reshape(bottom, top);
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->cpu_data();
      const Dtype* loss_weights = top[top_id]->cpu_diff();
      loss += caffe_cpu_dot(count, data, loss_weights);
    }
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);
#ifndef CPU_ONLY
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->gpu_data();
      const Dtype* loss_weights = top[top_id]->gpu_diff();
      Dtype blob_loss = 0;
      caffe_gpu_dot(count, data, loss_weights, &blob_loss);
      loss += blob_loss;
    }
#endif
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  Unlock();
  return loss;
}

template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Backward_cpu(top, propagate_down, bottom);
    break;
  case Caffe::GPU:
    Backward_gpu(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

// Serialize LayerParameter to protocol buffer
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
  param->Clear();
  param->CopyFrom(layer_param_);
  param->clear_blobs();
  for (int i = 0; i < blobs_.size(); ++i) {
    blobs_[i]->ToProto(param->add_blobs(), write_diff);
  }
}

}  // namespace caffe

#endif  // CAFFE_LAYER_H_
```
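Given this interface, writing a new layer mostly means overriding the pure virtual functions (Reshape, Forward_cpu, Backward_cpu), the type and blob-count queries, and then registering the class with the layer factory. Below is a minimal, hypothetical sketch (MyIdentityLayer is not a real Caffe layer; it is assumed here only to show the pattern) that copies its bottom blob to its top blob in Forward and its top diff to its bottom diff in Backward:

```cpp
// Hypothetical minimal layer built on the Layer<Dtype> interface above.
#include <vector>
#include "caffe/layer.hpp"

namespace caffe {

template <typename Dtype>
class MyIdentityLayer : public Layer<Dtype> {
 public:
  explicit MyIdentityLayer(const LayerParameter& param) : Layer<Dtype>(param) {}
  virtual inline const char* type() const { return "MyIdentity"; }
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

  // The top blob has the same shape as the bottom blob.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    top[0]->ReshapeLike(*bottom[0]);
  }

 protected:
  // Forward: copy bottom data to top data.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    caffe_copy(bottom[0]->count(), bottom[0]->cpu_data(),
               top[0]->mutable_cpu_data());
  }
  // Backward: copy top diff to bottom diff (identity has gradient 1).
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {
      caffe_copy(top[0]->count(), top[0]->cpu_diff(),
                 bottom[0]->mutable_cpu_diff());
    }
  }
};

INSTANTIATE_CLASS(MyIdentityLayer);
REGISTER_LAYER_CLASS(MyIdentity);

}  // namespace caffe
```

Note how the wrappers above do the bookkeeping for us: SetUp calls CheckBlobCounts, LayerSetUp, Reshape, and SetLossWeights, while Forward/Backward dispatch to the _cpu implementation (and fall back from GPU to CPU if no _gpu version is provided).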
References:
- 《21天實戰Caffe》