
Implementing a Simple BP Neural Network in C++

This article shares the complete code of a simple BP (back-propagation) neural network implemented in C++, for your reference. The details are as follows.

A simple BP neural network is implemented.

EasyX is used to visualize the training process and the training result.

25 samples are used, and the network is trained for 10,000 iterations.

The network has two inputs and one output.

The figure below shows the training result: data is the training input, temp is the output of the corresponding layer, target is the training target, and the large plot on the right is the test result of the trained BP network.

[Figure: BP neural network training process and test result]

The detailed implementation follows; it mainly consists of basic matrix operations.

#include <stdio.h>
#include <stdlib.h>
#include <graphics.h>
#include <time.h>
#include <math.h>
#include <string.h> // needed for strcpy_s

#define uint unsigned short
#define real double

#define threshold (real)(rand() % 99998 + 1) / 100000

// A layer of the neural network
class layer{
private:
 char name[20];
 uint row,col;
 uint x,y;
 real **data;
 real *bias;
public:
 layer(){
 strcpy_s(name,"temp");
 row = 1;
 col = 3;
 x = y = 0;
 data = new real*[row];
 bias = new real[row];
 for (uint i = 0; i < row; i++){
  data[i] = new real[col];
  bias[i] = threshold;
  for (uint j = 0; j < col; j++){
  data[i][j] = 1;
  }
 }
 }
 layer(FILE *fp){
 fscanf_s(fp,"%d %d %d %d %s",&row,&col,&x,&y,name);
 data = new real*[row];
 bias = new real[row];
 for (uint i = 0; i < row; i++){
  data[i] = new real[col];
  bias[i] = threshold;
  for (uint j = 0; j < col; j++){
  fscanf_s(fp,"%lf",&data[i][j]);
  }
 }
 }
 layer(uint row,uint col){
 strcpy_s(name,"temp");
 this->row = row;
 this->col = col;
 this->x = 0;
 this->y = 0;
 this->data = new real*[row];
 this->bias = new real[row];
 for (uint i = 0; i < row; i++){
  data[i] = new real[col];
  bias[i] = threshold;
  for (uint j = 0; j < col; j++){
  data[i][j] = 1.0f;
  }
 }
 }
 layer(const layer &a){
 strcpy_s(name,a.name);
 row = a.row,col = a.col;
 x = a.x,y = a.y;
 data = new real*[row];
 bias = new real[row];
 for (uint i = 0; i < row; i++){
  data[i] = new real[col];
  bias[i] = a.bias[i];
  for (uint j = 0; j < col; j++){
  data[i][j] = a.data[i][j];
  }
 }
 }
 ~layer(){
 // free the existing data
 for (uint i = 0; i < row; i++){
  delete[]data[i];
 }
 delete[]data;
 delete[]bias; // also free the bias array to avoid a leak
 }
 layer& operator =(const layer &a){
 // free the existing data
 for (uint i = 0; i < row; i++){
  delete[]data[i];
 }
 delete[]data;
 delete[]bias;
 // reallocate and copy from a
 strcpy_s(name,a.name);
 row = a.row,col = a.col;
 x = a.x,y = a.y;
 data = new real*[row];
 bias = new real[row];
 for (uint i = 0; i < row; i++){
  data[i] = new real[col];
  bias[i] = a.bias[i];
  for (uint j = 0; j < col; j++){
  data[i][j] = a.data[i][j];
  }
 }
 return *this;
 }
 layer Transpose() const {
 layer arr(col,row);
 arr.x = x,arr.y = y;
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  arr.data[j][i] = data[i][j];
  }
 }
 return arr;
 }
 layer sigmoid(){
 layer arr(row,col);
 arr.x = x,arr.y = y;
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  arr.data[i][j] = 1 / (1 + exp(-data[i][j]));// 1/(1+exp(-z))
  }
 }
 return arr;
 }
 layer operator *(const layer &b){ // element-wise (Hadamard) product
 layer arr(row,col);
 arr.x = x,arr.y = y;
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  arr.data[i][j] = data[i][j] * b.data[i][j];
  }
 }
 return arr;
 }
 layer operator *(const int b){
 layer arr(row,col);
 arr.x = x,arr.y = y;
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  arr.data[i][j] = b * data[i][j];
  }
 }
 return arr;
 }
 layer matmul(const layer &b){ // matrix product: (row x col) * (b.row x b.col), requires col == b.row
 layer arr(row,b.col);
 arr.x = x,arr.y = y;
 for (uint k = 0; k < b.col; k++){
  for (uint i = 0; i < row; i++){
  arr.bias[i] = bias[i];
  arr.data[i][k] = 0;
  for (uint j = 0; j < col; j++){
   arr.data[i][k] += data[i][j] * b.data[j][k];
  }
  }
 }
 return arr;
 }
 layer operator -(const layer &b){
 layer arr(row,col);
 arr.x = x,arr.y = y;
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  arr.data[i][j] = data[i][j] - b.data[i][j];
  }
 }
 return arr;
 }
 layer operator +(const layer &b){
 layer arr(row,col);
 arr.x = x,arr.y = y;
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  arr.data[i][j] = data[i][j] + b.data[i][j];
  }
 }
 return arr;
 }
 layer neg(){
 layer arr(row,col);
 arr.x = x,arr.y = y;
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  arr.data[i][j] = -data[i][j];
  }
 }
 return arr;
 }
 bool operator ==(const layer &a){
 bool result = true;
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  if (abs(data[i][j] - a.data[i][j]) > 10e-6){
   result = false;
   break;
  }
  }
 }
 return result;
 }
 void randomize(){
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  data[i][j] = threshold;
  }
  bias[i] = 0.3;
 }
 }
 void print(){
 outtextxy(x,y - 20,name);
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  COLORREF color = HSVtoRGB(360 * data[i][j],1,1);
  putpixel(x + i,y + j,color);
  }
 }
 }
 void save(FILE *fp){
 fprintf_s(fp,"%d %d %d %d %s\n",row,col,x,y,name);
 for (uint i = 0; i < row; i++){
  for (uint j = 0; j < col; j++){
  fprintf_s(fp,"%lf ",data[i][j]);
  }
  fprintf_s(fp,"\n");
 }
 }
 friend class network;
 friend layer operator *(const double a,const layer &b);
};

layer operator *(const double a,const layer &b){
 layer arr(b.row,b.col);
 arr.x = b.x,arr.y = b.y;
 for (uint i = 0; i < arr.row; i++){
 for (uint j = 0; j < arr.col; j++){
  arr.data[i][j] = a * b.data[i][j];
 }
 }
 return arr;
}

// The neural network
class network{
 int iter;
 double learn;
 layer arr[3];
 layer data,target,test;
 layer& unit(layer &x){
 for (uint i = 0; i < x.row; i++){
  for (uint j = 0; j < x.col; j++){
  x.data[i][j] = i == j ? 1.0 : 0.0;
  }
 }
 return x;
 }
 layer grad_sigmoid(layer &x){
 layer e(x.row,x.col); // the constructor fills e with ones
 e = x*(e - x); // sigmoid derivative expressed via the activation: x*(1-x)
 return e;
 }
public:
 network(FILE *fp){
 fscanf_s(fp,"%d %lf",&iter,&learn);
 // input data
 data = layer(fp);
 for (uint i = 0; i < 3; i++){
  arr[i] = layer(fp);
  //arr[i].randomize();
 }
 target = layer(fp);
 // test data
 test = layer(2,40000);
 for (uint i = 0; i < test.col; i++){
  test.data[0][i] = ((double)i / 200) / 200.0f;
  test.data[1][i] = (double)(i % 200) / 200.0f;
 }
 }
 void train(){
 int i = 0;
 char str[20];
 data.print();
 target.print();
 for (i = 0; i < iter; i++){
  sprintf_s(str,"Iterate:%d",i);
   outtextxy(0,0,str);
   // forward propagation
  layer l0 = data;
  layer l1 = arr[0].matmul(l0).sigmoid();
  layer l2 = arr[1].matmul(l1).sigmoid();
  layer l3 = arr[2].matmul(l2).sigmoid();
   // display the output of each layer
  l1.print();
  l2.print();
  l3.print();
  if (l3 == target){
  break;
  }
   // back-propagation
  layer l3_delta = (l3 - target ) * grad_sigmoid(l3);
  layer l2_delta = arr[2].Transpose().matmul(l3_delta) * grad_sigmoid(l2);
  layer l1_delta = arr[1].Transpose().matmul(l2_delta) * grad_sigmoid(l1);
   // gradient descent update
  arr[2] = arr[2] - learn * l3_delta.matmul(l2.Transpose());
  arr[1] = arr[1] - learn * l2_delta.matmul(l1.Transpose());
  arr[0] = arr[0] - learn * l1_delta.matmul(l0.Transpose());
 }
  sprintf_s(str,"Iterate:%d",i);
  outtextxy(0,0,str);
  // test output
 // selftest();
 }
 void selftest(){
 // run the trained network on the test grid
 layer l0 = test;
 layer l1 = arr[0].matmul(l0).sigmoid();
 layer l2 = arr[1].matmul(l1).sigmoid();
 layer l3 = arr[2].matmul(l2).sigmoid();
 setlinecolor(WHITE);
 // test samples
 for (uint j = 0; j < test.col; j++){
  COLORREF color = HSVtoRGB(360 * l3.data[0][j],1,1);// output color
  putpixel((int)(test.data[0][j] * 160) + 400,(int)(test.data[1][j] * 160) + 30,color);
 }
 // reference samples (the training targets)
 for (uint j = 0; j < data.col; j++){
  COLORREF color = HSVtoRGB(360 * target.data[0][j],1,1);// output color
  setfillcolor(color);
  fillcircle((int)(data.data[0][j] * 160) + 400,(int)(data.data[1][j] * 160) + 30,3);
 }
 line(400,30,400,230);
 line(400,30,600,30);
 }
 void save(FILE *fp){
 fprintf_s(fp,"%d %lf\n",iter,learn);
 data.save(fp);
 for (uint i = 0; i < 3; i++){
  arr[i].save(fp);
 }
 target.save(fp);
 }
};
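// Note: the layer and network classes above are presumably the contents of network.h;
// the translation unit below includes that header and defines the program entry point.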
#include "network.h"

int main(){
 FILE *fp = NULL;
 // load the saved state
 fopen_s(&fp,"Text.txt","r");
 network net(fp);
 fclose(fp);
 initgraph(600,320);
 net.train();
 // save the state
 fopen_s(&fp,"Text.txt","w"); // assumption: the trained state is written back to the same file (the original call was missing the filename)
 net.save(fp);
 fclose(fp);
 getchar();
 closegraph();
}
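For reference, the layer(FILE*) and network(FILE*) constructors above imply the following layout for Text.txt: the iteration count and learning rate come first, followed by five blocks (the input data, the three weight layers, and the target), where each block starts with a "row col x y name" header line and is followed by row × col values. The sizes and display coordinates below are hypothetical, since the article does not list the actual hidden-layer dimensions; a sketch of the file might look like:

10000 0.1
2 25 10 30 data
... 25 values for the first input ...
... 25 values for the second input ...
4 2 60 30 w1
... 4 x 2 weights ...
4 4 110 30 w2
... 4 x 4 weights ...
1 4 160 30 w3
... 1 x 4 weights ...
1 25 210 30 target
... 25 target values ...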

The code above was written at the beginning of 2016; it is very crude and hard to extend. Three years later, I revisited the back-propagation algorithm and refactored this code.
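In terms of equations, the train() loop above implements the usual per-layer delta rule for a sigmoid network. Writing $a_l$ for the output of layer $l$ (with $a_0$ the input data), $t$ for the target and $\eta$ for the learning rate, the code computes

$$\delta_3 = (a_3 - t) \odot a_3 \odot (1 - a_3), \qquad \delta_l = (W_{l+1}^{T}\,\delta_{l+1}) \odot a_l \odot (1 - a_l),$$
$$W_l \leftarrow W_l - \eta\,\delta_l\,a_{l-1}^{T},$$

where $\odot$ denotes element-wise multiplication. Note that the bias arrays are stored in each layer but never actually added to the activations, so this implementation effectively trains a network without bias terms.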

Recently, following the description of back-propagation in the book "Deep Learning", I implemented another C++ neural-network framework based on back-propagation: Github: Neural-Network. The framework supports tensor operations such as convolution, pooling and upsampling. Besides the traditional stacked network model, it also implements automatic differentiation based on a computational graph, which still has some bugs. It is expected to support building convolutional neural networks and to implement some of the gradient-based optimization algorithms introduced in "Deep Learning".
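To illustrate what "automatic differentiation based on a computational graph" means (the sketch below is not the author's Neural-Network framework, only a generic scalar example): each operation records a small backward function that pushes gradients to its inputs, and a single reverse pass over the recorded graph then yields every derivative.

// Minimal reverse-mode automatic differentiation on scalars (illustrative sketch only).
#include <cstdio>
#include <cmath>
#include <functional>
#include <memory>
#include <vector>

struct Node {
    double value = 0.0;
    double grad  = 0.0;
    std::function<void()> backward = []{}; // pushes this node's grad to its parents
};
using NodePtr = std::shared_ptr<Node>;

static std::vector<NodePtr> tape; // nodes in creation (topological) order

NodePtr constant(double v) {
    auto n = std::make_shared<Node>();
    n->value = v;
    tape.push_back(n);
    return n;
}

NodePtr add(NodePtr a, NodePtr b) {
    auto n = constant(a->value + b->value);
    Node* self = n.get(); // raw pointer avoids a shared_ptr cycle in the capture
    n->backward = [self, a, b] { a->grad += self->grad; b->grad += self->grad; };
    return n;
}

NodePtr mul(NodePtr a, NodePtr b) {
    auto n = constant(a->value * b->value);
    Node* self = n.get();
    n->backward = [self, a, b] { a->grad += b->value * self->grad;
                                 b->grad += a->value * self->grad; };
    return n;
}

NodePtr sigmoid(NodePtr a) {
    auto n = constant(1.0 / (1.0 + std::exp(-a->value)));
    Node* self = n.get();
    n->backward = [self, a] { a->grad += self->value * (1.0 - self->value) * self->grad; };
    return n;
}

// Seed the output gradient with 1 and replay the tape backwards.
void backprop(NodePtr output) {
    output->grad = 1.0;
    for (auto it = tape.rbegin(); it != tape.rend(); ++it) (*it)->backward();
}

int main() {
    // y = sigmoid(w*x + b); compute dy/dw and dy/db at x=0.5, w=2, b=-1
    auto x = constant(0.5), w = constant(2.0), b = constant(-1.0);
    auto y = sigmoid(add(mul(w, x), b));
    backprop(y);
    std::printf("y=%f dy/dw=%f dy/db=%f\n", y->value, w->grad, b->grad);
    return 0;
}

A tensor version follows the same pattern, just with array-valued nodes and operations such as convolution and pooling recording the corresponding backward rules.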

Suggestions from interested readers are very welcome.

That is all for this article. I hope it is helpful for your study, and thank you for your continued support.