
Implementing the Gradient Descent Algorithm in Python

This article shares a concrete Python implementation of the gradient descent algorithm, for your reference. The details are as follows.

Introduction

This article implements the gradient descent algorithm in Python, supporting linear regression of the form y = Wx + b.
It currently supports batch gradient descent and stochastic gradient descent (bs=1).
It can also visualize the data when the input feature vector x has fewer than 3 dimensions.
The code requires Python > 3.4.
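Before the full listing, here is a minimal, self-contained sketch of the update the code below performs (the toy data and the names X, y, w, lr are illustrative, not taken from the article's code): a column of ones is appended to the inputs so the bias b folds into the weight vector, and each step moves the weights against the batch gradient of the squared error.

import numpy as np

# Illustrative sketch of batch gradient descent for y = Wx + b;
# the data and names here are examples, not the article's code.
X = np.array([[1.0, 1.0],
              [2.0, 1.0],
              [3.0, 1.0]])           # last column of ones folds the bias b into w
y = np.array([[3.1], [5.2], [7.3]])  # generated from y = 2.1*x + 1.0
w = np.ones((2, 1))                  # initial guess for [w_0, b]
lr = 0.1                             # learning rate

for _ in range(1000):
    y_hat = np.dot(X, w)                    # predictions for the batch
    grad = np.dot(X.T, y_hat - y) / len(X)  # gradient of the (halved) mean squared error
    w = w - lr * grad                       # step against the gradient

print(w.ravel())  # approaches [2.1, 1.0]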

Code

'''
Gradient descent algorithms:
Batch Gradient Descent
Stochastic Gradient Descent (SGD)
'''
__author__ = 'epleone'
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys

# Fix the random seed so each run generates the same random numbers (handy for debugging)
# np.random.seed(111111111)


class GradientDescent(object):
    eps = 1.0e-8
    max_iter = 1000000  # not needed for now
    dim = 1
    func_args = [2.1, 2.7]  # [w_0, .., w_dim, b]

    def __init__(self, func_arg=None, N=1000):
        self.data_num = N
        if func_arg is not None:
            self.FuncArgs = func_arg
        self._getData()

    def _getData(self):
        x = 20 * (np.random.rand(self.data_num, self.dim) - 0.5)
        # append a column of ones so the bias b is folded into the weight vector
        b_1 = np.ones((self.data_num, 1), dtype=float)
        self.x = np.concatenate((x, b_1), axis=1)

    def func(self, x):
        # if the noise is too large, gradient descent stops working
        noise = 0.01 * np.random.randn(self.data_num) + 0
        w = np.array(self.func_args)
        y = np.dot(x, w)  # matrix multiplication
        y += noise
        return y

    @property
    def FuncArgs(self):
        return self.func_args

    @FuncArgs.setter
    def FuncArgs(self, args):
        if not isinstance(args, list):
            raise Exception(
                'args is not a list; it should be like [w_0, ..., b]')
        if len(args) == 0:
            raise Exception('args is an empty list!!')
        if len(args) == 1:
            args.append(0.0)
        self.func_args = args
        self.dim = len(args) - 1
        self._getData()

    @property
    def EPS(self):
        return self.eps

    @EPS.setter
    def EPS(self, value):
        if not isinstance(value, float) and not isinstance(value, int):
            raise Exception("The type of eps should be a float number")
        self.eps = value

    def plotFunc(self):
        # 1-D features: plot the sampled (x, y) points in the plane
        if self.dim == 1:
            x = self.x
            y = self.func(x)
            fig, ax = plt.subplots()
            ax.plot(x, y, 'o')
            ax.set(xlabel='x', ylabel='y', title='Data')
            ax.grid()
            plt.show()
        # 2-D features: plot the sampled points in 3-D space
        elif self.dim == 2:
            x = self.x
            y = self.func(x)
            xs = x[:, 0]
            ys = x[:, 1]
            zs = y
            fig = plt.figure()
            ax = fig.add_subplot(111, projection='3d')
            ax.scatter(xs, ys, zs, c='r', marker='o')

            ax.set_xlabel('X Label')
            ax.set_ylabel('Y Label')
            ax.set_zlabel('Z Label')
            plt.show()
        else:
            plt.text(
                0.5, 0.5, "The dimension(x.dim > 2) \n is too high to draw",
                size=17, rotation=0., ha="center", va="center",
                bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8)))
            plt.draw()
            plt.show()

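    # Note on the math the loop below implements (a standard derivation, not
    # spelled out in the original article): for the batch squared-error loss
    #     L = (1 / (2 * bs)) * sum_i (x_i . w - y_i)^2
    # the gradient with respect to w is (1 / bs) * X^T (y_hat - y), which is
    # exactly what d accumulates before the update w <- w - lr * d.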
    # gradient descent is only guaranteed to work on convex functions
    def _gradient_descent(self, bs, lr, epoch):
        x = self.x
        # shuffling the data set is not necessary here
        # np.random.shuffle(x)
        y = self.func(x)
        w = np.ones((self.dim + 1, 1), dtype=float)
        for e in range(epoch):
            print('epoch:' + str(e), end=',')
            # mini-batch gradient descent; with bs=1 it reduces to
            # single-sample (stochastic) gradient descent
            for i in range(0, self.data_num, bs):
                y_ = np.dot(x[i:i + bs], w)
                loss = y_ - y[i:i + bs].reshape(-1, 1)
                d = loss * x[i:i + bs]
                d = d.sum(axis=0) / bs
                d = lr * d
                d.shape = (-1, 1)
                w = w - d

            y_ = np.dot(self.x, w)
            # ravel() keeps the shapes aligned: y_ is (N, 1) while y is (N,)
            loss_ = abs((y_.ravel() - y).sum())
            print('\tLoss = ' + str(loss_))
            print('Fitted parameters:', end=' ')
            print(sum(w.tolist(), []))
            print()
            if loss_ < self.eps:
                print('The Gradient Descent algorithm has converged!!\n')
                break

    def __call__(self, bs=1, lr=0.1, epoch=10):
        if sys.version_info < (3, 4):
            raise RuntimeError('At least Python 3.4 is required')
        if not isinstance(bs, int) or not isinstance(epoch, int):
            raise Exception(
                "The type of BatchSize/Epoch should be an integer number")
        self._gradient_descent(bs, lr, epoch)


if __name__ == "__main__":
 if sys.version_info < (3,4):
 raise RuntimeError('At least Python 3.4 is required')

 gd = GradientDescent([1.2,1.4,2.1,4.5,2.1])
 # gd = GradientDescent([1.2,2.1])
 print("要擬合的引數結果是: ")
 print(gd.FuncArgs)
 print("===================\n\n")
 # gd.EPS = 0.0
 gd.plotFunc()
 gd(10,0.01)
 print("Finished!")

That's all for this article. I hope it helps with your studies, and thanks for your continued support.