Beginner Supplement: Steepest Gradient Descent
阿新 • Published: 2019-01-27
Sounds like a pretty badass name, doesn't it?
This implements numerically finding a function's minimum. The math involved is essentially high-school level: compute the derivative (here, the gradient) and descend along it to locate the minimum within a given range. Be careful when tuning the step size (it is a real trap).
""" 最速下降法 Rosenbrock函式 函式 f(x)=100*(x(2)-x(1).^2).^2+(1-x(1)).^2 梯度 g(x)=(-400*(x(2)-x(1)^2)*x(1)-2*(1-x(1)),200*(x(2)-x(1)^2))^(T) """ import numpy as np import matplotlib.pyplot as plt import random def rosenbrock(x): return 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2 def jacobian(x): return np.array([-400 * x[0] * (x[1] - x[0] ** 2) - 2 * (1 - x[0]), 200 * (x[1] - x[0] ** 2)]) ''' X1 = np.arange(-1.5, 1.5 + 0.05, 0.05) X2 = np.arange(-3.5, 2 + 0.05, 0.05) [x1, x2] = np.meshgrid(X1, X2) f = 100 * (x2 - x1 ** 2) ** 2 + (1 - x1) ** 2 # 給定的函式 #plt.contour(x1, x2, f, 20) # 畫出函式的20條輪廓線 ''' def steepest(x0): print('初始點為:') print(x0, '\n') imax = 20000 W = np.zeros((2, imax)) W[:, 0] = x0 i = 1 x = x0 grad = jacobian(x) delta = sum(grad ** 2) # 初始誤差 while i < imax and delta > 10 ** (-5): p = -jacobian(x) x0 = x alpha = goldsteinsearch(rosenbrock, jacobian, p, x, 1, 0.1, 2) x = x + alpha * p W[:, i] = x grad = jacobian(x) delta = sum(grad ** 2) i = i + 1 print("迭代次數為:", i) print("近似最優解為:") print(x, '\n') W = W[:, 0:i] # 記錄迭代點 return W ''' 線性搜尋子函式 ''' def goldsteinsearch(f, df, d, x, alpham, rho, t): flag = 0 a = 0 b = alpham fk = f(x) gk = df(x) phi0 = fk dphi0 = np.dot(gk, d) alpha = b * random.uniform(0, 1) while (flag == 0): newfk = f(x + alpha * d) phi = newfk if (phi - phi0 <= rho * alpha * dphi0): if (phi - phi0 >= (1 - rho) * alpha * dphi0): flag = 1 else: a = alpha b = b if (b < alpham): alpha = (a + b) / 2 else: alpha = t * alpha else: a = a b = alpha alpha = (a + b) / 2 return alpha x0 = np.array([-1.2, 5])#檢測範圍 W = steepest(x0) #plt.plot(W[0, :], W[1, :], 'g*', W[0, :], W[1, :]) # 畫出迭代點收斂的軌跡 #plt.show()