
Steepest Descent, Newton's Method, and the Quasi-Newton Method on the Rosenbrock Function


For the Rosenbrock function

    f(x1, x2) = 100*(x2 - x1^2)^2 + (1 - x1)^2,

find the minimizer using the steepest descent method, Newton's method, and the quasi-Newton method, respectively. The starting point is [-1.2, 1] and the error tolerance is 1e-5, where the error is measured as the distance to the known minimizer (1, 1).
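All three implementations below share the same analytic gradient, grad(x) = (400*x1^3 - 400*x1*x2 + 2*x1 - 2, -200*x1^2 + 200*x2). Before running the solvers it is worth confirming that formula numerically; the following is a minimal sketch (not part of the original code) that compares the analytic gradient against a central finite difference at the starting point:

import numpy as np

def rosenbrock(x):
    return 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2

def grad(x):
    return np.array([400 * x[0]**3 - 400 * x[0] * x[1] + 2 * x[0] - 2,
                     -200 * x[0]**2 + 200 * x[1]])

def fd_grad(f, x, h=1e-6):
    # central finite difference, one coordinate at a time
    g = np.zeros_like(x, dtype=float)
    for j in range(len(x)):
        e = np.zeros_like(x, dtype=float)
        e[j] = h
        g[j] = (f(x + e) - f(x - e)) / (2 * h)
    return g

x0 = np.array([-1.2, 1.0])
print(grad(x0))                  # analytic gradient: [-215.6, -88.0]
print(fd_grad(rosenbrock, x0))   # should agree to roughly 1e-4

At x0 = [-1.2, 1] the analytic gradient evaluates to [-215.6, -88.0], so a close match from the finite difference confirms the formula used throughout the article.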

1. Steepest descent method

1. Code implementation:

# Steepest descent (Wolfe conditions)
import numpy as np
import random
import matplotlib.pyplot as plt

# Rosenbrock function
def rosenbrock(x):
    return 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2

# Gradient
def grad(x):
    return np.array([400 * x[0]**3 - 400 * x[0] * x[1] + 2 * x[0] - 2,
                     -200 * x[0]**2 + 200 * x[1]])

# Wolfe-condition line search for alpha
def wolfe(f, df, p, x, alpham, c1, c2, t):
    """Objective f, gradient df, direction p, current point x,
    initial step alpham, the two inequality coefficients c1 and c2;
    t scales the step up when it needs to grow."""
    flag = 0
    a = 0
    b = alpham
    fk = f(x)
    alpha = b * random.uniform(0, 1)
    gk = df(x)
    gk1 = df(x + alpha * p)
    phi0 = fk
    dphi0 = np.dot(gk, p)
    dphi = np.dot(gk1, p)
    while flag == 0:
        newfk = f(x + alpha * p)
        phi = newfk
        if phi <= phi0 + c1 * alpha * dphi0:   # first inequality (sufficient decrease) holds
            if dphi >= c2 * dphi0:             # second inequality (curvature) holds
                flag = 1
            else:
                a = alpha
                if b < alpham:
                    alpha = (a + b) / 2
                else:
                    alpha = t * alpha
        else:
            b = alpha
            alpha = (a + b) / 2
        gk1 = df(x + alpha * p)
        dphi = np.dot(gk1, p)
    return alpha

# Steepest descent iteration
def steepest(x0):
    maxk = 10000
    W = np.zeros((2, maxk))
    W[:, 0] = x0
    epsilon = 1e-5             # error tolerance
    x = x0                     # starting point
    i = 0                      # iteration counter
    xn = np.array([1, 1])      # known minimizer, used to measure the error
    delta = np.linalg.norm(x - xn)
    while delta > epsilon:
        p = -grad(x)                                           # direction: negative gradient
        alpha = wolfe(rosenbrock, grad, p, x, 1, 0.4, 0.9, 2)  # step length from the Wolfe conditions
        x = x + alpha * p                                      # new iterate
        W[:, i] = x
        delta = np.linalg.norm(x - xn)
        i += 1
    print("Number of iterations:\n", i)
    print("Approximate solution:\n", x)
    print("Error:\n", delta)
    W = W[:, 0:i]
    return W

if __name__ == "__main__":
    X1 = np.arange(-1.5, 1.5 + 0.05, 0.05)
    X2 = np.arange(-1.5, 1.5 + 0.05, 0.05)
    [x1, x2] = np.meshgrid(X1, X2)
    f = 100 * (x2 - x1**2)**2 + (1 - x1)**2
    plt.contour(x1, x2, f, 10)
    x0 = np.array([-1.2, 1])
    W = steepest(x0)
    plt.plot(W[0, :], W[1, :], 'r*-')
    plt.show()

2. Experimental results: [figure: Rosenbrock contour plot with the steepest-descent iteration path marked 'r*-']
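The wolfe routine above searches for a step length alpha satisfying the two Wolfe inequalities: sufficient decrease, f(x + alpha*p) <= f(x) + c1*alpha*grad(x)·p, and curvature, grad(x + alpha*p)·p >= c2*grad(x)·p. A standalone predicate makes the two tests easy to see in isolation; wolfe_ok below is a hypothetical helper for illustration, not part of the original scripts:

import numpy as np

def wolfe_ok(f, df, x, p, alpha, c1=0.4, c2=0.9):
    """Return True if alpha satisfies both Wolfe conditions at x along p.
    (Hypothetical helper; c1, c2 default to the values the article passes.)"""
    phi0 = f(x)
    dphi0 = np.dot(df(x), p)
    sufficient_decrease = f(x + alpha * p) <= phi0 + c1 * alpha * dphi0
    curvature = np.dot(df(x + alpha * p), p) >= c2 * dphi0
    return sufficient_decrease and curvature

Note that the scripts call wolfe(...) with c1 = 0.4 and c2 = 0.9; any 0 < c1 < c2 < 1 is admissible, although textbooks usually pick a much smaller c1 such as 1e-4.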

2. Newton's method

1. Code implementation:

# Newton's method (Wolfe conditions)
import numpy as np
import random
import matplotlib.pyplot as plt

# Rosenbrock function
def rosenbrock(x):
    return 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2

# Gradient
def grad(x):
    return np.array([400 * x[0]**3 - 400 * x[0] * x[1] + 2 * x[0] - 2,
                     -200 * x[0]**2 + 200 * x[1]])

# Wolfe-condition line search for alpha
def wolfe(f, df, p, x, alpham, c1, c2, t):
    """Objective f, gradient df, direction p, current point x,
    initial step alpham, the two inequality coefficients c1 and c2."""
    flag = 0
    a = 0
    b = alpham
    fk = f(x)
    alpha = b * random.uniform(0, 1)
    gk = df(x)
    gk1 = df(x + alpha * p)
    phi0 = fk
    dphi0 = np.dot(gk, p)
    dphi = np.dot(gk1, p)
    while flag == 0:
        newfk = f(x + alpha * p)
        phi = newfk
        if phi <= phi0 + c1 * alpha * dphi0:   # first inequality (sufficient decrease) holds
            if dphi >= c2 * dphi0:             # second inequality (curvature) holds
                flag = 1
            else:
                a = alpha
                if b < alpham:
                    alpha = (a + b) / 2
                else:
                    alpha = t * alpha
        else:
            b = alpha
            alpha = (a + b) / 2
        gk1 = df(x + alpha * p)
        dphi = np.dot(gk1, p)
    return alpha

# Hessian matrix
def hess(x):
    return np.array([[1200 * x[0]**2 - 400 * x[1] + 2, -400 * x[0]],
                     [-400 * x[0], 200]])

# Newton iteration
def newton(x0):
    maxk = 10000
    W = np.zeros((2, maxk))
    W[:, 0] = x0
    epsilon = 1e-5             # error tolerance
    x = x0                     # starting point
    i = 0                      # iteration counter
    xn = np.array([1, 1])      # known minimizer, used to measure the error
    delta = np.linalg.norm(x - xn)
    while delta > epsilon:
        H = hess(x)
        p = -np.dot(np.linalg.inv(H), grad(x))                 # Newton direction
        alpha = wolfe(rosenbrock, grad, p, x, 1, 0.4, 0.9, 2)  # step length from the Wolfe conditions
        x = x + alpha * p                                      # new iterate
        W[:, i] = x
        delta = np.linalg.norm(x - xn)
        i += 1
    print("Number of iterations:\n", i)
    print("Approximate solution:\n", x)
    print("Error:\n", delta)
    W = W[:, 0:i]
    return W

if __name__ == "__main__":
    X1 = np.arange(-1.5, 1.5 + 0.05, 0.05)
    X2 = np.arange(-1.5, 1.5 + 0.05, 0.05)
    [x1, x2] = np.meshgrid(X1, X2)
    f = 100 * (x2 - x1**2)**2 + (1 - x1)**2
    plt.contour(x1, x2, f, 10)
    x0 = np.array([-1.2, 1])
    W = newton(x0)
    plt.plot(W[0, :], W[1, :], 'r*-')
    plt.show()

2. Experimental results: [figure: Rosenbrock contour plot with the Newton iteration path marked 'r*-']
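One implementation note on the Newton step: the loop forms p = -inv(H)·grad(x) with an explicit matrix inverse. An equivalent and numerically preferable idiom is to solve the linear system H·p = -grad(x) directly. A minimal sketch of the swap, with hess and grad as defined above (newton_direction is an illustrative name, not from the original code):

import numpy as np

def newton_direction(H, g):
    # Solves H p = -g directly; equivalent to -np.dot(np.linalg.inv(H), g)
    # but avoids forming the inverse explicitly.
    return np.linalg.solve(H, -g)

For this 2x2 Hessian the difference is negligible, but np.linalg.solve is both cheaper and more accurate than an explicit inverse as the problem dimension grows.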

3. Quasi-Newton (BFGS) method

1. Code implementation:

# Quasi-Newton method (Wolfe conditions)
import numpy as np
import random
import matplotlib.pyplot as plt

# Rosenbrock function
def rosenbrock(x):
    return 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2

# Gradient
def grad(x):
    return np.array([400 * x[0]**3 - 400 * x[0] * x[1] + 2 * x[0] - 2,
                     -200 * x[0]**2 + 200 * x[1]])

# Wolfe-condition line search for alpha
def wolfe(f, df, p, x, alpham, c1, c2, t):
    """Objective f, gradient df, direction p, current point x,
    initial step alpham, the two inequality coefficients c1 and c2."""
    flag = 0
    a = 0
    b = alpham
    fk = f(x)
    alpha = b * random.uniform(0, 1)
    gk = df(x)
    gk1 = df(x + alpha * p)
    phi0 = fk
    dphi0 = np.dot(gk, p)
    dphi = np.dot(gk1, p)
    while flag == 0:
        newfk = f(x + alpha * p)
        phi = newfk
        if phi <= phi0 + c1 * alpha * dphi0:   # first inequality (sufficient decrease) holds
            if dphi >= c2 * dphi0:             # second inequality (curvature) holds
                flag = 1
            else:
                a = alpha
                if b < alpham:
                    alpha = (a + b) / 2
                else:
                    alpha = t * alpha
        else:
            b = alpha
            alpha = (a + b) / 2
        gk1 = df(x + alpha * p)
        dphi = np.dot(gk1, p)
    return alpha

# BFGS update: compute H_{k+1} from H_k
def bfgs(f, df, dfk1, dfk, xk1, xk, Hk):
    I = np.identity(2)
    yk = dfk1 - dfk
    sk = xk1 - xk
    rhok = 1 / np.dot(yk, sk)
    # sk and yk are 1-D arrays, so the rank-one terms need np.outer;
    # np.dot(sk, yk.T) would collapse to a scalar
    Hk1 = (np.dot(np.dot(I - rhok * np.outer(sk, yk), Hk),
                  I - rhok * np.outer(yk, sk))
           + rhok * np.outer(sk, sk))
    return Hk1

# Quasi-Newton iteration
def quasinewton(x0):
    maxk = 100000
    W = np.zeros((2, maxk))
    W[:, 0] = x0
    epsilon = 1e-5             # error tolerance
    x = x0                     # starting point
    i = 0                      # iteration counter
    xn = np.array([1, 1])      # known minimizer, used to measure the error
    delta = np.linalg.norm(x - xn)
    Hk = np.identity(2)
    while delta > epsilon:
        xk = x.copy()
        dfk = grad(xk)
        p = -np.dot(Hk, grad(x))                               # quasi-Newton direction
        alpha = wolfe(rosenbrock, grad, p, x, 1, 0.4, 0.9, 2)  # step length from the Wolfe conditions
        x = x + alpha * p                                      # new iterate
        W[:, i] = x
        xk1 = x
        dfk1 = grad(xk1)
        Hk = bfgs(rosenbrock, grad, dfk1, dfk, xk1, xk, Hk)    # update the inverse-Hessian approximation
        delta = np.linalg.norm(x - xn)
        i += 1
    print("Number of iterations:\n", i)
    print("Approximate solution:\n", x)
    print("Error:\n", delta)
    W = W[:, 0:i]
    return W

if __name__ == "__main__":
    X1 = np.arange(-1.5, 1.5 + 0.05, 0.05)
    X2 = np.arange(-1.5, 1.5 + 0.05, 0.05)
    [x1, x2] = np.meshgrid(X1, X2)
    f = 100 * (x2 - x1**2)**2 + (1 - x1)**2
    plt.contour(x1, x2, f, 10)
    x0 = np.array([-1.2, 1])
    W = quasinewton(x0)
    plt.plot(W[0, :], W[1, :], 'r*-')
    plt.show()

2. Experimental results: [figure: Rosenbrock contour plot with the quasi-Newton iteration path marked 'r*-']
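As a cross-check on the hand-rolled BFGS loop, the same problem can be handed to a library optimizer. This is an optional sketch assuming SciPy is available (it is not used anywhere in the original article):

import numpy as np
from scipy.optimize import minimize

def rosenbrock(x):
    return 100 * (x[1] - x[0]**2)**2 + (1 - x[0])**2

def grad(x):
    return np.array([400 * x[0]**3 - 400 * x[0] * x[1] + 2 * x[0] - 2,
                     -200 * x[0]**2 + 200 * x[1]])

# BFGS with the analytic gradient, starting from the same point as above
res = minimize(rosenbrock, np.array([-1.2, 1.0]), method='BFGS', jac=grad)
print(res.x)    # should be close to the known minimizer [1, 1]
print(res.nit)  # iteration count, for comparison with quasinewton()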
