
Newton's Method and Gradient Descent: a Numerical Optimization Exam from South China University of Technology


The code in this article follows the flow of the textbook algorithms exactly. The theory is not re-explained; this is only a summary of the code implementations.

Table of contents:
- Quasi-Newton method: BFGS
- Gauss-Newton method: GN
- Newton's method
- Conjugate gradient method
- Summary

Quasi-Newton method: BFGS

```python
# -*- coding: utf-8 -*-
#
# Author: xhc
# Date: 2021-05-28 16:00
# project: 1_zyh
# Name: BFGS.py
import numpy as np

# Objective function (textbook p56, Example 4.1.1)
fun = lambda x: 0.5*x[0]**2 + x[1]**2 - x[0]*x[1] - x[0]

# Gradient, assembled as an np.mat row vector
def gfun(x):
    x = np.array(x)
    part1 = x[0][0] - x[1][0] - 1
    part2 = -1.0*x[0][0] + 2*x[1][0]
    gf = np.mat([part1, part2])
    return gf

def bfgs(fun, gfun, x0):
    maxk = 5000     # maximum number of iterations
    rho = 0.45      # step-length factor
    sigma = 0.3     # constant between 0 and 1/2
    k = 0           # iteration counter
    epsilon = 1e-6
    Bk = np.mat([[1.0, 0.0], [0.0, 1.0]])  # initial Bk is the identity matrix
    while k < maxk:
        gk = gfun(x0)                     # (1,2)
        if np.linalg.norm(gk) < epsilon:  # stop when the gradient norm drops below epsilon
            break
        dk = -Bk.I.dot(gk.T)              # search direction, solves Bk*dk = -gk
        m = 0
        mk = 0
        while m < 30:                     # Armijo search for the step length
            if fun(x0 + (rho**m)*dk) < fun(x0) + sigma*(rho**m)*gk.dot(dk):
                mk = m
                break
            m = m + 1
        x = x0 + (rho**mk)*dk
        sk = x - x0        # (2,1)
        yk = gfun(x) - gk  # (1,2)
        if yk*sk > 0:      # update Bk only when the curvature condition holds
            Bk = Bk - (Bk*sk*sk.T*Bk)/(sk.T*Bk*sk) + (yk.T*yk)/(yk*sk)
        x0 = x
        k = k + 1
    return x0, k

if __name__ == "__main__":
    x0 = np.array([[0], [0]])  # choose the initial point
    x, k = bfgs(fun, gfun, x0)
    print("BFGS")
    print("The example is textbook p56 Example 4.1.1, solved with an inexact line search; results follow")
    print("number of iterations: %d" % k)
    print("optimal point: %s" % x.T)
```
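As a quick cross-check of the hand-written update above, the same quadratic can be handed to SciPy's built-in BFGS. This is a minimal sketch, not part of the original assignment, assuming SciPy is installed; `f` and `grad` restate `fun` and `gfun` as plain-array functions.

```python
# Cross-check sketch (assumes SciPy is installed; not from the original article).
import numpy as np
from scipy.optimize import minimize

f = lambda x: 0.5*x[0]**2 + x[1]**2 - x[0]*x[1] - x[0]
grad = lambda x: np.array([x[0] - x[1] - 1, -x[0] + 2*x[1]])

res = minimize(f, x0=np.zeros(2), jac=grad, method="BFGS")
print(res.x)  # should be close to the point returned by bfgs() above
```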

Gauss-Newton method: GN

```python
# -*- coding: utf-8 -*-
#
# Author: xhc
# Date: 2021-05-29 14:56
# project: 1_zyh
# Name: GN.py
import numpy as np

# Residual vector r(x)
def gfun(x):
    x = np.array(x)
    val1 = x[0][0] - 0.7*np.sin(x[0][0]) - 0.2*np.cos(x[1][0])
    val2 = x[1][0] - 0.7*np.cos(x[0][0]) + 0.2*np.sin(x[1][0])
    y = np.mat([[val1], [val2]])
    return y

# Jacobian of the residuals
def Hess(x):
    x = np.array(x)
    val1 = 1 - 0.7*np.cos(x[0][0])
    val2 = 0.2*np.sin(x[1][0])
    val3 = 0.7*np.sin(x[0][0])
    val4 = 1 + 0.2*np.cos(x[1][0])
    y = np.mat([[val1, val2], [val3, val4]])
    return y

def GN(gfun, Hess, x0):
    maxk = 5000
    rho = 0.4
    sigma = 0.4
    k = 0
    epsilon = 1e-6
    while k < maxk:
        fk = gfun(x0)                    # residual vector
        hess = Hess(x0)                  # Jacobian
        gk = hess.T * fk                 # gradient of 0.5*||r(x)||^2
        dk = -(hess.T * hess).I.dot(gk)  # Gauss-Newton direction
        if np.linalg.norm(gk) < epsilon: # stopping criterion
            break
        m = 0
        mk = 0
        while m < 30:                    # Armijo search
            newf = 0.5 * np.linalg.norm(gfun(x0 + (rho**m) * dk))**2
            oldf = 0.5 * np.linalg.norm(gfun(x0))**2
            if newf < oldf + sigma * (rho**m)*gk.T*dk:
                mk = m
                break
            m = m + 1
        x0 = x0 + (rho**mk) * dk
        k = k + 1
        print("--------------------------")
        print("current point: %s" % x0.T)
        print("--------------------------")
    x = x0
    return x, k

if __name__ == "__main__":
    x0 = np.array([[0], [0]])  # (2,1)
    x, k = GN(gfun, Hess, x0)
    print("GN")
    print("This implementation was not written fully independently, and no matching worked example was found in the textbook. The function 1/2*cos^2(x1) + 1/2*sin^2(x2) tried first produced a singular matrix that could not be inverted, so the function was replaced; results follow")
    print("number of iterations: %d" % k)
    print("optimal point: %s" % x.T)
    # print("minimum value: %d" % fun(x))
```
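For the least-squares system solved above, `scipy.optimize.least_squares` offers an independent check. A minimal sketch, assuming SciPy is available and that the residuals `r` below match the `gfun` shown in the section:

```python
# Cross-check sketch (assumes SciPy is installed; not from the original article).
import numpy as np
from scipy.optimize import least_squares

def r(x):  # same residuals as gfun above, flattened to a 1-D array
    return np.array([x[0] - 0.7*np.sin(x[0]) - 0.2*np.cos(x[1]),
                     x[1] - 0.7*np.cos(x[0]) + 0.2*np.sin(x[1])])

sol = least_squares(r, x0=np.zeros(2))
print(sol.x, sol.cost)  # cost = 0.5*||r||^2, should be near zero at the solution
```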

Newton's method

```python
# -*- coding: utf-8 -*-
#
# Author: xhc
# Date: 2021-05-28 17:06
# project: 1_zyh
# Name: Newton.py
import numpy as np

# Objective function (textbook p41, Example 3.2.1)
fun = lambda x: 0.5*x[0]**2 + x[1]**2 - x[0]*x[1] - x[0]

# Gradient
def gfun(x):
    x = np.array(x)
    part1 = x[0][0] - x[1][0] - 1
    part2 = -x[0][0] + 2 * x[1][0]
    gf = np.mat([part1, part2])
    return gf

# Hessian
def Hess(x):
    x = np.array(x)
    part1 = 1
    part2 = -1
    part3 = -1
    part4 = 2
    Hess = np.mat([[part1, part2], [part3, part4]])
    return Hess

def DampNewtonMethod(fun, gfun, Hessian, x0):
    maxk = 5000     # maximum number of iterations
    rho = 0.4       # step-length factor
    sigma = 0.4     # constant
    k = 0           # iteration counter
    epsilon = 1e-6  # stopping tolerance
    while k < maxk:
        gk = gfun(x0)         # gradient
        Gk = Hessian(x0)      # Hessian
        dk = -Gk.I.dot(gk.T)  # search direction
        if np.linalg.norm(gk) < epsilon:  # stopping criterion
            break
        m = 0
        mk = 0
        while m < 20:  # Armijo search for the step length
            if fun(x0 + (rho**m) * dk) < fun(x0) + (sigma * (rho**m) * gk.dot(dk)):
                mk = m
                break
            m = m + 1
        x0 = x0 + (rho**mk) * dk
        k = k + 1
        print("--------------------------")
        print("current point: %s" % x0.T)
        print("current value: %f" % fun(x0))
        print("--------------------------")
    x = x0
    return x, k

if __name__ == "__main__":
    x0 = np.array([[0], [0]])
    x, k = DampNewtonMethod(fun, gfun, Hess, x0)
    print("Newton's method")
    print("The example is textbook p41 Example 3.2.1, solved with an inexact line search; results follow")
    print("number of iterations: %d" % k)
    print("optimal point: %s" % x.T)
    print("minimum value: %f" % fun(x))
```
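Since `fun` is quadratic, it can be written as 0.5·xᵀGx − bᵀx with a constant Hessian G, so the unique stationary point solves the linear system Gx = b and a single full Newton step reaches it from any starting point. A minimal closed-form check (standard linear algebra, not from the textbook):

```python
# Closed-form check: for a quadratic, the minimizer solves G x = b directly.
import numpy as np

G = np.array([[1.0, -1.0], [-1.0, 2.0]])  # Hessian of fun
b = np.array([1.0, 0.0])                  # fun(x) = 0.5 x^T G x - b^T x
x_star = np.linalg.solve(G, b)
print(x_star)  # [2. 1.]; fun(x_star) = -1, matching the iterative result
```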

Conjugate gradient method

```python
# -*- coding: utf-8 -*-
#
# Author: xhc
# Date: 2021-05-29 15:40
# project: 1_zyh
# Name: 共轭梯度法.py
import numpy as np

# Objective function (textbook p76, Example 5.2.1)
fun = lambda x: 0.5*x[0]**2 + x[1]**2

# Gradient
def gfun(x):
    x = np.array(x)
    part1 = x[0][0]
    part2 = 2*x[1][0]
    gf = np.mat([part1, part2])
    return gf

def FR_Gradient(fun, gfun, x0):
    maxk = 5000
    rho = 0.4
    sigma = 0.4
    k = 0
    epsilon = 1e-6
    n = len(x0)
    g0 = 0
    d0 = 0
    while k < maxk:
        g = gfun(x0)
        itern = k - (n+1)*np.floor(k/(n+1))
        itern = itern + 1
        if itern == 1:
            # restart step (k divisible by n+1): use the negative gradient
            d = -g
        else:
            # otherwise: negative gradient plus beta*d0
            beta = (g.dot(g.T))/(g0.dot(g0.T))  # FR update of beta
            d = -g + beta*d0
        gd = g.dot(d.T)
        if gd >= 0.0:  # fall back to steepest descent if d is not a descent direction
            d = -g
        if np.linalg.norm(g) < epsilon:  # stopping criterion
            break
        m = 0
        mk = 0
        while m < 20:  # Armijo search
            if fun(x0 + (rho**m)*d.T) < fun(x0) + sigma * (rho**m) * g.dot(d.T):
                mk = m
                break
            m = m + 1
        x0 = x0 + (rho**mk)*d.T
        x0 = np.array(x0)
        g0 = g
        d0 = d
        k = k + 1
        print("--------------------------")
        print("current point: %s" % x0.T)
        print("current value: %f" % fun(x0))
        print("--------------------------")
    x = x0
    return x, k

if __name__ == "__main__":
    x0 = np.array([[2], [1]])
    x, k = FR_Gradient(fun, gfun, x0)
    print("Conjugate gradient: FR algorithm")
    print("The example is textbook p76 Example 5.2.1")
    print("number of iterations: %d" % k)
    print("optimal point: %s" % x.T)
    print("minimum value: %f" % fun(x))
```
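SciPy's nonlinear CG (a Polak-Ribière-type variant rather than FR) should land on the same minimizer, the origin. A minimal comparison sketch, assuming SciPy is installed; it is not part of the original article:

```python
# Cross-check sketch (assumes SciPy is installed; not from the original article).
import numpy as np
from scipy.optimize import minimize

f = lambda x: 0.5*x[0]**2 + x[1]**2
grad = lambda x: np.array([x[0], 2*x[1]])

res = minimize(f, x0=np.array([2.0, 1.0]), jac=grad, method="CG")
print(res.x, res.nit)  # should converge to the origin, the unique minimizer
```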

Summary

If you spot any mistakes, corrections are welcome.
