Introduction

For unconstrained optimization problems, the steepest descent method, Newton's method, the damped Newton method, the conjugate gradient method, and the Fletcher-Reeves (F-R) algorithm are classic methods in engineering. Here each of them is implemented step by step in Python. This is mainly aimed at readers who are just starting to learn these algorithms or who are studying engineering optimization: implementing every step yourself is a good way to learn. The principle of each algorithm is given below together with its concrete implementation. It is best to start with the steepest descent method; the methods that follow are all quite similar, as you can also see from the code.

Steepest Descent Method

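For the quadratic objective $f(x) = \tfrac{1}{2}x^T G x + b^T x + c$ used in all of the code below, the steepest descent iteration with exact line search can be summarized as

$$
g_k = \nabla f(x_k) = G x_k + b, \qquad
\lambda_k = \arg\min_{\lambda} f(x_k - \lambda g_k) = \frac{g_k^T g_k}{g_k^T G g_k}, \qquad
x_{k+1} = x_k - \lambda_k g_k ,
$$

and the iteration stops once $\|g_k\|$ drops below the tolerance E. In the program below, $\lambda_k$ is obtained by differentiating $f(x_k - a\,g_k)$ with respect to the symbol a in sympy and solving, rather than by the closed form.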


Written as a Python program, the method above looks as follows (one step requires solving for the step size λ_k, which is done by differentiating the one-dimensional function with sympy):

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__: dwy
# __date__: 2019/6/6


import sympy,numpy
import math
import matplotlib.pyplot as pl
from mpl_toolkits.mplot3d import Axes3D as ax3
# Steepest descent method, 2-D experiment
def SD(x0,G,b,c,N,E):
	f = lambda x: 0.5 * (numpy.dot(numpy.dot(x.T, G), x)) + numpy.dot(b.T, x) + c  # objective function
	f_d = lambda x: numpy.dot(G, x) + b  # gradient
	X=x0
	Y = []  # store the objective values
	Y_d = []  # store the gradient norms
	n = 1
	ee = f_d(x0)

	e = (ee[0, 0]**2 + ee[1, 0]**2)**0.5  # gradient norm, used as the stopping criterion
	Y.append(f(x0)[0,0])
	Y_d.append(e)
	a = sympy.Symbol('a', real=True)  # step size (lambda) for the exact line search
	print('Iteration %d: e=%s' % (n, e))
	while n<N and e>E:
		n=n+1
		yy = f(x0 - a * f_d(x0))  # objective along the steepest descent direction, as a function of a
		yy_d = sympy.diff(yy[0, 0], a, 1)
		a0 = float(sympy.solve(yy_d)[0])  # exact line-search step size
		x0 = x0 - a0 * f_d(x0)  # update the iterate
		X=numpy.c_[X,x0]
		Y.append(f(x0)[0,0])
		ee = f_d(x0)
		e = math.pow(math.pow(ee[0,0],2)+math.pow(ee[1,0],2),0.5)
		Y_d.append(e)
		print(ee)
		print('Iteration %d: e=%s' % (n, e))
	return X,Y,Y_d
if __name__=='__main__':
	G=numpy.array([[2,-2],[-2,4]])
	b=numpy.array([[-4],[0]])
	c=0
	f = lambda x: 0.5 * (numpy.dot(numpy.dot(x.T, G), x)) + numpy.dot(b.T, x) + c

	f_d = lambda x: numpy.dot(G, x) + b

	x0=numpy.array([[1],[1]])
	print(f_d(x0))
	N=40
	E=10**(-6)
	X, Y, Y_d=SD(x0,G,b,c,N,E)
	# plotting
	X=numpy.array(X)
	Y=numpy.array(Y)
	Y_d=numpy.array(Y_d)
	figure1=pl.figure('trend')
	n=len(Y)
	x=numpy.arange(1,n+1)
	pl.subplot(2,1,1)
	pl.semilogy(x,Y,'r*')
	pl.xlabel('n')
	pl.ylabel('f(x)')
	pl.subplot(2,1,2)
	pl.semilogy(x,Y_d,'g*')
	pl.xlabel('n')
	pl.ylabel("|f'(x)|")
	figure2=pl.figure('behave')
	x=numpy.arange(-110,110,1)
	y=x
	[xx,yy]=numpy.meshgrid(x,y)
	zz=numpy.zeros(xx.shape)
	n=xx.shape[0]
	for i in range(n):
		for j in range(n):
			xxx=numpy.array([xx[i,j],yy[i,j]])
			zz[i,j]=f(xxx.T)
	ax=ax3(figure2)
	ax.contour3D(xx,yy,zz)
	ax.plot3D(X[0,:],X[1,:],Y,'ro--')
	pl.show()
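For this quadratic, the step size also has the closed form $\lambda_k = g_k^T g_k / (g_k^T G g_k)$ shown above, so the symbolic solve can be replaced by a direct computation. A minimal numpy-only sketch of that variant (the function name sd_closed_form and its defaults are illustrative, not part of the code above):

import numpy as np

def sd_closed_form(x0, G, b, tol=1e-6, max_iter=40):
	# steepest descent for f(x) = 0.5*x^T G x + b^T x + c,
	# using the closed-form exact step size lam = g^T g / (g^T G g)
	x = x0.astype(float)
	for k in range(max_iter):
		g = np.dot(G, x) + b  # gradient
		if np.linalg.norm(g) < tol:
			break
		lam = np.dot(g.T, g).item() / np.dot(np.dot(g.T, G), g).item()  # exact line search
		x = x - lam * g
	return x, k

G = np.array([[2., -2.], [-2., 4.]])
b = np.array([[-4.], [0.]])
x_star, n_iter = sd_closed_form(np.array([[1.], [1.]]), G, b)
print(x_star, n_iter)  # should approach the minimizer (4, 2)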

Newton's Method

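For the same quadratic objective, the Newton iteration uses the Hessian $\nabla^2 f(x) = G$ directly:

$$
d_k = -\left[\nabla^2 f(x_k)\right]^{-1} \nabla f(x_k) = -G^{-1}(G x_k + b), \qquad
x_{k+1} = x_k + d_k .
$$

For a quadratic with a positive definite $G$ this lands on the minimizer $x^* = -G^{-1}b$ in a single step, which is exactly what the code below shows.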


The code for the Newton iteration is given below (in the Newton iteration the search direction d is obtained directly from the inverse of the second derivative, the Hessian, multiplied by the first derivative, the gradient):

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__: dwy
# __date__: 2019/6/6
import sympy,numpy
import math
import matplotlib.pyplot as pl
from mpl_toolkits.mplot3d import Axes3D as ax3
def Newton(x0,G,b,c,N,E):
	f = lambda x: 0.5 * (numpy.dot(numpy.dot(x.T, G), x)) + numpy.dot(b.T, x) + c  # objective function
	f_d = lambda x: numpy.dot(G, x) + b  # gradient
	f_dd = lambda x: G  # second derivative (Hessian)
	X=x0
	Y = []  # store the objective values
	Y_d = []  # store the gradient norms
	n = 1
	ee = f_d(x0)
	print(ee)
	e = (ee[0, 0]**2 + ee[1, 0]**2)**0.5  # gradient norm, used as the stopping criterion
	Y.append(f(x0)[0,0])
	Y_d.append(e)
	print('Iteration %d: e=%s' % (n, e))
	while n < N and e > E:
		n=n+1
		print(f_d(x0))
		print(numpy.linalg.inv(f_dd(x0)))
		d = -numpy.dot(numpy.linalg.inv(f_dd(x0)), f_d(x0))  # Newton search direction
		x0 = x0 + d  # update the iterate (full Newton step)
		X=numpy.c_[X,x0]
		Y.append(f(x0)[0,0])
		ee = f_d(x0)
		e = math.pow(math.pow(ee[0,0],2)+math.pow(ee[1,0],2),0.5)
		Y_d.append(e)
		print(ee)
		print('Iteration %d: e=%s' % (n, e))
	return X,Y,Y_d
if __name__=='__main__':
	G=numpy.array([[2,0],[0,50]])
	b=numpy.array([[0],[0]])
	c=0
	f = lambda x: 0.5 * (numpy.dot(numpy.dot(x.T, G), x)) + numpy.dot(b.T, x) + c

	f_d = lambda x: numpy.dot(G, x) + b

	x0=numpy.array([[2],[2]])
	print(f_d(x0))
	N=40
	E=10**(-6)
	X, Y, Y_d=Newton(x0,G,b,c,N,E)
	X=numpy.array(X)
	Y=numpy.array(Y)
	Y_d=numpy.array(Y_d)
	figure1=pl.figure('trend')
	n=len(Y)
	x=numpy.arange(1,n+1)
	pl.subplot(2,1,1)
	pl.semilogy(x,Y,'r*')
	pl.xlabel('n')
	pl.ylabel('f(x)')
	pl.subplot(2,1,2)
	pl.semilogy(x,Y_d,'g*')
	pl.xlabel('n')
	pl.ylabel("|f'(x)|")
	pl.show()

Damped Newton Method (an improvement of Newton's method)

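The damped Newton method keeps the Newton search direction but does not automatically take the full step; instead a step size $\lambda_k$ is chosen by an exact line search along that direction:

$$
d_k = -\left[\nabla^2 f(x_k)\right]^{-1} \nabla f(x_k), \qquad
\lambda_k = \arg\min_{\lambda} f(x_k + \lambda d_k), \qquad
x_{k+1} = x_k + \lambda_k d_k .
$$

In the code the one-dimensional minimization over $\lambda$ is again done symbolically with sympy (the symbol a plays the role of $\lambda_k$).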

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__: dwy
# __date__: 2019/6/6
import sympy,numpy
import math
import matplotlib.pyplot as pl
from mpl_toolkits.mplot3d import Axes3D as ax3
def Newton(x0,G,b,c,N,E):
	f = lambda x: 0.5 * (numpy.dot(numpy.dot(x.T, G), x)) + numpy.dot(b.T, x) + c  # objective function
	f_d = lambda x: numpy.dot(G, x) + b  # gradient
	f_dd = lambda x: G  # second derivative (Hessian)
	X=x0
	Y = []  # store the objective values
	Y_d = []  # store the gradient norms
	n = 1
	ee = f_d(x0)
	a = sympy.Symbol('a', real=True)  # damping factor (step size) for the line search
	print(ee)
	e = (ee[0, 0]**2 + ee[1, 0]**2)**0.5  # gradient norm, used as the stopping criterion
	Y.append(f(x0)[0,0])
	Y_d.append(e)
	print('Iteration %d: e=%s' % (n, e))
	while n < N and e > E:
		n=n+1
		print(f_d(x0))
		print(numpy.linalg.inv(f_dd(x0)))
		d = -numpy.dot(numpy.linalg.inv(f_dd(x0)), f_d(x0))  # Newton search direction
		yy = f(x0 + a * d)  # objective along d, as a function of the step size a
		yy_d = sympy.diff(yy[0, 0], a, 1)
		a0 = float(sympy.solve(yy_d)[0])  # exact line-search step size (damping factor)
		x0 = x0 + a0 * d  # update the iterate
		X=numpy.c_[X,x0]
		Y.append(f(x0)[0,0])
		ee = f_d(x0)
		e = math.pow(math.pow(ee[0,0],2)+math.pow(ee[1,0],2),0.5)
		Y_d.append(e)
		print(ee)
		print('Iteration %d: e=%s' % (n, e))
	return X,Y,Y_d
if __name__=='__main__':
	G = numpy.array([[2, -2], [-2, 4]])
	b = numpy.array([[-4], [0]])
	c = 0
	f = lambda x: 0.5 * (numpy.dot(numpy.dot(x.T, G), x)) + numpy.dot(b.T, x) + c

	f_d = lambda x: numpy.dot(G, x) + b

	x0=numpy.array([[1],[1]])
	print(f_d(x0))
	N=40
	E=10**(-6)
	X, Y, Y_d=Newton(x0,G,b,c,N,E)
	X=numpy.array(X)
	Y=numpy.array(Y)
	Y_d=numpy.array(Y_d)
	figure1=pl.figure('trend')
	n=len(Y)
	x=numpy.arange(1,n+1)
	pl.subplot(2,1,1)
	pl.semilogy(x,Y,'r*')
	pl.xlabel('n')
	pl.ylabel('f(x)')
	pl.subplot(2,1,2)
	pl.semilogy(x,Y_d,'g*')
	pl.xlabel('n')
	pl.ylabel("|f'(x)|")
	pl.show()

Conjugate Gradient Method

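For the quadratic objective, one pass of the conjugate gradient iteration implemented below is

$$
\lambda_k = \arg\min_{\lambda} f(x_k + \lambda p_k) = -\frac{g_k^T p_k}{p_k^T G p_k}, \qquad
x_{k+1} = x_k + \lambda_k p_k, \qquad
g_{k+1} = G x_{k+1} + b,
$$

followed by the update of the search direction

$$
p_{k+1} = -g_{k+1} + \beta_k p_k, \qquad
\beta_k = \frac{g_{k+1}^T G p_k}{p_k^T G p_k} \;\;\text{(conjugate\_gradiant\_method)}
\quad\text{or}\quad
\beta_k = \frac{g_{k+1}^T g_{k+1}}{g_k^T g_k} \;\;\text{(Fletcher--Reeves, F\_R)},
$$

starting from $p_0 = -g_0$. In exact arithmetic the method reaches the minimizer of an $n$-dimensional quadratic in at most $n$ steps.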

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__: dwy
# __date__: 2019/6/6
import numpy as np
import sympy
import math
# Conjugate gradient method. Step 1: compute the step size lambda by an exact 1-D search along direction p; step 2: update x0 and compute the gradient g; step 3: update the conjugacy coefficient and the search direction. Repeat.

def conjugate_gradiant_method(x0,G,b,c,E,p):
	f=lambda x:0.5*(np.dot(np.dot(x.T,G),x))+np.dot(b.T, x) + c
	f_d = lambda x: np.dot(G, x) + b  # gradient
	f_dd = G
	X = x0
	Y = []  # store the objective values
	Y_d = []  # store the gradient norms
	a = sympy.Symbol('a', real=True)  # step size (lambda) for the exact line search
	yy = f(x0 + a * p)  # exact 1-D search along the direction p
	yy_d = sympy.diff(yy[0, 0], a, 1)
	a0 = float(sympy.solve(yy_d)[0])
	n=0
	g=f_d(x0)
	X = np.c_[X, x0]
	Y.append(f(x0)[0, 0])
	gg = math.pow(math.pow(g[0, 0], 2) + math.pow(g[1, 0], 2)+math.pow(g[2, 0], 2), 0.5)
	Y_d.append(gg)
	while gg>E:
		n=n+1
		x0 = x0 + a0 * p  # update the iterate
		g1 = f_d(x0)
		gg = math.pow(math.pow(g1[0, 0], 2) + math.pow(g1[1, 0], 2) + math.pow(g1[2, 0], 2), 0.5)
		if(gg<E):
			break
		else:
			print("Iteration %d search direction:" % n)
			print(p)
			print('Iteration %d: e=%s' % (n, gg))
			# update the conjugacy coefficient and the search direction p
			beta = (np.dot(np.dot(g1.T, G), p)) / (np.dot(np.dot(p.T, G), p))  # beta_k = g_{k+1}^T G p_k / (p_k^T G p_k)
			p = -g1 + beta * p
			yy = f(x0 + a * p)  # exact 1-D search along the new direction p
			yy_d = sympy.diff(yy[0, 0], a, 1)
			a0 = float(sympy.solve(yy_d)[0])
			X = np.c_[X, x0]
			Y.append(f(x0)[0, 0])
			Y_d.append(gg)
	return X,Y,Y_d

def F_R(x0,G,b,c,E,p,n):
	f = lambda x: 0.5 * (np.dot(np.dot(x.T, G), x)) + np.dot(b.T, x) + c
	f_d = lambda x: np.dot(G, x) + b  # gradient
	f_dd = G
	X = x0
	k=0
	Y = []  # store the objective values
	Y_d = []  # store the gradient norms
	a = sympy.Symbol('a', real=True)  # step size (lambda) for the exact line search
	# compute the gradient g
	g = f_d(x0)
	gg = math.pow(math.pow(g[0, 0], 2) + math.pow(g[1, 0], 2) , 0.5)
	p = -g
	print("Iteration %d search direction:" % k)
	print(p)
	print('Iteration %d: e=%s' % (k, gg))
	while k<n+2 and gg>E:
		k+=1
		g = f_d(x0)
		if k == n + 1 and gg > E:  # restart every n iterations
			k = 1
			p = -g  # restart from the steepest descent direction
		yy = f(x0 + a * p)  # exact 1-D search along direction p
		yy_d = sympy.diff(yy[0, 0], a, 1)
		a0 = float(sympy.solve(yy_d)[0])
		x0 = x0 + a0 * p  # update the iterate
		g1 = f_d(x0)
		gg = math.pow(math.pow(g1[0, 0], 2) + math.pow(g1[1, 0], 2), 0.5)
		X = np.c_[X, x0]  # record the trajectory so the returned X, Y, Y_d are populated
		Y.append(f(x0)[0, 0])
		Y_d.append(gg)
		print("Iteration %d search direction:" % k)
		print(p)
		print('Iteration %d: e=%s' % (k, gg))
		# update the F-R coefficient and the search direction p
		alpha = (np.dot(g1.T, g1)) / (np.dot(g.T, g))  # Fletcher-Reeves coefficient
		p=-g1+alpha*p

	return X, Y, Y_d

if __name__=='__main__':
	print('-----------------F-R-----------------------')
	G = np.array([[2,-2], [-2, 4]])
	b = np.array([[-4], [0]])
	c = 0
	n=2
	x0 = np.array([[1], [1]])
	p = np.array([[-1], [-2]])  # initial direction; F_R resets p to -g internally, so this value is not used
	E = 10 ** (-6)
	X, Y, Y_d = F_R(x0, G, b, c, E,p,n)
	print('-----------------Conjugate gradient method-----------------------')
	# conjugate gradient method
	G = np.array([[2,0,0], [0, 1,0],[0,0,1]])
	b = np.array([[0], [0],[0]])
	c = 0
	n = 2
	x0 = np.array([[1], [1],[1]])
	p = np.array([[-1], [-2], [0]])
	E = 10 ** (-6)
	X, Y, Y_d = conjugate_gradiant_method(x0, G, b, c, E, p)
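For the quadratic, both the exact step size and the Fletcher-Reeves coefficient have the closed forms shown above, so the method can also be written with plain numpy and no sympy. A minimal sketch (the function name fr_quadratic and its defaults are illustrative, not part of the code above):

import numpy as np

def fr_quadratic(x0, G, b, tol=1e-6, max_iter=50):
	# Fletcher-Reeves conjugate gradient for f(x) = 0.5*x^T G x + b^T x + c
	x = x0.astype(float)
	g = np.dot(G, x) + b  # gradient
	p = -g  # first direction: steepest descent
	for k in range(max_iter):
		if np.linalg.norm(g) < tol:
			break
		lam = -np.dot(p.T, g).item() / np.dot(np.dot(p.T, G), p).item()  # exact line search
		x = x + lam * p
		g_new = np.dot(G, x) + b
		beta = np.dot(g_new.T, g_new).item() / np.dot(g.T, g).item()  # F-R coefficient
		p = -g_new + beta * p
		g = g_new
	return x, k

G = np.array([[2., -2.], [-2., 4.]])
b = np.array([[-4.], [0.]])
x_star, n_iter = fr_quadratic(np.array([[1.], [1.]]), G, b)
print(x_star, n_iter)  # at most 2 steps for this 2-D quadratic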

Summary

1. This post introduced several unconstrained optimization methods commonly used in engineering optimization; sharing them here may help with understanding the course material.
2. It is recommended to type each program out step by step yourself; you will get more out of it that way.
3. Questions and corrections are welcome. This post is for learning and reference only.