import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import math
from mpl_toolkits.mplot3d import Axes3D
# Configure how matplotlib displays inside Jupyter (tk = separate window, not inline/embedded)
%matplotlib tk
# Fix CJK character display (SimHei font) and the minus-sign glyph
mpl.rcParams['font.sans-serif'] = [u'SimHei']
mpl.rcParams['axes.unicode_minus'] = False
# Original 1-D objective function (matches the plot title below: y = 0.5*(θ-0.25)^2)
def fun_1(x):
    return 0.5 * (x - 0.25) ** 2

# Its derivative
def deriv_fun_1(x):
    return x - 0.25
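# (Added note, not in the original) Gradient descent applies the update
#   x_{k+1} = x_k - alpha * f'(x_k);
# for this quadratic the error contracts as
#   x_{k+1} - 0.25 = (1 - alpha) * (x_k - 0.25),
# so any 0 < alpha < 2 converges. A quick central-difference sanity check of
# the hand-written derivative (illustrative only):
eps = 1e-6
assert abs(deriv_fun_1(1.0) - (fun_1(1.0 + eps) - fun_1(1.0 - eps)) / (2 * eps)) < 1e-6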
# Solve using gradient descent
gd_x = []
gd_y = []
x = 4
alpha = 0.5  # learning rate alpha starts at 0.5
fun_change = fun_1(x)
fun_current = fun_change
gd_x.append(x)          # record the starting point so the plotted path begins there
gd_y.append(fun_current)
iter_num = 0
while fun_change > 1e-10 and iter_num < 10:
    iter_num += 1
    x = x - alpha * deriv_fun_1(x)
    tmp = fun_1(x)
    fun_change = np.abs(fun_current - tmp)
    fun_current = tmp
    gd_x.append(x)          # record each iterate for the path plot below
    gd_y.append(fun_current)
print(u"最終結果為:(%.5f,%.5f)" % (x,fun_current))
print(u"迭代過程中x的取值,迭代次數:%d" % iter_num)
print(gd_x)
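# (Added check, not in the original) Because the objective is quadratic, the
# iterates admit a closed form,
#   x_k = 0.25 + (1 - alpha)^k * (x_0 - 0.25),
# which the numerical result above can be compared against (x_0 = 4 from the
# initialization above):
x_closed = 0.25 + (1 - alpha) ** iter_num * (4 - 0.25)
print("closed-form x after %d steps: %.5f" % (iter_num, x_closed))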
# Build data for the curve (capitalized names so the scalar solution x is kept for the title)
X = np.arange(-4, 4.5, 0.05)
Y = np.array(list(map(lambda t: fun_1(t), X)))
# Plot; pyplot calls operate in place on the current figure
plt.figure(facecolor="w")  # white figure background
plt.plot(X, Y, 'g-', linewidth=2)
plt.plot(gd_x, gd_y, 'ro--', linewidth=2)
plt.title(u"Function $y=0.5 * (θ - 0.25)^2$;\nlearning rate: %.3f; final solution: (%.3f, %.3f); iterations: %d" % (alpha, x, fun_current, iter_num))
plt.show()
# Original 2-D objective function
def fun_2(x, y):
    return 0.6 * (x + y) ** 2 - x * y

# Partial derivatives
def deriv_fun_x(x, y):
    return 0.6 * 2 * (x + y) - y

def deriv_fun_y(x, y):
    return 0.6 * 2 * (x + y) - x
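# (Added note, not in the original) fun_2 has the constant Hessian
# [[1.2, 0.2], [0.2, 1.2]] with eigenvalues 1.4 and 1.0, so plain gradient
# descent converges for 0 < alpha < 2/1.4 ≈ 1.43. The alpha = 1.1 used below
# therefore still converges, but it overshoots and zig-zags toward the
# minimum at (0, 0).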
# Solve using gradient descent
gd_x1 = []
gd_y1 = []
gd_z = []
x1 = 4
y1 = 4
alpha = 1.1
fun_change = fun_2(x1, y1)
fun_current = fun_change
gd_x1.append(x1)         # record the starting point for the path plot
gd_y1.append(y1)
gd_z.append(fun_current)
iter_num = 0
while fun_change > 1e-10 and iter_num < 100:
    iter_num += 1
    prex1 = x1
    prey1 = y1
    x1 = x1 - alpha * deriv_fun_x(prex1, prey1)
    y1 = y1 - alpha * deriv_fun_y(prex1, prey1)
    tmp = fun_2(x1, y1)
    fun_change = np.abs(fun_current - tmp)
    fun_current = tmp
    gd_x1.append(x1)         # record each iterate for the path plot below
    gd_y1.append(y1)
    gd_z.append(fun_current)
print(u"最終結果為:(%.5f,%.5f,%.5f)" % (x1,y1,fun_current))
print(u"迭代過程中x的取值,迭代次數:%d" % iter_num)
print(gd_x1)
print(gd_y1)
# Build data for the surface (capitalized names so the scalar solution x1, y1 is kept for the title)
X1 = np.arange(-4, 4.5, 0.2)
Y1 = np.arange(-4, 4.5, 0.2)
X1, Y1 = np.meshgrid(X1, Y1)  # expand both axes into a grid
Z = np.array(list(map(lambda t: fun_2(t[0], t[1]), zip(X1.flatten(), Y1.flatten()))))
Z.shape = X1.shape
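# (Added note, not in the original) Since fun_2 is plain NumPy-compatible
# arithmetic, the flatten/map/reshape above could equivalently be the direct
# vectorized call Z = fun_2(X1, Y1); the explicit version is kept to mirror
# the original.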
# Plot; plt.figure is not in-place here, it returns a new white figure object
fig = plt.figure(facecolor="w")
ax = Axes3D(fig)  # wrap the figure in a 3-D axes object ax
ax.plot_surface(X1, Y1, Z, rstride=1, cstride=1, cmap=plt.cm.jet)
ax.plot(gd_x1, gd_y1, gd_z, 'bo--')
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.set_title(u"Function $y=0.6*(θ1+θ2)^2-θ1*θ2$;\nlearning rate: %.3f; final solution: (%.3f, %.3f, %.3f); iterations: %d" % (alpha, x1, y1, fun_current, iter_num))
plt.show()
# A new 2-D function
def fun_3(x, y):
    return 2 * (4 * x - 0.25) ** 2 + (2 * y - 0.25) ** 2 + (x + 2 * y - 1.25)

# Partial derivatives
def deriv_funx(x, y):
    return 2 * 2 * (4 * x - 0.25) * 4 + 1

def deriv_funy(x, y):
    return 2 * (2 * y - 0.25) * 2 + 2
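# (Added check, not in the original) Verify both hand-written partials of
# fun_3 against central finite differences at an arbitrary sample point;
# illustrative only:
eps = 1e-6
assert abs(deriv_funx(1.0, 2.0) - (fun_3(1.0 + eps, 2.0) - fun_3(1.0 - eps, 2.0)) / (2 * eps)) < 1e-5
assert abs(deriv_funy(1.0, 2.0) - (fun_3(1.0, 2.0 + eps) - fun_3(1.0, 2.0 - eps)) / (2 * eps)) < 1e-5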
# Solve using gradient descent
gd_x2 = []
gd_y2 = []
gd_z2 = []
alpha = 0.01
x2 = 4
y2 = 4
fun_change = fun_3(x2, y2)
fun_current = fun_change
gd_x2.append(x2)          # record the starting point for the path plot
gd_y2.append(y2)
gd_z2.append(fun_current)
iter_num = 0
while fun_change > 1e-10 and iter_num < 10:
    iter_num += 1
    prex2 = x2
    prey2 = y2
    x2 = x2 - alpha * deriv_funx(prex2, prey2)
    y2 = y2 - alpha * deriv_funy(prex2, prey2)
    tmp = fun_3(x2, y2)
    fun_change = np.abs(fun_current - tmp)
    fun_current = tmp
    gd_x2.append(x2)          # record each iterate for the path plot below
    gd_y2.append(y2)
    gd_z2.append(fun_current)
print(u"最終結果為:(%.3f,%.3f,%.3f)" % (x2,y2,fun_current))
print(u"迭代次數為:%d" % iter_num)
print("迭代過程中的x2的值:" )
print(gd_x2)
print("迭代過程中的y2的值:" )
print(gd_y2)
# Build data for the surface
X2 = np.arange(-4, 4.5, 0.2)
Y2 = np.arange(-4, 4.5, 0.2)
X2, Y2 = np.meshgrid(X2, Y2)
Z2 = np.array(list(map(lambda t: fun_3(t[0], t[1]), zip(X2.flatten(), Y2.flatten()))))
Z2.shape = X2.shape
# Plot
fig = plt.figure(facecolor="w")
ax = Axes3D(fig)
ax.plot_surface(X2, Y2, Z2, rstride=1, cstride=1, cmap=plt.get_cmap("rainbow"))
ax.plot(gd_x2, gd_y2, gd_z2, "ro--")
ax.set_title(u"Gradient descent iteration example")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()