import numpy as np
import math
from sklearn import datasets
from collections import Counter

# Sentinel value returned by the cost function when log(0) would otherwise occur
infinity = float(-2**31)
# Implementation of logistic regression

def sigmodformatrix(xb, thetas):
    # Sigmoid activation for every row of the matrix xb
    params = -xb.dot(thetas)
    r = np.zeros(params.shape[0])  # returns an np array
    for i in range(len(r)):
        r[i] = 1 / (1 + math.exp(params[i]))
    return r
def sigmodformatrix2(xb, thetas):
    # Same as sigmodformatrix, but thresholds at 0.5 to produce 0/1 class labels
    params = -xb.dot(thetas)
    r = np.zeros(params.shape[0])  # returns an np array
    for i in range(len(r)):
        r[i] = 1 / (1 + math.exp(params[i]))
        if r[i] >= 0.5:
            r[i] = 1
        else:
            r[i] = 0
    return r
def sigmod(xi, thetas):
    # Sigmoid activation for a single sample xi
    params = -np.sum(xi * thetas)
    r = 1 / (1 + math.exp(params))
    return r
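The three helpers above apply the sigmoid one sample at a time with math.exp. As a side note, the same result can be computed in a single vectorized NumPy call; the sketch below is my own variant, not part of the original code (the clip bound of 500 is an added safeguard against overflow in np.exp):

def sigmoid_vectorized(xb, thetas):
    # Computes the same values as sigmodformatrix, without a Python loop.
    # Clipping the logits keeps np.exp from overflowing for extreme inputs.
    z = np.clip(xb.dot(thetas), -500, 500)
    return 1 / (1 + np.exp(-z))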
class LinearLogisticRegression(object):
    thetas = None
    m = 0

    # Training
    def fit(self, x, y, alpha=0.01, accuracy=0.00001):
        # Insert a first column of ones to form the xb matrix
        self.thetas = np.full(x.shape[1] + 1, 0.5)
        self.m = x.shape[0]
        a = np.full((self.m, 1), 1)
        xb = np.column_stack((a, x))
        dimension = x.shape[1] + 1
        # Gradient descent iterations
        count = 1
        while True:
            oldj = self.costfunc(xb, y)
            # Note: the predictions below use the parameters from before this update
            c = sigmodformatrix(xb, self.thetas) - y
            for j in range(dimension):
                # Batch update: theta_j -= alpha * sum_i (h(x_i) - y_i) * x_ij
                self.thetas[j] = self.thetas[j] - alpha * np.sum(c * xb[:, j])
            newj = self.costfunc(xb, y)
            if newj == oldj or math.fabs(newj - oldj) < accuracy:
                print("Cost function has reached its minimum, exiting!")
                print("Converged to:", newj)
                break
            print("Iteration", count)
            print("Change in cost from the previous iteration:", (newj - oldj))
            count += 1

    # Cost function
    def costfunc(self, xb, y):
        # Cross-entropy cost averaged over all m training samples
        total = 0.0
        for i in range(self.m):
            ypre = sigmod(xb[i, :], self.thetas)
            # print("ypre:", ypre)
            if ypre == 1 or ypre == 0:
                # log(0) is undefined, so bail out with the sentinel value
                return infinity
            total += y[i] * math.log(ypre) + (1 - y[i]) * math.log(1 - ypre)
        return -1 / self.m * total

    # Prediction
    def predict(self, x):
        a = np.full((len(x), 1), 1)
        xb = np.column_stack((a, x))
        return sigmodformatrix2(xb, self.thetas)
    def score(self, x_test, y_test):
        # Fraction of samples whose predicted label matches the true label
        y_predict = self.predict(x_test)
        re = (y_test == y_predict)
        re1 = Counter(re)
        a = re1[True] / (re1[True] + re1[False])
        return a
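For reference, the cost that costfunc computes is the standard cross-entropy, and the update in fit follows its gradient; the code drops the 1/m factor from the gradient step, which simply rescales the learning rate alpha:

$$J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\left[y_i\log h_\theta(x_i) + (1-y_i)\log\left(1-h_\theta(x_i)\right)\right],\qquad h_\theta(x)=\frac{1}{1+e^{-\theta^\top x}}$$

$$\frac{\partial J}{\partial \theta_j}=\frac{1}{m}\sum_{i=1}^{m}\left(h_\theta(x_i)-y_i\right)x_{ij}$$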
# if __name__ == "__main__":
from sklearn.model_selection import train_test_split

iris = datasets.load_iris()
x = iris['data']
y = iris['target']
# Keep only classes 0 and 1: this implementation is a binary classifier
x = x[y != 2]
y = y[y != 2]
x_train, x_test, y_train, y_test = train_test_split(x, y)

mylogstic = LinearLogisticRegression()
mylogstic.fit(x_train, y_train)
y_predict = mylogstic.predict(x_test)
print("Parameters:", mylogstic.thetas)
print("Accuracy on test data:", mylogstic.score(x_test, y_test))
print("Accuracy on training data:", mylogstic.score(x_train, y_train))

# Logistic regression in sklearn
from sklearn.linear_model import LogisticRegression
print("Logistic regression in sklearn:")
logr = LogisticRegression()
logr.fit(x_train, y_train)
print("Accuracy:", logr.score(x_test, y_test))
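One caveat when reading the comparison above: sklearn's LogisticRegression applies L2 regularization by default (its strength is set through the inverse-regularization parameter C, default 1.0), whereas the hand-written gradient descent is unregularized, so the fitted parameters will generally differ. A minimal sketch of a closer comparison, using a deliberately large C to weaken the penalty (the value 1e6 is an arbitrary choice here):

logr_weak = LogisticRegression(C=1e6)  # large C: effectively almost no L2 penalty
logr_weak.fit(x_train, y_train)
print("Accuracy with weak regularization:", logr_weak.score(x_test, y_test))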