import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# x_shape: (60000, 28, 28)
print('x_shape:', x_train.shape)
# y_shape: (60000,)
print('y_shape:', y_train.shape)
# x_shape: (60000, 28, 28) -> x_shape: (60000, 784)
x_train = x_train.reshape(x_train.shape[0], 784) / 255.0  # shape[0] is 60000; -1 would infer 28*28 = 784; scale pixels to [0, 1]
x_test = x_test.reshape(x_test.shape[0], 784) / 255.0
# Convert the labels to one-hot format: the output is trained over 10 classes
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
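# e.g. label 3 -> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]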
# Build the model: the first layer maps the 784 inputs to 200 neurons
model = Sequential([
    Dense(units=200, input_dim=784, bias_initializer='ones', activation='tanh'),
    Dropout(0.4),
    Dense(units=100, bias_initializer='ones', activation='tanh'),
    Dropout(0.4),
    Dense(units=10, bias_initializer='ones', activation='softmax'),
])
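# parameter count: 784*200 + 200 = 157,000; 200*100 + 100 = 20,100; 100*10 + 10 = 1,010 (178,110 total)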
# Define the optimizer
sgd = SGD(lr=0.2)
# Compile with the optimizer and loss function; track accuracy during training
model.compile(
    optimizer=sgd,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Train the model: 32 samples per batch, so 60000/32 = 1875 batches make up one epoch; train for 3 epochs
model.fit(x_train, y_train, batch_size=32, epochs=3)
# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('test accuracy', accuracy)

loss, accuracy = model.evaluate(x_train, y_train)
print('\ntrain loss', loss)
print('train accuracy', accuracy)
# Compared with the previous program, dropout was added to the layers, and the training data is also evaluated at the end
Dropout:
During the forward pass, each neuron's activation is deactivated with a certain probability p.
This is fairly effective at mitigating overfitting and provides a degree of regularization.
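As a minimal NumPy sketch (my illustration, not code from the post), inverted dropout zeroes each activation with probability p during training and scales the survivors by 1/(1-p), so nothing needs to change at inference; Keras's Dropout layer works this way during training:

import numpy as np

def dropout_forward(a, p=0.4, training=True):
    # a: activations from the previous layer; p: probability of dropping a unit
    if not training:
        return a  # inverted dropout leaves activations untouched at inference
    mask = (np.random.rand(*a.shape) >= p) / (1.0 - p)  # keep with prob 1-p, rescale survivors
    return a * mask

a = np.ones((2, 5))
print(dropout_forward(a))                  # about 40% of entries zeroed, the rest become 1/0.6
print(dropout_forward(a, training=False))  # unchanged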
The following version adds L2 regularization instead:
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.regularizers import l2  # import the L2 regularizer
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# x_shape: (60000, 28, 28)
print('x_shape:', x_train.shape)
# y_shape: (60000,)
print('y_shape:', y_train.shape)
# x_shape: (60000, 28, 28) -> x_shape: (60000, 784)
x_train = x_train.reshape(x_train.shape[0], 784) / 255.0  # shape[0] is 60000; -1 would infer 28*28 = 784; scale pixels to [0, 1]
x_test = x_test.reshape(x_test.shape[0], 784) / 255.0
# Convert the labels to one-hot format: the output is trained over 10 classes
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
# Build the model: the first layer maps the 784 inputs to 200 neurons
model = Sequential([
    Dense(units=200, input_dim=784, bias_initializer='ones', activation='tanh', kernel_regularizer=l2(0.0003)),
    Dense(units=100, bias_initializer='ones', activation='tanh', kernel_regularizer=l2(0.0003)),
    Dense(units=10, bias_initializer='ones', activation='softmax', kernel_regularizer=l2(0.0003)),
])
# Define the optimizer
sgd = SGD(lr=0.2)
# Compile with the optimizer and loss function; track accuracy during training
model.compile(
    optimizer=sgd,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Train the model: 32 samples per batch, so 60000/32 = 1875 batches make up one epoch; train for 3 epochs
model.fit(x_train, y_train, batch_size=32, epochs=3)
# Evaluate the model
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('test accuracy', accuracy)

loss, accuracy = model.evaluate(x_train, y_train)
print('\ntrain loss', loss)
print('train accuracy', accuracy)
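For reference, kernel_regularizer=l2(0.0003) adds 0.0003 * sum(w**2) for each kernel to the training loss, which pushes the weights toward small values. A minimal sketch of that penalty term (my illustration):

import numpy as np

def l2_penalty(weights, lam=0.0003):
    # the extra term added to the loss: lam * sum of squared weights, per kernel
    return lam * sum(np.sum(w ** 2) for w in weights)

w1 = np.random.randn(784, 200)  # kernel of the first Dense layer
w2 = np.random.randn(200, 100)  # kernel of the second Dense layer
print('L2 penalty added to the loss:', l2_penalty([w1, w2]))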
Adam optimizer:
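The post stops at this heading. As a hedged sketch of what presumably follows (using Keras's built-in Adam; lr=0.001 is its default), only the optimizer line changes:

from keras.optimizers import Adam

# Adam adapts a per-parameter learning rate from running moment estimates
model.compile(
    optimizer=Adam(lr=0.001),
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
model.fit(x_train, y_train, batch_size=32, epochs=3)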