from keras.datasets import mnist
from matplotlib import pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
# Load the MNIST dataset from Keras
(x_train, y_train), (x_validation, y_validation) = mnist.load_data()

# Display four handwritten digit samples
plt.subplot(221)
plt.imshow(x_train[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(x_train[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(x_train[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(x_train[3], cmap=plt.get_cmap('gray'))
# plt.imshow(x_train[3])
plt.show()

# Set the random number seed for reproducibility
seed = 7
np.random.seed(seed)
Using TensorFlow backend.
# To determine the number of input neurons: x.shape[1] is the number of rows,
# x.shape[2] is the number of columns, and x.shape[0] is the number of samples in x
num_pixels = x_train.shape[1] * x_train.shape[2]
print(x_train.shape[0])
print(num_pixels)
60000
784
# Flatten each 28x28 image into a vector of num_pixels values
x_train = x_train.reshape(x_train.shape[0], num_pixels).astype('float32')
x_validation = x_validation.reshape(x_validation.shape[0], num_pixels).astype('float32')
# Normalize pixel values from [0, 255] to [0, 1]
x_train = x_train / 255
x_validation = x_validation / 255
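As a quick sanity check (not part of the original listing), the dtype and value range can be verified after scaling:

# Hypothetical check: values should now be float32 in the range [0, 1]
print(x_train.dtype, x_train.min(), x_train.max())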
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train)
y_validation = np_utils.to_categorical(y_validation)
# Get the number of neurons in the output layer
num_classes = y_validation.shape[1]
print(num_classes)
10
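For intuition, here is a small illustration (a sketch added here, not from the original post) of what np_utils.to_categorical does to a single label:

# Hypothetical mini example: the label 3 becomes a one-hot row vector of length 10
print(np_utils.to_categorical([3], num_classes=10))  # [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]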
# Define the MLP model
def create_model():
    # Build the model
    model = Sequential()
    model.add(Dense(units=num_pixels, input_dim=num_pixels, kernel_initializer='normal', activation='relu'))
    model.add(Dense(units=784, kernel_initializer='normal', activation='relu'))
    model.add(Dense(units=num_classes, kernel_initializer='normal', activation='softmax'))
    # Compile the model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
model = create_model()
model.fit(x_train, y_train, epochs=10, batch_size=200)
score = model.evaluate(x_validation, y_validation)
print('mlp %.2f%%' % (score[1] * 100))
Epoch 1/10
60000/60000 [==============================] - 2s 39us/step - loss: 0.2161 - acc: 0.9346
Epoch 2/10
60000/60000 [==============================] - 2s 35us/step - loss: 0.0745 - acc: 0.9774
Epoch 3/10
60000/60000 [==============================] - 2s 35us/step - loss: 0.0452 - acc: 0.9852
Epoch 4/10
60000/60000 [==============================] - 2s 35us/step - loss: 0.0297 - acc: 0.9904
Epoch 5/10
60000/60000 [==============================] - 2s 36us/step - loss: 0.0245 - acc: 0.9921
Epoch 6/10
60000/60000 [==============================] - 2s 36us/step - loss: 0.0198 - acc: 0.9939
Epoch 7/10
60000/60000 [==============================] - 2s 36us/step - loss: 0.0153 - acc: 0.9949
Epoch 8/10
60000/60000 [==============================] - 2s 35us/step - loss: 0.0145 - acc: 0.9952
Epoch 9/10
60000/60000 [==============================] - 2s 35us/step - loss: 0.0157 - acc: 0.9949
Epoch 10/10
60000/60000 [==============================] - 2s 35us/step - loss: 0.0085 - acc: 0.9974
10000/10000 [==============================] - 0s 46us/step
mlp 98.23%
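As a follow-up (a minimal usage sketch, not part of the original post), the trained model can classify a single validation image; np.argmax recovers the digit from the softmax output and undoes the one-hot encoding of the label:

# Hypothetical example: predict the class of the first validation sample
probabilities = model.predict(x_validation[0:1])       # shape (1, 10), softmax probabilities
predicted_digit = np.argmax(probabilities, axis=1)[0]  # index of the highest probability
true_digit = np.argmax(y_validation[0])                # undo the one-hot encoding
print('predicted: %d, actual: %d' % (predicted_digit, true_digit))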