# Imports for the Gluon-based Fashion-MNIST classifier below.
# NOTE(review): the original source had two import statements fused onto one
# line ("from mxnet import gluonfrom mxnet import ndarray as nd"); split and
# regrouped here (third-party, then submodule imports).
import matplotlib.pyplot as plt
import mxnet as mx
from mxnet import autograd
from mxnet import gluon
from mxnet import ndarray as nd
def transform(data, label):
    """Scale image pixels into [0, 1] as float32 and cast labels to float32."""
    scaled = data.astype('float32') / 255
    return scaled, label.astype('float32')
# Load the Fashion-MNIST train/test splits; `transform` is applied to every
# (image, label) pair on access.
# NOTE(review): the scraped source lowercased identifiers; restored the real
# API names FashionMNIST / True / False.
mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)
def show_images(images):
    """Render a row of 28x28 images side by side with matplotlib.

    Parameters
    ----------
    images : array of shape (n, ...) where each element reshapes to (28, 28)
        Batch of images to display (MXNet NDArray; `.asnumpy()` is called).
    """
    n = images.shape[0]
    _, figs = plt.subplots(1, n, figsize=(15, 15))
    for i in range(n):
        figs[i].imshow(images[i].reshape((28, 28)).asnumpy())
        # Hide the axis ticks -- these are image thumbnails, not plots.
        # NOTE(review): original had lowercased "false"; restored False.
        figs[i].axes.get_xaxis().set_visible(False)
        figs[i].axes.get_yaxis().set_visible(False)
    plt.show()
def get_text_labels(label):
    """Map numeric Fashion-MNIST class indices (0-9) to Chinese text labels.

    Parameters
    ----------
    label : iterable of numbers
        Class indices; each is truncated to int before lookup.

    Returns
    -------
    list of str
    """
    # NOTE(review): in the scraped source the closing bracket and the return
    # statement were fused onto one line ("]return ..."); split here.
    text_labels = [
        't 恤', '長 褲', '套頭衫', '裙 子', '外 套',
        '涼 鞋', '襯 衣', '運動鞋', '包 包', '短 靴'
    ]
    return [text_labels[int(i)] for i in label]
# Sanity check: show the first ten training examples with their labels.
data, label = mnist_train[0:10]
print('example shape: ', data.shape, 'label:', label)
show_images(data)
print(get_text_labels(label))

# Mini-batch iterators; only the training split is shuffled.
# NOTE(review): original had lowercased "dataloader"/"true"/"false";
# restored gluon.data.DataLoader / True / False.
batch_size = 256
train_data = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)
# Model: a one-hidden-layer MLP (flatten -> 256 ReLU units -> 10 classes).
# NOTE(review): original had lowercased class names; restored Sequential /
# Flatten / Dense / SoftmaxCrossEntropyLoss / Trainer.
net = gluon.nn.Sequential()
with net.name_scope():
    net.add(gluon.nn.Flatten())
    net.add(gluon.nn.Dense(256, activation="relu"))
    net.add(gluon.nn.Dense(10))
net.initialize()

# Softmax and cross-entropy combined in one op for numerical stability.
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()

# Optimizer wrapper.
# NOTE(review): the original line ended with a dangling comma
# ("gluon.trainer(..., 'sgd', )") -- the optimizer-params argument was lost
# in transcription. 0.5 is the learning rate used by the tutorial series this
# code follows; confirm against the original article.
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})
def accuracy(output, label):
    """Return the fraction of rows in `output` whose argmax equals `label`."""
    predictions = output.argmax(axis=1)
    return nd.mean(predictions == label).asscalar()
def _get_batch(batch):
    """Unpack a batch into a (data, label) pair.

    Accepts either an ``mx.io.DataBatch`` (from symbolic-API data iterators,
    which wrap data/label in lists) or a plain ``(data, label)`` tuple (from
    a gluon DataLoader).
    NOTE(review): original had lowercased "mx.io.databatch"; restored
    mx.io.DataBatch.
    """
    if isinstance(batch, mx.io.DataBatch):
        data = batch.data[0]
        label = batch.label[0]
    else:
        data, label = batch
    return data, label
def evaluate_accuracy(data_iterator, net):
    """Return the mean per-batch `accuracy` of `net` over `data_iterator`.

    Supports both gluon DataLoader objects and ``mx.io.MXDataIter`` (the
    latter must be reset before it can be iterated again).
    NOTE(review): original had lowercased "mx.io.mxdataiter"; restored
    mx.io.MXDataIter.
    """
    if isinstance(data_iterator, mx.io.MXDataIter):
        data_iterator.reset()
    acc = 0.
    n_batches = 0
    for batch in data_iterator:
        data, label = _get_batch(batch)
        output = net(data)
        acc += accuracy(output, label)
        n_batches += 1
    # Guard the division: the original divided by a loop variable that is
    # unbound when the iterator is empty.
    return acc / n_batches if n_batches else 0.
# Training loop: five epochs of mini-batch SGD.
for epoch in range(5):
    cumulative_loss = 0.
    cumulative_acc = 0.
    for data, label in train_data:
        # Record the forward pass so autograd can compute gradients.
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        # One optimizer step; gradients are normalized by the batch size.
        trainer.step(batch_size)
        cumulative_loss += nd.mean(loss).asscalar()
        cumulative_acc += accuracy(output, label)
    test_acc = evaluate_accuracy(test_data, net)
    print("epoch %d. loss: %f, train acc %f, test acc %f" % (
        epoch, cumulative_loss / len(train_data),
        cumulative_acc / len(train_data), test_acc))
# Inspect predictions on the first ten test images.
data, label = mnist_test[0:10]
show_images(data)
print('true labels')
print(get_text_labels(label))
preds = net(data).argmax(axis=1)
print('predicted labels')
print(get_text_labels(preds.asnumpy()))
有變化的地方,已經加上了注釋。執行效果,跟上一篇完全相同,就不重複貼圖了
機器學習筆記 4 多類邏輯回歸 使用gluon
import matplotlib.pyplot as plt import mxnet as mx from mxnet import gluon from mxnet import ndarray as nd from mxnet import autograd def transform da...
機器學習筆記 7 邏輯回歸
邏輯回歸 logistic 實際上是線性回歸推導出來的。而且是一種分類學習方法。由於簡單的二分類0 1影象不連續,我們想找到一種連續且可微的函式替換他。logistic function 正是這樣一個函式 $y = \frac{1}{1+e^{-z}}$,看看圖 是通過邏輯回歸根據花萼和花瓣的大小區別出是 0花 還是 1花 codi...
機器學習 邏輯回歸
邏輯回歸 線性回歸的式子,作為邏輯回歸的輸入 適用場景 二分類 線性回歸的輸入 sigmoid函式 分類 0,1 概率值 計算公式 當目標值為1時 損失函式的變化 當目標值為0時 損失函式的變化 下面用一個例項來說明邏輯回歸的用法 癌症概率 部分資料的截圖如下 資料描述 699條樣本,共11列資料,...