# Single-layer softmax-regression classifier for MNIST (TensorFlow 1.x API).
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the dataset (downloaded into ./mnist_data on first run);
# one_hot=True gives labels as 10-element one-hot vectors.
mnist = input_data.read_data_sets("mnist_data", one_hot=True)

# Size of each mini-batch.
batch_size = 100
# Number of whole batches per epoch.
n_batch = mnist.train.num_examples // batch_size

# Placeholders: flattened 28x28 images and one-hot labels for the 10 digits.
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Build the network: a single affine layer followed by softmax.
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x, w) + b)

# Quadratic (mean-squared-error) cost function.
loss = tf.reduce_mean(tf.square(y - prediction))
# Train with plain gradient descent, learning rate 0.2.
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

# Op that initializes all variables.
initial = tf.global_variables_initializer()

# correct_prediction is a boolean vector; argmax returns the index of the
# largest entry along axis 1 (the predicted / true class).
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy: cast True -> 1.0, False -> 0.0, then average.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    sess.run(initial)
    for epoch in range(21):
        # One pass over the training set in mini-batches.
        for batch in range(n_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        # Evaluate on the full test set after each epoch.
        acc = sess.run(accuracy,
                       feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("iter" + str(epoch) + ",test accuracy" + str(acc))
# 二次代價函式:
iter0,test accuracy0.8304
iter1,test accuracy0.8704
iter2,test accuracy0.8813
iter3,test accuracy0.8883
iter4,test accuracy0.895
iter5,test accuracy0.8968
iter6,test accuracy0.8992
iter7,test accuracy0.9022
iter8,test accuracy0.9037
iter9,test accuracy0.9054
iter10,test accuracy0.9068
iter11,test accuracy0.9071
iter12,test accuracy0.9074
iter13,test accuracy0.909
iter14,test accuracy0.9093
iter15,test accuracy0.9107
iter16,test accuracy0.9122
iter17,test accuracy0.9128
iter18,test accuracy0.9127
iter19,test accuracy0.9143
iter20,test accuracy0.9136
# 交叉熵代價函式 效果更好一點
iter0,test accuracy0.8242
iter1,test accuracy0.8831
iter2,test accuracy0.8993
iter3,test accuracy0.9048
iter4,test accuracy0.9088
iter5,test accuracy0.9093
iter6,test accuracy0.9122
iter7,test accuracy0.9131
iter8,test accuracy0.9146
iter9,test accuracy0.9164
iter10,test accuracy0.9172
iter11,test accuracy0.918
iter12,test accuracy0.9201
iter13,test accuracy0.9192
iter14,test accuracy0.9196
iter15,test accuracy0.921
iter16,test accuracy0.9211
iter17,test accuracy0.92
iter18,test accuracy0.9209
iter19,test accuracy0.9214
iter20,test accuracy0.922
python學習筆記3
1 字串的格式化操作 format variable format 是格式的樣式,variable 是要被格式化的變數 format 有如下幾種形式 o 將數值轉換為八進位制 x 將數值轉換為十六進製制 d 整數轉換符號 s 字串轉換符號 如下 o 100 144 o 100 加入乙個 號可輸出標準...
python 學習筆記3
2014 10 23 三種內建的資料結構 列表 元組和字典 list len list list.sort 修改列表本身,而不是返回乙個修改後的列表 列表時可變的,字串不可變 for item in list 遍歷 del list 0 刪除專案,自動補齊。print語句的結尾的逗號消除列印的換行符...
Python學習筆記3
條件和迴圈語句 生成隨機數的函式,此處說明一下,python是通過乙個方程來產生隨機數的,所以這些隨機數並不是真正隨機的,產生他們的方式叫做偽隨機。載入模組,本人理解模組類似c裡的庫,模組 module 含有可供其他程式使用的 的檔案,這些模組通常是按照一定的相關性進行組織的,載入模組使用 impo...