28*28 input units, 200 hidden units, 10 output units.
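A quick shape check (my own working, consistent with the code below): each image arrives flattened to 28*28 = 784 values, so a batch of 100 images is a (100, 784) matrix; multiplying by the (784, 200) hidden-layer weights gives (100, 200) hidden activations, and multiplying those by the (200, 10) output weights gives (100, 10) outputs, one softmax probability per digit class.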
Code:
# -*- coding: utf-8 -*-
"""created on fri may 17 19:39:39 2019
@author: 666
"""import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# load the MNIST dataset
mnist = input_data.read_data_sets("f:/ai/aitest/mnist_data", one_hot=True)
# size of each mini-batch
batch_size = 100
# number of batches per epoch
n_batch = mnist.train.num_examples // batch_size
# define two placeholders for the images and the labels
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
# build a simple neural network:
# 784 input neurons, a 200-unit hidden layer, 10 output neurons
# define the hidden layer
w1 = tf.Variable(tf.zeros([784, 200]))
b1 = tf.Variable(tf.zeros([200]))
l1 = tf.nn.relu(tf.matmul(x,w1) + b1)
# define the output layer
w = tf.Variable(tf.zeros([200, 10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(l1,w)+b)
# quadratic (mean squared error) cost function
loss = tf.reduce_mean(tf.square(y-prediction))
# minimize the loss with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# op that initializes all the variables
init = tf.global_variables_initializer()
# check whether the predicted label matches the true label
# tf.argmax(a, 1) returns the index of the largest value of a along axis 1, i.e. the predicted class
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
# compute the accuracy; tf.cast converts the bool tensor to float32
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            # batch_xs holds the images, batch_ys holds the labels
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys})
        # evaluate on the test set after each epoch
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(epoch) + ", Testing Accuracy " + str(acc))
This program is only meant to get familiar with the code for building a network; ignore the accuracy.
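The second program below extends the same network with tf.nn.dropout. As a quick illustration of what that op does in TF 1.x (my own minimal sketch, not part of the original programs): each element is kept with probability keep_prob and the kept elements are scaled by 1/keep_prob, so the expected magnitude of the activations is unchanged.

import tensorflow as tf
# a tensor of ones, so the effect of dropout is easy to see
x = tf.ones([1, 10])
dropped = tf.nn.dropout(x, 0.5)   # keep each element with probability 0.5
with tf.Session() as sess:
    print(sess.run(dropped))      # roughly half the entries are 0.0, the rest are scaled to 2.0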
# -*- coding: utf-8 -*-
"""created on tue may 21 19:10:24 2019
@author: miaow
"""import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# load the MNIST dataset
mnist = input_data.read_data_sets("f:/ai/aitest/mnist_data", one_hot=True)
# size of each mini-batch
batch_size = 100
# number of batches per epoch
n_batch = mnist.train.num_examples // batch_size
# define placeholders for the images, the labels and the dropout keep probability
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
# define the first hidden layer
w1 = tf.Variable(tf.truncated_normal([784, 2000], stddev=0.1))
b1 = tf.Variable(tf.zeros([2000]) + 0.1)
l1 = tf.nn.tanh(tf.matmul(x,w1)+b1)
l1_drop = tf.nn.dropout(l1,keep_prob)
# define the second hidden layer
w2 = tf.Variable(tf.truncated_normal([2000, 2000], stddev=0.1))
b2 = tf.Variable(tf.zeros([2000]) + 0.1)
l2 = tf.nn.tanh(tf.matmul(l1_drop,w2)+b2)
l2_drop = tf.nn.dropout(l2,keep_prob)
# define the output layer
w3 = tf.Variable(tf.zeros([2000, 10]))
b3 = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(l2_drop, w3) + b3)
# quadratic (mean squared error) cost function
loss = tf.reduce_mean(tf.square(y-prediction))
# minimize the loss with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
# op that initializes all the variables
init = tf.global_variables_initializer()
# check whether the predicted label matches the true label
# tf.argmax(a, 1) returns the index of the largest value of a along axis 1, i.e. the predicted class
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
# compute the accuracy; tf.cast converts the bool tensor to float32
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):
        for batch in range(n_batch):
            # batch_xs holds the images, batch_ys holds the labels
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # apply dropout during training (keep_prob = 0.7 here is an assumed value)
            sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.7})
        # evaluate with dropout switched off (keep_prob = 1.0)
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})
        train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0})
        print("Iter " + str(epoch) + ", Testing Accuracy " + str(test_acc) + ", Training Accuracy " + str(train_acc))