1. 普通機器學習：擬合函式係數（y = 0.1x + 0.3）
# -*- coding:gbk -*-
import tensorflow as tf
import numpy as np

# Generate training data that follows y = 0.1x + 0.3.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

### Start building the TensorFlow graph ###
# TF1.x names are capitalized: tf.Variable / tf.Session /
# tf.train.GradientDescentOptimizer (the scraped text lower-cased them).
weight = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = weight * x_data + biases
# Mean squared error. In the scraped original this line was fused into a
# comment, which would make optimizer.minimize(loss) fail with NameError.
loss = tf.reduce_mean(tf.square(y - y_data))
# Gradient-descent optimizer, learning rate 0.5.
optimizer = tf.train.GradientDescentOptimizer(0.5)
# Training op: one step of minimizing the loss.
train = optimizer.minimize(loss)
# Variable initializer (TF1.x; deprecated alias of
# tf.global_variables_initializer).
init = tf.initialize_all_variables()
### Graph construction finished ###

sess = tf.Session()
sess.run(init)  # run the initializer op
for step in range(201):
    sess.run(train)
    if step % 20 == 0:
        # weight should converge toward 0.1 and biases toward 0.3.
        print(step, sess.run(weight), sess.run(biases))
2. 構建簡單神經網路：擬合 y = x²，並以視覺化介面顯示
# -*- coding:gbk -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add a fully-connected layer.

    Args:
        inputs: input tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        activation_function: optional activation to apply; None means
            a purely linear layer.

    Returns:
        The layer's output tensor of shape [batch, out_size].
    """
    weight = tf.Variable(tf.random_normal([in_size, out_size]))
    # Biases start at a small positive value (0.1) rather than zero.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    wx_plus_b = tf.matmul(inputs, weight) + biases
    if activation_function is None:
        outputs = wx_plus_b
    else:
        outputs = activation_function(wx_plus_b)
    return outputs


# np.linspace: 300 evenly spaced samples on [-1, 1].
# [:, np.newaxis] reshapes (300,) into a (300, 1) column vector.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
# Target: y = x^2 - 0.5 plus Gaussian noise.
y_data = np.square(x_data) - 0.5 + noise

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
# One hidden layer (1 -> 10, ReLU) and a linear output layer (10 -> 1).
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)
# Per-sample squared error summed over features, then averaged over batch.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

# Open the plotting window showing the raw scatter data.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
# Interactive mode: plt.show() does not block, so training continues
# while the figure keeps updating.
plt.ion()
plt.show()

for i in range(1000):
    # NOTE(review): the feed_dict payload was stripped by the scrape;
    # {xs: x_data, ys: y_data} is the standard feed here -- confirm.
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
        try:
            # Remove the previously drawn fit line before redrawing.
            ax.lines.remove(lines[0])
        except Exception:
            # First iteration: no line exists yet.
            pass
        prediction_value = sess.run(prediction, feed_dict={xs: x_data})
        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
        plt.pause(0.1)
3. TensorBoard 學習：主要是為計算圖的節點新增命名空間（name_scope），以便視覺化。
# -*- coding:gbk -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add a fully-connected layer, wrapping each part in a name_scope
    so it appears as a named node group in TensorBoard.

    Args:
        inputs: input tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        activation_function: optional activation; None means linear.

    Returns:
        The layer's output tensor of shape [batch, out_size].
    """
    with tf.name_scope('layer'):
        with tf.name_scope('weight'):
            weight = tf.Variable(tf.random_normal([in_size, out_size]),
                                 name='w')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
        with tf.name_scope('wx_plus_b'):
            wx_plus_b = tf.matmul(inputs, weight) + biases
        if activation_function is None:
            outputs = wx_plus_b
        else:
            outputs = activation_function(wx_plus_b)
        return outputs


# np.linspace: 300 evenly spaced samples on [-1, 1].
# [:, np.newaxis] reshapes (300,) into a (300, 1) column vector.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))
with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

init = tf.initialize_all_variables()
sess = tf.Session()
# Dump the graph definition so `tensorboard --logdir=logs` can render it.
writer = tf.summary.FileWriter("logs/", sess.graph)
sess.run(init)

for i in range(1000):
    # NOTE(review): the feed_dict payload was stripped by the scrape;
    # {xs: x_data, ys: y_data} is the standard feed here -- confirm.
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
莫煩 Tensorflow 變數
理解了tensorflow必須通過session去run才能輸出值,如果不通過session,那麼只能得到變數名字和型別,加深理解了tensorflow的機制。import tensorflow as tf state tf.variable 0,name counter print state 輸...
莫煩Tensorflow 入門
tensorflow 初步嘗試 建立資料 搭建模型 計算誤差 傳播誤差 初始會話 不斷訓練 import tensorflow as tf import numpy as np 建立資料 x data np.random.rand 100 astype np.float32 y data x dat...
莫煩Tensorflow 建造自己的NN
1 定義 add layer 定義一個新增層的函式可以很容易的新增神經層 神經層裡常見的引數通常有weights biases和激勵函式 權重 偏置 啟用函式 import tensorflow as tf 輸入值 輸入的大小 輸出的大小和激勵函式,我們設定預設的激勵函式是none def add ...