The most commonly used cost function for linear regression is MSE (mean squared error).
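As a quick illustration (a minimal NumPy sketch with made-up numbers, not part of the example below), MSE is simply the mean of the squared residuals between predictions and targets:

import numpy as np

# Hypothetical predictions and targets, only to illustrate the formula
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
y_true = np.array([3.0, -0.5, 2.0, 7.0])

# mean((y_pred - y_true)^2) -- the same quantity tf.reduce_mean(tf.square(y - y_)) computes
mse = np.mean(np.square(y_pred - y_true))
print(mse)  # 0.375

The full example below loads data/ex1data2.txt, normalizes the features, and fits a linear model by minimizing exactly this quantity with gradient descent.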
from __future__ import print_function
from __future__ import absolute_import
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import sys
import logging
import numpy as np
import tensorflow as tf
def load_data():
    datafile = 'data/ex1data2.txt'
    # Read in the data file
    cols = np.loadtxt(datafile, delimiter=',', usecols=(0, 1, 2), unpack=True)
    # [[x1 x1 x1 x1 x1],
    #  [x2 x2 x2 x2 x2],
    #  [y  y  y  y  y ]]
    print(cols[-1:].shape)  # (1, 47)
    print(cols[-1].shape)   # (47,)
    x = np.transpose(np.array(cols[:-1]))
    y = np.transpose(np.array(cols[-1:]))
    # x: [[x1 x2],
    #     [x1 x2],
    #     [x1 x2]]
    print(x.shape)  # (47, 2)
    print(y.shape)  # (47, 1)
    stored_feature_means, stored_feature_stds = [], []  # store the means and standard deviations
    xnorm = x.copy()
    for icol in range(xnorm.shape[1]):
        stored_feature_means.append(np.mean(xnorm[:, icol]))
        stored_feature_stds.append(np.std(xnorm[:, icol]))
        # Skip the first column
        # Question: why skip the first column when no column of ones was ever added?
        # (presumably an earlier version prepended a column of ones for the intercept;
        #  here the bias is the separate variable b, so nothing needs to be skipped)
        # if not icol: continue
        # Faster to not recompute the mean and std again, just use the stored values
        xnorm[:, icol] = (xnorm[:, icol] - stored_feature_means[-1]) / stored_feature_stds[-1]
    return xnorm, y
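# Why normalize? The raw columns (house size in square feet vs. number of bedrooms)
# differ by orders of magnitude, and the squared-error gradient scales with the
# feature values, so gradient descent with the learning rate of 0.5 used below would
# likely diverge on unscaled inputs. Scaling each column to zero mean and unit
# variance keeps the updates well conditioned.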
train_x, train_y = load_data()
# placeholder
x = tf.placeholder("float", [None, 2])
# model
b = tf.Variable(tf.zeros([1]))
w = tf.Variable(tf.zeros([2, 1]))
y = tf.matmul(x, w) + b
# minimize mean squared error
# The most commonly used cost function for linear regression is MSE (mean squared error)
y_ = tf.placeholder("float", [None, 1])
loss = tf.reduce_mean(tf.square(y - y_))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# initialize the variables
init = tf.global_variables_initializer()
# Launch the graph
sess = tf.Session()
sess.run(init)
# Alternatively: sess.run(tf.global_variables_initializer())
sess.run(train, feed_dict={x: train_x, y_: train_y})
print(sess.run(w), sess.run(b))
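A single call to sess.run(train, ...) performs only one gradient-descent update, so the printed w and b will barely have moved from zero. A minimal sketch of a full training loop, reusing the same sess, train, loss, x, y_, train_x and train_y defined above, might look like this:

# Run many gradient steps and watch the loss decrease (sketch, same names as above)
for step in range(200):
    sess.run(train, feed_dict={x: train_x, y_: train_y})
    if step % 50 == 0:
        print(step, sess.run(loss, feed_dict={x: train_x, y_: train_y}))
print(sess.run(w), sess.run(b))

Because the features were normalized, the loop converges quickly even with the fairly large learning rate of 0.5. The second example below follows the same pattern, but fits a plane to randomly generated data.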
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Use NumPy to generate phony data, 100 points in total
x_data = np.float32(np.random.rand(2, 100))  # random input
y_data = np.dot([0.100, 0.200], x_data) + 0.300
print(x_data.shape)#(2, 100)
print(y_data.shape)#(100,)
# Build a linear model
b = tf.Variable(tf.zeros([1]))
w = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(w, x_data) + b
# Minimize the mean squared error
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
# Initialize the variables
init = tf.global_variables_initializer()
# Launch the graph
sess = tf.Session()
sess.run(init)
# Fit the plane
for step in range(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(w), sess.run(b))  # w has shape [1, 2], b has shape [1]
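After 200 steps, w and b should be close to the coefficients used to generate the data, [0.100, 0.200] and 0.300. A small sanity check, reusing the sess, w, b, x_data and y_data defined above, could look like:

# Compare the fitted plane against the data it was generated from
w_val, b_val = sess.run(w), sess.run(b)
y_fit = np.dot(w_val, x_data) + b_val   # shape (1, 100)
print(np.max(np.abs(y_fit - y_data)))   # should be a small number after training
sess.close()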