import numpy as np
import tensorflow as tf
import random
import pickle
from collections import Counter
import nltk
from nltk.tokenize import word_tokenize
def loaddataset():
    postinglist = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'i', 'love', 'him', 'my'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classvec = [0, 1, 0, 1, 0, 1]  # 1 means abusive speech, 0 means not
    return postinglist, classvec
# Build the vocabulary (lexicon)
def create_lexicon(postinglist):
    lex = []
    for words in postinglist:
        lex += words
    lex = Counter(lex)  # word -> occurrence count
    temp = []
    for word in lex:
        temp.append(word)  # keep every word; a frequency filter could be added here
    return temp
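# A quick sanity check of the lexicon builder (illustrative, not in the original post;
# the variable names are hypothetical). Counter's iteration order is not meaningful,
# so only the vocabulary size is stable:
#
#   demo_posts, _ = loaddataset()
#   demo_lex = create_lexicon(demo_posts)
#   print(len(demo_lex))  # 32 unique words in this toy corpus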
# Load the samples and build the lexicon (the code below reuses the full set for both training and testing)
postinglist, classvec = loaddataset()
lex = create_lexicon(postinglist)
def normalize_dataset(lex):
    dataset = []

    # Convert one tokenized sentence into [bag-of-words feature vector, one-hot label]
    def string_to_vector(words, clf):
        features = np.zeros(len(lex))
        for word in words:
            features[lex.index(word)] = 1  # a word may occur twice in one sentence; += 1 would also work, the difference is minor
        return [features, clf]

    for i in range(len(postinglist)):
        line = postinglist[i]
        # classvec alternates 0, 1, 0, 1, ..., so even indices are class 0 and odd indices class 1
        if i % 2 == 0:
            one_sample = string_to_vector(line, [0, 1])  # e.g. [array([ 0., 1., 0., ..., 0.]), [0, 1]]
        if i % 2 == 1:
            one_sample = string_to_vector(line, [1, 0])
        dataset.append(one_sample)
    # print(len(dataset))
    return dataset
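# Illustration (not part of the original script) of the binary encoding above:
# 'my' appears twice in the third sentence, yet its feature stays 1; with += 1 it
# would count occurrences instead.
#
#   vec, label = normalize_dataset(lex)[2]
#   print(vec[lex.index('my')])  # 1.0, even though 'my' occurs twice in that sentence
#   print(label)                 # [0, 1] -- index 2 is even, so class 0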
# feed-forward neural network
# Define how many "neurons" each layer has
n_input_layer = len(lex)  # input layer
n_layer_1 = 30  # hidden layer
n_layer_2 = 30  # hidden layer: it sounds mysterious, but it is just any middle layer besides the input and output layers
n_output_layer = 2  # output layer
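# Parameter-count sketch (illustrative): with len(lex) == 32 for this toy corpus,
# the first layer holds 32*30 weights + 30 biases = 990 parameters, and the output
# layer holds 30*2 weights + 2 biases = 62 parameters.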
# Define the neural network to be trained
def neural_network(data):
    # Weights and biases of the first layer of "neurons"
    layer_1_w_b = {'w_': tf.Variable(tf.random_normal([n_input_layer, n_layer_1])),
                   'b_': tf.Variable(tf.random_normal([n_layer_1]))}
    # # Weights and biases of the second layer of "neurons"
    # layer_2_w_b = {'w_': tf.Variable(tf.random_normal([n_layer_1, n_layer_2])),
    #                'b_': tf.Variable(tf.random_normal([n_layer_2]))}
    # Weights and biases of the output layer of "neurons"
    layer_output_w_b = {'w_': tf.Variable(tf.random_normal([n_layer_1, n_output_layer])),
                        'b_': tf.Variable(tf.random_normal([n_output_layer]))}

    # w·x + b
    layer_1 = tf.add(tf.matmul(data, layer_1_w_b['w_']), layer_1_w_b['b_'])
    layer_1 = tf.nn.relu(layer_1)  # activation function
    # layer_2 = tf.add(tf.matmul(layer_1, layer_2_w_b['w_']), layer_2_w_b['b_'])
    # layer_2 = tf.nn.relu(layer_2)  # activation function
    layer_output = tf.add(tf.matmul(layer_1, layer_output_w_b['w_']), layer_output_w_b['b_'])
    return layer_output
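# For comparison (a sketch, not the author's code): the same architecture expressed
# with the tf.keras API, assuming a TensorFlow version that ships tf.keras. The
# second Dense layer emits raw logits; softmax is applied inside the loss below.
#
#   model = tf.keras.Sequential([
#       tf.keras.layers.Dense(n_layer_1, activation='relu', input_shape=(n_input_layer,)),
#       tf.keras.layers.Dense(n_output_layer)
#   ])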
dataset = np.array(normalize_dataset(lex))
train_dataset = dataset
test_dataset = dataset

# number of samples used per training step
batch_size = 1

x = tf.placeholder('float', [None, len(train_dataset[0][0])])
# [None, len(train_x)] specifies the height and width (matrix shape) of the input data;
# the benefit is that TensorFlow raises an error if the fed data does not match that shape.
# It can also be left unspecified.
y = tf.placeholder('float')
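# The shape check in action (illustrative): feeding a batch whose second dimension
# does not equal len(lex) makes session.run raise a ValueError, catching
# feature/lexicon mismatches early.
#
#   bad_batch = np.zeros((1, len(lex) + 1))
#   session.run(..., feed_dict={x: bad_batch})  # ValueError: Cannot feed value of shape ...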
def train_neural_network(x, y):
    predict = neural_network(x)
    # softmax cross-entropy between the network's logits and the one-hot labels
    cost_func = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predict, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost_func)  # learning rate defaults to 0.001

    epochs = 13
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())  # initialize_all_variables() is the deprecated spelling
        # np.random.shuffle shuffles the rows in place; random.shuffle corrupts 2-D numpy arrays
        np.random.shuffle(train_dataset)
        train_x = train_dataset[:, 0]
        train_y = train_dataset[:, 1]
        for epoch in range(epochs):
            epoch_loss = 0
            i = 0  # reset each epoch, otherwise the while loop only runs in the first epoch
            while i < len(train_x):
                start = i
                end = i + batch_size
                batch_x = train_x[start:end]
                batch_y = train_y[start:end]
                _, c = session.run([optimizer, cost_func],
                                   feed_dict={x: list(batch_x), y: list(batch_y)})
                epoch_loss += c
                i += batch_size
            print(epoch, ' : ', epoch_loss)

        text_x = test_dataset[:, 0]
        text_y = test_dataset[:, 1]
        correct = tf.equal(tf.argmax(predict, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        # the placeholders still need to be fed when evaluating accuracy
        print('Accuracy: ', accuracy.eval({x: list(text_x), y: list(text_y)}))

train_neural_network(x, y)
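# To classify an unseen sentence one would need the trained weights, so the session
# (or a tf.train.Saver checkpoint) would have to outlive train_neural_network. A
# hedged sketch of just the feature step, with words missing from the lexicon skipped
# rather than raising ValueError (the helper name is hypothetical):

def sentence_to_features(sentence, lex):
    features = np.zeros(len(lex))
    for word in sentence.lower().split():
        if word in lex:
            features[lex.index(word)] = 1
    return features

# e.g. sentence_to_features('stop posting stupid garbage', lex) produces the row
# that would be fed to x for prediction.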