corpus = [dictionary.doc2bow(text) for text in texts]  # bag-of-words vectors
tfidf = models.TfidfModel(corpus)  # step 1 -- initialize a model
doc_bow = [(0, 1), (1, 1)]  # a new document, as a bag-of-words vector
print(tfidf[doc_bow])  # step 2 -- use the model to transform vectors
[(0, 0.70710678), (1, 0.70710678)]
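By the way, the snippet above assumes that `texts` (a list of tokenized documents) and `dictionary` already exist. For completeness, here is a minimal setup sketch using the nine-document corpus from the gensim tutorials, which is the corpus the outputs in this post come from:

from collections import Counter
from gensim import corpora, models

documents = [
    "Human machine interface for lab abc computer applications",
    "A survey of user opinion of computer system response time",
    "The EPS user interface management system",
    "System and human system engineering testing of EPS",
    "Relation of user perceived response time to error measurement",
    "The generation of random binary unordered trees",
    "The intersection graph of paths in trees",
    "Graph minors IV Widths of trees and well quasi ordering",
    "Graph minors A survey",
]

# tokenize, drop common stopwords, and keep only tokens that occur more than once
stoplist = set("for a of the and to in".split())
texts = [[w for w in doc.lower().split() if w not in stoplist] for doc in documents]
freq = Counter(tok for text in texts for tok in text)
texts = [[tok for tok in text if freq[tok] > 1] for text in texts]

dictionary = corpora.Dictionary(texts)  # maps each token to an integer id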
# Or apply the transformation to the entire corpus:
corpus_tfidf = tfidf[corpus]
for doc in corpus_tfidf:
    print(doc)
[(0, 0.57735026918962573), (1, 0.57735026918962573), (2, 0.57735026918962573)]
[(0, 0.44424552527467476), (3, 0.44424552527467476), (4, 0.44424552527467476), (5, 0.32448702061385548), (6, 0.44424552527467476), (7, 0.32448702061385548)]
[(2, 0.5710059809418182), (5, 0.41707573620227772), (7, 0.41707573620227772), (8, 0.5710059809418182)]
[(1, 0.49182558987264147), (5, 0.71848116070837686), (8, 0.49182558987264147)]
[(3, 0.62825804686700459), (6, 0.62825804686700459), (7, 0.45889394536615247)]
[(9, 1.0)]
[(9, 0.70710678118654746), (10, 0.70710678118654746)]
[(9, 0.50804290089167492), (10, 0.50804290089167492), (11, 0.69554641952003704)]
[(4, 0.62825804686700459), (10, 0.45889394536615247), (11, 0.62825804686700459)]
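As a sanity check on these numbers: by default gensim's TfidfModel weights each term by tf * log2(N / df) and then L2-normalizes each document vector. A rough hand-check of the [(0, 0.70710678), (1, 0.70710678)] result above (in this corpus, both token 0 and token 1 happen to appear in 2 of the 9 documents):

import math

N, df, tf = 9, 2, 1            # 9 documents; both tokens appear in 2 of them, once each
w = tf * math.log2(N / df)     # raw tf-idf weight, identical for the two tokens
norm = math.sqrt(2 * w ** 2)   # L2 norm of the two-element vector
print(w / norm, w / norm)      # -> 0.7071067811865476 0.7071067811865476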
Many models are built on top of TF-IDF, such as LSI and LDA. For example:
lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2)  # initialize an LSI transformation
corpus_lsi = lsi[corpus_tfidf]  # create a double wrapper over the original corpus: bow -> tfidf -> fold-in-lsi
Here we used Latent Semantic Indexing to transform our TF-IDF corpus into a latent 2-D space (2-D because we set num_topics=2). Now you may be wondering: what do these two latent dimensions stand for? Let's inspect them with models.LsiModel.print_topics():
lsi.print_topics(2)
topic #0(1.594): -0.703*"trees" + -0.538*"graph" + -0.402*"minors" + -0.187*"survey" + -0.061*"system" + -0.060*"response" + -0.060*"time" + -0.058*"user" + -0.049*"computer" + -0.035*"interface"
topic #1(1.476): -0.460*"system" + -0.373*"user" + -0.332*"eps" + -0.328*"interface" + -0.320*"response" + -0.320*"time" + -0.293*"computer" + -0.280*"human" + -0.171*"survey" + 0.161*"trees"
These are the topic-word distributions: topic 0 is dominated by graph/tree terms, topic 1 by human-computer interaction terms. (The signs of LSI weights are arbitrary, so only their relative magnitudes matter.)
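If you want the topics as data rather than formatted strings (say, to feed a visualization), LsiModel also exposes show_topic, which returns (word, weight) pairs; a quick sketch:

for word, weight in lsi.show_topic(0, topn=5):  # top 5 words of topic 0
    print(word, round(weight, 3))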
for doc in corpus_lsi:  # both bow -> tfidf and tfidf -> lsi transformations are actually executed on the fly here
    print(doc)
[(0, -0.197), (1, 0.761)] # "a survey of user opinion of computer system response time"
[(0, -0.090), (1, 0.724)] # "the eps user interface management system"
[(0, -0.076), (1, 0.632)] # "system and human system engineering testing of eps"
[(0, -0.102), (1, 0.574)] # "relation of user perceived response time to error measurement"
[(0, -0.703), (1, -0.161)] # "the generation of random binary unordered trees"
[(0, -0.877), (1, -0.168)] # "the intersection graph of paths in trees"
[(0, -0.910), (1, -0.141)] # "graph minors iv widths of trees and well quasi ordering"
[(0, -0.617), (1, 0.054)] # "graph minors a survey"
There are two topics: index 0 is the first topic and index 1 is the second, and each pair gives the document's weight on that topic. So this is the document-topic distribution!
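Finally, a trained transformation can be persisted and reloaded later so it doesn't have to be recomputed on every run; a minimal sketch following the save/load pattern from the gensim tutorial (the file path is just an example):

import os, tempfile

path = os.path.join(tempfile.gettempdir(), "model.lsi")  # example path
lsi.save(path)                          # serialize the trained LsiModel
lsi_loaded = models.LsiModel.load(path)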