Extracting content with XPath
// locates from the root node (matches anywhere in the document)
/ searches one level down
Extract text content: /text()
Extract attribute content: /@xxx
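As a quick sketch of these four pieces of syntax (the one-line markup here is invented for illustration; fuller walkthroughs follow below):

#-*-coding:utf8-*-
from lxml import etree

doc = etree.HTML('<div><a href="http://example.com" title="demo">hello</a></div>')
print doc.xpath('//div/a/text()')[0]   # hello -- // from the root, / one level down, text()
print doc.xpath('//a/@href')[0]        # http://example.com -- attribute via /@
print doc.xpath('//a/@title')[0]       # demo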
Regular matching
#-*-coding:utf8-*-
from lxml import etree

# The sample HTML lost its tags when this post was copied, so the markup
# below is a minimal reconstruction (placeholder list items and URLs)
# that matches the XPath queries in this example.
html = '''
<div id="content">
    <ul id="useful">
        <li>info 1</li>
        <li>info 2</li>
        <li>info 3</li>
    </ul>
    <div id="url">
        <a href="http://example.com">極客學院</a>
        <a href="http://example.com/course" title="course library">點我開啟課程庫</a>
    </div>
</div>
'''
selector = etree.HTML(html)

# Extract text
content = selector.xpath('//ul[@id="useful"]/li/text()')
for each in content:
    print each

# Extract attributes
link = selector.xpath('//a/@href')
for each in link:
    print each

title = selector.xpath('//a/@title')
print title[0]
Special matching
#-*-coding:utf8-*-
from lxml import etree

# The div tags were stripped from this post; ids beginning with "test"
# are reconstructed here so that starts-with() has something to match.
html1 = '''
<div id="test-1">需要的內容1</div>
<div id="test-2">需要的內容2</div>
<div id="test-3">需要的內容3</div>
'''

# html2 demonstrates string(.): its text is split across nested tags, so
# a plain text() query cannot collect all of it. The nesting shown here
# is likewise a reconstruction.
html2 = '''
<div id="test3">
    我左青龍,
    <span id="tiger">
        右白虎,
        老牛在當中,
    </span>
    龍頭在胸口。
</div>
'''

selector = etree.HTML(html1)
# Match every div whose id starts with "test"
content = selector.xpath('//div[starts-with(@id,"test")]/text()')
for each in content:
    print each

selector = etree.HTML(html2)
# text() would only return the text directly under div#test3, skipping
# everything inside the nested span:
# content_1 = selector.xpath('//div[@id="test3"]/text()')
# string(.) gathers all descendant text in document order instead:
data = selector.xpath('//div[@id="test3"]')[0]
info = data.xpath('string(.)')
content_2 = info.replace('\n','').replace(' ','')
print content_2
Timing comparison: single thread vs. multiple threads
#-*-coding:utf8-*-
from multiprocessing.dummy import Pool as ThreadPool
import requests
import time

def getsource(url):
    html = requests.get(url)

# Build the list of pages to fetch. The base URL was elided in the
# original post, so the empty string is left as-is.
urls = []
for i in range(1, 21):
    newpage = '' + str(i)
    urls.append(newpage)

# Fetch the pages one by one
time1 = time.time()
for i in urls:
    print i
    getsource(i)
time2 = time.time()
print u'Single-thread time: ' + str(time2 - time1)

# Fetch the same pages with a pool of 4 worker threads
pool = ThreadPool(4)
time3 = time.time()
results = pool.map(getsource, urls)
pool.close()
pool.join()
time4 = time.time()
print u'Parallel time: ' + str(time4 - time3)
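The Pool/map pattern above is easier to see with the network call swapped for a trivial function (this sketch is illustrative only; square is not part of the original code):

#-*-coding:utf8-*-
from multiprocessing.dummy import Pool as ThreadPool

def square(x):
    # stand-in for getsource; no network access needed
    return x * x

pool = ThreadPool(4)                          # 4 worker threads
results = pool.map(square, [1, 2, 3, 4, 5])   # blocks until every call returns
pool.close()                                  # no further tasks may be submitted
pool.join()                                   # wait for the workers to finish
print results                                 # [1, 4, 9, 16, 25] -- input order is kept

multiprocessing.dummy provides a thread-backed Pool with the same API as multiprocessing.Pool, which is why it suits I/O-bound work like crawling.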
#-*-coding:utf8-*-
from lxml import etree
from multiprocessing.dummy import Pool as ThreadPool
import requests
import json
import sys

reload(sys)
sys.setdefaultencoding('utf-8')

'''Delete content.txt before re-running: the file is opened in append
mode, so old output would pile up.'''

def towrite(contentdict):
    f.writelines(u'Reply content: ' + unicode(contentdict['topic_reply_content']) + '\n')
    f.writelines(u'Replier: ' + contentdict['user_name'] + '\n\n')

def spider(url):
    html = requests.get(url)
    selector = etree.HTML(html.text)
    content_field = selector.xpath('//div[@class="l_post j_l_post l_post_bright "]')
    print content_field
    item = {}
    for each in content_field:
        # data-field holds a JSON description of the post; strip any
        # literal &quot; entities before parsing (see note 1 below)
        reply_info = json.loads(each.xpath('@data-field')[0].replace('&quot;', ''))
        author = reply_info['author']['user_name']
        content = each.xpath('div[@class="d_post_content_main"]/div/cc/div[@class="d_post_content j_d_post_content clearfix"]/text()')[0]
        reply_time = reply_info['content']['date']
        print content
        print reply_time
        print author
        item['user_name'] = author
        item['topic_reply_content'] = content
        item['topic_reply_time'] = reply_time
        towrite(item)

if __name__ == '__main__':
    pool = ThreadPool(4)
    f = open('content.txt', 'a')
    # The thread's base URL was elided in the original post, so the
    # empty string is left as-is.
    page = []
    for i in range(1, 21):
        newpage = '' + str(i)
        page.append(newpage)
    results = pool.map(spider, page)
    pool.close()
    pool.join()
    f.close()
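To make the data-field handling concrete, here is a sketch with an invented sample of what such an attribute value might contain after entity decoding (the field names follow the spider above; the values are made up):

#-*-coding:utf8-*-
import json

# invented sample of a decoded data-field attribute value
data_field = '{"author": {"user_name": "someone"}, "content": {"date": "2015-01-01 12:00"}}'
reply_info = json.loads(data_field)
print reply_info['author']['user_name']   # someone
print reply_info['content']['date']       # 2015-01-01 12:00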
1、" 在html中表示英語中的雙引號,如
你看下效果就知道了,主要為了區分語法上的引號
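A small hand-written example (the markup is invented for illustration). The data-field attribute is delimited by double quotes, so the double quotes inside its JSON value must be written as &quot;:

<div data-field="{&quot;author&quot;: {&quot;user_name&quot;: &quot;abc&quot;}}"></div>

The browser (and lxml) decode the entities, so the attribute value seen by the code is {"author": {"user_name": "abc"}}.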
This article comes from the 「點滴積累」 blog; please keep this attribution when reposting.