# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from urllib import request
import re
import os, time

# Fetch a URL and return the HTML page
def get_html(url):
    req = request.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0')
    response = request.urlopen(req)  # pass the Request object so the added header is actually sent
    html = response.read()
    return html
# Get the novel titles and links from the listing page
def get_books(url):
    # Given a listing page, return a dict of {book title: book URL} for that page
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    fixed_html = soup.prettify()
    # the attrs filter was lost in the original post; {'class': 'book'} is a placeholder selector
    books = soup.find_all('div', attrs={'class': 'book'})
    book_dict = {}
    for book in books:
        book_name = book.h3.a.string
        book_url = book.h3.a.get('href')
        book_dict[book_name] = book_url
    return book_dict
# Given a book's link, get a dict of its chapters
def get_parts(url):
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    fixed_html = soup.prettify()
    part_urls = soup.find_all('a')
    host = ""  # the site's host prefix was blanked out in the original post
    part_dict = {}
    for p in part_urls:
        p_url = str(p.get('href'))
        if re.search(r'\d\.html', p_url) and ("xiaoshuotxt" not in p_url):
            part_dict[p.string] = host + p_url
    return part_dict
# Given a chapter URL, get the chapter text
def get_txt(url):
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    fixed_html = soup.prettify()
    title = soup.h1.string  # the chapter title
    # the attrs filter was lost in the original post; {'class': 'content'} is a placeholder selector
    content = soup.find('div', attrs={'class': 'content'})
    txt = content.get_text()  # body text of the chapter
    return txt
if __name__ == "__main__":
    root_dir = r'e:\books'
    # url = '/mingzhu/index_2.html'  # novels on page 2 (host prefix omitted in the original post)
    url = "/writer/58"  # Jin Yong's novels
    books = get_books(url)
    for book_name, book_url in books.items():
        os.mkdir(os.path.join(root_dir, book_name))
        part_dict = get_parts(book_url)
        print(book_name, "has", len(part_dict), "chapters")
        for part_name, part_url in part_dict.items():
            print("Saving:", part_name)
            # create the chapter file with UTF-8 encoding
            f1 = open(r'e:\books\%s\%s.txt' % (book_name, part_name), 'w', encoding='utf-8')
            part_txt = get_txt(part_url)
            f1.write(str(part_txt))
            f1.close()
            time.sleep(2)
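
Two fragile spots are worth noting: os.mkdir raises an error when the script is re-run and the book folder already exists, and chapter titles containing characters such as ? or : cannot be used directly in Windows file names. Below is a minimal hardening sketch under those assumptions; save_chapter is a hypothetical helper, not part of the original script:

import os, re

def save_chapter(root_dir, book_name, part_name, text):
    # drop characters that Windows forbids in file names (hypothetical sanitizer)
    safe_name = re.sub(r'[\\/:*?"<>|]', '', str(part_name))
    book_dir = os.path.join(root_dir, str(book_name))
    os.makedirs(book_dir, exist_ok=True)          # unlike os.mkdir, this tolerates an existing directory
    path = os.path.join(book_dir, safe_name + '.txt')
    with open(path, 'w', encoding='utf-8') as f:  # the with-block closes the file even on errors
        f.write(str(text))
    return path

Inside the main loop, the f1 = open(...) / f1.close() pair could then be replaced by a single save_chapter(root_dir, book_name, part_name, part_txt) call.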