Scraping a Novel
I recently found a good novel and decided to try scraping it. This script won't work on every site as-is, but with small tweaks to match the target page's markup it can be adapted to most.
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Author: Playon
# Time: 2020/8/18 9:57
import requests
from bs4 import BeautifulSoup
import re, random

def finder(url, book):
    """
    Scrape chapter text starting from url.
    :param url: starting chapter URL
    :param book: path of the output text file
    :return:
    """
    # Collect the chapter titles already saved, so a re-run skips them
    # instead of appending duplicates.
    title = []
    with open(book, 'r', encoding='utf-8') as f:
        rec = re.compile(r'第\d+章')
        for t in f.readlines():
            t = t.strip()
            if rec.findall(t):
                if t not in title:
                    title.append(t)
    # Pick a random User-Agent per request so the traffic looks less uniform.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
    ]
    headers = {
        'User-Agent': random.choice(user_agent_list),
        # 'Referer': 'Referer',
        # 'Connection': 'close',
    }
    # The site serves GBK-encoded pages; ignore undecodable bytes.
    res = requests.get(url, headers=headers).content.decode('gbk', 'ignore')
    soup = BeautifulSoup(res, 'html.parser')
    # Chapter title lives in the h1 inside div.bookname.
    bookname = soup.find('div', attrs={'class': 'bookname'}).h1.text
    bookname = re.sub('正文 |全部章节 ', '', bookname)
    if bookname.lstrip() not in title:
        # Chapter body
        content = soup.find('div', attrs={'class': 'box_con'}).stripped_strings
        with open(book, 'a', encoding='utf-8') as f:
            # Drop page markers like (1/3) and the section prefixes.
            rec1 = re.compile(r'\(\d/\d\)|正文 |全部章节 ')
            bookname = re.sub(rec1, '', bookname)
            f.write('\n' + bookname.title() + '\n')
            # Skip navigation links, site watermarks and ad lines.
            rec2 = re.compile(r'^textselect|大神小说网|^>|^投推荐票|^[上下]一章|^加入书签|^荣耀巅峰|^←|^→|^章节目录|^推荐各位书友|\(.*?\) |^正文|^第\d+章|^全部章节')
            for i in content:
                if not rec2.findall(i):
                    f.write(re.sub('Kpl', 'KPL', i.title() + '\n'))
    print(url)
    # Next page: hop across siblings from the first <a> in the footer nav
    # to reach the next-chapter link (layout-specific and fragile).
    next_page = 'http://www.dashenxiaoshuo.com' + soup.find('div', attrs={'class': 'bottem1'}).a.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling['href']
    rec = re.compile(r'\d+\.html$')
    if rec.findall(next_page):
        # Chapter pages end in digits + .html; recurse to the next one.
        finder(next_page, book)
    else:
        # The final "next" link points back at the index page: stop.
        return 'End'

if __name__ == '__main__':
    url = "http://www.dashenxiaoshuo.com/html/32/32313/15350782.html"
    book = '../booklist/rydf.txt'
    # Create the output file if it does not exist yet, so the first
    # read inside finder() does not raise FileNotFoundError.
    open(book, 'a', encoding='utf-8').close()
    finder(url, book)
To use it yourself, change url and the path where book is saved.
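One caveat beyond the URL and file path: finder() calls itself once per chapter, so a novel with more than roughly a thousand chapters will hit Python's default recursion limit, and the chain of next_sibling hops only works on this one site's exact markup. Below is a minimal loop-based sketch of the same crawl; finding the next-chapter link by its anchor text ('下一章') is an assumption about the page markup that you should verify against your target site.

import re
import requests
from bs4 import BeautifulSoup

BASE = 'http://www.dashenxiaoshuo.com'  # same base URL as the script above

def crawl(url):
    """Loop-based variant of finder(): no recursion-depth limit."""
    while url and re.search(r'\d+\.html$', url):
        html = requests.get(url).content.decode('gbk', 'ignore')
        soup = BeautifulSoup(html, 'html.parser')
        # ... extract and save the chapter here, exactly as finder() does ...
        print(url)
        # Hypothetical lookup: find the next-chapter link by its anchor
        # text instead of counting siblings; '下一章' must match the
        # actual link text on the target site.
        nxt = soup.find('a', string='下一章')
        url = BASE + nxt['href'] if nxt else None

# crawl('http://www.dashenxiaoshuo.com/html/32/32313/15350782.html')

Looking the link up by its text tends to survive small layout changes that would break a fixed sibling chain.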
Source: https://www.cnblogs.com/playon/p/14070059.html