Crawler: crawling Qiushibaike with Scrapy and saving the results to a txt file
Project directory structure
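The layout is the standard one Scrapy generates. Assuming the project was created with scrapy startproject firstBlood followed by scrapy genspider first www.xxx.com, it looks roughly like this (a sketch of the default layout, not a listing taken from the original project):

firstBlood/
├── scrapy.cfg                # deploy/run configuration
└── firstBlood/
    ├── __init__.py
    ├── items.py              # FirstbloodItem is defined here
    ├── middlewares.py        # spider / downloader middlewares (not used in this example)
    ├── pipelines.py          # FirstbloodPipeline is defined here
    ├── settings.py           # project settings
    └── spiders/
        ├── __init__.py
        └── first.py          # the spider shown below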
Source of first.py under spiders/
# -*- coding: utf-8 -*-
import scrapy
from firstBlood.items import FirstbloodItem


class FirstSpider(scrapy.Spider):
    # Name of the spider.
    # When the project contains several spiders, the name is used to pick the one to run.
    name = 'first'
    # allowed_domains: the domains the spider may crawl; kept commented out here
    # so it does not conflict with start_urls.
    # allowed_domains = ['www.xxx.com']
    # start_urls: list of URLs that Scrapy requests automatically.
    start_urls = ['https://www.qiushibaike.com/text/']

    def parse(self, response):
        '''
        Parse the response to each request.
        Regular expressions or XPath can be used; since Scrapy integrates XPath,
        XPath is recommended. Each match comes back as a Selector object.
        :param response:
        :return:
        '''
        all_data = []
        div_list = response.xpath('//div[@id="content-left"]/div')
        for div in div_list:
            # author = div.xpath('./div[1]/a[2]/h2/text()')
            # What we get back is not the raw source text but a Selector object;
            # the wrapped data has to be pulled out with extract().
            # author = author[0].extract()
            # That version crashes on anonymous users, whose markup differs from
            # that of logged-in users.
            '''Improved version: the XPath union also matches anonymous users'''
            author = div.xpath('./div[1]/a[2]/h2/text() | ./div[1]/span[2]/h2/text()')[0].extract()
            content = div.xpath('.//div[@class="content"]/span//text()').extract()
            content = ''.join(content)
            # print(author + ':' + content.strip(' \n \t '))

            # Terminal-based storage:
            # dic = {
            #     'author': author,
            #     'content': content
            # }
            # all_data.append(dic)
            # return all_data

            # Two ways of persisting the data:
            # 1. Terminal command: parse must return a value, then run
            #    scrapy crawl first -o qiubai.csv --nolog
            #    (the terminal command only supports formats such as json, csv and xml)
            # 2. Item pipeline:
            item = FirstbloodItem()  # inside the loop: instantiate a new item object each iteration
            item['author'] = author
            item['content'] = content
            yield item  # hand the item over to the pipeline
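The comments above sketch two ways of running the spider. Assuming the commands are issued from the project root, they look roughly like this (qiubai.csv is just an example file name):

# 1. Terminal-command persistence: export what parse returns/yields;
#    only formats such as json, csv and xml are supported.
scrapy crawl first -o qiubai.csv --nolog

# 2. Pipeline persistence: relies on the items, pipelines and settings files below.
scrapy crawl first --nolog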
The items file
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class FirstbloodItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # An Item is a universal container: its fields accept values of any type
    # (strings, JSON-like structures, and so on).
    author = scrapy.Field()
    content = scrapy.Field()
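As the comment says, an item is used like a dictionary whose keys are limited to the declared fields, while the values can be of any type. A minimal sketch of that behaviour (the sample values are made up):

from firstBlood.items import FirstbloodItem

item = FirstbloodItem()
item['author'] = 'some user'        # declared field: works like a dict key
item['content'] = 'some joke text'  # the value can be any type
print(item['author'])
# item['title'] = 'x'               # KeyError: only declared fields are accepted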
The pipelines file
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

# All code related to persistent storage should live in this file.


class FirstbloodPipeline(object):
    fp = None

    def open_spider(self, spider):
        # Called once when the spider starts: open the output file.
        print('Spider started')
        self.fp = open('./qiushibaike.txt', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        '''
        Handle one item handed over by the spider.
        :param item:
        :param spider:
        :return:
        '''
        # Append a newline so consecutive records do not run into each other.
        self.fp.write(item['author'] + ':' + item['content'] + '\n')
        print(item['author'], item['content'])
        return item

    def close_spider(self, spider):
        # Called once when the spider closes: release the file handle.
        print('Spider finished')
        self.fp.close()
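process_item returns the item because every enabled pipeline receives it in turn, ordered by the priority number given in ITEM_PIPELINES (lower numbers run earlier). A hypothetical second pipeline, only to illustrate the chaining; it is not part of the original project:

class BackupPipeline(object):
    def process_item(self, item, spider):
        # hypothetical extra step, e.g. writing the same record somewhere else
        return item  # must return the item so later pipelines still receive it

# settings.py would then register both, for example:
# ITEM_PIPELINES = {
#     'firstBlood.pipelines.FirstbloodPipeline': 300,  # runs first
#     'firstBlood.pipelines.BackupPipeline': 400,      # hypothetical
# }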
The settings file
# -*- coding: utf-8 -*-

# Scrapy settings for firstBlood project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'firstBlood'

SPIDER_MODULES = ['firstBlood.spiders']
NEWSPIDER_MODULE = 'firstBlood.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'

# Obey robots.txt rules
# True by default; set to False so the crawler does not obey the robots protocol (anti-anti-crawling)
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'firstBlood.middlewares.FirstbloodSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'firstBlood.middlewares.FirstbloodDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'firstBlood.pipelines.FirstbloodPipeline': 300,  # 300 is the priority
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
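Most of the file is the untouched template; only a handful of settings actually matter for this crawl. A stripped-down settings.py with just those overrides would look roughly like this (a sketch; the full generated file above works just as well):

BOT_NAME = 'firstBlood'
SPIDER_MODULES = ['firstBlood.spiders']
NEWSPIDER_MODULE = 'firstBlood.spiders'

# Pretend to be a normal browser and ignore robots.txt.
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
ROBOTSTXT_OBEY = False

# Register the pipeline; 300 is its priority (lower numbers run earlier).
ITEM_PIPELINES = {
    'firstBlood.pipelines.FirstbloodPipeline': 300,
}

After that, running scrapy crawl first --nolog from the project root should leave the scraped author:content records in ./qiushibaike.txt, the file opened by FirstbloodPipeline.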