博客
关于我
强烈建议你试试无所不能的chatGPT,快点击我
新浪新闻全站
阅读量:4984 次
发布时间:2019-06-12

本文共 7790 字,大约阅读时间需要 25 分钟。

一.爬虫

# -*- coding: utf-8 -*-
"""Spider that crawls sina.com.cn: major categories -> minor categories -> articles."""
import os

import scrapy

from sina.items import SinaItem


class SinapiderSpider(scrapy.Spider):
    """Three-level crawl of the Sina news guide page.

    parse          -> collect major/minor category urls, create the
                      ./Data/<major>/<minor> directory tree
    second_parse   -> collect article (.shtml) links inside a minor category
    detail_parse   -> extract the article title and body text
    """

    name = 'sinapider'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['http://news.sina.com.cn/guide/']

    def parse(self, response):
        """Parse the guide page and request every minor-category url."""
        items = []
        # Urls and titles of all major categories (each xpath returns a list).
        parentUrls = response.xpath('//div[@id="tab01"]/div/h3/a/@href').extract()
        parentTitle = response.xpath('//div[@id="tab01"]/div/h3/a/text()').extract()
        # Urls and titles of all minor categories (flat lists across all majors).
        subUrls = response.xpath('//div[@id="tab01"]/div/ul/li/a/@href').extract()
        subTitle = response.xpath('//div[@id="tab01"]/div/ul/li/a/text()').extract()

        for i in range(len(parentTitle)):
            # Directory for this major category; create it if missing.
            parentFilename = "./Data/" + parentTitle[i]
            if not os.path.exists(parentFilename):
                os.makedirs(parentFilename)

            for j in range(len(subUrls)):
                item = SinaItem()
                # Carry the major-category info so later callbacks can tell
                # which parent a minor-category url belongs to.
                item['parentTitle'] = parentTitle[i]
                item['parentUrls'] = parentUrls[i]
                # A minor category belongs to this major one when its url is
                # prefixed by the major url
                # (e.g. sports.sina.com.cn and sports.sina.com.cn/nba).
                if_belong = subUrls[j].startswith(item['parentUrls'])
                if if_belong:
                    # Store the minor category under its major's directory.
                    subFilename = parentFilename + '/' + subTitle[j]
                    if not os.path.exists(subFilename):
                        os.makedirs(subFilename)
                    item['subUrls'] = subUrls[j]
                    item['subTitle'] = subTitle[j]
                    item['subFilename'] = subFilename
                    items.append(item)

        # Request each minor-category page; the item travels along via meta
        # and is handed to second_parse together with the response.
        for item in items:
            yield scrapy.Request(url=item['subUrls'],
                                 meta={'meta_1': item},
                                 callback=self.second_parse)

    def second_parse(self, response):
        """Parse a minor-category page and request every article link on it."""
        # Item built in parse(), carried through the request meta.
        meta_1 = response.meta['meta_1']
        # Every anchor href on the minor-category page.
        sonUrls = response.xpath('//a/@href').extract()

        items = []
        for i in range(len(sonUrls)):
            # An article link must end in .shtml and start with the
            # major-category url.
            if_belong = (sonUrls[i].endswith('.shtml')
                         and sonUrls[i].startswith(meta_1['parentUrls']))
            if if_belong:
                # Copy the category fields into a fresh item so each article
                # travels with its full directory context.
                item = SinaItem()
                item['parentTitle'] = meta_1['parentTitle']
                item['parentUrls'] = meta_1['parentUrls']
                item['subUrls'] = meta_1['subUrls']
                item['subTitle'] = meta_1['subTitle']
                item['subFilename'] = meta_1['subFilename']
                item['sonUrls'] = sonUrls[i]
                items.append(item)

        # Request each article page; detail_parse fills in title and body.
        for item in items:
            yield scrapy.Request(url=item['sonUrls'],
                                 meta={'meta_2': item},
                                 callback=self.detail_parse)

    def detail_parse(self, response):
        """Extract the article title and body text into the item."""
        item = response.meta['meta_2']
        # BUG FIX: the original stored the raw SelectorList in item['head'];
        # extract the title text (empty string when the h1 is missing).
        head = response.xpath('//h1[@id="main_title"]/text()').extract_first(default='')
        content_list = response.xpath('//div[@id="artibody"]/p/text()').extract()
        # Merge all <p> texts into one string (join instead of += in a loop).
        content = ''.join(content_list)
        item['head'] = head
        item['content'] = content
        yield item

 

 

二. item

import scrapy


class SinaItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()

    # Major category title and url
    parentTitle = scrapy.Field()
    parentUrls = scrapy.Field()

    # Minor category title and url
    subTitle = scrapy.Field()
    subUrls = scrapy.Field()

    # Directory where this minor category's articles are stored
    subFilename = scrapy.Field()

    # Article link found under the minor category
    sonUrls = scrapy.Field()

    # Article title and body text
    head = scrapy.Field()
    content = scrapy.Field()

 

三. pipeline

# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html


class SinaPipeline(object):
    """Write each article's body text to <subFilename>/<name>.txt."""

    def process_item(self, item, spider):
        """Persist item['content'] as a UTF-8 .txt file and pass the item on.

        The file name is the middle part of the article url: the leading
        "http://" (7 chars) and trailing ".shtml" (6 chars) are stripped
        and '/' is replaced by '_'.
        """
        sonUrls = item['sonUrls']
        filename = sonUrls[7:-6].replace('/', '_') + ".txt"
        # BUG FIX: the original used open()/close() without try/finally, so
        # the handle leaked if write() raised; a context manager always closes.
        with open(item['subFilename'] + '/' + filename, 'w', encoding='utf-8') as fp:
            fp.write(item['content'])
        return item

四. settings

# -*- coding: utf-8 -*-

# Scrapy settings for the sina project.
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'sina'

SPIDER_MODULES = ['sina.spiders']
NEWSPIDER_MODULE = 'sina.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'sina.middlewares.SinaSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'sina.middlewares.SinaDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'sina.pipelines.SinaPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
#AUTOTHROTTLE_START_DELAY = 5
#AUTOTHROTTLE_MAX_DELAY = 60
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

LOG_LEVEL = 'DEBUG'

 

转载于:https://www.cnblogs.com/tjp40922/p/10720991.html

你可能感兴趣的文章
LeetCode-178:分数排名
查看>>
转:退火算法 Simulate Anneal Arithmetic (SAA,模拟退火算法)
查看>>
Django电商项目---完成商品主页显示day2
查看>>
如何解决文章格式化编辑器win7 64位下找不到Comctl32.ocx
查看>>
核心动画-翻页效果的实现
查看>>
微信小程序弹出框 页面依然可以滑动的解决
查看>>
$.ajax同域请求,跨域请求的解决方案
查看>>
octave操作
查看>>
【Python】安装Python的mysql模块
查看>>
【Python】在控制台输出不同颜色的文字
查看>>
js 获取gridview 点击行每个单元格的值
查看>>
Floyd算法解说
查看>>
java基础之【堆、栈、方法区】结构图
查看>>
浅谈C++非多态单继承数据布局
查看>>
cogs 1396. wwww
查看>>
MYSQL数据库优化
查看>>
Linux 新手学习任务
查看>>
内部类对象的获取!《Thinking in Java》随笔018
查看>>
[MongoDB]Python 操作 MongoDB
查看>>
antd 表格隔行变色
查看>>