import time
import scrapy
from electric.items import ElectricItem
# Spider for nengyuanjie.net ("能源界", a Chinese energy-industry news portal).
class NyjieSpider(scrapy.Spider):
    """Crawl paginated article listings from nengyuanjie.net.

    Each ``start_urls`` entry is a ``(listing_url, max_page, column)`` tuple;
    the spider walks every listing page up to ``max_page`` and yields one
    :class:`ElectricItem` per article found.
    """

    name = 'nengyuanjie'
    allowed_domains = ['nengyuanjie.net']
    # (listing URL, last page number to request, column/category label).
    # NOTE: not Scrapy's usual list of plain URL strings — this only works
    # because start_requests() below is overridden to unpack the tuples.
    start_urls = [
        ('http://www.nengyuanjie.net/series/chuneng.html', 188, '储能'),
        ('http://www.nengyuanjie.net/series/hedian.html', 180, '核电'),
        ('http://www.nengyuanjie.net/series/fengdian.html', 188, '风电'),
        ('http://www.nengyuanjie.net/series/guangfu.html', 257, '光伏'),
        ('http://www.nengyuanjie.net/series/nengyuanhulianwang.html', 43, '计算机'),
        ('http://www.nengyuanjie.net/series/qingneng.html', 90, '氢能'),
    ]
    # Be polite to the host: 10 s between requests, generous download timeout.
    custom_settings = {
        'DOWNLOAD_DELAY': 10,
        'DOWNLOAD_TIMEOUT': 1800,
    }

    def start_requests(self):
        """Seed one request per listing; pagination state rides in ``meta``."""
        for url, maxpage, column in self.start_urls:
            yield scrapy.Request(
                url=url,
                callback=self.parse_pages,
                errback=self.errback_httpbin,
                meta={'url': url, 'nextpage': 2,
                      'maxpage': maxpage, 'column': column},
                dont_filter=True,
            )

    def parse_pages(self, response):
        """Yield one request per article on this listing page, then follow
        the next page until ``maxpage`` is reached."""
        for entry in response.xpath('//div[@class="lists"]/div[@class="li"]'):
            url = entry.css('a').attrib['href']
            # Lazy %-style args: the message is only formatted if emitted.
            self.logger.info('parse item: %s', url)
            yield response.follow(
                url=url,
                callback=self.parse_item,
                errback=self.errback_httpbin,
                meta={'column': response.meta['column']},
            )
        meta = response.meta
        if meta['nextpage'] <= meta['maxpage']:
            nextpage = '{}?page={}'.format(meta['url'], meta['nextpage'])
            self.logger.info('next page: %s', nextpage)
            yield response.follow(
                url=nextpage,
                callback=self.parse_pages,
                errback=self.errback_httpbin,
                meta={'url': meta['url'], 'nextpage': meta['nextpage'] + 1,
                      'maxpage': meta['maxpage'], 'column': meta['column']},
                dont_filter=True,
            )

    def parse_item(self, response):
        """Extract a single article page into an :class:`ElectricItem`."""
        # Join all text nodes of the article body, stripping ALL whitespace
        # (including spaces inside sentences — intentional in the original).
        content = ''.join(
            ''.join(
                response.xpath('//div[@class="content"]//text()').getall()
            ).split()
        )
        yield ElectricItem(
            url=response.url,
            title=response.css('title::text').get(),
            source='能源界',  # portal name — runtime value, not translated
            description=response.css(
                'meta[name=description]::attr(content)').get(),
            content=content,
            # NOTE(review): this is the crawl timestamp (epoch seconds),
            # not the article's publish date — confirm downstream expects that.
            date=time.time(),
            column=response.meta['column'],
        )

    def errback_httpbin(self, failure):
        """Log any request failure routed here by the errback hooks."""
        self.logger.error(repr(failure))