- import time
- import scrapy
- from electric.items import ElectricItem
- # 光伏产业网
class SolapvSpider(scrapy.Spider):
    """Crawl article pages from solarenpv.com (PV Industry Network).

    Flow: homepage -> category pages (``parse_classes``) -> paginated
    article listings (``parse_pages``) -> article items (``parse_item``).
    Each item is emitted as an ``ElectricItem``.
    """

    name = 'solarenpv'
    allowed_domains = ['solarenpv.com']
    start_urls = [
        'http://www.solarenpv.com/',
    ]
    # Be polite to the site: wait 10 seconds between requests.
    custom_settings = {
        'DOWNLOAD_DELAY': 10,
    }

    def start_requests(self):
        """Kick off crawling from the homepage.

        ``dont_filter=True`` so the start URL is fetched even if the
        dupefilter has seen it in a resumed job.
        """
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse_classes,
                                 errback=self.errback_httpbin, dont_filter=True)

    def parse_classes(self, response):
        """Follow every category link in the right-hand column boxes."""
        for url in response.css('.m_r .ibox_head a::attr(href)').getall():
            yield response.follow(url=url, callback=self.parse_pages,
                                  errback=self.errback_httpbin, dont_filter=True)

    def parse_pages(self, response):
        """Schedule every article on a listing page, then the next page."""
        for url in response.css(".catlist_li a::attr(href)").getall():
            yield response.follow(url=url, callback=self.parse_item,
                                  errback=self.errback_httpbin)
        # The 10th anchor in the pagination bar is assumed to be the
        # "next page" link — TODO confirm against the site's markup.
        # ``.get()`` returns None (it does not raise) when the selector
        # misses, so test explicitly instead of using a bare ``except``
        # that would also swallow unrelated errors.
        next_page = response.css('.pages a:nth-of-type(10)::attr(href)').get()
        if next_page is not None:
            yield response.follow(url=next_page, callback=self.parse_pages,
                                  errback=self.errback_httpbin)
        else:
            self.logger.info('Last page')

    def parse_item(self, response):
        """Extract one article into an ``ElectricItem``."""
        url = response.url
        title = response.css('title::text').get()
        source = '光伏产业网'
        description = response.css('meta[name=description]::attr(content)').get()
        # Join all text nodes of the article div and strip ALL whitespace
        # (including spaces inside the text), matching the site's
        # CJK-dominated content where spaces are layout noise.
        content = ''.join(''.join(
            response.xpath('//div[@id="article"]//text()').getall()).split())
        # Crawl timestamp (epoch seconds), not the article's publish date.
        date = time.time()
        column = '光伏'
        yield ElectricItem(url=url, title=title, source=source,
                           description=description, content=content,
                           date=date, column=column)

    def errback_httpbin(self, failure):
        """Log any request failure (DNS, timeout, HTTP error, ...)."""
        self.logger.error(repr(failure))