import time

import scrapy

from electric.items import ElectricItem
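# `electric.items` is not shown in this listing. A minimal sketch of the
# imported item class, assuming it only declares the fields populated in
# parse_items() below:
#
#     class ElectricItem(scrapy.Item):
#         url = scrapy.Field()
#         title = scrapy.Field()
#         source = scrapy.Field()
#         description = scrapy.Field()
#         content = scrapy.Field()
#         date = scrapy.Field()
#         column = scrapy.Field()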

# Spider for 国际风能网 (International Wind Energy Network, ewindpower.cn).
class EwpcSpider(scrapy.Spider):
    name = 'ewindpower'
    # Wait 15 seconds between requests to avoid hammering the site.
    download_delay = 15
    allowed_domains = ['ewindpower.cn']
    # URL templates for the two news-category list pages. start_requests()
    # is overridden below, so these are formatted there rather than fetched
    # directly by Scrapy.
    start_urls = [
        'http://www.ewindpower.cn/news/list-htm-catid-15-page-{}.html',
        'http://www.ewindpower.cn/news/list-htm-catid-14-page-{}.html',
    ]

    def start_requests(self):
        # Fetch the first page of each category list.
        yield scrapy.Request(url='http://www.ewindpower.cn/news/list-htm-catid-14.html',
                             callback=self.parse_pages, errback=self.errback_httpbin)
        yield scrapy.Request(url='http://www.ewindpower.cn/news/list-htm-catid-15.html',
                             callback=self.parse_pages, errback=self.errback_httpbin)
        # Pages 2 through 11 follow the templated URL pattern in start_urls.
        for baseurl in self.start_urls:
            for page in range(2, 12):
                url = baseurl.format(page)
                self.logger.info('next page: {}'.format(url))
                yield scrapy.Request(url=url, callback=self.parse_pages,
                                     errback=self.errback_httpbin, dont_filter=True)

    def parse_pages(self, response):
        # Each entry in the list table links to an article page.
        for item in response.css('div#iframe_11 > span > table li'):
            url = item.css('a').attrib['href']
            yield response.follow(url=url, callback=self.parse_items,
                                  errback=self.errback_httpbin)
        # Alternative pagination: follow the "下一页" ("next page") link instead
        # of generating the templated page URLs in start_requests().
        # nextpage = response.css('div.pages a[title="下一页"]').attrib['href']
        # self.logger.info('next page: {}'.format(nextpage))
        # yield response.follow(url=nextpage, callback=self.parse_pages, errback=self.errback_httpbin)

    def parse_items(self, response):
        url = response.url
        title = response.css('title::text').get()
        source = '国际风能网'
        description = response.css('meta[name=description]::attr(content)').get()
        # Join all text nodes inside the article body.
        content = ''.join(response.xpath('//div[@class="content"]//text()').getall())
        # Timestamp of the crawl, not the article's publication date.
        date = time.time()
        column = '风能'  # "wind energy"
        self.logger.info(title)
        yield ElectricItem(url=url, title=title, source=source,
                           description=description, content=content,
                           date=date, column=column)

    def errback_httpbin(self, failure):
        # Log any failed request (DNS error, timeout, HTTP error, etc.).
        self.logger.error(repr(failure))
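
# Usage sketch (assumes this file lives in a Scrapy project named "electric",
# matching the `electric.items` import above):
#
#     scrapy crawl ewindpower -o ewindpower.json
#
# With download_delay = 15 and 22 list pages (2 categories x 11 pages) plus
# every article they link to, a full run is deliberately slow.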