mw.py
# Paginated crawl of product collection pages on masalaworks.com.
import logging

import scrapy
from scrapy import Request
from scrapy.exceptions import CloseSpider
from scrapy.item import Item, Field

URL = 'http://www.masalaworks.com/collections/cushions-may-2013?page={page}'
# Other collections: http://www.masalaworks.com/collections/cushions-may-2013 | http://www.masalaworks.com/collections/diary | http://www.masalaworks.com/collections/earring-organizer | http://www.masalaworks.com/collections/trivets


class ScrapySampleItem(Item):
    title = Field()
    link = Field()
    desc = Field()
    image = Field()
    price = Field()


class StackOverflowSpider(scrapy.Spider):
    name = "masalaworks"
    # Let 404 responses through to parse() instead of having Scrapy drop them.
    handle_httpstatus_list = [404]

    def start_requests(self):
        # Request collection pages 1 through 6; the spider closes early
        # as soon as a page reports that it has no products.
        for index in range(1, 7):
            yield Request(URL.format(page=index))

    def parse(self, response):
        # An empty collection page means the pagination is exhausted.
        if response.css('.c22::text').extract_first() == 'No products found in this collection.':
            logging.info("Empty collection page reached, closing spider")
            raise CloseSpider('STOPPED at %s' % response.url)
        # Follow each product link on the page to its detail page.
        for href in response.css('.more a::attr(href)'):
            full_url = response.urljoin(href.extract())
            yield scrapy.Request(full_url, callback=self.parse_product)

    def parse_product(self, response):
        item = ScrapySampleItem()
        item['title'] = response.css('h1::text').extract_first()
        item['image'] = response.css('.thumbnail a::attr(href)').extract_first()
        item['desc'] = response.css('div[id="content"] p span::text').extract()
        item['price'] = response.css('.price-current .money::text').extract_first()
        # Fall back to the product title when no description is found.
        if not item['desc']:
            logging.info("Empty description received, using title instead")
            item['desc'] = response.css('h1::text').extract_first()
        item['link'] = response.url
        yield item
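

# A minimal standalone runner sketch, assuming Scrapy 2.x where the FEEDS
# setting handles item export; it lets the spider run via `python mw.py`
# instead of the scrapy CLI. The items.json output filename is illustrative.
from scrapy.crawler import CrawlerProcess

if __name__ == '__main__':
    process = CrawlerProcess(settings={
        'FEEDS': {'items.json': {'format': 'json'}},
    })
    process.crawl(StackOverflowSpider)
    process.start()  # blocks until the crawl finishes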