import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    page = 1  # class-level page counter, incremented after each parsed page
    start_urls = [
        'http://quotes.toscrape.com/page/1/',
    ]

    def parse(self, response):
        # Emit the current page number as its own item.
        yield {'page': self.page}

        for quote in response.css('div.quote'):
            yield {
                'text': quote.css('span.text::text').get(),
                'author': quote.css('small.author::text').get(),
                'tags': quote.css('div.tags a.tag::text').getall(),
            }

        self.page += 1
        next_page = response.css('li.next a::attr(href)').get()
        if next_page is not None:
            # response.follow() accepts a relative URL, so the explicit
            # urljoin + Request shown below is not needed:
            # next_page = response.urljoin(next_page)
            # yield scrapy.Request(next_page, callback=self.parse)
            yield response.follow(next_page, callback=self.parse)
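
# Optional: a minimal sketch for running the spider without the `scrapy crawl quotes`
# CLI, using Scrapy's CrawlerProcess. The "quotes.json" feed path is only an example,
# and the FEEDS setting assumes Scrapy >= 2.1.
if __name__ == "__main__":
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess(settings={
        "FEEDS": {"quotes.json": {"format": "json"}},
    })
    process.crawl(QuotesSpider)
    process.start()  # blocks until the crawl finishes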