python · xml · web-scraping · scrapy

How to add a new column to Scrapy output from a CSV?


I parse websites and it works fine, but I need to add a new column with IDs to the output. That column is stored in a CSV file together with the URLs:

https://www.ceneo.pl/48523541, 1362
https://www.ceneo.pl/46374217, 2457

Code of my spider:

import scrapy
from ceneo.items import CeneoItem
import csv

class QuotesSpider(scrapy.Spider):
    """Scrape up to 10 (price, seller) pairs from each ceneo.pl product page.

    NOTE(review): this is the buggy version under discussion -- see parse()
    for why the emitted `urlid` values alternate instead of matching the page.
    """

    name = "quotes" 

    def start_requests(self):
        # Collect the URL column (field 0) from urls.csv; the ID column is dropped here.
        start_urls = []
        f = open('urls.csv', 'r')  # NOTE(review): never closed -- prefer a `with` block.
        for i in f:
            u = i.split(',')
            start_urls.append(u[0])
        for url in start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        # Up to the first 10 prices and seller names found on this single page.
        all_prices = response.xpath('(//td[@class="cell-price"] /a/span/span/span[@class="value"]/text())[position() <= 10]').extract()
        all_sellers = response.xpath('(//tr/td/div/ul/li/a[@class="js_product-offer-link"]/text())[position()<=10]').extract()

        # Re-reads the whole CSV on every page just to build the full ID list (field 1).
        f = open('urls.csv', 'r')  # NOTE(review): never closed; also re-read per page.
        id = []  # NOTE(review): shadows the builtin id().
        for i in f:
            u = i.split(',')
            id.append(u[1])

        x = len(all_prices)     
        i = 0

        # Strip the "Opinie o " prefix from each seller name.
        while (i < x):
            all_sellers[i] = all_sellers[i].replace('Opinie o ', '')
            i += 1

        # BUG (the question): `id` holds one entry per CSV row, but all_prices /
        # all_sellers belong to ONE page -- zip pairs the whole ID list against
        # this page's rows, so IDs alternate across items instead of staying
        # constant per page.
        for urlid, price, seller in zip(id, all_prices, all_sellers):
            yield {'urlid': urlid.strip(), 'price': price.strip(), 'seller': seller.strip()}

In the results I get wrong data because the IDs are taken alternately (because of the zip function?):

urlid,price,seller
1362,109,eMAG
2457,116,electro.pl
1362,597,apollo.pl
2457,597,allegro.pl

And it should output:

urlid,price,seller
1362,109,eMAG
1362,116,electro.pl
2457,597,apollo.pl
2457,597,allegro.pl

Solution

  • You can read the ID in start_requests and assign it to the request using meta={'id': id_}; later, in parse, you can retrieve the ID with response.meta['id'].

    This way you will have correct ID in parse.

    I use string data instead of file to create working example.

    #!/usr/bin/env python3
    
    import scrapy
    
    data = '''https://www.ceneo.pl/48523541, 1362
    https://www.ceneo.pl/46374217, 2457'''
    
    class QuotesSpider(scrapy.Spider):
        """Attach each URL's CSV ID to its own request (via meta) so that
        parse() can emit the matching ID next to every scraped price/seller."""

        name = "quotes"

        def start_requests(self):
            # Each input row looks like "url, id".
            for line in data.split('\n'):
                url, id_ = (field.strip() for field in line.split(','))

                # meta carries the ID along with this request/response pair.
                yield scrapy.Request(url=url, callback=self.parse, meta={'id': id_})

        def parse(self, response):
            # The ID that start_requests() attached to this very request.
            id_ = response.meta["id"]

            all_prices = response.xpath('(//td[@class="cell-price"] /a/span/span/span[@class="value"]/text())[position() <= 10]').extract()
            all_sellers = response.xpath('(//tr/td/div/ul/li/a[@class="js_product-offer-link"]/text())[position()<=10]').extract()

            for price, seller in zip(all_prices, all_sellers):
                # Drop the "Opinie o " prefix from the seller text before emitting.
                yield {'urlid': id_,
                       'price': price.strip(),
                       'seller': seller.replace('Opinie o ', '').strip()}
    
    # --- standalone run (no Scrapy project needed); items land in `output.csv` ---

    from scrapy.crawler import CrawlerProcess

    settings = {
        'USER_AGENT': 'Mozilla/5.0',
        'FEED_FORMAT': 'csv',
        'FEED_URI': 'output.csv',
    }

    process = CrawlerProcess(settings)
    process.crawl(QuotesSpider)
    process.start()
    

    BTW: there is a standard built-in function id(), so I use the variable name id_ instead of id.