Tags: python, selenium-webdriver, web-scraping

Why won't my scraper scrape the desired elements?


I am trying to scrape the sku and description on this site: https://www.dewalt.com/products/power-tools/

but it won't scrape the desired elements, even though the code runs without errors. Does anyone know why? Am I grabbing the wrong elements?

import undetected_chromedriver as uc

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import time
from selenium.common.exceptions import NoSuchElementException

options = Options()
driver = uc.Chrome()

website = 'https://www.dewalt.com/products/power-tools/drills'
driver.get(website)

# Wait until the product list container is attached to the DOM.
WebDriverWait(driver, 5).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, '#product-list')))
prod_num = []
prod_desc = []
container = driver.find_element(By.CSS_SELECTOR, '#product-list')

# Scroll down a few times so lazily-loaded product cards are rendered.
for _ in range(4):
    driver.execute_script("window.scrollBy(0, 2000);")
    time.sleep(2)

# BUG FIX: By.CLASS_NAME accepts a SINGLE class name only. The original code
# passed compound, space-separated class lists ('coh-inline-element product-sku'),
# which never match anything, so both result lists came back empty. Use CSS
# class selectors instead; a single distinctive class per element is enough.
skus = container.find_elements(By.CSS_SELECTOR, '.product-sku')
descriptions = container.find_elements(By.CSS_SELECTOR, '.product-title')

for sku in skus:
    prod_num.append(sku.text)
for desc in descriptions:
    prod_desc.append(desc.text)

driver.quit()
print(len(prod_num))
print(len(prod_desc))
# Create a DataFrame from the scraped data

df = pd.DataFrame({'code': prod_num, 'description': prod_desc})

# Save the DataFrame to a CSV file
df.to_csv('dewtest1.csv', index=False)

print(df)



Solution

  • The content you are after is static, so you should go for the requests module instead. Here is how you can achieve this using the requests module:

    import requests
    import pandas as pd
    from bs4 import BeautifulSoup
    
    link = 'https://www.dewalt.com/products/power-tools/drills'
    
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
    }
    
    res = requests.get(link, headers=headers)
    soup = BeautifulSoup(res.text, "html.parser")
    
    # Collect the rows in a plain list and build the DataFrame once at the
    # end: calling pd.concat inside the loop copies the entire DataFrame on
    # every iteration, which is quadratic in the number of products.
    rows = []
    for item in soup.select(".coh-column > article[role='article']"):
        sku = item.select_one(".product-sku").get_text(strip=True)
        desc = item.select_one(".product-title").get_text(strip=True)
        rows.append({'sku': sku, 'desc': desc})
        print(sku, desc)
    
    # Explicit columns keep the CSV header stable even if no rows matched.
    df = pd.DataFrame(rows, columns=['sku', 'desc'])
    df.to_csv("dewalt.csv", index=False)
    

    Your existing Selenium approach should also work if you adjust the selectors as follows:

    # Wait until every product card is present, then pull the SKU and the
    # title text out of each card individually.
    product_list = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, ".coh-column > article[role='article']")))
    prod_num = []
    prod_desc = []
    for card in product_list:
        prod_num.append(card.find_element(By.CSS_SELECTOR, '.product-sku').text)
        prod_desc.append(card.find_element(By.CSS_SELECTOR, '.product-title').text)
    
    # Persist the scraped pairs as a two-column CSV.
    df = pd.DataFrame({'code': prod_num, 'description': prod_desc})
    df.to_csv('dewtest1.csv', index=False)