python · loops · selenium-webdriver · export-to-csv

Facing an error while looping through list and saving the data


I am trying to access data of around 500 stocks from the "https://www.nseindia.com/report-detail/eq_security" for which I have mentioned steps.

  1. Open the url
  2. enter stock symbol and click on the first option from the dropdown.
  3. Click on 1W to open the weekly data.
  4. From the table, extract data of specific date for Total Traded Quantity, No. of Trades, Deliverable Qty, and % Dly Qt to Traded Qty.
  5. Save this data into a CSV file.

So my CSV file will contain 5 columns and around 500 rows, as shown below. (Screenshot of the expected layout omitted.)

I have built below code but these are the problems I am facing.

  1. It is opening Chrome again for each and every symbol. I tried to erase the previous symbol and enter a new one, but it's not working, so it is collecting the same data for every stock.
  2. I don't have historical data so if I want to have 3 months data for every stock, I need to click on "3M" collect all the data. But I am not sure how to do that? Please share this code separately.

I will update the list to include all stocks and I will do this on a daily basis, so I just need to change the date in the code in future.

Below is the code I have:

import csv
import os
import time

import undetected_chromedriver as uc
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# Step 1: Create a list of stock symbols (for illustration purposes, here are a few)
# You can replace this with a list of 500 stock symbols
stock_symbols = ["INFY", "TCS", "RELIANCE", "HDFCBANK", "ITC"]  # Add all 500 symbols here

# Define the CSV file name (rows are appended, so daily runs accumulate here)
csv_file = "nse_data.csv"

# Check if file already exists (to avoid writing headers again);
# computed once at startup and consulted by the CSV-writing loop below
file_exists = os.path.isfile(csv_file)

# Step 2: Open browser and go to URL
# undetected_chromedriver is used instead of plain Selenium Chrome —
# presumably because nseindia.com blocks standard automated browsers
# (NOTE(review): confirm this is why uc was chosen)
driver = uc.Chrome()
driver.get("https://www.nseindia.com/report-detail/eq_security")
driver.maximize_window()
# Shared explicit wait (10 s timeout) reused for every element lookup below
wait = WebDriverWait(driver, 10)

def get_stock_data(symbol, target_date="11-Apr-2025"):
    """Scrape NSE delivery statistics for *symbol* on *target_date*.

    Drives the already-open security-wise archive page: types the symbol,
    picks the first autocomplete suggestion, switches to the weekly ("1W")
    view, and reads the row whose Date cell equals *target_date*.

    Args:
        symbol: NSE stock symbol, e.g. "INFY".
        target_date: Date exactly as rendered in the table,
            e.g. "11-Apr-2025".

    Returns:
        dict with keys Symbol, Date, Total Traded Quantity, No. of Trades,
        Deliverable Qty and % Dly Qt to Traded Qty — or None when no row
        matches *target_date*.

    Relies on the module-level ``driver`` and ``wait`` objects.
    """
    # Step 1: focus the symbol box and wipe whatever was typed before.
    symbol_input = wait.until(EC.element_to_be_clickable((By.ID, "hsa-symbol")))
    symbol_input.click()

    # element.clear() does not fire the key events this page's autocomplete
    # listens for, so the previous symbol can stick and every iteration
    # re-scrapes the same stock. Backspacing until the field reports an
    # empty value clears it reliably.
    while symbol_input.get_attribute("value"):
        symbol_input.send_keys(Keys.BACKSPACE)

    # Small settle delay before typing the next symbol.
    time.sleep(0.5)

    # Enter the new symbol.
    symbol_input.send_keys(symbol)
    print(f"✅ Selected symbol '{symbol}'")
    time.sleep(2)

    # Step 2: Click on the first dropdown option.
    wait.until(EC.element_to_be_clickable((By.XPATH, "//div[@id='hsa-symbol_listbox']//div//div[1]"))).click()
    print(f"✅ Clicked on the first option for '{symbol}'")
    time.sleep(5)

    # Step 3: Click on the 1W button.
    one_week_button = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="oneW"]')))
    one_week_button.click()
    print(f"✅ Clicked on the 1W button for {symbol}")
    time.sleep(5)

    # Step 4: parse the rendered table and extract the target_date row.
    soup = BeautifulSoup(driver.page_source, "html.parser")

    table = soup.find("table", {"id": "hsaTable"})
    rows = table.find_all("tr")

    # Map header captions to indices so the scrape survives column reorder.
    headers = [th.text.strip() for th in rows[0].find_all("th")]

    columns_of_interest = ["Date", "Total Traded Quantity", "No. of Trades",
                           "Deliverable Qty", "% Dly Qt to Traded Qty"]
    indices = {col: headers.index(col) for col in columns_of_interest}

    # Look for the row with the target date.
    for row in rows[1:]:
        cols = [td.text.strip() for td in row.find_all("td")]
        if cols and cols[indices["Date"]] == target_date:
            extracted_data = {"Symbol": symbol}
            for col in columns_of_interest:
                extracted_data[col] = cols[indices[col]]
            print(f"✅ Data for {symbol} on {target_date}:")
            for key, val in extracted_data.items():
                print(f"{key}: {val}")
            break
    else:
        # for/else: the loop finished without a matching Date cell.
        print(f"❌ No data found for date: {target_date} for {symbol}")
        extracted_data = None

    return extracted_data

# Write data for multiple stocks: one CSV row per symbol.
for symbol in stock_symbols:
    extracted_data = get_stock_data(symbol)

    if extracted_data:
        # Append so repeated daily runs accumulate rows in the same file.
        with open(csv_file, mode="a", newline="", encoding="utf-8") as f:
            writer = csv.DictWriter(f, fieldnames=extracted_data.keys())

            # Write the header only once. Flip the flag right away:
            # without this, file_exists stays False for a brand-new file
            # and the header is re-written before every symbol's row.
            if not file_exists:
                writer.writeheader()
                file_exists = True

            # Write the row
            writer.writerow(extracted_data)

        print(f"✅ Data for {symbol} written to CSV.")

# Close the browser when all data is collected
driver.quit()

print("✅ All data extraction completed.")

Solution

  • Regarding your first question - You need to make sure the value in the input field is cleared before entering the next symbol. Use the below code to do that.

    def clear_web_field(element):
        while element.get_attribute("value") != "":
            element.send_keys(Keys.BACKSPACE)
    

    Full code:

    import undetected_chromedriver as uc
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.common.keys import Keys
    from bs4 import BeautifulSoup
    import time
    import csv
    import os
    
    # Step 1: Create a list of stock symbols (for illustration purposes, here are a few)
    # You can replace this with a list of 500 stock symbols
    stock_symbols = ["INFY", "TCS", "RELIANCE", "HDFCBANK", "ITC"]  # Add all 500 symbols here
    
    # Define the CSV file name (rows are appended, so daily runs accumulate here)
    csv_file = "nse_data.csv"
    
    # Check if file already exists (to avoid writing headers again)
    file_exists = os.path.isfile(csv_file)
    
    # Step 2: Open browser and go to URL
    # undetected_chromedriver — presumably to avoid NSE's bot blocking
    # (NOTE(review): confirm plain chromedriver is actually blocked)
    driver = uc.Chrome()
    driver.get("https://www.nseindia.com/report-detail/eq_security")
    driver.maximize_window()
    # Shared explicit wait (10 s) used by clear_web_field / get_stock_data below
    wait = WebDriverWait(driver, 10)
    
    def clear_web_field(element, max_keys=100):
        """Clear a text input by sending BACKSPACE keystrokes.

        Used instead of element.clear() because this page's autocomplete only
        reacts to real key events. *max_keys* bounds the loop so the script
        cannot hang when the field's "value" attribute never becomes empty —
        the original `!= ""` test spun forever if get_attribute returned None.
        """
        for _ in range(max_keys):
            if not element.get_attribute("value"):
                break
            element.send_keys(Keys.BACKSPACE)
    
    def get_stock_data(symbol, target_date="11-Apr-2025"):
        """Scrape NSE delivery statistics for *symbol* on *target_date*.

        Types the symbol into the security-wise archive page, picks the
        first autocomplete suggestion, switches to the weekly ("1W") view
        and reads the row whose Date cell equals *target_date*.

        Args:
            symbol: NSE stock symbol, e.g. "INFY".
            target_date: Date exactly as rendered in the table,
                e.g. "11-Apr-2025". Parameterized so the daily run only
                needs a different argument, not a code edit.

        Returns:
            dict with keys Symbol, Date, Total Traded Quantity,
            No. of Trades, Deliverable Qty and % Dly Qt to Traded Qty —
            or None when no row matches *target_date*.

        Relies on the module-level ``driver`` and ``wait`` objects.
        """
        # Step 1: Clear the symbol input field and enter new symbol
        symbol_input = wait.until(EC.element_to_be_clickable((By.ID, "hsa-symbol")))

        # Clear the input field by sending backspace (element.clear() does
        # not fire the key events the autocomplete listens for).
        clear_web_field(symbol_input)

        # Wait a moment to ensure the field is cleared before entering the new symbol
        time.sleep(0.5)

        # Enter the new symbol
        symbol_input.send_keys(symbol)
        print(f"✅ Selected symbol '{symbol}'")
        time.sleep(2)

        # Step 2: Click on the first dropdown option
        wait.until(EC.element_to_be_clickable((By.XPATH, "//div[@id='hsa-symbol_listbox']//div//div[1]"))).click()
        print(f"✅ Clicked on the first option for '{symbol}'")
        time.sleep(5)

        # Step 3: Click on the 1W button
        one_week_button = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="oneW"]')))
        one_week_button.click()
        print(f"✅ Clicked on the 1W button for {symbol}")
        time.sleep(5)

        # Step 4: parse the rendered table and extract the target_date row.
        soup = BeautifulSoup(driver.page_source, "html.parser")

        table = soup.find("table", {"id": "hsaTable"})
        rows = table.find_all("tr")

        # Map header captions to indices so the scrape survives column reorder.
        headers = [th.text.strip() for th in rows[0].find_all("th")]

        columns_of_interest = ["Date", "Total Traded Quantity", "No. of Trades",
                               "% Dly Qt to Traded Qty", "Deliverable Qty"]
        columns_of_interest = ["Date", "Total Traded Quantity", "No. of Trades",
                               "Deliverable Qty", "% Dly Qt to Traded Qty"]
        indices = {col: headers.index(col) for col in columns_of_interest}

        # Look for the row with the target date
        for row in rows[1:]:
            cols = [td.text.strip() for td in row.find_all("td")]
            if cols and cols[indices["Date"]] == target_date:
                extracted_data = {"Symbol": symbol}
                for col in columns_of_interest:
                    extracted_data[col] = cols[indices[col]]
                print(f"✅ Data for {symbol} on {target_date}:")
                for key, val in extracted_data.items():
                    print(f"{key}: {val}")
                break
        else:
            # for/else: the loop finished without a matching Date cell.
            print(f"❌ No data found for date: {target_date} for {symbol}")
            extracted_data = None

        return extracted_data
    
    
    # Write data for multiple stocks: one CSV row per symbol.
    for symbol in stock_symbols:
        extracted_data = get_stock_data(symbol)

        if extracted_data:
            # Append so repeated daily runs accumulate rows in the same file.
            with open(csv_file, mode="a", newline="", encoding="utf-8") as f:
                writer = csv.DictWriter(f, fieldnames=extracted_data.keys())

                # Write the header only once. Flip the flag right away:
                # without this, file_exists stays False for a brand-new
                # file and the header is re-written before every row.
                if not file_exists:
                    writer.writeheader()
                    file_exists = True

                # Write the row
                writer.writerow(extracted_data)

            print(f"✅ Data for {symbol} written to CSV.")

    # Close the browser when all data is collected
    driver.quit()

    print("✅ All data extraction completed.")
    

    SECOND QUESTION: Not sure if this is what you are looking for. See the code below to click on 3M

    # Using ID locator (this is preferred)
    # Waits until the 3-month ("3M") range button is clickable, then clicks it.
    three_month_button = wait.until(EC.element_to_be_clickable((By.ID, 'threeM')))
    three_month_button.click()
    
    #or using the XPath expression
    # Equivalent alternative: locate the same button by XPath instead of ID.
    three_month_button = wait.until(EC.element_to_be_clickable((By.XPATH, '//a[@id="threeM"]')))
    three_month_button.click()
    
    

    UPDATE - Answer to second question:

    Check the code below. I removed the target-date (11-Apr) filtering logic; this version fetches the full 3M table for each stock and writes every row to the CSV.
    import undetected_chromedriver as uc
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.common.keys import Keys
    from bs4 import BeautifulSoup
    import time
    import csv
    import os
    
    # Step 1: Create a list of stock symbols (for illustration purposes, here are a few)
    # You can replace this with a list of 500 stock symbols
    stock_symbols = ["INFY", "TCS", "RELIANCE", "HDFCBANK", "ITC"]  # Add all 500 symbols here
    
    # Define the CSV file name (rows are appended, so daily runs accumulate here)
    csv_file = "nse_data.csv"
    
    # Check if file already exists (to avoid writing headers again)
    file_exists = os.path.isfile(csv_file)
    # True when the existing file already has content (assumed to include the
    # header row); the write loop below flips it after the first header write.
    header_written = os.path.getsize(csv_file) > 0 if file_exists else False  # Track header writing during script run
    
    # Step 2: Open browser and go to URL
    # undetected_chromedriver — presumably to avoid NSE's bot blocking
    # (NOTE(review): confirm plain chromedriver is actually blocked)
    driver = uc.Chrome()
    driver.get("https://www.nseindia.com/report-detail/eq_security")
    driver.maximize_window()
    # Shared explicit wait (10 s) used by clear_web_field / get_stock_data below
    wait = WebDriverWait(driver, 10)
    
    def clear_web_field(element, max_keys=100):
        """Clear a text input by sending BACKSPACE keystrokes.

        Used instead of element.clear() because this page's autocomplete only
        reacts to real key events. *max_keys* bounds the loop so the script
        cannot hang when the field's "value" attribute never becomes empty —
        the original `!= ""` test spun forever if get_attribute returned None.
        """
        for _ in range(max_keys):
            if not element.get_attribute("value"):
                break
            element.send_keys(Keys.BACKSPACE)
    
    # Function to get data for a single stock
    def get_stock_data(symbol):
        """Scrape the whole 3-month ("3M") delivery table for one symbol.

        Types *symbol* into the archive page, picks the first autocomplete
        suggestion, switches the table to the 3M range and returns every
        data row.

        Returns:
            list of dicts (possibly empty), one per table row, with keys
            Symbol, Date, Total Traded Quantity, No. of Trades,
            Deliverable Qty and % Dly Qt to Traded Qty.

        Relies on the module-level ``driver`` and ``wait`` objects.
        """
        # Step 1: Clear the symbol input field and enter new symbol
        symbol_input = wait.until(EC.element_to_be_clickable((By.ID, "hsa-symbol")))

        # Clear the input field by sending backspace
        clear_web_field(symbol_input)

        # Wait a moment to ensure the field is cleared before entering the new symbol
        time.sleep(0.5)

        # Enter the new symbol
        symbol_input.send_keys(symbol)
        print(f"✅ Selected symbol '{symbol}'")
        time.sleep(2)

        # Step 2: Click on the first dropdown option
        wait.until(EC.element_to_be_clickable((By.XPATH, "//div[@id='hsa-symbol_listbox']//div//div[1]"))).click()
        print(f"✅ Clicked on the first option for '{symbol}'")
        time.sleep(5)

        # Step 3: switch to the 3-month range. (The previous variable name
        # and log message said "1W" even though the XPath targets the 3M
        # button — fixed to say what actually happens.)
        three_month_button = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="threeM"]')))
        three_month_button.click()
        print(f"✅ Clicked on the 3M button for {symbol}")
        time.sleep(5)

        # Step 4: parse every data row of the rendered table.
        soup = BeautifulSoup(driver.page_source, "html.parser")

        table = soup.find("table", {"id": "hsaTable"})
        rows = table.find_all("tr")

        # Map header captions to indices so the scrape survives column reorder.
        headers = [th.text.strip() for th in rows[0].find_all("th")]

        columns_of_interest = ["Date", "Total Traded Quantity", "No. of Trades",
                               "Deliverable Qty", "% Dly Qt to Traded Qty"]
        indices = {col: headers.index(col) for col in columns_of_interest}

        extracted_data_list = []

        for row in rows[1:]:
            cols = [td.text.strip() for td in row.find_all("td")]
            if not cols:
                # Skip spacer/header rows that contain no <td> cells —
                # indexing them would raise IndexError (this guard existed
                # in the 1W version but was dropped here).
                continue
            extracted_data = {"Symbol": symbol}
            for col in columns_of_interest:
                extracted_data[col] = cols[indices[col]]
            extracted_data_list.append(extracted_data)

        print(f"✅ Collected {len(extracted_data_list)} rows for {symbol}")
        return extracted_data_list
    
    
    # Append each symbol's scraped rows to the CSV. The header goes out only
    # the first time anything is written to a fresh, empty file.
    for symbol in stock_symbols:
        rows_for_symbol = get_stock_data(symbol)

        if rows_for_symbol:
            with open(csv_file, mode="a", newline="", encoding="utf-8") as out:
                writer = csv.DictWriter(out, fieldnames=rows_for_symbol[0].keys())

                if not header_written:
                    writer.writeheader()
                    header_written = True

                # One writerows call instead of a per-row loop.
                writer.writerows(rows_for_symbol)

            print(f"✅ Data for {symbol} written to CSV.")

    # Close the browser when all data is collected
    driver.quit()

    print("✅ All data extraction completed.")