I was scraping SEC EDGAR data by running 'MDA Extractor.py' in this link. https://github.com/rflugum/10K-MDA-Section
As this program was made in Python2, I changed some expressions (e.g., print -> print(), xrange -> range), and added useragent to avoid blocking.
Meanwhile, when it reads the link number '39126' (https://www.sec.gov/Archives/edgar/data/30302/0000030302-02-000003.txt),
The following error was generated:
['39126', 'edgar/data/30302/0000030302-02-000003.txt']
Traceback (most recent call last):
File "MDAExtractor.py", line 261, in (module) headerclean(temp, temp1)
File "MDAExtractor.py", line 112, in headerclean **for** x, line in enumerate(hand):
File "/usr/lib/python3.10/codecs.py", line 322, in decode (result, consumed) = self._buffer_decode(data, self.**errors**, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x97 in position 467: invalid start byte
I was confused because it was running well before reading the link 39126. And I tried to add "encoding = 'utf-16'" to some potential places, but it generated errors. I wonder how can I handle this and make a more broadly applicable code.
The lines related to the error are as follows:
#################################################################################
#This is the file that records the number of sections for each respective filing.
#################################################################################
LOG=os.path.join(filepath,"DOWNLOADLOG.txt")
with open(LOG,'w') as f:
f.write("Filer\tSECTIONS\n")
f.close()
######## Download the filing ############
headers = {'User-Agent': 'A B@C.D'}
with open(download, 'r') as txtfile:
reader = csv.reader(txtfile, delimiter=',')
for line in reader:
print(line)
FileNUM=line[0].strip()
Filer=os.path.join(filepath, "MDA_processed/"+str(line[0].strip())+".txt")
url = 'https://www.sec.gov/Archives/' + line[1].strip()
with open(temp, 'wb') as f:
f.write(requests.get('%s' % url, headers = headers).content)
f.close()
##### Obtain Header Information on Filing ######################
parse(temp, Filer)
headerclean(temp, temp1) ####### LINE 261 !!!!!!!#####
##### ASCII Section ######################
with open(temp,'r') as f:
str1=f.read()
output=str1
locations_xbrlbig=xbrl_clean("<type>zip", "</document>", output)
locations_xbrlbig.append(len(output))
if locations_xbrlbig!=[]:
str1=""
if len(locations_xbrlbig)%2==0:
for i in range(0,len(locations_xbrlbig),2):
str1=str1+output[locations_xbrlbig[i]:locations_xbrlbig[i+1]]
f.close
output=str1
locations_xbrlbig=xbrl_clean("<type>graphic", "</document>", output)
locations_xbrlbig.append(len(output))
if locations_xbrlbig!=[0]:
str1=""
if len(locations_xbrlbig)%2==0:
for i in range(0,len(locations_xbrlbig),2):
str1=str1+output[locations_xbrlbig[i]:locations_xbrlbig[i+1]]
output=str1
locations_xbrlbig=xbrl_clean("<type>excel", "</document>", output)
locations_xbrlbig.append(len(output))
if locations_xbrlbig!=[0]:
str1=""
if len(locations_xbrlbig)%2==0:
for i in range(0,len(locations_xbrlbig),2):
str1=str1+output[locations_xbrlbig[i]:locations_xbrlbig[i+1]]
output=str1
locations_xbrlbig=xbrl_clean("<type>pdf", "</document>", output)
locations_xbrlbig.append(len(output))
if locations_xbrlbig!=[0]:
str1=""
if len(locations_xbrlbig)%2==0:
for i in range(0,len(locations_xbrlbig),2):
str1=str1+output[locations_xbrlbig[i]:locations_xbrlbig[i+1]]
output=str1
locations_xbrlbig=xbrl_clean("<type>xml", "</document>", output)
locations_xbrlbig.append(len(output))
if locations_xbrlbig!=[0]:
str1=""
if len(locations_xbrlbig)%2==0:
for i in range(0,len(locations_xbrlbig),2):
str1=str1+output[locations_xbrlbig[i]:locations_xbrlbig[i+1]]
output=str1
locations_xbrlbig=xbrl_clean("<type>ex", "</document>", output)
locations_xbrlbig.append(len(output))
if locations_xbrlbig!=[0]:
str1=""
if len(locations_xbrlbig)%2==0:
for i in range(0,len(locations_xbrlbig),2):
str1=str1+output[locations_xbrlbig[i]:locations_xbrlbig[i+1]]
########################### DELETE HEADER INFORMATION #######################################
def headerclean(temp, temp1):
mark0=0
strings1=['</SEC-HEADER>','</IMS-HEADER>']
hand=open(temp)
hand.seek(0)
for x, line in enumerate(hand):
line=line.strip()
if any(s in line for s in strings1):
mark0=x
break
hand.seek(0)
newfile=open(temp1,'w')
for x, line in enumerate(hand): ###### LINE 112 !!!!!##########
if x>mark0:
newfile.write(line)
hand.close()
newfile.close()
newfile=open(temp1,'r')
hand=open(temp,'w')
for line in newfile:
if "END PRIVACY-ENHANCED MESSAGE" not in line:
hand.write(line)
hand.close()
newfile.close()
from bs4 import BeautifulSoup
headers = {'user-agent': '`A B@C.D`'}
with open(download, 'r') as txtfile:
reader = csv.reader(txtfile, delimiter=',')
for line in reader:
print(line)
FileNUM = line[0].strip()
Filer = os.path.join(filepath,"MDA_processed/"+str(line[0].strip()) + ".txt")
url = 'https://www.sec.gov/Archives/' + line[1].strip()
response = requests.get('%s' % url, headers=headers)
soup = BeautifulSoup(response.content, 'lxml')
filing_document = soup.body.text.encode('utf-8').decode('ascii', 'ignore')
#print(filing_document)
with open(temp, 'wb') as f:
f.write(filing_document)
f.close()