pythonpython-3.xlistspam-preventionemail-spam

Python: removing punctuation from a list of words for email spam classification


Trying to remove punctuation from a list of words. I'm new to Python programming, so any help would be great. The purpose of this is email spam classification. Previously I joined the words after checking whether punctuation was present, but that gave me single characters rather than whole words. After changing the code to extract whole words, this is what I have below — so now I'm trying to remove the punctuation, since my previous approach no longer works the same way.

import os
import string
from collections import Counter
from os import listdir  # return all files and folders in the directory

import nltk
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
from nltk import WordNetLemmatizer
from nltk.corpus import stopwords
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

# used for importing the lingspam dataset
def importLingspamDataset(dir):
    allEmails = [] # for storing the emails once read
    fileNames = []
    for file in listdir(dir):
        f = open((dir + '/' + file), "r")  # used for opening the file in read only format
        fileNames.append(file)
        allEmails.append(f.read()) # appends the read emails to the emails array
        f.close()
    return allEmails, fileNames

def importEnronDataset(dir):
    """Read every file in *dir* and return its emails and file names.

    dir -- path of the directory holding the Enron email files.
    Returns a tuple (allEmails, fileNames) of parallel lists.
    """
    allEmails = []  # raw text of each email
    fileNames = []  # file name matching each email
    for file in sorted(listdir(dir)):
        fileNames.append(file)
        # with-statement closes the handle even if read() raises
        with open(dir + '/' + file, "r") as f:
            allEmails.append(f.read())
    # BUG FIX: the original `return` sat inside the loop body, so the
    # function returned after reading only the first file.
    return allEmails, fileNames

# used to remove punctuation from the emails as this is of no use for detecting spam
def removePunctuation(cleanedEmails):
    punc = set(string.punctuation)
    for word, line in enumerate(cleanedEmails):
        words = line.split()
        x = [''.join(c for c in words if c not in string.punctuation)]
        allWords = []
        allWords += x
        return allWords

# used to remove stopwords i.e. words of no use in detecting spam
def removeStopwords(cleanedEmails):
    removeWords = set(stopwords.words('english')) # sets all the stopwords to be removed
    for stopw in removeWords: # for each word in remove words
        if stopw not in removeWords: # if the word is not in the stopwords to be removed
            cleanedEmails.append(stopw) # add this word to the cleaned emails
    return(cleanedEmails)

# funtion to return words to its root form - allows simplicity
def lemmatizeEmails(cleanedEmails):
    lemma = WordNetLemmatizer() # to be used for returning each word to its root form
    lemmaEmails = [lemma.lemmatize(i) for i in cleanedEmails] # lemmatize each word in the cleaned emails
    return lemmaEmails

# function to allow a systematic process of elimating the undesired elements within the emails
def cleanAllEmails(cleanedEmails):
    cleanPunc = removePunctuation(cleanedEmails)
    cleanStop = removeStopwords(cleanPunc)
    cleanLemma = lemmatizeEmails(cleanStop)
    return cleanLemma

def createDictionary(email):
    """Build a word-frequency dictionary and render it as a word cloud.

    email -- iterable of cleaned words (the output of cleanAllEmails).
    Returns a Counter restricted to the 3000 most common words.
    Side effects: displays the word cloud via matplotlib and saves
    it to 'test1.png' in the working directory.
    """
    dictionary = Counter(email)
    # BUG FIX: the original discarded the result of most_common(3000),
    # so the dictionary was never truncated to the top 3000 words.
    dictionary = Counter(dict(dictionary.most_common(3000)))
    word_cloud = WordCloud(width=400, height=400, background_color='white',
              min_font_size=12).generate_from_frequencies(dictionary)
    plt.imshow(word_cloud)
    plt.axis("off")
    plt.margins(x=0, y=0)
    plt.show()
    word_cloud.to_file('test1.png')
    # BUG FIX: the caller assigns trainDictionary = createDictionary(...),
    # so the dictionary must actually be returned (the original returned None).
    return dictionary

def featureExtraction(email):
    """Allocate the (n_emails x 3000) feature matrix for the email set.

    email -- iterable of emails (one row will be produced per item).
    Returns a numpy array of zeros with one row per email and 3000
    columns (one per dictionary word).

    NOTE(review): the matrix is never populated in the original code;
    it is now RETURNED (the original built it and returned None) so a
    caller can fill in the word counts — confirm the intended scheme.
    """
    emailFiles = list(email)
    featureMatrix = np.zeros((len(emailFiles), 3000))
    return featureMatrix


def classifyLingspamDataset(email):
    """Label each file name: True when it is spam.

    In the Lingspam corpus, spam messages are the files whose name
    contains the substring 'spmsg'.
    """
    return ["spmsg" in name for name in email]

# Lingspam dataset — driver script: load, clean, and visualise the training emails.
# NOTE(review): paths are hard-coded relative to the working directory — confirm.
trainingDataLingspam, trainingLingspamFilename = importLingspamDataset("spam-non-spam-dataset/train-mails") # extract the training emails from the dataset
#testingDataLingspam, testingLingspamFilename = importLingspamDataset("spam-non-spam-dataset/test-mails") # extract the testing emails from the dataset

# Clean the raw email text (punctuation -> stopwords -> lemmatisation).
trainingDataLingspamClean = cleanAllEmails(trainingDataLingspam)
#testingDataLingspamClean = cleanAllEmails(testingDataLingspam)

#trainClassifyLingspam = classifyLingspamDataset(trainingDataLingspam)
#testClassifyLingspam = classifyLingspamDataset(testingDataLingspam)

# Build the word-frequency dictionary and render/save the word cloud.
trainDictionary = createDictionary(trainingDataLingspamClean)
#createDictionary(testingDataLingspamClean)

#trainingDataEnron, trainingEnronFilename = importEnronDataset("spam-non-spam-dataset-enron/bigEmailDump/training/")

Solution

  • Based on your question, I assume that you have a list of emails, which for each email you would like to remove the punctuation marks. This answer was based on the first revision of the code you posted.

    import string
    
    
    def removePunctuation(emails):
        """Return a copy of *emails* with every punctuation character removed.

        emails -- list of email strings.
        Whitespace (including newlines) is preserved; only the
        characters in string.punctuation are stripped.
        """
    
        # I am using a list comprehension here to iterate over the emails.
        # For each iteration, translate the email to remove the punctuation marks.
        # Translate only allows a translation table as an argument.
        # This is why str.maketrans is used to create the translation table.
    
        cleaned_emails = [email.translate(str.maketrans('', '', string.punctuation))
                          for email in emails]
    
        return cleaned_emails
    
    
    if __name__ == '__main__':
    
        # Assuming cleanedEmails is a list of emails,
        # I am substituting cleanedEmails with emails.
        # I used cleanedEmails as the result.
    
        # Demo: punctuation is removed while whitespace (including '\n') is kept.
        emails = ["This is a, test!", "This is another#@! \ntest"]
        cleaned_emails = removePunctuation(emails)
        print(cleaned_emails)
    
    input: ["This is a, test!", "This is another#@! \ntest"]
    output: ['This is a test', 'This is another \ntest']
    

    EDIT:

    Issue was resolved after a conversation with the OP. The OP was having an issue with WordCloud, and the solution I provided is working. I managed to guide the OP through getting WordCloud running; the OP is now fine-tuning the WordCloud results.