import os
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import numpy as np
import random
import pickle
from collections import Counter

# Lemmatizer used to reduce tokens to their base form
str_red = WordNetLemmatizer()

# Directories holding the raw e-mail text files
path = "data/dictionary_processing/"
hamPath = "data/nonspam-train/"
spamPath = "data/spam-train/"
hamPath_tst = "data/nonspam-test/"
spamPath_tst = "data/spam-test/"
# Build the lexicon: frequently occurring words across the training and testing data
def create_dict():
    dictionary = []
    for file in os.listdir(path):
        with open(path + file, 'r') as f:
            contents = f.read()
        dictionary += word_tokenize(contents)
    # Lemmatize every token, then count occurrences
    dictionary = [str_red.lemmatize(w) for w in dictionary]
    counts = Counter(dictionary)
    # Keep only words that occur more than 14 times, dropping rare tokens
    lexicon = [w for w in counts if counts[w] > 14]
    print("Lexicon size:", len(lexicon))
    return lexicon
# Create bag-of-words feature sets for the training and testing data
def create_featureset(dictCount):
    # Map each lexicon word to its index once, so lookups are O(1)
    word_index = {word: idx for idx, word in enumerate(dictCount)}

    def featurize_dir(dir_path, label):
        """Turn every file in dir_path into a [feature_vector, label] pair."""
        feature_set = []
        for file in os.listdir(dir_path):
            with open(dir_path + file, 'r') as f:
                contents = f.read()
            words = [str_red.lemmatize(w) for w in word_tokenize(contents)]
            ft = np.zeros(len(dictCount))
            for word in words:
                idx = word_index.get(word)
                if idx is not None:
                    ft[idx] += 1  # count occurrences of each lexicon word
            feature_set.append([list(ft), list(label)])
        return feature_set

    # Spam is labelled [0, 1]; ham (non-spam) is labelled [1, 0]
    featureSet_train = featurize_dir(spamPath, [0, 1]) + featurize_dir(hamPath, [1, 0])
    print("Training feature set done")
    featureSet_test = featurize_dir(spamPath_tst, [0, 1]) + featurize_dir(hamPath_tst, [1, 0])
    print("Testing feature set done")

    # Shuffle so spam and ham examples are interleaved
    random.shuffle(featureSet_train)
    random.shuffle(featureSet_test)

    # Split the [features, label] pairs into parallel lists
    # (this avoids building a ragged NumPy array, which newer NumPy versions reject)
    train_data = [ft for ft, _ in featureSet_train]
    train_label = [lbl for _, lbl in featureSet_train]
    test_data = [ft for ft, _ in featureSet_test]
    test_label = [lbl for _, lbl in featureSet_test]
    return train_data, train_label, test_data, test_label
if __name__ == '__main__':
    dictWords = create_dict()
    tr_data, tr_label, tst_data, tst_label = create_featureset(dictWords)
    # Persist the feature sets so training scripts can load them without re-tokenizing
    with open('data/data.pickle', 'wb') as f:
        pickle.dump([tr_data, tr_label, tst_data, tst_label], f)
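
# --- Usage sketch (illustrative, not part of the original script) ------------
# A downstream training script would typically load the pickled feature sets
# like this; the variable names below are assumptions for illustration only.
#
#     import pickle
#     import numpy as np
#
#     with open('data/data.pickle', 'rb') as f:
#         train_x, train_y, test_x, test_y = pickle.load(f)
#
#     # Convert to arrays for a classifier: train_x has shape
#     # (num_train_emails, lexicon_size), train_y has shape (num_train_emails, 2)
#     train_x = np.array(train_x)
#     train_y = np.array(train_y)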