MLP models
# checking library versions
import pandas as pd
import numpy as np
import scipy
import matplotlib
import sklearn
import sys
print('Python: {}'.format(sys.version))
print('numpy: {}'.format(np.__version__))
print('scipy: {}'.format(scipy.__version__))
print('matplotlib: {}'.format(matplotlib.__version__))
print('sklearn: {}'.format(sklearn.__version__))
#importing dependencies
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading the data
# the data set is not loaded from a URL here; as a stand-in (assumption), build the same
# four-feature DataFrame with a 'class' column from sklearn's bundled Iris data
from sklearn.datasets import load_iris
data = load_iris()
iris = pd.DataFrame(data.data, columns=['sepal-length', 'sepal-width', 'petal-length', 'petal-width'])
iris['class'] = data.target_names[data.target]
# dimensions and description
print(iris.shape)
print(iris.describe())
# class distribution
print(iris.groupby('class').size())
# box and whisker plots
iris.plot(kind='box', subplots=True , layout=(2,2), sharex=False, sharey=False)
pyplot.show()
# histogram of each variable
iris.hist()
pyplot.show()
#multivariate plots
scatter_matrix(iris)
pyplot.show()
# creating the train/validation arrays
# splitting the data 80/20 (test_size=0.2)
array = iris.values
x = array[:,0:4]
y = array[:,4]
x_train, x_validation, y_train, y_validation = train_test_split(x,y,test_size=0.2, random_state=1)
# building models (L = linear, NL = non-linear)
# Logistic Regression (L)
# Linear Discriminant Analysis (L)
# K-Nearest Neighbours (NL)
# Classification and Regression Trees (NL)
# Gaussian Naive Bayes (NL)
# Support Vector Machines (NL)
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
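# (added sketch) the file is titled "MLP models", but no multilayer perceptron appears in the
# comparison above; a minimal, assumed addition using sklearn's MLPClassifier is included here,
# with max_iter raised so training converges on the small Iris set -- treat the settings as illustrative
from sklearn.neural_network import MLPClassifier
models.append(('MLP', MLPClassifier(max_iter=1000, random_state=1)))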
# 10-fold cross-validation:
# the training data is split into 10 parts; each model trains on 9 parts and is
# validated on the remaining 1, repeating until every part has been held out once
results =[]
names = []
for name, model in models:
    kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
    cv_results = cross_val_score(model, x_train, y_train, cv=kfold, scoring='accuracy')
    results.append(cv_results)
    names.append(name)
    print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
#compare the models
pyplot.boxplot(results, labels=names)
pyplot.title('Algorithm Comparison')
pyplot.show()
# make predictions on the validation set with SVM
model = SVC(gamma='auto')
model.fit(x_train, y_train)
predictions = model.predict(x_validation)
# evaluate the predictions
print(accuracy_score(y_validation, predictions))
print(confusion_matrix(y_validation, predictions))
print(classification_report(y_validation, predictions))
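# (added sketch) VotingClassifier is imported above but never used; a minimal, assumed example of
# combining the same model list into a hard-voting ensemble and scoring it on the validation set
voting = VotingClassifier(estimators=models, voting='hard')
voting.fit(x_train, y_train)
voting_predictions = voting.predict(x_validation)
print('Voting ensemble accuracy: {}'.format(accuracy_score(y_validation, voting_predictions)))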