
Commit 9d7c26d

initialize
0 parents  commit 9d7c26d

File tree

15 files changed: +6721 −0 lines changed

DL/pract1.ipynb

Lines changed: 1413 additions & 0 deletions
Large diffs are not rendered by default.

DL/pract1.txt

Lines changed: 132 additions & 0 deletions
@@ -0,0 +1,132 @@
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import warnings
warnings.filterwarnings('ignore')

# Import the dataset and take a look at the data
data = pd.read_csv('housing_data - housing_data.csv')
data

# Handle null values by filling them with the mean of the respective columns
data.fillna(data.mean(), inplace=True)

data.isnull().sum()

data.describe()

data.info()
data.shape

# Distribution and outliers of the target variable
sns.histplot(data.MEDV, kde=True)

sns.boxplot(x=data.MEDV)

correlation = data.corr()
correlation.loc['MEDV']

# Plotting the heatmap
fig, axes = plt.subplots(figsize=(15, 12))
sns.heatmap(correlation, square=True, annot=True)

# Checking the scatter plots of the most correlated features
plt.figure(figsize=(20, 5))
features = ['LSTAT', 'RM', 'PTRATIO']
for i, col in enumerate(features):
    plt.subplot(1, len(features), i + 1)
    x = data[col]
    y = data.MEDV
    plt.scatter(x, y, marker='o')
    plt.title('Variation in house prices')
    plt.xlabel(col)
    plt.ylabel('House prices in $1000')

# Splitting the independent features and the dependent feature
# X = data[['LSTAT','RM','PTRATIO']]
X = data.iloc[:, :-1]
y = data.MEDV

# Splitting data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Scaling the features (the scaler is fitted on the training set only)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Linear Regression
regressor = LinearRegression()
# Fitting the model
regressor.fit(X_train, y_train)

# Prediction on the test dataset
y_pred = regressor.predict(X_test)

# RMSE on the test set
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print(rmse)

r2 = r2_score(y_test, y_pred)
print(r2)
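# The notebook (pract1.ipynb) builds and trains a Keras regression network
# before this point; that cell is missing from this .txt. A minimal sketch,
# assuming a small feedforward net compiled with MSE loss and an MAE metric
# (which is what the model.evaluate() unpacking below expects) -- not
# necessarily the exact architecture used in the notebook:
model = keras.Sequential([
    layers.Dense(64, activation='relu', input_shape=(X_train.shape[1],)),
    layers.Dense(64, activation='relu'),
    layers.Dense(1)  # single output for regression on MEDV
])
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.fit(X_train, y_train, epochs=100, validation_split=0.1, verbose=0)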
# Evaluation of the neural-network model
y_pred_nn = model.predict(X_test)
mse_nn, mae_nn = model.evaluate(X_test, y_test)
print('Mean squared error on test data: ', mse_nn)
print('Mean absolute error on test data: ', mae_nn)

# Comparison with traditional approaches
# First let's try a simple algorithm, Linear Regression:
lr_model = LinearRegression()
lr_model.fit(X_train, y_train)
y_pred_lr = lr_model.predict(X_test)
mse_lr = mean_squared_error(y_test, y_pred_lr)
mae_lr = mean_absolute_error(y_test, y_pred_lr)
print('Mean squared error on test data: ', mse_lr)
print('Mean absolute error on test data: ', mae_lr)

r2_lr = r2_score(y_test, y_pred_lr)
print(r2_lr)

# RMSE on the test set for the linear model
rmse_lr = np.sqrt(mse_lr)
print(rmse_lr)

# Make predictions on new data (13 feature values in the dataset's column order)
new_data = scaler.transform([[0.1, 10.0, 5.0, 0, 0.4, 6.0, 50, 6.0, 1, 400, 20, 300, 10]])
prediction = model.predict(new_data)
print("Predicted house price:", prediction)

DL/pract2.ipynb

Lines changed: 520 additions & 0 deletions
Large diffs are not rendered by default.

DL/pract2.txt

Lines changed: 97 additions & 0 deletions
@@ -0,0 +1,97 @@
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
import numpy as np

(x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()

plt.imshow(x_train[1])

# Next, we preprocess the data by scaling the pixel values to be between 0 and 1,
# and reshaping the images to 28x28x1 (adding an explicit channel dimension).
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
# 28, 28 is the width and height; 1 is the number of channels (grayscale).
# -1 means the length in that dimension is inferred, based on the constraint
# that the total number of elements must stay the same when reshaping.
# Each image has 28*28 = 784 elements and there are n images (784n elements
# in total), so TensorFlow can infer that -1 stands for n.

x_train.shape
x_test.shape
y_train.shape
y_test.shape

# We will use a convolutional neural network (CNN) to classify the fashion items.
# The CNN consists of multiple convolutional layers followed by max pooling,
# dropout, and dense layers. Here is the code for the model:

model = keras.Sequential([
    keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(28,28,1)),
    # 32 randomly initialized filters of size 3x3
    # 28,28,1 is the size of the input image
    # No zero-padding: the output is 2 pixels smaller in every dimension
    # (28 - 3 + 1 = 26), giving a 26x26x32 feature map.
    # The 320 parameters shown in the summary are
    # (3*3*1 weights + 1 bias) * 32 filters = (9 + 1) * 32 = 320

    keras.layers.MaxPooling2D((2,2)),
    # Halves width and height: 13x13 with 32 channels (depth).

    keras.layers.Dropout(0.25),
    # Reduce overfitting by dropping out 25% of the neurons during training.

    keras.layers.Conv2D(64, (3,3), activation='relu'),
    # Deeper layers use 64 filters of size 3x3.
    # Output size: 13 - 3 + 1 = 11 in width and height, with depth 64 (11x11x64).
    # The 18496 parameters shown in the summary are
    # (3*3*32 weights + 1 bias) * 64 filters = (288 + 1) * 64 = 18496

    keras.layers.MaxPooling2D((2,2)),
    # 5x5 feature map with 64 channels (11 halves to 5, rounding down).

    keras.layers.Dropout(0.25),

    keras.layers.Conv2D(128, (3,3), activation='relu'),
    # Deeper layers use 128 filters of size 3x3.
    # Observe how the 28x28x1 input image has been transformed into a
    # 3x3x128 feature map: 5 - 3 + 1 = 3 in width and height, with depth 128.
    # The 73856 parameters shown in the summary are
    # (3*3*64 weights + 1 bias) * 128 filters = (576 + 1) * 128 = 73856

    # To classify the images, we still need a Dense and Softmax layer.
    # We need to flatten the 3x3x128 feature map to a vector of size 1152.
    # https://medium.com/@iamvarman/how-to-calculate-the-number-of-parameters-in-the-cnn-5bd55364d7ca

    keras.layers.Flatten(),
    keras.layers.Dense(128, activation='relu'),
    # 128 nodes in the dense layer
    # 1152*128 weights + 128 biases = 147584 parameters

    keras.layers.Dropout(0.25),
    keras.layers.Dense(10, activation='softmax')
    # 10 nodes in the final dense layer, one per class
    # 128*10 weights + 10 biases = 1290 parameters
])

model.summary()

# Compile and Train the Model
# After defining the model, we will compile it and train it on the training data.

model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])

history = model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))

# 1875 is the number of batches per epoch. By default batches contain
# 32 samples: 60000 / 32 = 1875.
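# The `history` object returned by fit() records loss and accuracy per epoch.
# A quick sketch of how the learning curves could be plotted (assumes the
# default metric names 'accuracy'/'val_accuracy' produced by the compile
# call above):
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.xlabel('Epoch')
plt.legend()
plt.show()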
# Finally, we will evaluate the performance of the model on the test data.

test_loss, test_acc = model.evaluate(x_test, y_test)

print('Test accuracy:', test_acc)
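# The softmax output is a vector of 10 class probabilities per image.
# A short sketch of turning those probabilities into class labels
# (the class-name list follows the standard Fashion-MNIST ordering):
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
probs = model.predict(x_test[:5])     # shape (5, 10)
predicted = np.argmax(probs, axis=1)  # index of the highest probability
for idx, label in zip(predicted, y_test[:5]):
    print('predicted:', class_names[idx], '| actual:', class_names[label])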
