Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

InceptionV3 model benchmark update #5

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view

This file was deleted.

Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
from __future__ import absolute_import

import tensorflow as tf
from tensorflow.keras.applications.inception_v3 import InceptionV3

print('tf version: ', tf.__version__)

# Prefer the package-relative import; fall back to a plain module import so the
# script also works when executed directly rather than as part of a package.
try:
    from .utils import train_model, nb_classes, INPUT_SHAPE
except ImportError:  # was a bare `except:` — only an import failure should fall through
    from utils import train_model, nb_classes, INPUT_SHAPE


# Reference benchmark: the stock keras-applications InceptionV3 architecture,
# trained from scratch (weights=None) on the project dataset.
iv3 = InceptionV3(input_shape=INPUT_SHAPE, weights=None,
                  include_top=True, classes=nb_classes)
iv3.compile(optimizer='RMSprop', loss='categorical_crossentropy', metrics=['accuracy'])

train_model(iv3)
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
import tensorflow as tf
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D

print('tf version: ', tf.__version__)

# Package-relative import first; fall back to a top-level import when the
# script is run directly instead of as a package module.
try:
    from .utils import train_model, nb_classes, INPUT_SHAPE
except ImportError:  # narrowed from a bare `except:` — only import failure is expected
    from utils import train_model, nb_classes, INPUT_SHAPE


def conv2d_bn(x, filters, kernel_size, padding='same', strides=(1,1), name=None):
    """Apply a Conv2D -> BatchNormalization -> ReLU stack to ``x``.

    When ``name`` is given, the conv and batch-norm layers are named
    ``<name>_conv`` and ``<name>_bn`` and the activation carries ``name``
    itself; otherwise all three layers are left unnamed.
    """
    conv_name = None if name is None else name + '_conv'
    bn_name = None if name is None else name + '_bn'

    out = Conv2D(filters, kernel_size, strides=strides, padding=padding,
                 name=conv_name, use_bias=False)(x)
    out = BatchNormalization(scale=False, name=bn_name)(out)
    return Activation('relu', name=name)(out)

def Inceptionv3(nb_classes, input_tensor=None, input_shape=None):
    """Build a simplified InceptionV3-style classifier from scratch.

    Args:
        nb_classes: number of output classes for the softmax head.
        input_tensor: optional pre-existing Keras tensor to build upon.
        input_shape: shape for a fresh Input layer, used when
            ``input_tensor`` is None.

    Returns:
        An (uncompiled) Keras ``Model`` mapping images to class probabilities.
    """
    # Avoid shadowing the builtin `input` (the original's local name).
    if input_tensor is None:
        inputs = Input(shape=input_shape)
    else:
        inputs = input_tensor

    # Starting stem of the InceptionV3 architecture: strided/valid
    # convolutions that rapidly reduce spatial resolution.
    x = conv2d_bn(inputs, 32, 3, padding='valid', strides=(2,2))
    x = conv2d_bn(x, 32, 3, padding='valid')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=(2,2))(x)
    x = conv2d_bn(x, 80, 3, padding='valid')
    x = conv2d_bn(x, 192, 3, padding='valid', strides=(2,2))
    x = conv2d_bn(x, 288, 3)

    # 3 inception blocks with the 5x5 kernel factorized into two stacked 3x3.
    for i in range(3):
        conv1x1 = conv2d_bn(x, 192, 1)

        conv3x3 = conv2d_bn(x, 128, 1)
        conv3x3 = conv2d_bn(conv3x3, 192, 3)

        conv3x3dbl = conv2d_bn(x, 128, 1)
        conv3x3dbl = conv2d_bn(conv3x3dbl, 128, 3)
        conv3x3dbl = conv2d_bn(conv3x3dbl, 192, 3)

        pool = AveragePooling2D((3,3), strides=(1,1), padding='same')(x)
        pool = conv2d_bn(pool, 192, 1)

        x = concatenate([conv1x1, conv3x3, conv3x3dbl, pool],
                        name='inception1_mix'+str(i))

    # 5 inception blocks with kernels factorized into deep 1xn and nx1 pairs
    # rather than full nxn convolutions.
    for i in range(5):
        conv1x1 = conv2d_bn(x, 384, 1)

        conv3x3 = conv2d_bn(x, 320, 1)
        conv3x3 = conv2d_bn(conv3x3, 384, (1,3))
        conv3x3 = conv2d_bn(conv3x3, 384, (3,1))

        conv3x3dbl = conv2d_bn(x, 320, 1)
        conv3x3dbl = conv2d_bn(conv3x3dbl, 320, (1,3))
        conv3x3dbl = conv2d_bn(conv3x3dbl, 320, (3,1))
        conv3x3dbl = conv2d_bn(conv3x3dbl, 384, (1,3))
        conv3x3dbl = conv2d_bn(conv3x3dbl, 384, (3,1))

        pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
        pool = conv2d_bn(pool, 384, 1)

        x = concatenate([conv1x1, conv3x3, conv3x3dbl, pool],
                        name='inception2_mix' + str(i))

    # 2 inception blocks with wide (parallel) 1xn and nx1 branches that are
    # concatenated instead of stacked.
    for i in range(2):
        conv1x1 = conv2d_bn(x, 512, 1)

        conv3x3 = conv2d_bn(x, 440, 1)
        conv1x3 = conv2d_bn(conv3x3, 512, (1, 3))
        conv3x1 = conv2d_bn(conv3x3, 512, (3, 1))

        conv3x3dbl = conv2d_bn(x, 440, 1)
        conv3x3dbl = conv2d_bn(conv3x3dbl, 440, (3, 3))
        conv1x3dbl = conv2d_bn(conv3x3dbl, 512, (1, 3))
        conv3x1dbl = conv2d_bn(conv3x3dbl, 512, (3, 1))

        pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
        pool = conv2d_bn(pool, 512, 1)

        x = concatenate([conv1x1, conv1x3, conv3x1, conv1x3dbl, conv3x1dbl, pool],
                        name='inception3_mix' + str(i))

    x = GlobalAveragePooling2D()(x)
    # GlobalAveragePooling2D already emits a flat (batch, channels) tensor,
    # so the Flatten() the original applied here was a no-op and is removed.
    outputs = Dense(nb_classes, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)

    return model


# Build the scratch InceptionV3, compile it, and hand it to the shared
# training loop from utils.
model = Inceptionv3(nb_classes, input_shape=INPUT_SHAPE)
model.compile(optimizer='RMSprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

train_model(model)
154 changes: 33 additions & 121 deletions Plant_Disease_Detection_Benchmark_models/InceptionV3/finetune.py
Original file line number Diff line number Diff line change
@@ -1,150 +1,62 @@
import os
import sys
import glob
import argparse
import matplotlib.pyplot as plt
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, CSVLogger
from keras_vggface.vggface import VGGFace
import tensorflow as tf
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D,Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
from keras.layers import Input

IM_WIDTH, IM_HEIGHT = 100, 100 # fixed size for InceptionV3
NB_EPOCHS = 50  # default number of training epochs (CLI --nb_epoch overrides)
BAT_SIZE = 64  # default mini-batch size (CLI --batch_size overrides)
FC_SIZE = 512  # width of the new fully-connected head layer
NB_IV3_LAYERS_TO_FREEZE = 172  # bottom layers kept frozen while fine-tuning


# python finetune.py --train_dir "../dataset/color/train" --val_dir "../dataset/color/val"


# Training callbacks: LR decay on plateau, early stopping, and CSV logging.
# NOTE(review): the original called np.sqrt(0.1), but numpy is not imported
# anywhere in this module's visible imports (NameError at import time) —
# 0.1 ** 0.5 is the same value without the dependency. Confirm numpy is not
# imported elsewhere before relying on this.
lr_reducer = ReduceLROnPlateau(factor=0.1 ** 0.5, cooldown=0, patience=5, min_lr=0.5e-6)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
csv_logger = CSVLogger('Inception_Finetuning_log.csv')



def get_nb_files(directory):
    """Count files by walking ``directory`` recursively.

    For every directory visited by the walk, the number of entries directly
    inside each of its sub-directories is added up; entries sitting directly
    under ``directory`` itself are therefore not counted (matches the layout
    of class-per-folder datasets). Returns 0 when ``directory`` is missing.
    """
    if not os.path.exists(directory):
        return 0
    return sum(
        len(glob.glob(os.path.join(parent, child + "/*")))
        for parent, children, _files in os.walk(directory)
        for child in children
    )
print('tf version: ', tf.__version__)

try:
from .utils import train_model, nb_classes, INPUT_SHAPE, FC_SIZE
except:
from utils import train_model, nb_classes, INPUT_SHAPE, FC_SIZE


def InceptionV3WithCustomLayers(input_shape, nb_classes):
"""
Adding custom final layers on InceptionV3 model with imagenet weights

def add_new_last_layer(base_model, nb_classes):
"""Add last layer to the convnet
Args:
base_model: keras model excluding top
input_shape: input shape of the images
nb_classes: # of classes
Returns:
new keras model with last layer
new keras model with last layer/s
"""
base_model = InceptionV3(input_tensor=Input(shape=input_shape),
weights='imagenet', include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(FC_SIZE, activation='relu')(x) # new FC layer, random init
x = Dropout(0.5)(x)
x = Dense(FC_SIZE * 2, activation='relu')(x) # new FC layer, random init
x = Dropout(0.5)(x)
# x = Dense(FC_SIZE, activation='relu')(x) # new FC layer, random init
# x = Dropout(0.5)(x)
#x = Dense(FC_SIZE * 4, activation='relu')(x) # new FC layer, random init
#x = Dropout(0.5)(x)
predictions = Dense(nb_classes, activation='softmax')(x) # new softmax layer
model = Model(output=predictions, input=base_model.input)
return model


def setup_to_finetune(model):
"""Freeze the bottom NB_IV3_LAYERS and retrain the remaining top layers.
note: NB_IV3_LAYERS corresponds to the top 2 inception blocks in the inceptionv3 arch
def setup_trainable_layers(model, layers_to_freeze=None):
"""
Freeze the bottom layers and make trainable the remaining top layers.

Args:
model: keras model
layers_to_freeze: number of layers to freeze or None to use the model as it is
"""
for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
layer.trainable = False
for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
layer.trainable = True
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])


def train(args):
    """Transfer-learn and fine-tune InceptionV3 on a new dataset.

    Builds train/validation generators from the directories named in ``args``,
    attaches a fresh classification head to an ImageNet-weighted base, freezes
    the bottom layers, trains with the module-level callbacks, saves the model
    to ``args.output_model_file`` and plots the training history.
    """
    n_train = get_nb_files(args.train_dir)
    n_val = get_nb_files(args.val_dir)
    n_classes = len(glob.glob(args.train_dir + "/*"))
    epochs = int(args.nb_epoch)
    bsize = int(args.batch_size)

    # Both generators apply InceptionV3's own preprocessing to every image.
    train_generator = ImageDataGenerator(
        preprocessing_function=preprocess_input).flow_from_directory(
        args.train_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=bsize)
    validation_generator = ImageDataGenerator(
        preprocessing_function=preprocess_input).flow_from_directory(
        args.val_dir, target_size=(IM_WIDTH, IM_HEIGHT), batch_size=bsize)

    # ImageNet-weighted base without its top, plus a new softmax head.
    base_model = InceptionV3(input_tensor=Input(shape=(IM_HEIGHT, IM_WIDTH, 3)),
                             weights='imagenet', include_top=False)
    model = add_new_last_layer(base_model, n_classes)

    # Freeze the bottom layers and compile for fine-tuning.
    setup_to_finetune(model)

    history_ft = model.fit_generator(
        train_generator,
        steps_per_epoch=n_train // bsize,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=n_val // bsize,
        callbacks=[lr_reducer, early_stopper, csv_logger])

    model.save(args.output_model_file)

    plot_training(history_ft)


def plot_training(history):
    """Plot training/validation accuracy and loss curves from a Keras History."""
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    xs = range(len(acc))

    # Accuracy figure: dotted = training, solid = validation.
    plt.plot(xs, acc, 'r.')
    plt.plot(xs, val_acc, 'r')
    plt.title('Training and validation accuracy')

    # Loss on a second figure, same colour convention.
    plt.figure()
    plt.plot(xs, loss, 'r.')
    plt.plot(xs, val_loss, 'r-')
    plt.title('Training and validation loss')
    plt.show()


if __name__ == "__main__":
a = argparse.ArgumentParser()
a.add_argument("--train_dir")
a.add_argument("--val_dir")
a.add_argument("--nb_epoch", default=NB_EPOCHS)
a.add_argument("--batch_size", default=BAT_SIZE)
a.add_argument("--output_model_file", default="../Models/InceptionV3-finetuning.h5")
a.add_argument("--plot", action="store_true")
if layers_to_freeze is not None:
for layer in model.layers[:layers_to_freeze]:
layer.trainable = False
for layer in model.layers[layers_to_freeze:]:
layer.trainable = True

args = a.parse_args()
if args.train_dir is None or args.val_dir is None:
a.print_help()
sys.exit(1)

if (not os.path.exists(args.train_dir)) or (not os.path.exists(args.val_dir)):
print("directories do not exist")
sys.exit(1)
# setup model
model = InceptionV3WithCustomLayers(INPUT_SHAPE, nb_classes)
# setup layers to be trained or not
setup_trainable_layers(model)
# compiling the model
model.compile(optimizer='RMSprop', loss='categorical_crossentropy', metrics=['accuracy'])

train(args)
train_model(model)

This file was deleted.

Empty file.
Loading