Image Classification on Grape Leaves Disease using Deep Learning

Figure 1: Grape leaves with esca disease images.
dataset_name = "esca_dataset"

# Url to repo (repo temporarily saved in Google Drive but intended for the Mendeley repo)
dataset_url = "https://drive.google.com/file/d/1qO997Wy5drvRpVbAOCL20w82FEGiDpmV/view?usp=sharing"   # Google Drive -> to change with Mendeley link

# Trick to use wget with gDrive: use 'https://docs.google.com/uc?export=download&id=FILEID'
# where FILEID is extracted from the virtual link provided by Google Drive
dataset_url4wget = "https://docs.google.com/uc?export=download&id=1qO997Wy5drvRpVbAOCL20w82FEGiDpmV"

# Download the archive directly from the url
!wget -r --no-check-certificate "$dataset_url4wget" -O $dataset_name".zip"
!ls

# Unzip data
!unzip $dataset_name".zip"
!ls
# The new dataset 'augmented_esca_dataset' will be created.
# This dataset contains the augmented images created by the ImageDataGenerator class plus the original images,
# in order to obtain an expanded version of the original dataset that is ready to use.
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import tensorflow as tf
import os
from numpy import expand_dims
import cv2
import matplotlib.pyplot as plt
from pathlib import Path
def blur(img):
    return cv2.blur(img, (30, 30))

def horizontal_flip(img):
    return tf.image.flip_left_right(img)

def vertical_flip(img):
    return tf.image.flip_up_down(img)

def contrast(img):
    return tf.image.adjust_contrast(img, 0.5)

def saturation(img):
    return tf.image.adjust_saturation(img, 3)

def hue(img):
    return tf.image.adjust_hue(img, 0.1)

def gamma(img):
    return tf.image.adjust_gamma(img, 2)

# Transformations to apply and display flag (assumed here; defined elsewhere in the original notebook)
transformation_array = ["horizontalFlip", "verticalFlip", "rotation", "widthShift", "heightShift",
                        "shearRange", "zoom", "blur", "brightness", "contrast", "saturation", "hue", "gamma"]
enable_show = False   # set True to plot each original/transformed image

new_dataset = 'augmented_esca_dataset'
classes = ['esca', 'healthy']

for class_tag in classes:
    input_path = '/content/' + dataset_name + '/' + class_tag + '/'
    output_path = '/content/' + dataset_name + '/' + new_dataset + '/' + class_tag + '/'
    print(input_path)
    print(output_path)

    # TMP
    !rm -rf $output_path
    # END TMP

    try:
        if not os.path.exists(output_path):
            os.makedirs(output_path)
    except OSError:
        print("Creation of the directory %s failed\n\n" % output_path)
    else:
        print("Successfully created the directory %s\n\n" % output_path)

    for filename in os.listdir(input_path):
        if filename.endswith(".jpg"):
            # Copy the original image into the new dataset
            original_file_path = input_path + filename
            original_newname_file_path = output_path + Path(filename).stem + "_original.jpg"
            %cp $original_file_path $original_newname_file_path

            # Initialising the ImageDataGenerator class.
            # We will pass in the augmentation parameters in the constructor.
            for transformation in transformation_array:
                if transformation == "horizontalFlip":
                    # datagen = ImageDataGenerator(horizontal_flip=True)  # for random flip
                    datagen = ImageDataGenerator(preprocessing_function=horizontal_flip)  # all imgs flipped
                elif transformation == "verticalFlip":
                    # datagen = ImageDataGenerator(vertical_flip=True)  # for random flip
                    datagen = ImageDataGenerator(preprocessing_function=vertical_flip)  # all imgs flipped
                elif transformation == "rotation":
                    datagen = ImageDataGenerator(rotation_range=40, fill_mode='nearest')
                elif transformation == "widthShift":
                    datagen = ImageDataGenerator(width_shift_range=0.2, fill_mode='nearest')
                elif transformation == "heightShift":
                    datagen = ImageDataGenerator(height_shift_range=0.2, fill_mode='nearest')
                elif transformation == "shearRange":
                    datagen = ImageDataGenerator(shear_range=0.2)
                elif transformation == "zoom":
                    datagen = ImageDataGenerator(zoom_range=[0.5, 1.0])
                elif transformation == "blur":
                    datagen = ImageDataGenerator(preprocessing_function=blur)
                elif transformation == "brightness":
                    # Values less than 1.0 darken the image, e.g. [0.5, 1.0],
                    # whereas values larger than 1.0 brighten the image, e.g. [1.0, 1.5],
                    # where 1.0 has no effect on brightness.
                    datagen = ImageDataGenerator(brightness_range=[1.1, 1.5])
                elif transformation == "contrast":
                    datagen = ImageDataGenerator(preprocessing_function=contrast)
                elif transformation == "saturation":
                    datagen = ImageDataGenerator(preprocessing_function=saturation)
                elif transformation == "hue":
                    datagen = ImageDataGenerator(preprocessing_function=hue)
                elif transformation == "gamma":
                    datagen = ImageDataGenerator(preprocessing_function=gamma)

                # Loading a sample image
                img = load_img(input_path + filename)
                # Converting the input sample image to an array
                data = img_to_array(img)
                # Reshaping the input image: expand dimension to one sample
                samples = expand_dims(data, 0)

                # Plot original image
                print("Original image:")
                print(filename)
                if enable_show:
                    plt.imshow(img)
                    plt.show()
                    print("\n\n")

                # Generating and saving augmented samples
                print("Apply " + transformation + ".")
                # Prepare iterator
                it = datagen.flow(samples, batch_size=1,
                                  save_to_dir=output_path,
                                  save_prefix=Path(filename).stem + "_" + transformation,
                                  save_format='jpg')
                batch = it.next()
                # Plot transformed image
                image = batch[0].astype('uint8')
                if enable_show:
                    print("Transformed image:")
                    plt.imshow(image)
                    plt.show()
                    print("\n\n")
                print("Done!\n\n")
Figure 2: Augmentation result.

Import All Important Dependencies

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import load_model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
from tensorflow.keras.preprocessing import image_dataset_from_directory
import numpy as np
import matplotlib.pyplot as plt
import os
import pathlib
import time

Extract Our Dataset Information

# dir_original: root folder of the dataset (defined elsewhere in the original notebook)
data_dir = pathlib.Path(dir_original)   # import dataset directory

set_samples = ['train', 'validation', 'test']
print("set_samples: ", set_samples, "\n")

CLASS_NAMES = np.array([item.name for item in sorted(data_dir.glob('*'))])
print("class: ", CLASS_NAMES, "\n")

# number of images per class
N_IMAGES = np.array([len(list(data_dir.glob(item.name + '/*.jpg'))) for item in sorted(data_dir.glob('*'))])
print("number of images per class: ", N_IMAGES, "\n")

# number of images per set (train, validation, test)
N_samples = np.array([(int(np.around(n*60/100)), int(np.around(n*15/100)), int(np.around(n*25/100))) for n in N_IMAGES])
print("split of dataset: \n ", N_samples, "\n")
Figure 3: Dataset Information.
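The split above only computes how many images go into each set; the train_dataset, validation_dataset and test_dataset objects used during training and evaluation still have to be built. A minimal sketch with image_dataset_from_directory, assuming the split has been materialised as train/, validation/ and test/ sub-folders (each containing the class folders) under data_dir; the folder names, image size and batch size are assumptions, not the article's exact values:

image_size = (300, 300)          # assumed resolution, matching the InceptionV3 input used later
input_shape = image_size + (3,)  # assumed (height, width, channels) consumed by the CNN below
batch_size = 32                  # assumed batch size

train_dataset = image_dataset_from_directory(
    str(data_dir / 'train'),
    label_mode='categorical',    # one-hot labels, matching categorical_crossentropy
    image_size=image_size,
    batch_size=batch_size)

validation_dataset = image_dataset_from_directory(
    str(data_dir / 'validation'),
    label_mode='categorical',
    image_size=image_size,
    batch_size=batch_size)

test_dataset = image_dataset_from_directory(
    str(data_dir / 'test'),
    label_mode='categorical',
    image_size=image_size,
    batch_size=batch_size,
    shuffle=False)               # keep order stable for evaluation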

Model Architecture

This model consists of five 2D convolutional layers, each followed by a ReLU activation function and a 2D max-pooling layer with a 2×2 pool size. In the final stage a flatten layer, two dense layers (with ReLU and softmax activations respectively) and a dropout layer between them are added to classify the input training images into the two classes, esca and healthy.

  1. CNN documentation from Tensorflow
  2. CNN Architecture Explanation
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2))  # because we have 2 classes
model.add(Activation('softmax'))
model.summary()
Figure 4: Architecture summary.

Model Compilation & Training

After that we can compile the model with the .compile() method and start the training session by calling the .fit() method like this:

#---compilation----#
model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.Adadelta(learning_rate=1, name='Adadelta'),
              metrics=['accuracy'])

#----training----#
with tf.device('/device:GPU:0'):
    history = model.fit(train_dataset,
                        epochs=epochs,
                        validation_data=validation_dataset)

model.save('your directory')  # save your model to any file path you want

Model Evaluation

Once the last training epoch is done, the model is saved to the directory we specified earlier; with that, we can always download the model or load it back to run an evaluation on the test set we split off earlier.
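As a minimal sketch of that evaluation step (the file path is a placeholder and test_dataset is the test split built earlier), it could look like this:

# Load the model saved after training (path is a placeholder)
model = load_model('your directory')

# Evaluate on the held-out test set
test_loss, test_acc = model.evaluate(test_dataset)
print("Test loss: %.4f - Test accuracy: %.4f" % (test_loss, test_acc))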

Figure 5: Performance Graph.
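The accuracy and loss curves in Figure 5 come from the history object returned by fit(); a minimal sketch of how such a plot can be produced (the key names are Keras' defaults for the 'accuracy' metric):

# Plot training vs. validation accuracy and loss from the History object
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.legend()
plt.show()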

InceptionV3

First we need to import the transfer-learning model; as I'm using InceptionV3, we import it and freeze some layers in the network to prevent their weights from being modified during the backward pass of training. This progressively 'locks in' the weights of each frozen layer, reducing the amount of computation in the backward pass and decreasing training time. To make this happen, layer.trainable = False is used.

import keras
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model, load_model
import pandas as pd  # used below to tabulate layer trainability
conv_base = InceptionV3(weights='imagenet', include_top=False, input_shape=(300, 300, 3))
output = conv_base.layers[-1].output
output = keras.layers.Flatten()(output)
model_tl = Model(conv_base.input, output)
model_tl.trainable = False
for layer in model_tl.layers:
    layer.trainable = False

layers = [(layer, layer.name, layer.trainable) for layer in model_tl.layers]
model_layers = pd.DataFrame(layers, columns=["Layer Type", "Layer Name", "Layer Trainable"])
print(model_layers)
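The frozen base above ends in a Flatten layer, so a classification head still has to be attached before training. The article does not show that step; a minimal sketch, assuming the same two-class softmax head, dropout and optimizer settings as the custom CNN (the name model_tl_full is hypothetical):

from keras.layers import Dense, Dropout

# Attach a small classification head on top of the frozen InceptionV3 base
x = model_tl.output
x = Dense(64, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(2, activation='softmax')(x)  # two classes: esca, healthy

model_tl_full = Model(model_tl.input, predictions)
model_tl_full.compile(loss='categorical_crossentropy',
                      optimizer=keras.optimizers.Adadelta(learning_rate=1, name='Adadelta'),
                      metrics=['accuracy'])
model_tl_full.summary()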

Architecture summary for InceptionV3:

Figure 6: Architecture Summary for Transfer Learning CNN.
Figure 7: Performance Graph.
Figure 8
Figure 9
Table 1: Hyperparameters for both CNN architecture.
