
Convolutional Neural Network

Summary

We used Colab’s GPU to build a neural network using a pretrained model from Keras (ResNet50).

Pretrained Model

At Camilo's suggestion, we chose ResNet50 so that we could compare its performance against MobileNetV2 and a from-scratch CNN. We remove the top of the network and add our own output layers to fine-tune the model to our eight superclasses.

The model is trained with the Adam optimizer at a learning rate of 0.0001.

Results

The ResNet50 model took about 50 minutes to train (10 epochs on a Tesla K80) and reached a training accuracy of 100% and a test accuracy of 83.3%.

The model appears to be overfit, but it still performs relatively well on the test set.
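One way we could try to close that train/test gap, in line with the horizontal-flip comments left in the generator cells below, would be light augmentation plus dropout in the new head. The sketch below was not trained; the flip and the 0.5 drop rate are illustrative choices only, not something we ran.

from keras.applications.resnet50 import ResNet50
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator

# Augment the training images only; the test generator stays rescale-only.
aug_train_datagen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)

# Same architecture as below, with a Dropout layer added before the softmax head.
inp = Input(shape=(224, 224, 3))
base = ResNet50(input_shape=(224, 224, 3), include_top=False, weights='imagenet')
x = GlobalAveragePooling2D()(base(inp))
x = Dropout(0.5)(x)  # illustrative drop rate, not tuned
out = Dense(8, activation='softmax')(x)
regularized_resnet = Model(inputs=inp, outputs=out)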

import os
import random

import numpy as np
import pandas as pd

%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

from sklearn import datasets
from sklearn.model_selection import train_test_split

import keras
from keras.layers import Conv2D, MaxPooling2D, Dense, Input, Flatten, Dropout, UpSampling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.utils import np_utils
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

from IPython.display import Image, display



Using TensorFlow backend.
#Make sure you're using GPU
!nvidia-smi
Wed Dec 12 17:25:03 2018       
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 396.44                 Driver Version: 396.44                    |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|===============================+======================+======================|
|   0  Tesla K80           Off  | 00000000:00:04.0 Off |                    0 |
| N/A   73C    P8    35W / 149W |      0MiB / 11441MiB |      0%      Default |
+-------------------------------+----------------------+----------------------+
                                                                               
+-----------------------------------------------------------------------------+
| Processes:                                                       GPU Memory |
|  GPU       PID   Type   Process name                             Usage      |
|=============================================================================|
|  No running processes found                                                 |
+-----------------------------------------------------------------------------+
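Besides nvidia-smi, a quick check from the TensorFlow backend itself confirms that Keras will actually place work on the GPU. This is only a sanity check and was not part of the original run:

import tensorflow as tf

# Returns '/device:GPU:0' on a GPU runtime and an empty string otherwise.
print(tf.test.gpu_device_name())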

##Build Directories

!rm -rf 'train'
!rm -rf 'test'
!unzip 'train.zip'
!unzip 'train_partial.zip'
!unzip 'test.zip'
!unzip 'test_partial.zip'
!rm 'test/test.zip'
!rm 'train/train.zip'
rm: cannot remove 'test/test.zip': No such file or directory
rm: cannot remove 'train/train.zip': No such file or directory
!ls test

0  1  2  3  4  5  6  7
test_imgs_count = sum([len(files) for r, d, files in os.walk("test")])
test_partial_imgs_count = sum([len(files) for r, d, files in os.walk("test_partial")])
print("Test images: {}, partial test images: {}".format(test_imgs_count, test_partial_imgs_count))
Test images: 8580, partial test images: 1716
train_imgs_count = sum([len(files) for r, d, files in os.walk("train")])
train_partial_imgs_count = sum([len(files) for r, d, files in os.walk("train_partial")])
print("Train images: {}, partial train images: {}".format(train_imgs_count, train_partial_imgs_count))
Train images: 11999, partial train images: 2400

There are 11,999 training images in total; since 11999 = 13 × 13 × 71, the only divisors near a typical batch size of 32 are 13 and 71, and we use 71 so that every batch is full. The 2,400 partial training images divide evenly by 32, so that split uses a batch size of 32. The arithmetic is checked in the snippet below.
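A quick check of that arithmetic (not part of the training pipeline):

# 11999 = 13 * 13 * 71, so the only divisors near 32 are 13 and 71;
# 2400 divides evenly by 32 (2400 / 32 = 75 batches).
print([d for d in range(1, 12000) if 11999 % d == 0])  # [1, 13, 71, 169, 923, 11999]
print(2400 % 32 == 0)                                   # True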

batch_size_train = 71
batch_size_test = 1

batch_size_train_partial = 32
batch_size_test_partial = 1

##Build Models

train_datagen = ImageDataGenerator(rescale=1./255)
# consider adding horizontal_flip=True here for augmentation

test_datagen = ImageDataGenerator(rescale=1./255)

# Stream the eight-class training images from ./train/, resized to the
# 224x224 input that ResNet50 expects.
train_generator = train_datagen.flow_from_directory(
    directory=r"./train/",
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=batch_size_train,
    class_mode="categorical",
    shuffle=True,
    seed=42
)
# Test images are streamed one at a time and unshuffled so predictions
# line up with test_generator.filenames.
test_generator = test_datagen.flow_from_directory(
    directory=r"./test/",
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=1,
    class_mode="categorical",
    shuffle=False,
    seed=42
)
optimizer = Adam(lr=0.0001)


Found 11999 images belonging to 8 classes.
Found 8580 images belonging to 8 classes.
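A small sanity check worth noting here (not in the original notebook): flow_from_directory infers the class labels from the eight subdirectory names, and the confusion-matrix cells below take the first character of each filename as the true label, so the mapping should be the identity.

# Confirm folder '0' maps to class index 0, ..., folder '7' to index 7.
print(train_generator.class_indices)
# {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7}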
train_partial_datagen = ImageDataGenerator(rescale=1./255)
# consider adding horizontal_flip=True here for augmentation

test_partial_datagen = ImageDataGenerator(rescale=1./255)

# The smaller partial splits are used for the confusion-matrix predictions below.
train_partial_generator = train_partial_datagen.flow_from_directory(
    directory=r"./train_partial/",
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=batch_size_train_partial,
    class_mode="categorical",
    shuffle=False,
    seed=42
)
test_partial_generator = test_partial_datagen.flow_from_directory(
    directory=r"./test_partial/",
    target_size=(224, 224),
    color_mode="rgb",
    batch_size=1,
    class_mode="categorical",
    shuffle=False,
    seed=42
)
optimizer = Adam(lr=0.0001)

Found 2400 images belonging to 8 classes.
Found 1716 images belonging to 8 classes.

Pretrained Model: ResNet50


from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image

inp = Input(shape=(224, 224, 3))

# ImageNet-pretrained ResNet50 without its original classification head.
model_resnet = ResNet50(input_shape=(224, 224, 3), include_top=False, weights='imagenet')
x1 = model_resnet(inp)
x2 = GlobalAveragePooling2D()(x1)
# New head: a single softmax layer over our eight superclasses.
out = Dense(8, activation='softmax')(x2)

model_resnet = Model(inputs=inp, outputs=out)
model_resnet.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
model_resnet.fit_generator(generator=train_generator,
                           steps_per_epoch=STEP_SIZE_TRAIN,
                           epochs=10)

# TIME: 3020 secs / 50.3 mins
/usr/local/lib/python3.6/dist-packages/keras_applications/resnet50.py:265: UserWarning: The output shape of `ResNet50(include_top=False)` has been changed since Keras 2.2.0.
  warnings.warn('The output shape of `ResNet50(include_top=False)` '


Downloading data from https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
94658560/94653016 [==============================] - 1s 0us/step
Epoch 1/10
169/169 [==============================] - 320s 2s/step - loss: 0.8967 - acc: 0.6811
Epoch 2/10
169/169 [==============================] - 299s 2s/step - loss: 0.1966 - acc: 0.9418
Epoch 3/10
169/169 [==============================] - 299s 2s/step - loss: 0.0494 - acc: 0.9895
Epoch 4/10
169/169 [==============================] - 299s 2s/step - loss: 0.0133 - acc: 0.9986
Epoch 5/10
169/169 [==============================] - 299s 2s/step - loss: 0.0066 - acc: 0.9992
Epoch 6/10
169/169 [==============================] - 299s 2s/step - loss: 0.0020 - acc: 1.0000
Epoch 7/10
169/169 [==============================] - 299s 2s/step - loss: 0.0011 - acc: 1.0000
Epoch 8/10
169/169 [==============================] - 300s 2s/step - loss: 7.3083e-04 - acc: 1.0000
Epoch 9/10
169/169 [==============================] - 300s 2s/step - loss: 7.8796e-04 - acc: 1.0000
Epoch 10/10
169/169 [==============================] - 299s 2s/step - loss: 5.4552e-04 - acc: 1.0000





<keras.callbacks.History at 0x7f97a9d4e160>
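Since training takes roughly 50 minutes on the K80, it is worth saving the fitted model before the Colab runtime recycles. This step was not part of the original run, and the filename is only a placeholder:

# Persist the weights and architecture so the run does not have to be repeated.
model_resnet.save('resnet50_superclass.h5')

# Later: from keras.models import load_model
#        model_resnet = load_model('resnet50_superclass.h5')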

##Loss and Accuracy Scores

###ResNet50

#train accuracy
train_generator.reset()
model_resnet.evaluate_generator(train_generator, steps=(train_imgs_count/batch_size_train))
[8.43342943681878e-05, 1.0]
#test accuracy
test_generator.reset()
model_resnet.evaluate_generator(test_generator, steps=(test_imgs_count/batch_size_test))
[0.6647914932028297, 0.8327505827505828]
model_resnet.metrics_names
['loss', 'acc']

##Confusion Matrices

%run -i 'metrics_script.py'
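metrics_script.py itself is not reproduced in this notebook. Judging from the printed loss/accuracy values and the confusion-matrix figures below, a run_metrics helper along these lines would behave similarly; this is only a sketch using sklearn, not the actual script:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score, confusion_matrix, log_loss

def run_metrics_sketch(y_pred, y_pmf, y_true, model_name, labels):
    # Categorical loss: log loss of the predicted probabilities against the
    # integer-encoded true labels.
    y_true_idx = [labels.index(y) for y in y_true]
    loss = log_loss(y_true_idx, y_pmf, labels=list(range(len(labels))))
    acc = accuracy_score(list(y_true), list(y_pred))
    print("Categorical Loss:", loss)
    print("Accuracy Score:", acc)

    # Confusion matrix plot, analogous to the figures shown below.
    cm = confusion_matrix(list(y_true), list(y_pred), labels=labels)
    plt.imshow(cm, cmap='Blues')
    plt.title(model_name)
    plt.xlabel('Predicted label')
    plt.ylabel('True label')
    plt.xticks(range(len(labels)), labels)
    plt.yticks(range(len(labels)), labels)
    plt.colorbar()
    plt.show()
    return loss, acc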

###ResNet50

train_partial_generator.reset()
y_pmf_train = model_resnet.predict_generator(train_partial_generator,steps=train_partial_generator.n/batch_size_train_partial,verbose=1)

75/75 [==============================] - 24s 315ms/step
# Predicted class = argmax over the 8-way softmax output.
y_pred_train = np.argmax(y_pmf_train, axis=1).astype(np.str)
# The first character of each filename is its class folder (0-7), i.e. the true label.
y_true_train = [n[0] for n in train_partial_generator.filenames]
labels = ['0', '1', '2', '3', '4', '5', '6', '7']
model_name_train = "ResNet50 Train"
test_partial_generator.reset()
y_pmf_test = model_resnet.predict_generator(test_partial_generator, steps=test_partial_generator.n/batch_size_test_partial, verbose=1)
1716/1716 [==============================] - 51s 30ms/step
y_pred_test = np.argmax(y_pmf_test,axis=1).astype(np.str)
y_true_test = [n[0] for n in test_partial_generator.filenames]
model_name_test = "ResNet50 Test"

run_metrics(y_pred_train, y_pmf_train, y_true_train, model_name_train, labels=labels)
Categorical Loss: 8.369817491181781e-05
Accuracy Score: 1.0





(8.369817491181781e-05, 1.0)

(Figures: confusion matrix plots for ResNet50 Train)

run_metrics(y_pred_test, y_pmf_test, y_true_test, model_name_test, labels=labels)
Categorical Loss: 0.6864226685600351
Accuracy Score: 0.8234265734265734





(0.6864226685600351, 0.8234265734265734)

(Figures: confusion matrix plots for ResNet50 Test)