CNNs for Image Colorization

DSCI 552 | Machine Learning for Data Science

Homework 7

Matheus Schmitz

USC ID: 5039286453

Imports

In [1]:
# Py Data Stack
import numpy as np
import pandas as pd

# Visualization
import matplotlib.pyplot as plt

# Scikit-Learn
from sklearn.cluster import KMeans
from sklearn.preprocessing import OneHotEncoder

# Scikit-Image
from skimage.color import rgb2gray
from skimage import img_as_ubyte

# TensorFlow & Keras
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, MaxPool2D, Reshape, Softmax, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger, EarlyStopping

# Progress Bar
from tqdm import tqdm

# OS
import os

# Disable warnings
import warnings
warnings.filterwarnings("ignore")
In [2]:
# Making sure TensorFlow is properly working with the GPU
print('Available Devices:')
for device in tf.config.experimental.list_physical_devices():
    print(device)
print()
print(f'TensorFlow using GPU: {tf.test.is_gpu_available()}')
print(f'TensorFlow using CUDA: {tf.test.is_built_with_cuda()}')
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
if tf.test.gpu_device_name():
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
    print("Oh boy, there's no GPU, so prepare yourself for a long wait :(")
print()
try:
    !nvcc --version
except:
    print('ooops, watch out, something went wrong!')
print()
try:
    !nvidia-smi
except:
    print('ooops, watch out, something went wrong!')
Available Devices:
PhysicalDevice(name='/physical_device:CPU:0', device_type='CPU')
PhysicalDevice(name='/physical_device:XLA_CPU:0', device_type='XLA_CPU')
PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')
PhysicalDevice(name='/physical_device:XLA_GPU:0', device_type='XLA_GPU')

WARNING:tensorflow:From <ipython-input-2-46d45dd0bd84>:6: is_gpu_available (from tensorflow.python.framework.test_util) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.config.list_physical_devices('GPU')` instead.
TensorFlow using GPU: True
TensorFlow using CUDA: True
Num GPUs Available:  1
Default GPU Device: /device:GPU:0

nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2019 NVIDIA Corporation
Built on Sun_Jul_28_19:12:52_Pacific_Daylight_Time_2019
Cuda compilation tools, release 10.1, V10.1.243

Sat Nov 21 16:52:19 2020       
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 446.14       Driver Version: 446.14       CUDA Version: 11.0     |
|-------------------------------+----------------------+----------------------+
| GPU  Name            TCC/WDDM | Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|===============================+======================+======================|
|   0  GeForce GTX 960M   WDDM  | 00000000:01:00.0 Off |                  N/A |
| N/A   53C    P0    N/A /  N/A |    155MiB /  2048MiB |      0%      Default |
+-------------------------------+----------------------+----------------------+
                                                                               
+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU                  PID   Type   Process name                  GPU Memory |
|                                                                  Usage      |
|=============================================================================|
|    0                 9340      C   ...heus\Anaconda3\python.exe    N/A      |
+-----------------------------------------------------------------------------+

(a) Dataset


Guide on loading the downloaded data: http://www.cs.toronto.edu/~kriz/cifar.html

Guide on loading the Cifar-10 dataset directly from Keras: https://www.tensorflow.org/api_docs/python/tf/keras/datasets/cifar10/load_data
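For reference, the downloaded python-version archive can also be loaded by hand. Below is a minimal sketch following the unpickle example on the Toronto page above; the extraction path ./cifar-10-batches-py and the helper name are my assumptions:

import pickle
import numpy as np

def load_cifar_batch(file):
    # Each batch file is a pickled dict with b'data' (10000 x 3072 uint8)
    # and b'labels' (10000 ints), per the CIFAR-10 page
    with open(file, 'rb') as fo:
        batch = pickle.load(fo, encoding='bytes')
    # Each row stores the 1024 red, then green, then blue values of a 32x32
    # image, so reshape to (N, 3, 32, 32) and move the channel axis last
    x = batch[b'data'].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
    y = np.array(batch[b'labels']).reshape(-1, 1)
    return x, y

# x_b1, y_b1 = load_cifar_batch('./cifar-10-batches-py/data_batch_1')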

In [3]:
# Downloading the Cifar-10 dataset from Keras
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
In [4]:
# Shape of x is: samples x img_width x img_height x color_channels
print(x_train.shape)

# Shape of y is: samples x class_encoding
print(y_train.shape)
(50000, 32, 32, 3)
(50000, 1)

(b) Extract the Bird Class


In [5]:
# Find the label encoding for the bird class
cifar_10_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
bird_encoding = cifar_10_classes.index('bird')
print(f'Bird encoding: {bird_encoding}')
Bird encoding: 2
In [6]:
# Create a boolean mask to extract only bird samples
mask_train = (y_train == bird_encoding).ravel()
mask_test = (y_test == bird_encoding).ravel()

# Filter train and test dataset
x_train_bird = x_train[mask_train]
y_train_bird = y_train[mask_train]
x_test_bird = x_test[mask_test]
y_test_bird = y_test[mask_test]

# Check results
print(f'x_train_bird.shape: {x_train_bird.shape}')
print(f'y_train_bird.shape: {y_train_bird.shape}')
print(f'x_test_bird.shape: {x_test_bird.shape}')
print(f'y_test_bird.shape: {y_test_bird.shape}')
x_train_bird.shape: (5000, 32, 32, 3)
y_train_bird.shape: (5000, 1)
x_test_bird.shape: (1000, 32, 32, 3)
y_test_bird.shape: (1000, 1)
In [7]:
# Check if images are birds
fig, axs = plt.subplots(ncols=5, nrows=2, figsize=(15,5))

# Loop through axes and plot random images
for ax in axs.ravel():
    ax.imshow(x_train_bird[np.random.choice(x_train_bird.shape[0])])
    ax.set_xticks([], [])  
    ax.set_yticks([], []) 

plt.tight_layout()

(c) Pixel Selection


In [8]:
# I'm opting to use all pixels of all images (a sketch of subsampling instead is given below for reference)
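Fitting K-Means on all of the roughly 5.1 million training pixels is feasible here, but for reference, a random subset of pixels could be used for fitting instead; a minimal sketch, where the sample size and seed are arbitrary choices of mine:

# Hypothetical alternative: fit K-Means on a random sample of pixels
rng = np.random.default_rng(42)
all_pixels = x_train_bird.reshape(-1, 3)
sample_idx = rng.choice(all_pixels.shape[0], size=100_000, replace=False)
# KMeans(n_clusters=4).fit(all_pixels[sample_idx]) would then still be able
# to predict a cluster for every pixel, exactly as done in part (d) below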

(d) Tetra-Chrome Conversion


In [9]:
NUM_COLORS = 4

# Scikit-learn's KMeans expects a 2-D array of shape (n_samples, n_features), so flatten the images to one RGB pixel per row
KMEANS = KMeans(n_clusters=NUM_COLORS)
KMEANS.fit(x_train_bird.reshape(-1, 3))
cluster_centers = KMEANS.cluster_centers_
pred_clusters_train = KMEANS.predict(x_train_bird.reshape(-1, 3))
pred_clusters_test = KMEANS.predict(x_test_bird.reshape(-1, 3))

# Use cluster_centers and the predicted cluster of each pixel to convert images to tetra-chrome
x_train_tetra = cluster_centers[pred_clusters_train].astype(np.uint8)
x_test_tetra = cluster_centers[pred_clusters_test].astype(np.uint8)

# Convert the tetra-chrome images back to the original 32x32x3 shape for visualization
x_train_tetra = x_train_tetra.reshape(-1, 32, 32, 3)
x_test_tetra = x_test_tetra.reshape(-1, 32, 32, 3)

# Check the images 
fig, axs = plt.subplots(ncols=5, nrows=2, figsize=(15,5))

# Loop through axes and plot random images
for ax in axs.ravel():
    ax.imshow(x_train_tetra[np.random.choice(x_train_tetra.shape[0])])
    ax.set_xticks([], [])  
    ax.set_yticks([], []) 

plt.tight_layout()
In [10]:
# The cluster-center colors indeed have low contrast, so I'll recolor the four clusters with the high-contrast colors suggested in the assignment
contrast_colors = np.array([[0,0,128],[230,25,75],[170,255,195],[255,255,255]], dtype=np.uint8)

# Color the images by mapping each pixel's cluster label to its contrast color
x_train_colored = contrast_colors[pred_clusters_train]
x_test_colored = contrast_colors[pred_clusters_test]

# Convert the recolored images back to the original 32x32x3 shape for visualization
x_train_colored = x_train_colored.reshape(-1, 32, 32, 3)
x_test_colored = x_test_colored.reshape(-1, 32, 32, 3)

# Check the images 
fig, axs = plt.subplots(ncols=5, nrows=2, figsize=(15,5))

# Loop through axes and plot random images
for ax in axs.ravel():
    ax.imshow(x_train_colored[np.random.choice(x_train_colored.shape[0])])
    ax.set_xticks([], [])  
    ax.set_yticks([], []) 

plt.tight_layout()

(e) Grayscale Images


In [11]:
# Convert images to grayscale
x_train_gray = np.array([rgb2gray(x_train_bird[i]) for i in range(len(x_train_bird))])
x_test_gray = np.array([rgb2gray(x_test_bird[i]) for i in range(len(x_test_bird))])

# Check the images 
fig, axs = plt.subplots(ncols=5, nrows=2, figsize=(15,5))

# Loop through axes and plot random images
for ax in axs.ravel():
    ax.imshow(x_train_gray[np.random.choice(x_train_gray.shape[0])], cmap='gray')
    ax.set_xticks([], [])  
    ax.set_yticks([], []) 

plt.tight_layout()

(f) Deep Convolutional Neural Network


In [12]:
# One-Hot Encode the clusters (tetra-chrome pixel color)
OHE = OneHotEncoder()
pred_clusters_train_ohe = OHE.fit_transform(pred_clusters_train.reshape(-1, 1))
pred_clusters_test_ohe = OHE.transform(pred_clusters_test.reshape(-1, 1))

# Reshape the one-hot pixel labels to match the image shape: (samples, 32, 32, NUM_COLORS)
pred_clusters_train_ohe = pred_clusters_train_ohe.toarray().astype(int).reshape(-1, 32, 32, NUM_COLORS)
tetrachrome_color_train = pred_clusters_train_ohe.copy()
pred_clusters_test_ohe = pred_clusters_test_ohe.toarray().astype(int).reshape(-1, 32, 32, NUM_COLORS)
tetrachrome_color_test = pred_clusters_test_ohe.copy()

# Shapes
print(f'pred_clusters_train_ohe.shape: {pred_clusters_train_ohe.shape}')
print(f'pred_clusters_test_ohe.shape: {pred_clusters_test_ohe.shape}')

# Reshape the grayscale images to match the expected input shape of (32, 32, 1), where 1 is the number of channels (grayscale)
x_train_gray = x_train_gray.reshape(-1, 32, 32, 1)
x_test_gray = x_test_gray.reshape(-1, 32, 32, 1)

# And convert the grayscale images from floats in [0, 1] (rgb2gray's output) to uint8 in [0, 255]
x_train_gray = img_as_ubyte(x_train_gray)
x_test_gray = img_as_ubyte(x_test_gray)

# Shapes
print(f'x_train_gray.shape: {x_train_gray.shape}')
print(f'x_test_gray.shape: {x_test_gray.shape}')
pred_clusters_train_ohe.shape: (5000, 32, 32, 4)
pred_clusters_test_ohe.shape: (1000, 32, 32, 4)
x_train_gray.shape: (5000, 32, 32, 1)
x_test_gray.shape: (1000, 32, 32, 1)
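As a quick sanity check on the targets (my own addition, not part of the assignment), every pixel's one-hot vector should sum to 1, and its argmax should recover the K-Means cluster label:

# Each pixel has exactly one active color class...
assert (tetrachrome_color_train.sum(axis=-1) == 1).all()
# ...and decoding the one-hot axis recovers the predicted clusters
assert (tetrachrome_color_train.argmax(axis=-1).ravel() == pred_clusters_train).all()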
In [21]:
# CNN
model = Sequential()
model.add(Conv2D(128, kernel_size=5, activation='relu', input_shape=(32, 32, 1), padding='same'))
model.add(MaxPool2D(pool_size=2, strides=2))
model.add(Conv2D(64, kernel_size=5, activation='relu', padding='same'))
model.add(MaxPool2D(pool_size=2, strides=2))
model.add(Conv2D(32, kernel_size=5, activation='relu', padding='same'))
model.add(MaxPool2D(pool_size=2, strides=2))
model.add(Flatten())
model.add(Dense(32*32*NUM_COLORS))        # one logit per pixel per color class (4096 total)
model.add(Reshape((32, 32, NUM_COLORS)))
model.add(Softmax())                      # per-pixel distribution over the 4 colors

# Compile the model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), 
              loss='categorical_crossentropy', 
              metrics=['accuracy'])

# Model summary
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_3 (Conv2D)            (None, 32, 32, 128)       3328      
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 16, 16, 128)       0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 16, 16, 64)        204864    
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 8, 8, 64)          0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 8, 8, 32)          51232     
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 4, 4, 32)          0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 512)               0         
_________________________________________________________________
dense_1 (Dense)              (None, 4096)              2101248   
_________________________________________________________________
reshape_1 (Reshape)          (None, 32, 32, 4)         0         
_________________________________________________________________
softmax_1 (Softmax)          (None, 32, 32, 4)         0         
=================================================================
Total params: 2,360,672
Trainable params: 2,360,672
Non-trainable params: 0
_________________________________________________________________
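These counts can be verified by hand: a Conv2D layer with k x k kernels has (k*k*in_channels + 1)*filters parameters, giving (5*5*1 + 1)*128 = 3,328, (5*5*128 + 1)*64 = 204,864, and (5*5*64 + 1)*32 = 51,232, while the Dense layer has (512 + 1)*4096 = 2,101,248, matching the summary above.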
In [22]:
# Set to true to continue the model's training from the saved weights - THIS WILL GIVE AN ERROR IF THE NETWORK ARCHITECTURE WAS CHANGED
# Set to false to train the neural network from scratch - THIS WILL LOSE ALL PROGRESS AND CAUSE WORSE PREDICTIONS
LOAD_TRAINED_WEIGHTS = True

if LOAD_TRAINED_WEIGHTS and os.path.exists('./checkpoints/model_k4.hdf5'):
    
    # Try loading weights. Will fail if the model structure changed
    try:
    
        # Load best model weights
        model.load_weights('./checkpoints/model_k4.hdf5')

        # Objective Function
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), 
                      loss='categorical_crossentropy',  
                      metrics=['accuracy'])

        # Check model
        model.summary()
        
        # Variable to guide the csv log callback
        SUCCESSFUL_WEIGHT_LOAD = True
    
    except:
        SUCCESSFUL_WEIGHT_LOAD = False
        print('Could not load weights. Most likely the network architecture changed.')

else:
    SUCCESSFUL_WEIGHT_LOAD = False
Could not load weights. Most likely the network architecture changed.
In [23]:
EPOCHS = 50
BATCH_SIZE = 64
TRAIN_MODEL = True

if TRAIN_MODEL:
    
    # Define directory for model checkpoints
    BACKUP_DIR = './checkpoints'
    if not os.path.exists(BACKUP_DIR):
        os.mkdir(BACKUP_DIR)

    # Define file to store checkpoint
    BACKUP_FILE = os.path.join(BACKUP_DIR, 'model_k4.hdf5')

    # Callbacks
    checkpoint = ModelCheckpoint(BACKUP_FILE, 
                                 monitor='val_loss',
                                 save_best_only=True,
                                 save_weights_only=True,
                                 verbose=0)
    plateauLRreduce = ReduceLROnPlateau(factor = 0.5,
                                        patience = 3,
                                        monitor='val_loss',
                                        min_lr = 0.0000001,
                                        verbose=1)
    stopearly = EarlyStopping(monitor='val_loss',
                              patience=15,
                              verbose=1)
    logCSV = CSVLogger(filename='log_model_k4.csv',
                       separator=',', 
                       append=(LOAD_TRAINED_WEIGHTS & SUCCESSFUL_WEIGHT_LOAD))
    model_callbacks = [checkpoint, plateauLRreduce, stopearly, logCSV]

    # Train model and save history
    model_history = model.fit(x_train_gray,
                              tetrachrome_color_train,
                              epochs=EPOCHS,
                              batch_size=BATCH_SIZE,
                              validation_split=0.2,
                              callbacks=model_callbacks,
                              verbose=1,
                              shuffle=True)
Epoch 1/50
63/63 [==============================] - 3s 43ms/step - loss: 1.4372 - accuracy: 0.3197 - val_loss: 1.3677 - val_accuracy: 0.3618 - lr: 0.0010
Epoch 2/50
63/63 [==============================] - 2s 32ms/step - loss: 1.3429 - accuracy: 0.3610 - val_loss: 1.3143 - val_accuracy: 0.3684 - lr: 0.0010
Epoch 3/50
63/63 [==============================] - 2s 33ms/step - loss: 1.2889 - accuracy: 0.3947 - val_loss: 1.2823 - val_accuracy: 0.4174 - lr: 0.0010
Epoch 4/50
63/63 [==============================] - 2s 32ms/step - loss: 1.2433 - accuracy: 0.4158 - val_loss: 1.2299 - val_accuracy: 0.4208 - lr: 0.0010
Epoch 5/50
63/63 [==============================] - 2s 33ms/step - loss: 1.1578 - accuracy: 0.4630 - val_loss: 1.1629 - val_accuracy: 0.4513 - lr: 0.0010
Epoch 6/50
63/63 [==============================] - 2s 33ms/step - loss: 1.1085 - accuracy: 0.4859 - val_loss: 1.1407 - val_accuracy: 0.4798 - lr: 0.0010
Epoch 7/50
63/63 [==============================] - 2s 33ms/step - loss: 1.0619 - accuracy: 0.5108 - val_loss: 1.0525 - val_accuracy: 0.5229 - lr: 0.0010
Epoch 8/50
63/63 [==============================] - 2s 32ms/step - loss: 1.0242 - accuracy: 0.5302 - val_loss: 1.0313 - val_accuracy: 0.5217 - lr: 0.0010
Epoch 9/50
63/63 [==============================] - 2s 32ms/step - loss: 1.0101 - accuracy: 0.5355 - val_loss: 1.0257 - val_accuracy: 0.5310 - lr: 0.0010
Epoch 10/50
63/63 [==============================] - 2s 32ms/step - loss: 0.9837 - accuracy: 0.5498 - val_loss: 0.9894 - val_accuracy: 0.5443 - lr: 0.0010
Epoch 11/50
63/63 [==============================] - 2s 32ms/step - loss: 0.9659 - accuracy: 0.5592 - val_loss: 0.9842 - val_accuracy: 0.5490 - lr: 0.0010
Epoch 12/50
63/63 [==============================] - 2s 33ms/step - loss: 0.9516 - accuracy: 0.5644 - val_loss: 0.9808 - val_accuracy: 0.5479 - lr: 0.0010
Epoch 13/50
63/63 [==============================] - 2s 33ms/step - loss: 0.9349 - accuracy: 0.5737 - val_loss: 0.9717 - val_accuracy: 0.5589 - lr: 0.0010
Epoch 14/50
63/63 [==============================] - 2s 33ms/step - loss: 0.9153 - accuracy: 0.5841 - val_loss: 0.9473 - val_accuracy: 0.5641 - lr: 0.0010
Epoch 15/50
63/63 [==============================] - 2s 32ms/step - loss: 0.9113 - accuracy: 0.5848 - val_loss: 0.9577 - val_accuracy: 0.5651 - lr: 0.0010
Epoch 16/50
63/63 [==============================] - 2s 32ms/step - loss: 0.8968 - accuracy: 0.5934 - val_loss: 0.9431 - val_accuracy: 0.5730 - lr: 0.0010
Epoch 17/50
63/63 [==============================] - 2s 32ms/step - loss: 0.8828 - accuracy: 0.6009 - val_loss: 0.9455 - val_accuracy: 0.5743 - lr: 0.0010
Epoch 18/50
63/63 [==============================] - 2s 33ms/step - loss: 0.8751 - accuracy: 0.6048 - val_loss: 0.9298 - val_accuracy: 0.5825 - lr: 0.0010
Epoch 19/50
63/63 [==============================] - 2s 32ms/step - loss: 0.8701 - accuracy: 0.6082 - val_loss: 0.9466 - val_accuracy: 0.5677 - lr: 0.0010
Epoch 20/50
63/63 [==============================] - 2s 32ms/step - loss: 0.8550 - accuracy: 0.6160 - val_loss: 0.9334 - val_accuracy: 0.5733 - lr: 0.0010
Epoch 21/50
63/63 [==============================] - 2s 32ms/step - loss: 0.8470 - accuracy: 0.6200 - val_loss: 0.9295 - val_accuracy: 0.5859 - lr: 0.0010
Epoch 22/50
63/63 [==============================] - 2s 33ms/step - loss: 0.8234 - accuracy: 0.6326 - val_loss: 0.9267 - val_accuracy: 0.5783 - lr: 0.0010
Epoch 23/50
63/63 [==============================] - 2s 32ms/step - loss: 0.8244 - accuracy: 0.6329 - val_loss: 0.9280 - val_accuracy: 0.5798 - lr: 0.0010
Epoch 24/50
63/63 [==============================] - 2s 32ms/step - loss: 0.8056 - accuracy: 0.6421 - val_loss: 0.9324 - val_accuracy: 0.5763 - lr: 0.0010
Epoch 25/50
61/63 [============================>.] - ETA: 0s - loss: 0.7974 - accuracy: 0.6465
Epoch 00025: ReduceLROnPlateau reducing learning rate to 0.0005000000237487257.
63/63 [==============================] - 2s 32ms/step - loss: 0.7976 - accuracy: 0.6463 - val_loss: 0.9718 - val_accuracy: 0.5803 - lr: 0.0010
Epoch 26/50
63/63 [==============================] - 2s 33ms/step - loss: 0.7614 - accuracy: 0.6671 - val_loss: 0.9096 - val_accuracy: 0.5988 - lr: 5.0000e-04
Epoch 27/50
63/63 [==============================] - 2s 32ms/step - loss: 0.7438 - accuracy: 0.6753 - val_loss: 0.9106 - val_accuracy: 0.5953 - lr: 5.0000e-04
Epoch 28/50
63/63 [==============================] - 2s 32ms/step - loss: 0.7348 - accuracy: 0.6804 - val_loss: 0.9224 - val_accuracy: 0.6006 - lr: 5.0000e-04
Epoch 29/50
61/63 [============================>.] - ETA: 0s - loss: 0.7278 - accuracy: 0.6831
Epoch 00029: ReduceLROnPlateau reducing learning rate to 0.0002500000118743628.
63/63 [==============================] - 2s 32ms/step - loss: 0.7280 - accuracy: 0.6829 - val_loss: 0.9153 - val_accuracy: 0.5952 - lr: 5.0000e-04
Epoch 30/50
63/63 [==============================] - 2s 33ms/step - loss: 0.7090 - accuracy: 0.6938 - val_loss: 0.9059 - val_accuracy: 0.6070 - lr: 2.5000e-04
Epoch 31/50
63/63 [==============================] - 2s 32ms/step - loss: 0.6993 - accuracy: 0.6983 - val_loss: 0.9113 - val_accuracy: 0.6082 - lr: 2.5000e-04
Epoch 32/50
63/63 [==============================] - 2s 32ms/step - loss: 0.6911 - accuracy: 0.7033 - val_loss: 0.9142 - val_accuracy: 0.6090 - lr: 2.5000e-04
Epoch 33/50
61/63 [============================>.] - ETA: 0s - loss: 0.6844 - accuracy: 0.7067
Epoch 00033: ReduceLROnPlateau reducing learning rate to 0.0001250000059371814.
63/63 [==============================] - 2s 32ms/step - loss: 0.6847 - accuracy: 0.7064 - val_loss: 0.9073 - val_accuracy: 0.6085 - lr: 2.5000e-04
Epoch 34/50
63/63 [==============================] - 2s 34ms/step - loss: 0.6737 - accuracy: 0.7120 - val_loss: 0.9119 - val_accuracy: 0.6060 - lr: 1.2500e-04
Epoch 35/50
63/63 [==============================] - 2s 32ms/step - loss: 0.6694 - accuracy: 0.7146 - val_loss: 0.9102 - val_accuracy: 0.6137 - lr: 1.2500e-04
Epoch 36/50
62/63 [============================>.] - ETA: 0s - loss: 0.6634 - accuracy: 0.7178 ETA: 0s - loss: 0.6595 - accuracy: 
Epoch 00036: ReduceLROnPlateau reducing learning rate to 6.25000029685907e-05.
63/63 [==============================] - 2s 40ms/step - loss: 0.6635 - accuracy: 0.7177 - val_loss: 0.9096 - val_accuracy: 0.6134 - lr: 1.2500e-04
Epoch 37/50
63/63 [==============================] - 2s 36ms/step - loss: 0.6568 - accuracy: 0.7215 - val_loss: 0.9070 - val_accuracy: 0.6153 - lr: 6.2500e-05
Epoch 38/50
63/63 [==============================] - 2s 32ms/step - loss: 0.6557 - accuracy: 0.7218 - val_loss: 0.9184 - val_accuracy: 0.6147 - lr: 6.2500e-05
Epoch 39/50
61/63 [============================>.] - ETA: 0s - loss: 0.6546 - accuracy: 0.7221
Epoch 00039: ReduceLROnPlateau reducing learning rate to 3.125000148429535e-05.
63/63 [==============================] - 2s 32ms/step - loss: 0.6548 - accuracy: 0.7219 - val_loss: 0.9074 - val_accuracy: 0.6159 - lr: 6.2500e-05
Epoch 40/50
63/63 [==============================] - 2s 32ms/step - loss: 0.6497 - accuracy: 0.7251 - val_loss: 0.9087 - val_accuracy: 0.6174 - lr: 3.1250e-05
Epoch 41/50
63/63 [==============================] - 2s 32ms/step - loss: 0.6485 - accuracy: 0.7258 - val_loss: 0.9084 - val_accuracy: 0.6179 - lr: 3.1250e-05
Epoch 42/50
61/63 [============================>.] - ETA: 0s - loss: 0.6471 - accuracy: 0.7263
Epoch 00042: ReduceLROnPlateau reducing learning rate to 1.5625000742147677e-05.
63/63 [==============================] - 2s 32ms/step - loss: 0.6470 - accuracy: 0.7265 - val_loss: 0.9088 - val_accuracy: 0.6185 - lr: 3.1250e-05
Epoch 43/50
63/63 [==============================] - 2s 32ms/step - loss: 0.6455 - accuracy: 0.7272 - val_loss: 0.9077 - val_accuracy: 0.6185 - lr: 1.5625e-05
Epoch 44/50
63/63 [==============================] - 2s 33ms/step - loss: 0.6448 - accuracy: 0.7276 - val_loss: 0.9109 - val_accuracy: 0.6183 - lr: 1.5625e-05
Epoch 45/50
61/63 [============================>.] - ETA: 0s - loss: 0.6442 - accuracy: 0.7282
Epoch 00045: ReduceLROnPlateau reducing learning rate to 7.812500371073838e-06.
63/63 [==============================] - 2s 32ms/step - loss: 0.6443 - accuracy: 0.7281 - val_loss: 0.9077 - val_accuracy: 0.6189 - lr: 1.5625e-05
Epoch 00045: early stopping

Model Accuracy, Loss and Learning Rate

In [24]:
# Read the log file
log_model_k4 = pd.read_csv('log_model_k4.csv')

# Create figure
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(15,5))

# Accuracy
fig.sca(axs[0])
g1 = plt.plot(log_model_k4.index, log_model_k4.accuracy, color='navy', label='Train')
g2 = plt.plot(log_model_k4.index, log_model_k4.val_accuracy, color='darkorange', label='Validation')
axs[0].set_ylim([0, 1])
axs[0].title.set_text('Accuracy')
axs[0].set_xlabel('Epoch')
axs[0].legend()

# Loss
fig.sca(axs[1])
g2 = plt.plot(log_model_k4.index, log_model_k4.loss, color='navy', label='Train')
g3 = plt.plot(log_model_k4.index, log_model_k4.val_loss, color='darkorange', label='Validation')
axs[1].title.set_text('Loss')
axs[1].set_xlabel('Epoch')
axs[1].legend()

# Learning Rate
fig.sca(axs[2])
axs[2].set_yscale('log')
g = plt.plot(log_model_k4.index, log_model_k4.lr, color='green')
axs[2].title.set_text('Learning Rate')
axs[2].set_xlabel('Epoch')

plt.tight_layout(w_pad=5)
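Note that the in-memory model holds the last-epoch weights, while the checkpoint file holds the weights with the best validation loss. To evaluate that best snapshot instead of the final one, it could be reloaded first; a sketch using the checkpoint path defined above:

# Optionally restore the best-val_loss weights saved by ModelCheckpoint
# (otherwise the evaluations below use the last-epoch weights)
model.load_weights('./checkpoints/model_k4.hdf5')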
In [25]:
performance_train = model.evaluate(x_train_gray, tetrachrome_color_train, return_dict=True)
performance_test = model.evaluate(x_test_gray, tetrachrome_color_test, return_dict=True)

print()
print('Train Dataset')
print(f'Loss: {performance_train["loss"]:.5f}')
print(f'Accuracy: {performance_train["accuracy"]:.5f}')
print()
print('Test Dataset')
print(f'Loss: {performance_test["loss"]:.5f}')
print(f'Accuracy: {performance_test["accuracy"]:.5f}')
157/157 [==============================] - 1s 9ms/step - loss: 0.6957 - accuracy: 0.7069
32/32 [==============================] - 0s 8ms/step - loss: 0.9324 - accuracy: 0.6096

Train Dataset
Loss: 0.69566
Accuracy: 0.70685

Test Dataset
Loss: 0.93242
Accuracy: 0.60964

Model Colorized Images

In [26]:
# Use the model to predict the class of each pixel in the test dataset
pred_test_colors = model.predict(x_test_gray)

# Flatten so that each row holds one pixel's predicted distribution over the 4 colors
pred_test_colors = pred_test_colors.reshape(-1, 4)

# Use the predicted pixel class to extract the pixel color
colored_test_images = []
for k in tqdm(range(len(pred_test_colors))):
    colored_test_images.append([np.uint8(color) for color in contrast_colors[np.argmax(pred_test_colors[k])]])
    
# Reshape the flat pixel list back into images
colored_test_images = np.reshape(colored_test_images, (-1, 32, 32, 3))
100%|████████████████████████████| 1024000/1024000 [00:08<00:00, 118285.01it/s]
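For reference, the per-pixel loop above can be collapsed into a single NumPy fancy-indexing operation that produces the same array; a sketch using the names already defined (the _fast suffix is mine):

# Vectorized equivalent: map each pixel's argmax class to its contrast color
colored_test_images_fast = contrast_colors[pred_test_colors.argmax(axis=1)]
colored_test_images_fast = colored_test_images_fast.reshape(-1, 32, 32, 3)
assert (colored_test_images_fast == colored_test_images).all()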

Visualizing the First 10 Images

In [29]:
# Plot original images, tetra-chrome images, and images colored by the model
num_images = 10
fig, axs = plt.subplots(ncols=4, nrows=num_images, figsize=(15,num_images*3))

# Loop through the axes and plot the first num_images test images
for axs_row in range(axs.shape[0]):
    
    # The row index doubles as the image index
    img_index = axs_row
    
    # Plot original image
    axs[axs_row][0].imshow(x_test_bird[img_index])
    axs[axs_row][0].set_xticks([], [])  
    axs[axs_row][0].set_yticks([], [])
    axs[axs_row][0].set_title('Original')
    
    # Plot tetra-chrome image
    axs[axs_row][1].imshow(x_test_colored[img_index])
    axs[axs_row][1].set_xticks([], [])  
    axs[axs_row][1].set_yticks([], [])
    axs[axs_row][1].set_title('Tetra-Chrome')

    # Plot grayscale image (the model input)
    axs[axs_row][2].imshow(x_test_gray[img_index].reshape(32, 32), cmap='gray')
    axs[axs_row][2].set_xticks([], [])  
    axs[axs_row][2].set_yticks([], [])
    axs[axs_row][2].set_title('Grayscale')
    
    # Plot model colored image
    axs[axs_row][3].imshow(colored_test_images[img_index])
    axs[axs_row][3].set_xticks([], [])  
    axs[axs_row][3].set_yticks([], [])
    axs[axs_row][3].set_title('CNN Colored')

plt.tight_layout()

Visualizing 5 Random Images

In [31]:
# Plot original images, tetra-chrome images, and images colored by the model
num_images = 5
fig, axs = plt.subplots(ncols=4, nrows=num_images, figsize=(15,num_images*3))

# Loop through axes and plot random images
for axs_row in range(axs.shape[0]):
    
    # Pick a random image
    img_index = np.random.choice(x_test_bird.shape[0])
    
    # Plot original image
    axs[axs_row][0].imshow(x_test_bird[img_index])
    axs[axs_row][0].set_xticks([], [])  
    axs[axs_row][0].set_yticks([], [])
    axs[axs_row][0].set_title('Original')
    
    # Plot tetra-chrome image
    axs[axs_row][1].imshow(x_test_colored[img_index])
    axs[axs_row][1].set_xticks([], [])  
    axs[axs_row][1].set_yticks([], [])
    axs[axs_row][1].set_title('Tetra-Chrome')

    # Plot grayscale image (the model input)
    axs[axs_row][2].imshow(x_test_gray[img_index].reshape(32, 32), cmap='gray')
    axs[axs_row][2].set_xticks([], [])  
    axs[axs_row][2].set_yticks([], [])
    axs[axs_row][2].set_title('Grayscale')
    
    # Plot model colored image
    axs[axs_row][3].imshow(colored_test_images[img_index])
    axs[axs_row][3].set_xticks([], [])  
    axs[axs_row][3].set_yticks([], [])
    axs[axs_row][3].set_title('CNN Colored')

plt.tight_layout()

The End!