I am trying to make predictions with a VGG16 model on a brain MRI dataset.

When I attempt to make predictions, only class indices 1 and 2 (the meningioma and notumor classes, respectively) are ever predicted out of the four classes.

Raw Prediction Output: [[0.0000000e+00 1.0000000e+00 1.2160221e-08 0.0000000e+00]]
1
Predicted Class Index: 1
Predicted Probability: 1.0
Converted Predicted Class: 1
1/1 [==============================] - 0s 71ms/step
Raw Prediction Output: [[0. 0. 1. 0.]]
2
Predicted Class Index: 2
Predicted Probability: 1.0
Converted Predicted Class: 2
1/1 [==============================] - 0s 74ms/step
Raw Prediction Output: [[0.000000e+00 4.721507e-11 1.000000e+00 0.000000e+00]]
2
Predicted Class Index: 2
Predicted Probability: 1.0

As seen above, the other class probabilities are always zero (or effectively zero).
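
For reference, the raw output can be mapped back to the label names used during training (the labels list defined in the training script further down); a minimal sketch, assuming the same class order:

import numpy as np

labels = ['glioma', 'meningioma', 'notumor', 'pituitary']   # same order as in the training script
prediction = np.array([[0.0, 1.0, 1.2160221e-08, 0.0]])     # first raw output shown above
idx = int(np.argmax(prediction, axis=1)[0])                  # argmax over the class axis
print(labels[idx], float(prediction[0, idx]))                # -> meningioma 1.0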

Code for predictions:

import tensorflow as tf
from tensorflow import keras
from tensorflow.python.client import device_lib 
from keras import layers
from keras.models import Sequential, Model
from keras.layers import BatchNormalization
from keras.layers import Input, Lambda, Dense, Flatten, Activation, Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras import applications
from keras.preprocessing.image import img_to_array, array_to_img
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image

import os

import cv2
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from PIL import Image
from sklearn.utils import shuffle

import sys
import PyQt5 as qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QPushButton, QVBoxLayout, QWidget, QFileDialog, QMessageBox, QMenu, QAction
from PyQt5.QtGui import QPixmap, QFont, QImage
from PyQt5.QtCore import Qt

TumorModel = keras.models.load_model('./VGG16.keras')
TumorModel.summary()

def preprocess_data(image):
    # Check if the image is already a numpy array
    if isinstance(image, np.ndarray):
        # Resize the image to match the input shape expected by the model
        image = cv2.resize(image, (224, 224))
    else:
        # Convert the image to a NumPy array
        image_array = np.array(image)

        # Resize the image to match the input shape expected by the model
        image = cv2.resize(image_array, (224, 224))

    # Expand the dimensions to create a batch of one image
    image = np.expand_dims(image, axis=0)

    # Preprocess the input image
    preprocessed_image = preprocess_input(image)

    return preprocessed_image




class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("Brain MRI Prediction Software")
        self.setGeometry(100, 100, 600, 500)

        # Set custom font
        self.custom_font = QFont("Arial", 12)

        # Create UI elements
        self.title_label = QLabel("Brain MRI Prediction Software", self)
        self.title_label.setAlignment(Qt.AlignCenter)
        self.title_label.setFont(QFont("Arial", 16, QFont.Bold))
        self.image_label = QLabel(self)
        self.image_label.setAlignment(Qt.AlignCenter)
        self.image_label.setFont(self.custom_font)
        self.upload_button = QPushButton("Upload MRI", self)
        self.upload_button.setFont(self.custom_font)
        self.predict_button = QPushButton("Predict", self)
        self.predict_button.setFont(self.custom_font)

        # Additional labels for notes from the model
        self.prediction_label = QLabel(self)
        self.prediction_label.setFont(self.custom_font)
        self.notes_label = QLabel(self)
        self.notes_label.setFont(self.custom_font)
        
        # Connect button signals to slots
        self.upload_button.clicked.connect(self.upload_mri)
        self.predict_button.clicked.connect(self.predict)
        
        # Set layout
        layout = QVBoxLayout()
        layout.addWidget(self.title_label)
        layout.addWidget(self.image_label)
        layout.addWidget(self.upload_button)
        layout.addWidget(self.predict_button)
        layout.addWidget(self.prediction_label)  # Add prediction label
        layout.addWidget(self.notes_label)  # Add notes label
        
        widget = QWidget()
        widget.setLayout(layout)
        self.setCentralWidget(widget)

        # Create actions for theme switching
        self.light_theme_action = QAction("Light Theme", self)
        self.dark_theme_action = QAction("Dark Theme", self)
        self.light_theme_action.triggered.connect(self.set_light_theme)
        self.dark_theme_action.triggered.connect(self.set_dark_theme)

        # Create settings menu
        settings_menu = QMenu("Settings", self)
        settings_menu.addAction(self.light_theme_action)
        settings_menu.addAction(self.dark_theme_action)

        # Create menu bar
        menubar = self.menuBar()
        menubar.addMenu(settings_menu)

        # Set default theme
        self.light_theme()

    def light_theme(self):
        self.setStyleSheet("")

    def dark_theme(self):
        self.setStyleSheet("""
            background-color: #333333;
            color: #FFFFFF;
        """)

    def set_light_theme(self):
        self.light_theme()

    def set_dark_theme(self):
        self.dark_theme()

    def upload_mri(self):
        file_dialog = QFileDialog(self)
        file_dialog.setNameFilter("Images (*.png *.jpg *.jpeg)")
        if file_dialog.exec_():
            file_path = file_dialog.selectedFiles()[0]
            self.image = image.load_img(file_path, target_size=(224, 224))
            pixmap = QPixmap(file_path)
            self.image_label.setPixmap(pixmap.scaled(400, 400))

    def predict(self):
        if hasattr(self, 'image'):
            # Preprocess the image
            preprocessed_image = preprocess_data(self.image)

            # Predict using the preprocessed image array
            prediction = TumorModel.predict(preprocessed_image)
            print("Raw Prediction Output:", prediction)

            predicted_class_index = np.argmax(prediction, axis=1)[0]  # argmax over the class axis (axis=1), not the batch axis
            print(predicted_class_index)

            # Get the predicted probability of the class with the highest probability
            predicted_prob = prediction[0][predicted_class_index]

            # Debugging: Print predicted class index and probability
            print("Predicted Class Index:", predicted_class_index)
            print("Predicted Probability:", predicted_prob)

            # Convert predicted_class_index to integer scalar
            predicted_class = int(predicted_class_index.item())  # Access the scalar value using .item()

            # Debugging: Print converted predicted class
            print("Converted Predicted Class:", predicted_class)

            # Format the predicted probability as a percentage with two decimal places
            formatted_prob = '{:.2f}%'.format(predicted_prob * 100)

            # Display prediction results
            QMessageBox.information(self, "Prediction Result",
                                    f"Predicted Class: {predicted_class}\n"
                                    f"Predicted Probability: {formatted_prob}")


            prediction_result = "The predicted class is " + str(predicted_class) + "."
            additional_notes = "Additional notes from the model."
            self.prediction_label.setText("Prediction: " + prediction_result)
            self.notes_label.setText("Notes: " + additional_notes)
        else:
            QMessageBox.warning(self, "Warning", "Please upload an MRI image first.")


if __name__ == "__main__":
    app = QApplication(sys.argv)

    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
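
One thing worth noting for context: the training script below scales pixels with X_train_scaled /= 255 before model.fit, while preprocess_data above applies keras.applications.vgg16.preprocess_input, which performs ImageNet mean subtraction (and RGB-to-BGR conversion) rather than 0-1 scaling; the training images are also read with cv2.imread (BGR) while the GUI loads them with image.load_img (RGB). Below is a minimal sketch of an inference-time preprocessing that mirrors the training scaling; the helper name is hypothetical and it assumes the model was trained exactly as shown in the training script:

import cv2
import numpy as np

def preprocess_like_training(img, target_size=(224, 224)):
    # Hypothetical helper: mirrors the training pipeline (resize, scale to [0, 1], add a batch dimension).
    arr = np.array(img)                      # accepts PIL images as well as numpy arrays
    arr = cv2.resize(arr, target_size)
    arr = arr.astype('float32') / 255.0      # same scaling as X_train_scaled /= 255 in the training script
    return np.expand_dims(arr, axis=0)       # batch of one image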

Model training code:

import os
import io
import warnings
from glob import glob

import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm

from sklearn.utils import shuffle  # shuffle arrays or sparse matrices in a consistent way
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import scikitplot as skplt

from keras.layers import Input, Lambda, Dense, Flatten, Conv2D, MaxPooling2D, Dropout
from keras.models import Model, Sequential, load_model
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical

warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings('ignore')

labels = ['glioma', 'meningioma', 'notumor', 'pituitary']
IMAGE_SIZE = [224, 224]

X_train = []  # training images
Y_train = []  # training labels

image_size = 224

for i in labels:
    folderPath = os.path.join('C:/Users/win11/Documents/Dataset', 'Training', i)
    for j in tqdm(os.listdir(folderPath)):
        img = cv2.imread(os.path.join(folderPath, j))      # `img` avoids shadowing the imported keras `image` module
        img = cv2.resize(img, (image_size, image_size))
        X_train.append(img)
        Y_train.append(i)


for i in labels:
    folderPath = os.path.join('C:/Users/win11/Documents/Dataset', 'Testing', i)  # join two or more pathname components
    for j in tqdm(os.listdir(folderPath)):
        img = cv2.imread(os.path.join(folderPath, j))
        img = cv2.resize(img, (image_size, image_size))
        X_train.append(img)
        Y_train.append(i)
        
# Images and labels were appended as lists; convert them to arrays
X_train = np.array(X_train)
Y_train = np.array(Y_train)

print(X_train.shape)  # number of samples = 7023

X_train, Y_train = shuffle(X_train, Y_train, random_state=42)
print(X_train.shape)

grid_width = 4
grid_height = 4
f, ax = plt.subplots(grid_width, grid_height)
f.set_size_inches(8, 8)

img_idx = 0
for i in range(0, grid_width):
    for j in range(0, grid_height):
        ax[i][j].axis('off')
        ax[i][j].set_title('Label: '+Y_train[img_idx])
        ax[i][j].imshow(X_train[img_idx])
        img_idx += 1

plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.2, hspace=0.55)  

X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.2, random_state=42)
#X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train, test_size=0.1, random_state=42)

y_train_new = []
#y_valid_new = []
y_test_new = []

for i in Y_train:
    y_train_new.append(labels.index(i))  # convert string labels to integers:
                                         # glioma -> 0, meningioma -> 1, notumor -> 2, pituitary -> 3
Y_train = to_categorical(y_train_new)  # converts a class vector (integers) to a binary class matrix

#for i in Y_valid:
#    y_valid_new.append(labels.index(i))

#Y_valid = to_categorical(y_valid_new)

for i in Y_test:
    y_test_new.append(labels.index(i))

Y_test = to_categorical(y_test_new)

vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
for layer in vgg.layers:
  layer.trainable = False

# our layers - you can add more if you want
x = Flatten()(vgg.output)
# x = Dense(1000, activation='relu')(x)
prediction = Dense(4, activation='softmax')(x)
model = Model(inputs=vgg.input, outputs=prediction)

model.summary()

model.compile(
  loss='categorical_crossentropy',
  optimizer='adam',
  metrics=['accuracy']
)

# Scaling train and test features (the validation split is commented out)
X_train_scaled = X_train.astype('float32')
#X_valid_scaled = X_valid.astype('float32')
X_test_scaled = X_test.astype('float32')


X_train_scaled /= 255
#X_valid_scaled /= 255
X_test_scaled /= 255

history = model.fit(x=X_train_scaled, y=Y_train,
                   validation_data=(X_test_scaled, Y_test),
                   batch_size=32,
                   epochs=10,
                   verbose=1)

f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
t = f.suptitle('VGG16 Transfer Learning Performance', fontsize=12)
f.subplots_adjust(top=0.85, wspace=0.3)

epoch_list = list(range(1, 11))  # matches epochs=10 above
ax1.plot(history.history['accuracy'], label='Train Accuracy')
ax1.plot(history.history['val_accuracy'], label='Validation Accuracy')
ax1.set_xticks(np.arange(0, 11, 5))
ax1.set_ylabel('Accuracy Value')
ax1.set_xlabel('Epoch')
ax1.set_title('Accuracy')
l1 = ax1.legend(loc="best")

ax2.plot(history.history['loss'], label='Train Loss')
ax2.plot(history.history['val_loss'], label='Validation Loss')
ax2.set_xticks(np.arange(0, 11, 5))
ax2.set_ylabel('Loss Value')
ax2.set_xlabel('Epoch')
ax2.set_title('Loss')
l2 = ax2.legend(loc="best")

model.save('VGG16.keras')

# Getting model predictions
test_predictions = model.predict(X_test_scaled)
preds = np.argmax(test_predictions, axis=1)
actual_label = np.argmax(Y_test, axis=1)
print(classification_report(actual_label, preds))

cnf = confusion_matrix(actual_label, preds)
plt.figure(figsize=(8, 6), dpi=70, facecolor='w', edgecolor='k')
ax = sns.heatmap(cnf, cmap='Blues', annot=True, fmt='d', xticklabels=labels, yticklabels=labels)
plt.title('Brain Tumor MRI Classification')
plt.xlabel('Prediction')
plt.ylabel('Ground Truth')
plt.show()

skplt.metrics.plot_roc(actual_label, test_predictions)

grid_width = 5
grid_height = 5
f, ax = plt.subplots(grid_width, grid_height)
f.set_size_inches(15, 15)

img_idx = 0
for i in range(0, grid_width):
    for j in range(0, grid_height):
        actual = actual_label[img_idx]
        predicted = preds[img_idx]
        confidence = round(test_predictions[img_idx][predicted], 2)
        ax[i][j].axis('off')
        ax[i][j].set_title('Actual: '+labels[actual]+'\nPred: '+labels[predicted] + '\nConf: ' +str(confidence))
        ax[i][j].imshow(X_test[img_idx])
        img_idx += 1

plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0.5, hspace=0.55)
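
For reference, the per-class sample counts (relevant to the reply below about dataset balance) can be printed right after the loading loops, while Y_train still holds the string labels; a minimal sketch:

import numpy as np

# Count images per class across the combined Training and Testing folders.
classes, counts = np.unique(Y_train, return_counts=True)
for c, n in zip(classes, counts):
    print(f'{c}: {n}')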

Hi @polarbear, could you please let us know whether you trained the model on a balanced dataset? Thank you.