Classification of Tumors in the Human Brain

Author

Venkata Nekkanti

Published

May 9, 2024

from google.colab import drive
drive.mount('/content/drive')

# Path to the dataset folder in your Google Drive; adjust to match your own layout
path = '/content/drive/MyDrive/classification_brain_tumor'

path
Mounted at /content/drive
'/content/drive/MyDrive/classification_brain_tumor'
#!pip install --upgrade --force-reinstall tensorflow

import os

import cv2
import numpy as np
import tensorflow as tf
from tensorflow import keras

from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense, MaxPooling2D, Dropout
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

X_train = []
Y_train = []
image_size = 150
labels = ['glioma_tumor','meningioma_tumor','no_tumor','pituitary_tumor']

# Load each training image, resize it to 150x150 pixels, and record its class label
for i in labels:
    folderPath = os.path.join(path, 'Training', i)
    for j in os.listdir(folderPath):
        img = cv2.imread(os.path.join(folderPath,j))
        if img is None:  # skip files OpenCV cannot read
            continue
        img = cv2.resize(img,(image_size,image_size))
        X_train.append(img)
        Y_train.append(i)

# The Testing folder is pooled into the same arrays; a fresh split is made below
for i in labels:
    folderPath = os.path.join(path, 'Testing', i)
    for j in os.listdir(folderPath):
        img = cv2.imread(os.path.join(folderPath,j))
        if img is None:  # skip files OpenCV cannot read
            continue
        img = cv2.resize(img,(image_size,image_size))
        X_train.append(img)
        Y_train.append(i)

X_train = np.array(X_train)
Y_train = np.array(Y_train)
# Shuffle so the per-class ordering does not leak into the split
X_train,Y_train = shuffle(X_train,Y_train,random_state=101)
X_train.shape
(3274, 150, 150, 3)
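
Before splitting, it is worth confirming that the pooled dataset is reasonably balanced across the four classes. A minimal check using the arrays built above:

# Count how many images each class contributed
classes, counts = np.unique(Y_train, return_counts=True)
for c, n in zip(classes, counts):
    print(c, n)
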
X_train,X_test,y_train,y_test = train_test_split(X_train,Y_train,test_size=0.1,random_state=101)

# Convert string labels to integer indices, then one-hot encode them
y_train = tf.keras.utils.to_categorical([labels.index(i) for i in y_train])
y_test = tf.keras.utils.to_categorical([labels.index(i) for i in y_test])
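
To make the encoding concrete: 'pituitary_tumor' sits at index 3 in labels, so its one-hot vector is [0, 0, 0, 1]. A quick sanity check:

# labels.index maps a class name to its position; to_categorical one-hot encodes it
idx = labels.index('pituitary_tumor')   # 3
print(tf.keras.utils.to_categorical(idx, num_classes=4))   # [0. 0. 0. 1.]
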
# CNN: stacked conv blocks with max-pooling and dropout, then a dense classifier head
model = Sequential()
model.add(Conv2D(32,(3,3),activation = 'relu',input_shape=(150,150,3)))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.3))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(Dropout(0.3))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.3))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.3))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(Conv2D(256,(3,3),activation='relu'))
model.add(MaxPooling2D(2,2))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(512,activation = 'relu'))
model.add(Dense(512,activation = 'relu'))
model.add(Dropout(0.3))
model.add(Dense(4,activation='softmax'))
/usr/local/lib/python3.10/dist-packages/keras/src/layers/convolutional/base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
model.summary()
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━┓
┃ Layer (type)                          Output Shape                         Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━┩
│ conv2d (Conv2D)                      │ (None, 148, 148, 32)        │             896 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_1 (Conv2D)                    │ (None, 146, 146, 64)        │          18,496 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d (MaxPooling2D)         │ (None, 73, 73, 64)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout (Dropout)                    │ (None, 73, 73, 64)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_2 (Conv2D)                    │ (None, 71, 71, 64)          │          36,928 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_3 (Conv2D)                    │ (None, 69, 69, 64)          │          36,928 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_1 (Dropout)                  │ (None, 69, 69, 64)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_1 (MaxPooling2D)       │ (None, 34, 34, 64)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_2 (Dropout)                  │ (None, 34, 34, 64)          │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_4 (Conv2D)                    │ (None, 32, 32, 128)         │          73,856 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_5 (Conv2D)                    │ (None, 30, 30, 128)         │         147,584 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_6 (Conv2D)                    │ (None, 28, 28, 128)         │         147,584 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_2 (MaxPooling2D)       │ (None, 14, 14, 128)         │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_3 (Dropout)                  │ (None, 14, 14, 128)         │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_7 (Conv2D)                    │ (None, 12, 12, 128)         │         147,584 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ conv2d_8 (Conv2D)                    │ (None, 10, 10, 256)         │         295,168 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ max_pooling2d_3 (MaxPooling2D)       │ (None, 5, 5, 256)           │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_4 (Dropout)                  │ (None, 5, 5, 256)           │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ flatten (Flatten)                    │ (None, 6400)                │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense (Dense)                        │ (None, 512)                 │       3,277,312 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_1 (Dense)                      │ (None, 512)                 │         262,656 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dropout_5 (Dropout)                  │ (None, 512)                 │               0 │
├──────────────────────────────────────┼─────────────────────────────┼─────────────────┤
│ dense_2 (Dense)                      │ (None, 4)                   │           2,052 │
└──────────────────────────────────────┴─────────────────────────────┴─────────────────┘
 Total params: 4,447,044 (16.96 MB)
 Trainable params: 4,447,044 (16.96 MB)
 Non-trainable params: 0 (0.00 B)
model.compile(loss='categorical_crossentropy',optimizer='Adam',metrics=['accuracy'])
history = model.fit(X_train,y_train,epochs=20,validation_split=0.1)
Epoch 1/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 44s 301ms/step - accuracy: 0.2696 - loss: 3.4935 - val_accuracy: 0.4102 - val_loss: 1.3118
Epoch 2/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 7s 72ms/step - accuracy: 0.4672 - loss: 1.1965 - val_accuracy: 0.5864 - val_loss: 0.9219
Epoch 3/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 71ms/step - accuracy: 0.5523 - loss: 1.0151 - val_accuracy: 0.6169 - val_loss: 0.8877
Epoch 4/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 11s 74ms/step - accuracy: 0.5950 - loss: 0.8989 - val_accuracy: 0.6746 - val_loss: 0.8467
Epoch 5/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 10s 73ms/step - accuracy: 0.6704 - loss: 0.7856 - val_accuracy: 0.7220 - val_loss: 0.6445
Epoch 6/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 75ms/step - accuracy: 0.7296 - loss: 0.6704 - val_accuracy: 0.7153 - val_loss: 0.7502
Epoch 7/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 10s 76ms/step - accuracy: 0.7334 - loss: 0.6548 - val_accuracy: 0.7831 - val_loss: 0.5249
Epoch 8/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 10s 73ms/step - accuracy: 0.7908 - loss: 0.5048 - val_accuracy: 0.8034 - val_loss: 0.4778
Epoch 9/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 76ms/step - accuracy: 0.8169 - loss: 0.4760 - val_accuracy: 0.8271 - val_loss: 0.4549
Epoch 10/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 10s 77ms/step - accuracy: 0.8406 - loss: 0.3994 - val_accuracy: 0.8610 - val_loss: 0.4185
Epoch 11/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 77ms/step - accuracy: 0.8651 - loss: 0.3395 - val_accuracy: 0.8983 - val_loss: 0.3406
Epoch 12/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 75ms/step - accuracy: 0.9048 - loss: 0.2624 - val_accuracy: 0.8746 - val_loss: 0.3340
Epoch 13/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 10s 77ms/step - accuracy: 0.8871 - loss: 0.2911 - val_accuracy: 0.8576 - val_loss: 0.3961
Epoch 14/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 76ms/step - accuracy: 0.9127 - loss: 0.2273 - val_accuracy: 0.8847 - val_loss: 0.3082
Epoch 15/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 75ms/step - accuracy: 0.9270 - loss: 0.1931 - val_accuracy: 0.9051 - val_loss: 0.2890
Epoch 16/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 10s 77ms/step - accuracy: 0.9365 - loss: 0.1811 - val_accuracy: 0.8983 - val_loss: 0.3200
Epoch 17/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 74ms/step - accuracy: 0.9405 - loss: 0.1548 - val_accuracy: 0.9186 - val_loss: 0.2497
Epoch 18/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 74ms/step - accuracy: 0.9552 - loss: 0.1318 - val_accuracy: 0.9288 - val_loss: 0.2364
Epoch 19/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 10s 76ms/step - accuracy: 0.9569 - loss: 0.1395 - val_accuracy: 0.9085 - val_loss: 0.3221
Epoch 20/20
83/83 ━━━━━━━━━━━━━━━━━━━━ 6s 76ms/step - accuracy: 0.9623 - loss: 0.1085 - val_accuracy: 0.9390 - val_loss: 0.2632

Training accuracy reached about 96% after 20 epochs, with validation accuracy at roughly 94%; the accuracy and loss curves are plotted below.
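
Note that 96% is the training accuracy; the 10% test split (X_test, y_test) created earlier is never scored above. A minimal sketch to close that gap, reusing the accuracy_score import from the top of the notebook:

# Score the held-out test split
test_loss, test_acc = model.evaluate(X_test, y_test)
print(f"Test accuracy: {test_acc:.3f}")

# Equivalent check via sklearn on the argmaxed predictions
y_pred = model.predict(X_test).argmax(axis=1)
print("accuracy_score:", accuracy_score(y_test.argmax(axis=1), y_pred))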

import matplotlib.pyplot as plt
import seaborn as sns
#model.save('braintumor.h5')

acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
epochs = range(len(acc))
fig = plt.figure(figsize=(14,7))
plt.plot(epochs,acc,'r',label="Training Accuracy")
plt.plot(epochs,val_acc,'b',label="Validation Accuracy")
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='upper left')
plt.show()

loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
fig = plt.figure(figsize=(14,7))
plt.plot(epochs,loss,'r',label="Training loss")
plt.plot(epochs,val_loss,'b',label="Validation loss")
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper left')
plt.show()
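
seaborn is imported above but never used; one natural use is a per-class confusion matrix on the same held-out split. A sketch, assuming X_test and y_test from the split above:

from sklearn.metrics import confusion_matrix

# True vs. predicted class indices on the test split
y_true = y_test.argmax(axis=1)
y_pred = model.predict(X_test).argmax(axis=1)

cm = confusion_matrix(y_true, y_pred)
plt.figure(figsize=(8,6))
sns.heatmap(cm, annot=True, fmt='d', xticklabels=labels, yticklabels=labels)
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()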

print("Path to dataset files:", path)

# Construct the correct image path
image_filename = 'p (107).jpg'  # Replace with your desired image filename
image_path = os.path.join(path, 'Training', 'pituitary_tumor', image_filename)

# Load the image
img = cv2.imread(image_path)

# Check if the image was loaded successfully
if img is None:
    print(f"Error: Could not load image from {image_path}. Check file path and permissions.")
else:
    # Resize the image
    img = cv2.resize(img, (150, 150))
    img_array = np.array(img)
    print(img_array.shape)
Path to dataset files: /content/drive/MyDrive/classification_brain_tumor
(150, 150, 3)
# Add a batch dimension: the model expects input of shape (batch, height, width, channels)
img_array = img_array.reshape(1,150,150,3)
img_array.shape
(1, 150, 150, 3)
from tensorflow.keras.preprocessing import image

# Reload the same image via Keras utilities for display
img = image.load_img(image_path)

# Display the image; matplotlib.pyplot is already imported as plt
plt.imshow(img, interpolation='nearest')
plt.show()

# Predict class probabilities for the single image and take the most likely index
a = model.predict(img_array)
indices = a.argmax()
indices
1/1 ━━━━━━━━━━━━━━━━━━━━ 1s 1s/step
3
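
The raw index is easier to interpret when mapped back through the labels list defined earlier; index 3 corresponds to 'pituitary_tumor':

# Decode the predicted class index into its label
print(labels[int(indices)])   # pituitary_tumor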