OMP thread error while using ResNet50 transfer learning


I am trying to use the ResNet50 model on my data with transfer learning. My image data is of size 1280*960*1 (grayscale).
I used the following code to preprocess the data for ResNet50, and I want to evaluate this model on my training and test data sets. Is the preprocessing code correct, or is there a more efficient way to do it?

import time
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import ResNet50, EfficientNetB0, DenseNet121
from sklearn.metrics import accuracy_score
from tensorflow.keras.preprocessing.image import img_to_array
import pandas as pd
import os

os.environ.setdefault('OMP_NUM_THREADS', '4')

# Configure TensorFlow session
config = tf.compat.v1.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1
)
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))

# Clear TensorFlow session
tf.keras.backend.clear_session()

# Define constants
TARGET_SIZE = (224, 224)
TRAIN_DIR = '/home/pavani/output_folder_1/train/'
TEST_DIR = '/home/pavani/output_folder_1/test/'

# Set random seed for reproducibility
np.random.seed(42)
tf.random.set_seed(42)

# Define constants
NUM_CLASSES = 2
IMAGE_SIZE = (224, 224)
BATCH_SIZE = 32
EPOCHS = 10

# Create data generators
train_datagen = ImageDataGenerator(rescale=1.0/255)
test_datagen = ImageDataGenerator(rescale=1.0/255)

train_generator = train_datagen.flow_from_directory(
    TRAIN_DIR,
    target_size=TARGET_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    shuffle=True
)

test_generator = test_datagen.flow_from_directory(
    TEST_DIR,
    target_size=TARGET_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='categorical',
    shuffle=False
)

# Define the models
pretrained_models = [
    ResNet50(weights='imagenet', include_top=False, input_shape=(TARGET_SIZE[0], TARGET_SIZE[1], 3))
]

# Create a DataFrame to store the results
results_df = pd.DataFrame(columns=['Model', 'Accuracy', 'Training Time', 'Trainable Parameters'])

# Train and evaluate each model
for pretrained_model in pretrained_models:
    # Freeze the pretrained layers
    for layer in pretrained_model.layers:
        layer.trainable = False
    
    # Add custom classification layers
    flatten = tf.keras.layers.Flatten()(pretrained_model.output)
    output = tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')(flatten)
    cnn_model = tf.keras.models.Model(inputs=pretrained_model.input, outputs=output)
    
    # Compile the model
    cnn_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    
    # Train the model
    start_time = time.time()  # Start time
    history = cnn_model.fit(
        train_generator,
        steps_per_epoch=train_generator.samples // BATCH_SIZE,
        epochs=EPOCHS,
        validation_data=test_generator,
        validation_steps=test_generator.samples // BATCH_SIZE
    )
    end_time = time.time()  # End time
    training_time = end_time - start_time
    
    # Evaluate the model
    test_generator.reset()
    y_pred = cnn_model.predict(test_generator)
    y_pred = np.argmax(y_pred, axis=1)
    y_true = test_generator.classes
    accuracy = accuracy_score(y_true, y_pred)
    

    # Print model summary and number of trainable parameters
    cnn_model.summary()
    trainable_count = int(np.sum([tf.keras.backend.count_params(w) for w in cnn_model.trainable_weights]))
    
    # Append the results to the DataFrame
    results_df = pd.concat([results_df, pd.DataFrame([{
        'Model': pretrained_model.name,
        'Accuracy': accuracy,
        'Training Time': training_time,
        'Trainable Parameters': trainable_count
    }])], ignore_index=True)


# Save the DataFrame to an Excel file
results_df.to_excel('model_results_3.xlsx', index=False)

# Display the results
print(results_df)


Error:

tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_0' with dtype int32
[[{{node Placeholder/_0}}]]
Epoch 1/10
OMP: Error #34: System unable to allocate necessary resources for OMP thread:
OMP: System error #11: Resource temporarily unavailable
OMP: Hint Try decreasing the value of OMP_NUM_THREADS.

Fatal Python error: Aborted

@Nani,

Welcome to the TensorFlow Forum!

Can you try decreasing the value of OMP_NUM_THREADS?

Thank you!
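One thing worth checking: OMP_NUM_THREADS is read when the OpenMP runtime initialises, which happens while TensorFlow is being imported, so a value set after import tensorflow may have no effect. A minimal sketch of the ordering (the value 1 here is only an illustration):

import os

# Set the thread limit before TensorFlow (and its OpenMP runtime) is loaded;
# values set after the import may be ignored.
os.environ['OMP_NUM_THREADS'] = '1'

import tensorflow as tf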

import tensorflow as tf

# Configure TensorFlow session
config = tf.compat.v1.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1
)
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))

# Clear TensorFlow session
tf.keras.backend.clear_session()

I have set it to the lowest possible value, but I still get the same error.

I have reduced the image size to 640*480, and the images are grayscale. Do I have to convert them from grayscale to RGB in order to use ResNet? I am stuck on the preprocessing. This is the code I am trying; kindly let me know if there is any mistake.

from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.applications.resnet50 import preprocess_input
import numpy as np

# The batch size is the number of training examples used in one iteration
batch_size = 16
image_size = (640, 480)    # Original image size
target_size = (224, 224)   # Resized image size fed to ResNet50
channels = 3               # Number of channels (RGB)
num_epochs = 10

# Define a function to preprocess a single input image
def preprocess_image(image_path):
    # Load the image; color_mode='rgb' gives 3 channels
    image = load_img(image_path, target_size=target_size, color_mode='rgb')
    image = img_to_array(image)
    # Fallback: replicate the channel if the image is still single-channel
    if image.shape[-1] == 1:
        image = np.repeat(image, 3, axis=-1)
    # Apply the ResNet50-specific preprocessing
    image = preprocess_input(image)
    return image

# Data augmentation for training
image_gen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
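On the grayscale question: the files do not necessarily have to be converted on disk. When images are loaded through flow_from_directory with color_mode='rgb', Keras expands the single grayscale channel into three identical channels, which matches the (224, 224, 3) input the pretrained ResNet50 expects. A minimal sketch, reusing the generator and constants defined above:

train_generator = image_gen.flow_from_directory(
    TRAIN_DIR,
    target_size=target_size,
    color_mode='rgb',        # grayscale files are expanded to 3 identical channels
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True
)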