Respected all TensorFlow developers, i am new to TensorFlow , as usual i am seeing an error, please help me to solve it

Respected all ,
This is my TensorFlow code in google co lab when hit run training function , i am seeing an error please help me solve it.
code:-

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import image_dataset_from_directory

train_transforms = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
horizontal_flip=True,
rotation_range=10,
width_shift_range=0.15,
height_shift_range=0.15,
zoom_range=0.2,
fill_mode=‘nearest’)

test_transforms = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
horizontal_flip=True,
rotation_range=10
)import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import image_dataset_from_directory

train_transforms = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
horizontal_flip=True,
rotation_range=10,
width_shift_range=0.15,
height_shift_range=0.15,
zoom_range=0.2,
fill_mode=‘nearest’)

test_transforms = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
horizontal_flip=True,
rotation_range=10
)
train_dataloader = train_data.prefetch(tf.data.experimental.AUTOTUNE)

test_dataloader = test_data.prefetch(tf.data.experimental.AUTOTUNE)
import tensorflow as tf

class TinyVGG1(tf.keras.Model):
def init(self, hidden=78, input_shape=(224, 224, 3), output=2):
super().init()

    self.conv1 = tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters=hidden, kernel_size=(3, 3), strides=(1, 1), padding='same'),
        tf.keras.layers.BatchNormalization(hidden),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dropout(rate=0.2),

        tf.keras.layers.Conv2D(filters=hidden, kernel_size=(3, 3), strides=(1, 1), padding='same'),
        tf.keras.layers.BatchNormalization(hidden),
        tf.keras.layers.ReLU(),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))
    ])

    self.conv2 = tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters=hidden, kernel_size=(3, 3), strides=(1, 1), padding='same'),
        tf.keras.layers.BatchNormalization(hidden),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dropout(rate=0.2),

        tf.keras.layers.Conv2D(filters=hidden, kernel_size=(3, 3), strides=(1, 1), padding='same'),
        tf.keras.layers.BatchNormalization(hidden),
        tf.keras.layers.ReLU(),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))
    ])

    self.conv3 = tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters=hidden, kernel_size=(3, 3), strides=(1, 1), padding='same'),
        tf.keras.layers.BatchNormalization(hidden),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dropout(rate=0.2),

        tf.keras.layers.Conv2D(filters=hidden, kernel_size=(3, 3), strides=(1, 1), padding='same'),
        tf.keras.layers.BatchNormalization(hidden),
        tf.keras.layers.ReLU(),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))
    ])

    self.conv4 = tf.keras.Sequential([
        tf.keras.layers.Conv2D(filters=hidden, kernel_size=(3, 3), strides=(1, 1), padding='same'),
        tf.keras.layers.BatchNormalization(hidden),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dropout(rate=0.2),

        tf.keras.layers.Conv2D(filters=hidden, kernel_size=(3, 3), strides=(1, 1), padding='same'),
        tf.keras.layers.BatchNormalization(hidden),
        tf.keras.layers.ReLU(),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(2, 2))
    ])

    self.classifier = tf.keras.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(units=output)
    ])

def call(self, inputs):
    x = self.conv1(inputs)
    x = self.conv2(x)
    x = self.conv3(x)
    x = self.conv4(x)
    x = self.classifier(x)
    return x

model_x = TinyVGG1(input_shape=(224, 224, 3), output=2)
device = tf.device(“GPU:0” if tf.test.is_gpu_available() else “CPU”)

import tensorflow as tf
from tensorflow.keras.metrics import Accuracy

def accuracy_fn(y_true, y_pred):

Calculate the number of correct predictions

correct = tf.equal(y_true, y_pred)

Calculate the accuracy as the proportion of correct predictions

accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

Convert accuracy to percentage and return

return accuracy * 100
mport tensorflow as tf

from tensorflow.keras.losses import CategoricalCrossentropy

from tensorflow.keras.optimizers import Adam

loss_fn = CategoricalCrossentropy()

optimizer = tf.keras.optimizers.legacy.Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0.00001)import tensorflow as tf
from tensorflow.keras.metrics import Accuracy

def train_step(model: tf.keras.Model,
dataloader: tf.data.Dataset,
optimizer: tf.keras.optimizers.Optimizer,
loss_fn: tf.keras.losses.Loss,
accuracy_fn: Accuracy,
device: tf.device):

train_acc, train_loss = 0, 0
model.train()

for batch, (X, y) in enumerate(dataloader):
    X, y = X.to(device), y.to(device)

    with tf.GradientTape() as tape:
        # Forward pass
        y_pred = model(X)

        # Calculate loss
        loss = loss_fn(y, y_pred)

        # Calculate regularization loss
        l2_loss = tf.reduce_sum([tf.nn.l2_loss(param) for param in model.trainable_variables])

        # Total loss
        total_loss = loss + l2_loss

        # Calculate accuracy
        acc = accuracy_fn(y, y_pred)

    # Compute gradients and apply update step
    grads = tape.gradient(total_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    # Update loss and accuracy metrics
    train_acc.update_state(y, y_pred)
    train_loss.update_state(total_loss)

# Calculate average loss and accuracy over the epoch
train_acc = train_acc.result() * 100
train_loss = train_loss.result()

print(f"Epoch {epoch} | Train acc: {train_acc:.2f}% | Train Loss {train_loss:.4f}")

def test_step(model: tf.keras.Model,
dataloader: tf.data.Dataset,
loss_fn: tf.keras.losses.Loss,
accuracy_fn: Accuracy,
device: tf.device):

test_acc, test_loss = 0, 0
model.eval()

for batch, (X, y) in enumerate(dataloader):
    X, y = X.to(device), y.to(device)

    # Forward pass
    y_pred = model(X)

    # Calculate loss
    test_loss += loss_fn(y, y_pred)

    # Calculate accuracy
    test_acc += accuracy_fn(y, y_pred)

# Calculate average loss and accuracy over the epoch
test_acc = test_acc * 100 / len(dataloader)
test_loss = test_loss / len(dataloader)

print(f"Test Acc {test_acc:.2f}% | Test Loss {test_loss:.4f}")

import tensorflow as tf

from timeit import default_timer as timer

Set random seed

tf.random.set_seed(42)

Define epochs

epochs = 500

Start timer

start = timer()

Train the model for specified epochs

for epoch in range(epochs):
train_step(model=model_x,
dataloader=train_dataloader,
optimizer=optimizer,
loss_fn=loss_fn,
accuracy_fn=accuracy_fn,
device=device)

Evaluate the model on test data

test_step(model=model_x,
dataloader=test_dataloader,
loss_fn=loss_fn,
accuracy_fn=accuracy_fn,
device=device)

End timer and calculate total training time

end = timer()
training_time = end - start
print(f"Total training time: {training_time:.2f} seconds")

this is the error:-AttributeError Traceback (most recent call last)

in <cell line: 15>()
14 # Train the model for specified epochs
15 for epoch in range(epochs):
—> 16 train_step(model=model_x,
17 dataloader=train_dataloader,
18 optimizer=optimizer,

1 frames

in train(self, mode)
67
68 def train(self, mode=True):
—> 69 super(TinyVGG1, self).train(mode) # Explicitly pass the ‘mode’ argument to super()
70 for layer in self.layers:
71 if isinstance(layer, tf.keras.layers.BatchNormalization):

AttributeError: ‘super’ object has no attribute ‘train’

Hi @Teddy_Salas, For training the model you have to use model.fit() . Thank You.

1 Like

@Kiran_Sai_Ramineni i am still seeing an error
when i run train func code ,Error:----------------------------------------------------------------------------

ValueError Traceback (most recent call last)

in <cell line: 15>()
14 # Train the model for specified epochs
15 for epoch in range(epochs):
—> 16 train_step(model=model_x,
17 dataloader=train_dataloader,
18 optimizer=optimizer,

2 frames

/usr/local/lib/python3.10/dist-packages/keras/src/engine/data_adapter.py in select_data_adapter(x, y)
1103 if not adapter_cls:
1104 # TODO(scottzhu): This should be a less implementation-specific error.
→ 1105 raise ValueError(
1106 “Failed to find data adapter that can handle input: {}, {}”.format(
1107 _type_name(x), _type_name(y)

ValueError: Failed to find data adapter that can handle input: <class ‘NoneType’>, <class ‘NoneType’>

Hi @Teddy_Salas, Could you please check whether your training/testing data and training/testing labels contain any None data type if there are any None values please remove those and from the training/testing data. Thank You.