Data augmentation issue

The code comes from here:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adamax

directory = "Crop_dataset/"
IMG_SIZE = 128
BATCH_SIZE = 16

train_dataset = tf.keras.utils.image_dataset_from_directory(
    directory,
    labels='inferred',
    label_mode='int',
    class_names=["Confortable", "Inconfortable"],
    color_mode='rgb',  # rgb or grayscale
    batch_size=BATCH_SIZE,
    image_size=(IMG_SIZE, IMG_SIZE),
    shuffle=True,
    validation_split=0.3,
    subset="training",
    seed=123)

test_dataset = tf.keras.utils.image_dataset_from_directory(
    directory,
    labels='inferred',
    label_mode='int',
    class_names=["Confortable", "Inconfortable"],
    color_mode='rgb',
    batch_size=BATCH_SIZE,
    image_size=(IMG_SIZE, IMG_SIZE),
    shuffle=True,
    validation_split=0.3,
    subset="validation",
    seed=123)

test_batches = tf.data.experimental.cardinality(test_dataset)
validation_dataset = test_dataset.take(test_batches // 2)
test_dataset = test_dataset.skip(test_batches // 2)

AUTOTUNE = tf.data.AUTOTUNE

train_ds = train_dataset.prefetch(buffer_size=AUTOTUNE)
val_ds = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_ds = test_dataset.prefetch(buffer_size=AUTOTUNE)

print('Number of train batches: %d' % tf.data.experimental.cardinality(train_dataset))
print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))

data_augmentation = keras.Sequential([
    layers.Reshape((BATCH_SIZE, IMG_SIZE, IMG_SIZE, 3)),
    layers.RandomFlip("horizontal_and_vertical"),
    layers.RandomRotation(0.2),
    layers.RandomContrast(0.2),
])

resize_and_rescale = tf.keras.Sequential([
    layers.Resizing(IMG_SIZE, IMG_SIZE),
    layers.Rescaling(1./255),
])

batch_size = BATCH_SIZE
AUTOTUNE = tf.data.AUTOTUNE

aug_ds = train_ds.map(
    lambda x, y: (resize_and_rescale(x, training=True), y))

def prepare(ds, shuffle=False, augment=False):
    ds = ds.map(lambda x, y: (resize_and_rescale(x), y),
                num_parallel_calls=AUTOTUNE)

    if shuffle:
        ds = ds.shuffle(1000)
    ds = ds.batch(batch_size)

    if augment:
        ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y),
                    num_parallel_calls=AUTOTUNE)

    return ds.prefetch(buffer_size=AUTOTUNE)

train_ds = prepare(train_ds, shuffle=True, augment=True)
val_ds = prepare(val_ds)
test_ds = prepare(test_ds)

model = tf.keras.Sequential([
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(1)
])

opt = Adamax(learning_rate=1e-04)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])

scores, histories = list(), list()
modeleCheval = Advanced_model(CHANEL)
history = modeleCheval.fit(train_ds, validation_data=val_ds, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1)
loss, acc = modeleCheval.evaluate(test_ds, verbose=0)
print('accuracy %.3f' % (acc * 100.0))
scores.append(acc)
histories.append(history)
plot_curves(histories)

I got this error:

  • Line: ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y),

  • Error:
    ValueError: in user code:

    File "", line 71, in None *
    lambda x, y: (data_augmentation(x, training=True), y)
    File "/usr/local/lib/python3.9/dist-packages/keras/utils/traceback_utils.py", line 70, in error_handler **
    raise e.with_traceback(filtered_tb) from None

    ValueError: Exception encountered when calling layer 'random_flip_24' (type RandomFlip).

    'image' (shape (None, None, 128, 128, 3)) must have either 3 or 4 dimensions.

    Call arguments received by layer 'random_flip_24' (type RandomFlip):
    • inputs=tf.Tensor(shape=(None, None, 128, 128, 3), dtype=float32)
    • training=True

To fix it, I did this:
data_augmentation = keras.Sequential([
    layers.Reshape((IMG_SIZE, IMG_SIZE, 3)),
    layers.RandomFlip("horizontal_and_vertical"),
    layers.RandomRotation(0.2),
    layers.RandomContrast(0.2),
])

But I got a new error:

  • Line: history = modeleCheval.fit(train_ds, validation_data=val_ds, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1)

  • Error:

InvalidArgumentError: Graph execution error:

2 root error(s) found.
(0) INVALID_ARGUMENT: 2 root error(s) found.
(0) INVALID_ARGUMENT: Input to reshape is a tensor with 12582912 values, but the requested shape has 786432
[[{{node sequential_78/reshape_30/Reshape}}]]
[[sequential_78/random_contrast_26/stateful_uniform_full_int/Cast_1/_2]]
(1) INVALID_ARGUMENT: Input to reshape is a tensor with 12582912 values, but the requested shape has 786432
[[{{node sequential_78/reshape_30/Reshape}}]]
0 successful operations.
0 derived errors ignored.
[[IteratorGetNext]]
[[IteratorGetNext/_4]]
(1) INVALID_ARGUMENT: 2 root error(s) found.
(0) INVALID_ARGUMENT: Input to reshape is a tensor with 12582912 values, but the requested shape has 786432
[[{{node sequential_78/reshape_30/Reshape}}]]
[[sequential_78/random_contrast_26/stateful_uniform_full_int/Cast_1/_2]]
(1) INVALID_ARGUMENT: Input to reshape is a tensor with 12582912 values, but the requested shape has 786432
[[{{node sequential_78/reshape_30/Reshape}}]]
0 successful operations.
0 derived errors ignored.
[[IteratorGetNext]]
0 successful operations.
0 derived errors ignored. [Op:__inference_train_function_68973]

Hi @matis_alias, you are getting this error because the target shape of tf.keras.layers.Reshape does not include the batch dimension, so passing BATCH_SIZE as part of the shape adds an extra axis instead of removing one. The underlying problem is that your dataset is batched twice: image_dataset_from_directory already returns batches of 16, and prepare() then calls ds.batch(batch_size) again. That is why RandomFlip sees 5-D tensors of shape (None, None, 128, 128, 3), and why your second Reshape complains about 12582912 values (16 × 16 × 128 × 128 × 3) when it expected 786432 (16 × 128 × 128 × 3). Thank you.
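A minimal sketch of the fix, assuming the rest of your pipeline stays as posted: drop the Reshape layer entirely (the random-augmentation layers accept batched 4-D input directly) and batch only once, by removing the ds.batch() call from prepare():

data_augmentation = keras.Sequential([
    layers.RandomFlip("horizontal_and_vertical"),
    layers.RandomRotation(0.2),
    layers.RandomContrast(0.2),
])

def prepare(ds, shuffle=False, augment=False):
    # The dataset coming out of image_dataset_from_directory is already
    # batched (batch_size=16), so there is no ds.batch() call here.
    ds = ds.map(lambda x, y: (resize_and_rescale(x), y),
                num_parallel_calls=AUTOTUNE)

    if shuffle:
        ds = ds.shuffle(1000)

    if augment:
        ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y),
                    num_parallel_calls=AUTOTUNE)

    return ds.prefetch(buffer_size=AUTOTUNE)

# Sanity check after prepare(): each element should be 4-D (batch, height, width, channels).
print(train_ds.element_spec)
# Expected something like: (TensorSpec(shape=(None, 128, 128, 3), ...), TensorSpec(shape=(None,), ...))

Alternatively, pass batch_size=None to image_dataset_from_directory and keep the ds.batch(batch_size) call in prepare(); either way, the images must reach the augmentation layers as a single-batched 4-D tensor. And since train_ds is a tf.data.Dataset, the batch size is already fixed by the pipeline, so you can also drop the batch_size=BATCH_SIZE argument from model.fit.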
