Input 0 of layer sequential_3 is incompatible with the layer

TensorFlow version: 2.2.0

Training with test_dataset fails, but training with train_dataset works fine. I don't know why.

Code:

train_size = int(0.7 * image_count)
val_size = int(0.15 * image_count)
test_size = int(0.15 * image_count)

train_dataset = dataset.take(train_size)
test_dataset = dataset.skip(train_size)
val_dataset = dataset.skip(val_size)

train_dataset, test_dataset, val_dataset
(<TakeDataset shapes: ((256, 256, 3), ()), types: (tf.float32, tf.int32)>,
<SkipDataset shapes: ((256, 256, 3), ()), types: (tf.float32, tf.int32)>,
<SkipDataset shapes: ((256, 256, 3), ()), types: (tf.float32, tf.int32)>)

train_dataset = dataset.skip(test_size)
test_dataset = dataset.take(test_size)

test_dataset, train_dataset
(<TakeDataset shapes: ((256, 256, 3), ()), types: (tf.float32, tf.int32)>,
<SkipDataset shapes: ((256, 256, 3), ()), types: (tf.float32, tf.int32)>)
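
For reference, take and skip can be chained so the three splits do not overlap; a minimal sketch, assuming the same dataset of (image, label) pairs as above:

train_dataset = dataset.take(train_size)
val_dataset = dataset.skip(train_size).take(val_size)
test_dataset = dataset.skip(train_size + val_size)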

BATCH_SIZE = 32
train_dataset = train_dataset.shuffle(train_size).repeat().batch(BATCH_SIZE)
test_dataset = train_dataset.shuffle(test_size).repeat().batch(BATCH_SIZE)
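
Note that the second line above shuffles and re-batches train_dataset, which is already batched, instead of the test_dataset split. A sketch of what was presumably intended (assumption: evaluation data is batched once and is not shuffled or repeated):

test_dataset = test_dataset.batch(BATCH_SIZE)  # batch the test split itself, only once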

model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(64, (3, 3), input_shape=(256, 256, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same'))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(1024, activation='relu'))
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
model.summary()

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['acc'])

steps_per_epoch = train_size // BATCH_SIZE
val_steps = test_size // BATCH_SIZE

his = model.fit(test_dataset,
                epochs=2,
                steps_per_epoch=val_steps,
                )

Training error:
Epoch 1/2

ValueError Traceback (most recent call last)
Input In [87], in <cell line: 1>()
----> 1 his = model.fit(test_dataset,
2 epochs=2,
3 steps_per_epoch=val_steps,
4 )

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:66, in enable_multi_worker.<locals>._method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
68 # Running inside run_distribute_coordinator already.
69 if dc_context.get_current_worker_context():

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:848, in Model.fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
841 with traceme.TraceMe(
842 'TraceContext',
843 graph_type='train',
844 epoch_num=epoch,
845 step_num=step,
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
---> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
851 # TODO(b/150292341): Allow multiple async steps here.
852 if not data_handler.inferred_steps:

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:580, in Function.__call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
---> 580 result = self._call(*args, **kwds)
582 if tracing_count == self._get_tracing_count():
583 self._call_counter.called_without_tracing()

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:627, in Function._call(self, *args, **kwds)
624 try:
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
---> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
630 # interestingly an exception was raised) so we no longer need a lock.
631 self._lock.release()

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:505, in Function._initialize(self, args, kwds, add_initializers_to)
502 self._lifted_initializer_graph = lifted_initializer_graph
503 self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
504 self._concrete_stateful_fn = (
---> 505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
506 *args, **kwds))
508 def invalid_creator_scope(*unused_args, **unused_kwds):
509 """Disables variable creation."""

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/eager/function.py:2446, in Function._get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
---> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/eager/function.py:2777, in Function._maybe_define_function(self, args, kwargs)
2774 return self._define_function_with_shape_relaxation(args, kwargs)
2776 self._function_cache.missed.add(call_context_key)
---> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/eager/function.py:2657, in Function._create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2652 missing_arg_names = [
2653 "%s_%d" % (arg, i) for i, arg in enumerate(missing_arg_names)
2654 ]
2655 arg_names = base_arg_names + missing_arg_names
2656 graph_function = ConcreteFunction(
---> 2657 func_graph_module.func_graph_from_py_func(
2658 self._name,
2659 self._python_function,
2660 args,
2661 kwargs,
2662 self.input_signature,
2663 autograph=self._autograph,
2664 autograph_options=self._autograph_options,
2665 arg_names=arg_names,
2666 override_flat_arg_shapes=override_flat_arg_shapes,
2667 capture_by_value=self._capture_by_value),
2668 self._function_attributes,
2669 # Tell the ConcreteFunction to clean up its graph once it goes out of
2670 # scope. This is not the default behavior since it gets used in some
2671 # places (like Keras) where the FuncGraph lives longer than the
2672 # ConcreteFunction.
2673 shared_func_graph=False)
2674 return graph_function

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:981, in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
978 else:
979 _, original_func = tf_decorator.unwrap(python_func)
---> 981 func_outputs = python_func(*func_args, **func_kwargs)
983 # invariant: func_outputs contains only Tensors, CompositeTensors,
984 # TensorArrays and Nones.
985 func_outputs = nest.map_structure(convert, func_outputs,
986 expand_composites=True)

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:441, in Function._defun_with_scope.<locals>.wrapped_fn(*args, **kwds)
426 # We register a variable creator with reduced priority. If an outer
427 # variable creator is just modifying keyword arguments to the variable
428 # constructor, this will work harmoniously. Since the scope registered
(...)
436 # better than the alternative, tracing the initialization graph but giving
437 # the user a variable type they didn't want.
438 with ops.get_default_graph()._variable_creator_scope(scope, priority=50): # pylint: disable=protected-access
439 # wrapped allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
---> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)

File ~/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:968, in func_graph_from_py_func.<locals>.wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
---> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise

ValueError: in user code:

/root/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:571 train_function  *
    outputs = self.distribute_strategy.run(
/root/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:951 run  **
    return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/root/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
    return self._call_for_each_replica(fn, args, kwargs)
/root/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
    return fn(*args, **kwargs)
/root/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:531 train_step  **
    y_pred = self(x, training=True)
/root/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:885 __call__
    input_spec.assert_input_compatibility(self.input_spec, inputs,
/root/anaconda3/envs/ks/lib/python3.8/site-packages/tensorflow/python/keras/engine/input_spec.py:176 assert_input_compatibility
    raise ValueError('Input ' + str(input_index) + ' of layer ' +

ValueError: Input 0 of layer sequential_3 is incompatible with the layer: expected ndim=4, found ndim=5. Full shape received: [None, None, 256, 256, 3]

Can you look at the test dataset's shape before fitting the model?

train_dataset = dataset.take(train_size)
test_dataset = dataset.skip(train_size)
val_dataset = dataset.skip(val_size)

train_dataset, test_dataset, val_dataset
(<TakeDataset shapes: ((256, 256, 3), ()), types: (tf.float32, tf.int32)>,
<SkipDataset shapes: ((256, 256, 3), ()), types: (tf.float32, tf.int32)>,
<SkipDataset shapes: ((256, 256, 3), ()), types: (tf.float32, tf.int32)>)
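
Those are the per-example shapes before batching. A quick way to check the shapes the model will actually see, assuming the pipeline above, is to print each dataset's element_spec right before calling fit:

print(train_dataset.element_spec)
# (TensorSpec(shape=(None, 256, 256, 3), dtype=tf.float32, name=None),
#  TensorSpec(shape=(None,), dtype=tf.int32, name=None))
print(test_dataset.element_spec)
# (TensorSpec(shape=(None, None, 256, 256, 3), dtype=tf.float32, name=None),
#  TensorSpec(shape=(None, None), dtype=tf.int32, name=None))
# The extra leading dimension appears because test_dataset was built from
# the already-batched train_dataset and then batched a second time; that is
# the ndim=5 reported in the error message.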