TensorFlow custom layer

I have a custom Keras layer as follows:

import tensorflow as tf
from tensorflow import keras
import numpy as np
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.optimizers import SGD
from keras.layers import Flatten

ig = 0.0
im = 0.5
iw = 0.88
isg = 0.2
ism = 0.2
isw = 0.2
eps=1e-7
pi=3.14
epoch_count = 1000
learning_rate = 0.001
sequence_len = 100

def w_initialization(dimension, activationrange=2):
    if type(dimension) is list and len(dimension) > 1:
        input_dim = dimension[0]
        output_dim = dimension[1]
    elif type(dimension) is list:
        input_dim = 0
        output_dim = dimension[0]
    else:
        input_dim = 0
        output_dim = dimension
    return np.random.normal(loc=iw, scale=isw, size=dimension).astype('float32')

class NMU(keras.layers.Layer):
    def __init__(self, output_dim, input_dim):
        super(NMU, self).__init__()
        self.weight = tf.Variable(w_initialization([input_dim, output_dim]), dtype="float32", name="weight", trainable=True)

    def call(self, inputs):
        W = tf.minimum(tf.maximum(self.weight, 0), 1)
        output = (inputs * W) + 1 - W
        return tf.reduce_prod(output, axis=1)

X_TRAIN = []
X_TEST = []

Y_TRAIN = []
Y_TEST = []

#DATASET
for x in range(1, 10000):
    a1 = np.random.randint(1000)
    a2 = np.random.randint(1000)
    X_TRAIN.append(np.array([np.float32(a1), np.float32(a2)]))
    Y_TRAIN.append(np.array([np.float32(a2*a1), np.float32(a2*a1)]))

for x in range(1, 10000):
    a1 = np.random.randint(1000)
    a2 = np.random.randint(1000)
    X_TEST.append(np.array([np.float32(a1), np.float32(a2)]))
    Y_TEST.append(np.array([np.float32(a2*a1), np.float32(a2*a1)]))

X_TRAIN = np.array(X_TRAIN)
X_TEST = np.array(X_TEST)
Y_TEST = np.array(Y_TEST)
Y_TRAIN = np.array(Y_TRAIN)

#MODEL
model = Sequential()
model.add(NMU(2,2))
model.compile(loss='mse', optimizer=RMSprop(lr=learning_rate))
model.fit(
    validation_data=(X_TEST, Y_TEST),
    x=X_TRAIN,
    y=Y_TRAIN,
    epochs=epoch_count,
    callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0000001, patience=100, verbose=0)]
)

and I get the following error:

tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [32,2] vs. [2,2] [[node sequential/nmu/mul (defined at nalu.py:135) ]] [Op:__inference_train_function_547]
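
For reference, the multiplication inside call is an element-wise product, so the weight has to broadcast against the batched input. A minimal sketch (hypothetical tensors, not the original code) that reproduces the same broadcasting failure with the shapes from the traceback:

import tensorflow as tf

inputs = tf.zeros([32, 2])   # a batch of 32 samples with 2 features (Keras' default batch_size is 32)
weight = tf.zeros([2, 2])    # the layer's weight, created as [input_dim, output_dim]

# Element-wise multiply tries to broadcast [32, 2] against [2, 2]; the leading
# dimensions (32 vs. 2) are incompatible, which is exactly the reported error.
try:
    out = inputs * weight
except tf.errors.InvalidArgumentError as e:
    print(e.message)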

I cannot reproduce your error:

The code you kindly ran has a mistake: the following lines need one more level of indentation, so that they fall inside the for loops:

X_TRAIN.append(np.array([np.float32(a1), np.float32(a2)]))
Y_TRAIN.append(np.array([np.float32(a2*a1), np.float32(a2*a1)]))

X_TEST.append(np.array([np.float32(a1), np.float32(a2)]))
Y_TEST.append(np.array([np.float32(a2*a1), np.float32(a2*a1)]))

Please check the following code:

It seems batch_size has an effect.
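
A rough illustration of that observation (hypothetical tensors, assuming the NMU layer above with input_dim = output_dim = 2): the element-wise product only goes through when the batch dimension happens to line up with the weight's first dimension, so batch_size=2 runs while the default batch_size=32 does not.

import tensorflow as tf

weight = tf.ones([2, 2])             # [input_dim, output_dim], as in the NMU layer

# With batch_size=2 the input batch is [2, 2], which matches the weight shape
# exactly, so the element-wise product succeeds (only by coincidence):
batch_of_2 = tf.ones([2, 2])
print((batch_of_2 * weight).shape)   # (2, 2)

# With the default batch_size=32 the input batch is [32, 2], which cannot
# broadcast against [2, 2] and triggers the error shown above.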