How to save this model?

My question is how to save the model I created. I have tried `model.save`, but I get back this error:

`TypeError: Cannot deserialize object of type PositiionalEmbedding. If PositiionalEmbedding is a custom class, please register it using the @keras.saving.register_keras_serializable() decorator.`

I then added `@keras.saving.register_keras_serializable()` and got the same error, and I also tried different ways of loading it, such as:

```python
new_model = tf.keras.models.load_model('model.h5', custom_objects={'CustomLayer': CustomLayer})
```

Here is my code; my tf version is 2.13.0:

```python
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense, Dropout, LayerNormalization, MultiHeadAttention

@keras.saving.register_keras_serializable()
class TransformerEncoderLayer(layers.Layer):
    def __init__(self, embed_dim, dense_dim, num_heads, dropout, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.attention = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.norm1 = LayerNormalization()
        self.norm2 = LayerNormalization()
        self.dense1 = Dense(units=dense_dim, activation="relu")
        self.dense2 = Dense(units=embed_dim)
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)
        self.dropout3 = Dropout(dropout)
        self.supports_masking = True

    def call(self, inputs, mask=None):
        attention_output = self.attention(query=inputs, value=inputs, key=inputs)
        x = self.dropout1(attention_output)
        proj_input = self.norm1(inputs + x)
        x = self.dense1(proj_input)
        x = self.dropout2(x)
        x = self.dense2(x)
        x = self.dropout3(x)
        return self.norm2(proj_input + x)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "embed_dim": self.embed_dim,
                "dense_dim": self.dense_dim,
                "num_heads": self.num_heads,
                "dropout": self.dropout,  # needed so from_config can rebuild the layer
            }
        )
        return config
@keras.saving.register_keras_serializable()
class PositiionalEmbedding(layers.Layer):
    def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
        super().__init__(**kwargs)
        self.token_embedddings = layers.Embedding(
            input_dim=vocab_size, output_dim=embed_dim
        )
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=embed_dim
        )
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim

    def call(self, inputs):
        length = tf.shape(inputs)[-1]  # size of the last axis of the inputs tensor
        positions = tf.range(start=0, limit=length, delta=1)  # start: first value, limit: stopping point, delta: step size
        embedded_tokens = self.token_embedddings(inputs)
        embedded_positions = self.position_embeddings(positions)
        return embedded_tokens + embedded_positions

    def compute_mask(self, inputs, mask=None):
        return tf.math.not_equal(inputs, 0)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "sequence_length": self.sequence_length,
                "vocab_size": self.vocab_size,
                "embed_dim": self.embed_dim,
            }
        )
        return config
@keras.saving.register_keras_serializable()
class TransformerDecoder(layers.Layer):
    def __init__(self, embed_dim, latent_dim, num_heads, dropout, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.latent_dim = latent_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.attention1 = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.attention2 = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
        self.norm1 = LayerNormalization()
        self.norm2 = LayerNormalization()
        self.norm3 = LayerNormalization()
        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)
        self.dropout3 = Dropout(dropout)
        self.dropout4 = Dropout(dropout)
        self.dense1 = Dense(units=latent_dim, activation="relu")
        self.dense2 = Dense(units=embed_dim)
        self.supports_masking = True

    def call(self, inputs, encoder_outputs, mask=None):
        attention_output_1 = self.attention1(
            query=inputs, value=inputs, key=inputs, use_causal_mask=True
        )
        y = self.dropout1(attention_output_1)
        out_1 = self.norm1(inputs + y)
        attention_output_2 = self.attention2(
            query=out_1,
            value=encoder_outputs,
            key=encoder_outputs,
        )
        y = self.dropout2(attention_output_2)
        out_2 = self.norm2(out_1 + y)
        y = self.dense1(out_2)
        y = self.dropout3(y)  # use a distinct dropout layer per site (was dropout2 again)
        y = self.dense2(y)
        y = self.dropout4(y)
        return self.norm3(y + out_2)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "embed_dim": self.embed_dim,
                "latent_dim": self.latent_dim,
                "num_heads": self.num_heads,
                "dropout": self.dropout,  # needed so from_config can rebuild the layer
            }
        )
        return config

# max_sequence_length, vocab_size, embed_dim, latent_dim, num_heads,
# drop_prob, max_length, and out_max_vocab are defined earlier in my script
encoder_inputs = keras.Input(shape=(None,), dtype="int64", name="encoder_inputs")
x = PositiionalEmbedding(max_sequence_length, vocab_size, embed_dim)(encoder_inputs)
encoder_outputs = TransformerEncoderLayer(embed_dim, latent_dim, num_heads, drop_prob)(x)
encoder = keras.Model(encoder_inputs, encoder_outputs)

decoder_inputs = keras.Input(shape=(None,), dtype="int64", name="decoder_inputs")
encoded_seq_inputs = keras.Input(shape=(None, embed_dim), name="decoder_state_inputs")
x = PositiionalEmbedding(max_length, out_max_vocab, embed_dim)(decoder_inputs)
x = TransformerDecoder(embed_dim, latent_dim, num_heads, drop_prob)(x, encoded_seq_inputs)
x = layers.Dropout(0.1)(x)
decoder_outputs = layers.Dense(out_max_vocab, activation="softmax")(x)
decoder = keras.Model([decoder_inputs, encoded_seq_inputs], decoder_outputs)

decoder_outputs = decoder([decoder_inputs, encoder_outputs])
transformer = keras.Model(
    [encoder_inputs, decoder_inputs], decoder_outputs, name="transformer"
)

epochs = 1 # This should be at least 30 for convergence

transformer.summary()

transformer.compile(
    "adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
```

Hi @HunterGamez,

Could you please try the below changes in your code:

```python
# Save the model
transformer.save("my_model.h5")

# Load the model with custom objects
loaded_model = tf.keras.models.load_model("my_model.h5", custom_objects={
    "TransformerEncoderLayer": TransformerEncoderLayer,
    "PositiionalEmbedding": PositiionalEmbedding,
    "TransformerDecoder": TransformerDecoder
})
```

In the custom_objects dictionary, you need to map the names of the custom objects as strings to the actual Python classes.
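Alternatively, since you are on TF 2.13, the native Keras format is worth trying: because the layers are decorated with `@keras.saving.register_keras_serializable()`, they should be discoverable through the serialization registry, and `custom_objects` is then often unnecessary. A minimal sketch under that assumption:

```python
# Save in the native Keras format (available in TF 2.13+)
transformer.save("my_model.keras")

# Registered classes are resolved via the serialization registry,
# so custom_objects should not be required here
loaded_model = tf.keras.models.load_model("my_model.keras")
```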

Make sure that the classes TransformerEncoderLayer, PositiionalEmbedding, and TransformerDecoder are defined and available in the scope where you load the model.
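For instance, if the layers live in a separate module rather than the loading script, import them first (the module name below is hypothetical, for illustration only):

```python
# "my_layers" is a hypothetical module name; import from wherever
# your classes are actually defined, before calling load_model
from my_layers import TransformerEncoderLayer, PositiionalEmbedding, TransformerDecoder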

Also, ensure that the get_config method is properly defined in each custom layer (every argument that `__init__` accepts should appear in the returned config) so that the model can be serialized and deserialized correctly; see the sketch below.
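A minimal sketch of the pattern, using TransformerEncoderLayer's arguments as the example; if an argument such as `dropout` is missing from the config, `from_config` cannot call the constructor and loading fails:

```python
def get_config(self):
    config = super().get_config()
    config.update(
        {
            "embed_dim": self.embed_dim,
            "dense_dim": self.dense_dim,
            "num_heads": self.num_heads,
            "dropout": self.dropout,  # every __init__ argument must be saved
        }
    )
    return config
```

The default `from_config` passes these keys straight to `__init__`, so no extra override is needed as long as the names match.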

Please let me know if these changes are solving the issue.

Thanks.