Hi.
I'm trying to implement a variational autoencoder and I'm facing the following problem:
as I increase the dimensionality of the latent space, the loss function increases. If I understand it correctly, the exact opposite should be happening. Or am I missing something?
Latent space → loss function
32->0.105
64->0.217
96->0.291
128->0.524
Thanks in advance
I have uploaded my code below.
# Latent prior: a standard normal over each latent coordinate, wrapped in
# Independent so all coordinates form one event of size latent_dimentions.
standard_normal = tfd.Normal(loc=tf.zeros(latent_dimentions), scale=1)
prior = tfd.Independent(standard_normal, reinterpreted_batch_ndims=1)
# Encoder: three identical strided convolutions, then a dense layer that
# emits the parameters of a full-covariance Gaussian posterior over the
# latent code. The KL divergence of that posterior to `prior` is added to
# the training loss via the activity regularizer (scaled by kl_weight).
conv_stack = [
    tfkl.Conv2D(filters=filters_number,
                kernel_size=3,
                strides=stride,
                activation="relu")
    for _ in range(3)
]
encoder = tfk.Sequential([
    tfkl.InputLayer(input_shape=(138, 138, 1)),
    *conv_stack,
    tfkl.Flatten(),
    # Dense output sized to hold mean + lower-triangular scale parameters.
    tfkl.Dense(units=tfpl.MultivariateNormalTriL.params_size(latent_dimentions),
               activation=None),
    # Distribution layer: converts the dense output to a distribution and,
    # when a tensor is needed downstream, draws a sample from it.
    tfpl.MultivariateNormalTriL(
        event_size=latent_dimentions,
        convert_to_tensor_fn=tfd.Distribution.sample,
        activity_regularizer=tfpl.KLDivergenceRegularizer(prior,
                                                          weight=kl_weight)),
])
# Decoder: maps a latent vector back to a single-channel image.
# BUG FIX: the original Sequential layer list was never closed — the
# trailing `])` was missing, which is a syntax error.
decoder = tfk.Sequential([
    tfkl.InputLayer(input_shape=latent_dimentions),
    # Project the latent vector and reshape it into a 16x16 feature map.
    tfkl.Dense(16 * 16 * latent_dimentions, activation=None),
    tfkl.Reshape((16, 16, latent_dimentions)),
    # Transposed convolutions with "valid" padding upsample
    # 16 -> 33 -> 68 -> 138, matching the (138, 138, 1) encoder input
    # (assumes the encoder's `stride` is 2 — TODO confirm).
    tfkl.Conv2DTranspose(filters=filters_number,
                         kernel_size=3,
                         strides=2,
                         activation="relu"),
    tfkl.Conv2DTranspose(filters=filters_number,
                         kernel_size=4,
                         strides=2,
                         activation="relu"),
    tfkl.Conv2DTranspose(filters=filters_number,
                         kernel_size=4,
                         strides=2,
                         activation="relu"),
    # Final 1-channel projection; no activation, so the reconstruction is
    # an unconstrained real-valued image (paired with an MSE loss).
    tfkl.Conv2DTranspose(filters=1,
                         kernel_size=3,
                         strides=1,
                         padding="same",
                         activation=None),
])
# Full VAE: the decoder consumes the encoder's (sampled) latent output.
# Reconstruction error is MSE; the KL term is added separately by the
# encoder's activity regularizer.
vae_optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
vae_loss = tf.keras.losses.MeanSquaredError()
VAE = tfk.Model(inputs=encoder.inputs, outputs=decoder(encoder.outputs))
VAE.compile(optimizer=vae_optimizer, loss=vae_loss)