Eager execution produces worse results than graph execution

Hi,
I am a newcomer to TensorFlow 2.x. I am experimenting with a Gaussian log-likelihood loss function for my time series problem. When I disable eager execution I sacrifice some computation time, but I get better results. However, when I use a custom training loop with nearly the same code, I get much worse results. What may be the problem with my implementation?

Here is my code:

import tensorflow as tf
import math
from tensorflow.keras.models import Model
from tensorflow.keras.layers import LSTM,GRU, Dense, Flatten, Conv1D, Dropout, AveragePooling1D
import tensorflow_probability as tfp

##### DeepAR model #####

class DeepAR(Model):
    def __init__(self, lstm_units, n_steps_in, n_features):
        super().__init__()

        self.lstm_1 = LSTM(lstm_units[0], return_sequences=True, return_state=True,
                           input_shape=(n_steps_in, n_features))
        self.flat = Flatten()
        self.dense = Dense(1, activation="relu")
        self.dense_mu = Dense(1)
        self.dense_sigma = Dense(1, activation="softplus")

    def call(self, inputs, initial_state=None):
        outputs, state_h, state_c = self.lstm_1(inputs, initial_state=initial_state)
        dense_1 = self.dense(outputs)
        dense_1 = self.flat(dense_1)
        mu = self.dense_mu(dense_1)
        sigma = self.dense_sigma(dense_1)
        initial_state = [state_h, state_c]

        return mu, sigma, initial_state
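For reference, I call the model roughly like this (the numbers here are only illustrative, matching my 168-step, 3-feature inputs; this shape check is not part of the training script):

# Illustrative shape check only.
m = DeepAR([10, 30], 168, 3)
dummy_batch = tf.random.normal((36, 168, 3))   # (batch, n_steps_in, n_features)
mu, sigma, state = m(dummy_batch)
print(mu.shape, sigma.shape)                   # both should be (36, 1)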

##### My loss function #####
def log_gaussian_loss(y_true, mu, sigma):
    """
    Gaussian negative log-likelihood loss.
    """
    return -tf.reduce_sum(tfp.distributions.Normal(loc=mu, scale=sigma).log_prob(y_true))
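As a cross-check (just a sketch I use to convince myself the loss is what I think it is, not part of the training script), the same quantity written out by hand:

def log_gaussian_loss_manual(y_true, mu, sigma):
    # Negative log-density of N(mu, sigma), summed over the batch:
    # 0.5*log(2*pi) + log(sigma) + 0.5*((y - mu)/sigma)^2 per element.
    return tf.reduce_sum(
        0.5 * tf.math.log(2.0 * math.pi)
        + tf.math.log(sigma)
        + 0.5 * tf.square((y_true - mu) / sigma)
    )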

##### My training code #####
train_dataset = tf.data.Dataset.from_tensor_slices((t_inputs_dem, t_labs))
train_dataset = train_dataset.shuffle(360).batch(36)

# Prepare the testing dataset.
test_dataset = tf.data.Dataset.from_tensor_slices((testing_inputs_dem, test_labs))
test_dataset = test_dataset.batch(24)

# Model construction
LSTM_UNITS = [10, 30]
EPOCHS = 100
time_step = 168
n_features = 3
model = DeepAR(LSTM_UNITS, time_step, n_features)

# Optimizer
optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.0015)

# Metrics
rmse = tf.keras.metrics.MeanSquaredError()
mse = tf.keras.losses.MeanSquaredError()
train_loss = tf.keras.metrics.Mean(name='train_loss')
######## Normal training loop ##########
epochs = 5
for epoch in range(epochs):
    print("\nStart of epoch %d" % (epoch,))
    # Iterate over the batches of the dataset.
    st = None
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # print(x_batch_train.shape)
        # Open a GradientTape to record the operations run
        # during the forward pass, which enables auto-differentiation.
        y_batch_train = tf.cast(y_batch_train, dtype=tf.float32)
        with tf.GradientTape() as tape:
            mu, sigma, st = model(x_batch_train, initial_state=st, training=True)
            loss = log_gaussian_loss(y_batch_train, mu, sigma)

        # Backward pass
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

    print("Gaussian loss is:", train_loss(loss))
    train_loss.reset_states()
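For comparison, the graph-mode alternative I have in mind is compiling the same step with tf.function, something like this sketch (same logic as one iteration of the eager loop above; I have not verified this exact snippet):

@tf.function
def train_step(x_batch, y_batch, st):
    # Traced into a graph by tf.function instead of running eagerly.
    y_batch = tf.cast(y_batch, dtype=tf.float32)
    with tf.GradientTape() as tape:
        mu, sigma, st = model(x_batch, initial_state=st, training=True)
        loss = log_gaussian_loss(y_batch, mu, sigma)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss, st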

Thank you for your help.