"No gradients provided" error when minimizing a custom loss that calls a SciPy function

I am trying to minimize a custom loss function that calls a SciPy function, using the following code:

import numpy as np
import tensorflow as tf
from scipy import stats

# whittaker1D, y, and y_real are defined elsewhere in my script
var1 = tf.Variable(initial_value=621124.1, dtype=tf.float32, trainable=True)

def my_numpy_func(x):
    # p-value of the Shapiro-Wilk normality test
    return np.float32(stats.shapiro(x)[1])

@tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
def tf_function(input):
    y = tf.numpy_function(my_numpy_func, [input], tf.float32)
    return y

def loss1():
    # push the Shapiro-Wilk p-value of the residuals toward 1
    y_hat = whittaker1D(len(y), lmbd=var1)(y)
    z = y - y_hat
    return 1 - tf_function(z)

def loss():
    # plain L1 loss on the residuals
    y_hat = whittaker1D(len(y), lmbd=var1)(y)
    z = y_real - y_hat
    return tf.reduce_sum(tf.math.abs(z))  # alternative: tf.math.abs(tf.reduce_mean(z))

todos = []
opt = tf.keras.optimizers.Adam(learning_rate=1e3, amsgrad=True)
for i in range(50):
    print('y = {}, x1 = {}'.format(loss1(), var1.numpy()))
    todos.append([loss1().numpy(), var1.numpy()])
    opt.minimize(loss1, var_list=[var1])
todos = np.matrix(todos)

If I use the loss function the code executes correctly, but if I run it with the loss1 function it throws the following message: ValueError: No gradients provided for any variable: (['Variable:0'],). Provided grads_and_vars is ((None, <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=621124.1>),).
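The error arises because tf.numpy_function has no registered gradient: TensorFlow cannot differentiate through stats.shapiro, so during backpropagation the tape returns None for var1. One possible workaround is to attach a gradient yourself with tf.custom_gradient and approximate it numerically. Below is a minimal sketch of that idea, not an official solution; the helper names shapiro_p and finite_diff_grad, the step size eps, and the central-difference scheme are all assumptions introduced for illustration:

import numpy as np
import tensorflow as tf
from scipy import stats

def shapiro_p(x):
    # p-value of the Shapiro-Wilk test, as float32
    return np.float32(stats.shapiro(x)[1])

@tf.custom_gradient
def shapiro_p_tf(x):
    # forward pass: same non-differentiable call as before
    p = tf.numpy_function(shapiro_p, [x], tf.float32)

    def grad(dy):
        eps = 1e-3  # assumed step size; may need tuning

        def finite_diff_grad(x_np):
            # central finite-difference estimate of dp/dx, one element at a time
            x_np = x_np.astype(np.float32)
            g = np.zeros_like(x_np)
            for i in range(x_np.size):
                xp = x_np.copy(); xp.flat[i] += eps
                xm = x_np.copy(); xm.flat[i] -= eps
                g.flat[i] = (shapiro_p(xp) - shapiro_p(xm)) / (2 * eps)
            return g

        g = tf.numpy_function(finite_diff_grad, [x], tf.float32)
        return dy * g

    return p, grad

With this in place, loss1 could call shapiro_p_tf(z) instead of tf_function(z), and opt.minimize should receive a non-None gradient for var1. Be aware that each step then costs O(n) extra Shapiro-Wilk evaluations and the estimated gradient is noisy, since a p-value is a fairly flat target, so a much smaller learning rate than 1e3 may behave better.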


Hello, I'm facing the same issue. I want to use a SciPy function in my loss calculation. Did you find a solution?