Functions are not converted to graph mode

Hi,
I was trying to implement a conjugate gradient solver for sparse tensors in TensorFlow. The test case is the following snippet:

EAGERMODE = False
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
tf.config.run_functions_eagerly(EAGERMODE)
if EAGERMODE:
    tf.print('running in eager mode')
else:
    tf.print('running in graph mode')
import numpy as np
from scipy import sparse
from scipy.sparse import coo_matrix,csr_matrix,linalg
from time import time
class CONJGRAD:
    def __init__(self,A:tf.sparse.SparseTensor,b:tf.Tensor,x0:tf.Tensor,precond = None):
        self.__A = A
        self.__b = tf.Variable(b,trainable=False)
        self.__x0 = x0
        self.__precond = precond
        self.__X       = tf.Variable(x0,name="X")
        self.__r       = tf.Variable(x0,name="r")
        self.__p       = tf.Variable(x0,name="p")
        self.__Ap      = tf.Variable(x0,name="Ap")
        self.__z       = None
        if precond is not None:
            self.__z       = tf.Variable(x0,name="z")
        self.__rsold = tf.Variable(0.0,dtype=tf.float32,name="rsold")
        self.__rzold = tf.Variable(0.0,dtype=tf.float32,name="rzold")
        self.__rsnew = tf.Variable(0.0,dtype=tf.float32,name="rsnew")
        self.__rznew = tf.Variable(0.0,dtype=tf.float32,name="rznew")
        self.__alpha = tf.Variable(0.0,dtype=tf.float32,name="alpha")
        self.__beta  = tf.Variable(0.0,dtype=tf.float32,name="beta")
            
    @tf.function
    def __initialize(self):
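        # r0 = b - A x0 ; p0 = z0 = M^{-1} r0 (p0 = r0 when no preconditioner)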
        self.__r.assign(self.__b)
        self.__r.assign_sub(tf.sparse.sparse_dense_matmul(self.__A,self.__x0,name="Ax0"))
        self.__rsold.assign(tf.reduce_sum(tf.multiply(self.__r, self.__r,name="rsq"),name="rsqnorm")  )
        if self.__precond:
            self.__z.assign(self.__precond.solve_precond_system(self.__r))
            self.__p.assign(self.__z)
            self.__rzold.assign(tf.reduce_sum(tf.multiply(self.__r, self.__z)) )   # rz0 = (r, z)
        else:
            self.__p.assign(self.__r)
            self.__rzold.assign(self.__rsold)
        self.__rsnew.assign(1.e32)
        self.__rznew.assign(1.e32)

    @tf.function
    def __iterate(self):
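        # one (preconditioned) CG step:
        #   alpha = (r,z)/(p,Ap) ; x += alpha*p ; r -= alpha*Ap
        #   z = M^{-1} r ; beta = (r,z)_new/(r,z)_old ; p = z + beta*p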
        self.__Ap.assign( tf.sparse.sparse_dense_matmul(self.__A , self.__p,name="Ap"))
        self.__alpha.assign(self.__rzold /tf.reduce_sum(tf.multiply(self.__p,self.__Ap) ))
        self.__X.assign_add(self.__alpha * self.__p)             
        self.__r.assign_sub(self.__alpha * self.__Ap)
        self.__rsnew.assign(tf.reduce_sum(tf.multiply(self.__r,self.__r)))
        if self.__precond:
            self.__z.assign(self.__precond.solve_precond_system(self.__r))
            self.__rznew.assign( tf.reduce_sum(tf.multiply(self.__r, self.__z)) )
        else:
            self.__rznew.assign(self.__rsnew)
        self.__beta.assign(self.__rznew / self.__rzold)
        if self.__precond:
            self.__p.assign(self.__z + self.__beta * self.__p)
        else:
            self.__p.assign(self.__r + self.__beta * self.__p)
        self.__rsold.assign(self.__rsnew)
        self.__rzold.assign(self.__rznew)
    
    def solve(self,niter:int=100,toll:float=1.e-5) -> list:
        """
        solve(niter,toll)
        returns 
        x:  solution of the linear problem
        it: the number of iterations performed
        r:  the residual norm
        """
        self.__initialize()
        for it in range(1,1+niter):
            self.__iterate()
            if (tf.sqrt(self.__rsnew) < toll):
                break
        return([self.__X,it,tf.sqrt(self.__rsnew)])
        
if __name__=='__main__':
    npt      = 200
    fmat     = sparse.rand(npt, npt, density=0.04,random_state=1234)
    A0       = csr_matrix(fmat.transpose() * fmat+2.0*np.eye(npt))   # SPD test matrix: F^T F + 2 I
    Xtrue    = np.ones(shape=(npt,1))
    b0       = A0*Xtrue
    Acoo     = A0.tocoo()
    indices  = np.hstack([Acoo.row[:,np.newaxis].astype(np.int64), Acoo.col[:,np.newaxis].astype(np.int64)])   # SparseTensor indices are int64
    A        = tf.sparse.SparseTensor(indices=indices, values=Acoo.data.astype(np.float32), dense_shape=[npt, npt])
    b        = tf.constant(b0.astype(np.float32))
    CG       = CONJGRAD(A,b,b)
    t0       = time()
    u0,it0,r0 = CG.solve(niter=4*npt,toll=1.e-8)
    tesp = time()-t0
    err0 = np.linalg.norm(Xtrue-u0.numpy())
    print('|x-xtrue|: {:3.2e}\t\titerations: {}, residual: {:3.2e}\telapsed: {:3.2f} s'.format(err0,it0,r0.numpy(),tesp))
    

The algorithm runs smoothly, with no warnings, in eager mode.
However, when I switch to graph mode the code still works, but it emits the following warnings:

WARNING:tensorflow:AutoGraph could not transform <bound method CONJGRAD.__initialize of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f362c4a7070>> and will run it as-is.
Cause: mangled names are not yet supported
To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert
WARNING:tensorflow:AutoGraph could not transform <bound method CONJGRAD.__iterate of <tensorflow.python.eager.function.TfMethodTarget object at 0x7f362c4a7520>> and will run it as-is.
Cause: mangled names are not yet supported
To silence this warning, decorate the function with @tf.autograph.experimental.do_not_convert
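
If it helps to pinpoint the cause: inside a class body, Python rewrites every name with two leading underscores to _ClassName__name (name mangling), and AutoGraph reports exactly that as unsupported. The following minimal sketch (my own reduction, with made-up names) should reproduce the same warning:

import tensorflow as tf

class Demo:
    @tf.function
    def __step(self):            # Python mangles this to _Demo__step
        return tf.constant(1.0)

    def run(self):
        return self.__step()     # actually resolves to self._Demo__step()

Demo().run()   # emits the same "mangled names are not yet supported" warning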

I would like the two methods to actually be converted and run in graph mode rather than just silencing the warnings. Is there a way to achieve this?
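
For completeness, the silencing route suggested by the warning would look like the sketch below; it is not what I want, since it only suppresses the message: the body is still traced as-is, without AutoGraph conversion.

import tensorflow as tf

class Demo:
    @tf.function
    @tf.autograph.experimental.do_not_convert
    def __step(self):            # warning silenced, but still no AutoGraph conversion
        return tf.constant(1.0)

    def run(self):
        return self.__step()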

Thanks,

Kind Regards,

Cesare