AttributeError: Layer retrieval_1 has no inbound nodes

I am getting the error below:

AttributeError: Layer retrieval_1 has no inbound nodes.

import os
import pprint
import tempfile

from typing import Dict, Text

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs

ratings = tfds.load('movielens/100k-ratings', split="train")
movies = tfds.load('movielens/100k-movies', split="train")

# Select the basic features.

ratings = ratings.map(lambda x: {
    "movie_title": x["movie_title"],
    "user_id": x["user_id"],
    "user_rating": x["user_rating"],
})
movies = movies.map(lambda x: x["movie_title"])
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)

train = shuffled.take(80_000)
test = shuffled.skip(80_000).take(20_000)

movie_titles = movies.batch(1_000)
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])

unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))

class MovielensModel(tfrs.models.Model):

  def __init__(self, rating_weight: float, retrieval_weight: float) -> None:
    # We take the loss weights in the constructor: this allows us to instantiate
    # several model objects with different loss weights.

    super().__init__()

    embedding_dimension = 32

    # User and movie models.
    self.movie_model: tf.keras.layers.Layer = tf.keras.Sequential([
      tf.keras.layers.StringLookup(
        vocabulary=unique_movie_titles, mask_token=None),
      tf.keras.layers.Embedding(len(unique_movie_titles) + 1, embedding_dimension)
    ])
    self.user_model: tf.keras.layers.Layer = tf.keras.Sequential([
      tf.keras.layers.StringLookup(
        vocabulary=unique_user_ids, mask_token=None),
      tf.keras.layers.Embedding(len(unique_user_ids) + 1, embedding_dimension)
    ])

    # A small model to take in user and movie embeddings and predict ratings.
    # We can make this as complicated as we want as long as we output a scalar
    # as our prediction.
    self.rating_model = tf.keras.Sequential([
        tf.keras.layers.Dense(256, activation="relu"),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(1),
    ])

    # The tasks.
    self.rating_task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
        loss=tf.keras.losses.MeanSquaredError(),
        metrics=[tf.keras.metrics.RootMeanSquaredError()],
    )
    self.retrieval_task: tf.keras.layers.Layer = tfrs.tasks.Retrieval(
        metrics=tfrs.metrics.FactorizedTopK(
            candidates=movies.batch(128).map(self.movie_model)
        )
    )

    # The loss weights.
    self.rating_weight = rating_weight
    self.retrieval_weight = retrieval_weight

  def call(self, features: Dict[Text, tf.Tensor]) -> tf.Tensor:
    # We pick out the user features and pass them into the user model.
    user_embeddings = self.user_model(features["user_id"])
    # And pick out the movie features and pass them into the movie model.
    movie_embeddings = self.movie_model(features["movie_title"])

    return (
        user_embeddings,
        movie_embeddings,
        # We apply the multi-layered rating model to a concatenation of
        # user and movie embeddings.
        self.rating_model(
            tf.concat([user_embeddings, movie_embeddings], axis=1)
        ),
    )

  def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:

    ratings = features.pop("user_rating")

    user_embeddings, movie_embeddings, rating_predictions = self(features)

    # We compute the loss for each task.
    rating_loss = self.rating_task(
        labels=ratings,
        predictions=rating_predictions,
    )
    retrieval_loss = self.retrieval_task(user_embeddings, movie_embeddings)

    # And combine them using the loss weights.
    return (self.rating_weight * rating_loss
            + self.retrieval_weight * retrieval_loss)

model = MovielensModel(rating_weight=1.0, retrieval_weight=0.0)
model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1))
cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()
model.fit(cached_train, epochs=3)
metrics = model.evaluate(cached_test, return_dict=True)

model = MovielensModel(rating_weight=0.0, retrieval_weight=1.0)
model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1))
model.fit(cached_train, epochs=3)
metrics = model.evaluate(cached_test, return_dict=True)

print(f"Retrieval top-100 accuracy: {metrics[‘factorized_top_k/top_100_categorical_accuracy’]:.3f}.")
print(f"Ranking RMSE: {metrics[‘root_mean_squared_error’]:.3f}.")
import pandas as pd
import shap
model.summary()
class SHAPPredictor:

  def __init__(
      self,
      model: tfrs.models.Model,
      tensor_slice_specs: dict,
      target_name: str):

    self.model = model
    self.tensor_slice_specs = tensor_slice_specs
    self.target_name = target_name

  # Convert typed model inputs to the uniformly-typed input SHAP expects.
  def convert_to_shap_input(
      self,
      ds: tf.data.Dataset,
      sample_size: int):

    sample_ds = ds.unbatch().take(sample_size)
    x_sample = pd.DataFrame(sample_ds.as_numpy_iterator()).drop(self.target_name, axis=1)
    input_df = x_sample.applymap(lambda x: str(x) if isinstance(x, int) else x.decode("utf-8", "ignore"))

    return input_df

  # Convert uniformly-typed input to the typed input the model expects.
  def convert_to_model_input(
      self,
      X: np.ndarray):

    num_columns = X.shape[1]
    rejiggered = [X[:, i] for i in range(num_columns)]
    tensor_slices = {
      name: tf.convert_to_tensor(rejiggered[index], dtype=dtype)
      for index, (name, dtype) in enumerate(self.tensor_slice_specs.items())
      if index < num_columns
    }
    input_ds = tf.data.Dataset.from_tensor_slices(tensor_slices)
    # print(list(input_ds.batch(5).as_numpy_iterator()))

    return input_ds

  # Adapt input and invoke the model to make predictions.
  def predict(
      self,
      X: np.ndarray):

    input_ds = self.convert_to_model_input(X)
    # The model's call() returns (user_embeddings, movie_embeddings, rating_predictions).
    # _, _, predictions = model.predict(input_ds.batch(50))
    predictions = self.model.predict(input_ds.batch(50))

    return predictions


train_sample_size = 10000
test_sample_size = 100
background_sample_size = 50
shap_sample_size = 100
print(train)
tensor_slice_specs = {'movie_title': tf.string, 'user_id': tf.string, 'user_rating': tf.float32}

shap_predictor = SHAPPredictor(model, tensor_slice_specs, target_name="user_rating")

x_train = shap_predictor.convert_to_shap_input(cached_train, sample_size = train_sample_size)

x_test = shap_predictor.convert_to_shap_input(cached_test, sample_size = test_sample_size)

background = shap.sample(x_train, background_sample_size)

explainer = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background)
print("Done.")

print("Using explainer to determine SHAP values.")
shap_values = explainer.shap_values(x_test, nsamples=shap_sample_size)

Model: "movielens_model_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #
=================================================================
 sequential_3 (Sequential)   (None, 32)                53280
 sequential_4 (Sequential)   (None, 32)                30208
 sequential_5 (Sequential)   (None, 1)                 49665
 ranking_1 (Ranking)         multiple                  0
 retrieval_1 (Retrieval)     multiple                  1
=================================================================
Total params: 133,154
Trainable params: 133,153
Non-trainable params: 1
_________________________________________________________________


<TakeDataset element_spec={‘movie_title’: TensorSpec(shape=(), dtype=tf.string, name=None), ‘user_id’: TensorSpec(shape=(), dtype=tf.string, name=None), ‘user_rating’: TensorSpec(shape=(), dtype=tf.float32, name=None)}>

AttributeError                            Traceback (most recent call last)
<ipython-input-...> in <module>()
     77
     78
---> 79 explainer = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background)
     80 print("Done.")
     81

/usr/local/lib/python3.7/dist-packages/keras/engine/base_layer.py in output(self)
   1908     """
   1909     if not self._inbound_nodes:
-> 1910       raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')
   1911     return self._get_node_attribute_at_index(0, 'output_tensors', 'output')
   1912

AttributeError: Layer retrieval_1 has no inbound nodes.
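
As far as I can tell, `model.layers[-1]` here resolves to the `retrieval_1` task layer shown last in the summary above; it is only ever called imperatively inside `compute_loss`, so Keras never records inbound nodes for it. A minimal sketch (my own repro attempt, not from the tutorial) that triggers the same error with any layer that was never wired into a functional graph:

import tensorflow as tf

# An uncalled layer has no inbound nodes, so accessing .output raises the
# same AttributeError as in the traceback above.
layer = tf.keras.layers.Dense(1)
try:
    _ = layer.output
except AttributeError as e:
    print(e)  # e.g. "Layer dense has no inbound nodes."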

Hi mlbot89,

It's hard to read your post due to the formatting.
Are you following a tutorial? Is this code in a notebook?
If so, it would be easier for other people here in the forum to help you if you shared it too.

Adding @Wei_Wei as the recommenders expert

# -*- coding: utf-8 -*-
"""multitask.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/github/tensorflow/recommenders/blob/main/docs/examples/multitask.ipynb

##### Copyright 2020 The TensorFlow Authors.
"""

#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""# Multi-task recommenders

<table class="tfo-notebook-buttons" align="left">
  <td>
    <a target="_blank" href="https://www.tensorflow.org/recommenders/examples/multitask"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
  </td>
  <td>
    <a target="_blank" href="https://colab.research.google.com/github/tensorflow/recommenders/blob/main/docs/examples/multitask.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
  </td>
  <td>
    <a target="_blank" href="https://github.com/tensorflow/recommenders/blob/main/docs/examples/multitask.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
  </td>
  <td>
    <a href="https://storage.googleapis.com/tensorflow_docs/recommenders/docs/examples/multitask.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
  </td>
</table>

In the [basic retrieval tutorial](basic_retrieval) we built a retrieval system using movie watches as positive interaction signals.

In many applications, however, there are multiple rich sources of feedback to draw upon. For example, an e-commerce site may record user visits to product pages (abundant, but relatively low signal), image clicks, adding to cart, and, finally, purchases. It may even record post-purchase signals such as reviews and returns.

Integrating all these different forms of feedback is critical to building systems that users love to use, and that do not optimize for any one metric at the expense of overall performance.

In addition, building a joint model for multiple tasks may produce better results than building a number of task-specific models. This is especially true where some data is abundant (for example, clicks), and some data is sparse (purchases, returns, manual reviews). In those scenarios, a joint model may be able to use representations learned from the abundant task to improve its predictions on the sparse task via a phenomenon known as [transfer learning](https://en.wikipedia.org/wiki/Transfer_learning). For example, [this paper](https://openreview.net/pdf?id=SJxPVcSonN) shows that a model predicting explicit user ratings from sparse user surveys can be substantially improved by adding an auxiliary task that uses abundant click log data.

In this tutorial, we are going to build a multi-objective recommender for Movielens, using both implicit (movie watches) and explicit signals (ratings).

## Imports


Let's first get our imports out of the way.
"""

!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow-datasets
!pip install shap

import os
import pprint
import tempfile

from typing import Dict, Text

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds

import tensorflow_recommenders as tfrs

"""## Preparing the dataset

We're going to use the Movielens 100K dataset.
"""

ratings = tfds.load('movielens/100k-ratings', split="train")
movies = tfds.load('movielens/100k-movies', split="train")

# Select the basic features.
ratings = ratings.map(lambda x: {
    "movie_title": x["movie_title"],
    "user_id": x["user_id"],
    "user_rating": x["user_rating"],
})
movies = movies.map(lambda x: x["movie_title"])

"""And repeat our preparations for building vocabularies and splitting the data into a train and a test set:"""

# Randomly shuffle data and split between train and test.
tf.random.set_seed(42)
shuffled = ratings.shuffle(100_000, seed=42, reshuffle_each_iteration=False)

train = shuffled.take(80_000)
test = shuffled.skip(80_000).take(20_000)

movie_titles = movies.batch(1_000)
user_ids = ratings.batch(1_000_000).map(lambda x: x["user_id"])

unique_movie_titles = np.unique(np.concatenate(list(movie_titles)))
unique_user_ids = np.unique(np.concatenate(list(user_ids)))

"""## A multi-task model

There are two critical parts to multi-task recommenders:

1. They optimize for two or more objectives, and so have two or more losses.
2. They share variables between the tasks, allowing for transfer learning.

In this tutorial, we will define our models as before, but instead of having  a single task, we will have two tasks: one that predicts ratings, and one that predicts movie watches.

The user and movie models are as before:

```python
user_model = tf.keras.Sequential([
  tf.keras.layers.StringLookup(
      vocabulary=unique_user_ids, mask_token=None),
  # We add 1 to account for the unknown token.
  tf.keras.layers.Embedding(len(unique_user_ids) + 1, embedding_dimension)
])

movie_model = tf.keras.Sequential([
  tf.keras.layers.StringLookup(
      vocabulary=unique_movie_titles, mask_token=None),
  tf.keras.layers.Embedding(len(unique_movie_titles) + 1, embedding_dimension)
])
```

However, now we will have two tasks. The first is the rating task:

```python
tfrs.tasks.Ranking(
    loss=tf.keras.losses.MeanSquaredError(),
    metrics=[tf.keras.metrics.RootMeanSquaredError()],
)
```

Its goal is to predict the ratings as accurately as possible.

The second is the retrieval task:

```python
tfrs.tasks.Retrieval(
    metrics=tfrs.metrics.FactorizedTopK(
        candidates=movies.batch(128)
    )
)
```

As before, this task's goal is to predict which movies the user will or will not watch.

## Putting it together

We put it all together in a model class.

The new component here is that - since we have two tasks and two losses - we need to decide on how important each loss is. We can do this by giving each of the losses a weight, and treating these weights as hyperparameters. If we assign a large loss weight to the rating task, our model is going to focus on predicting ratings (but still use some information from the retrieval task); if we assign a large loss weight to the retrieval task, it will focus on retrieval instead.
"""

class MovielensModel(tfrs.models.Model):

  def __init__(self, rating_weight: float, retrieval_weight: float) -> None:
    # We take the loss weights in the constructor: this allows us to instantiate
    # several model objects with different loss weights.

    super().__init__()

    embedding_dimension = 32

    # User and movie models.
    self.movie_model: tf.keras.layers.Layer = tf.keras.Sequential([
      tf.keras.layers.StringLookup(
        vocabulary=unique_movie_titles, mask_token=None),
      tf.keras.layers.Embedding(len(unique_movie_titles) + 1, embedding_dimension)
    ])
    self.user_model: tf.keras.layers.Layer = tf.keras.Sequential([
      tf.keras.layers.StringLookup(
        vocabulary=unique_user_ids, mask_token=None),
      tf.keras.layers.Embedding(len(unique_user_ids) + 1, embedding_dimension)
    ])

    # A small model to take in user and movie embeddings and predict ratings.
    # We can make this as complicated as we want as long as we output a scalar
    # as our prediction.
    self.rating_model = tf.keras.Sequential([
        tf.keras.layers.Dense(256, activation="relu"),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dense(1),
    ])

    # The tasks.
    self.rating_task: tf.keras.layers.Layer = tfrs.tasks.Ranking(
        loss=tf.keras.losses.MeanSquaredError(),
        metrics=[tf.keras.metrics.RootMeanSquaredError()],
    )
    self.retrieval_task: tf.keras.layers.Layer = tfrs.tasks.Retrieval(
        metrics=tfrs.metrics.FactorizedTopK(
            candidates=movies.batch(128).map(self.movie_model)
        )
    )

    # The loss weights.
    self.rating_weight = rating_weight
    self.retrieval_weight = retrieval_weight

  def call(self, features: Dict[Text, tf.Tensor]) -> tf.Tensor:
    # We pick out the user features and pass them into the user model.
    user_embeddings = self.user_model(features["user_id"])
    # And pick out the movie features and pass them into the movie model.
    movie_embeddings = self.movie_model(features["movie_title"])

    return (
        user_embeddings,
        movie_embeddings,
        # We apply the multi-layered rating model to a concatenation of
        # user and movie embeddings.
        self.rating_model(
            tf.concat([user_embeddings, movie_embeddings], axis=1)
        ),
    )

  def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:

    ratings = features.pop("user_rating")

    user_embeddings, movie_embeddings, rating_predictions = self(features)

    # We compute the loss for each task.
    rating_loss = self.rating_task(
        labels=ratings,
        predictions=rating_predictions,
    )
    retrieval_loss = self.retrieval_task(user_embeddings, movie_embeddings)

    # And combine them using the loss weights.
    return (self.rating_weight * rating_loss
            + self.retrieval_weight * retrieval_loss)

“”"### Rating-specialized model

Depending on the weights we assign, the model will encode a different balance of the tasks. Let’s start with a model that only considers ratings.
“”"

model = MovielensModel(rating_weight=1.0, retrieval_weight=0.0)
model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1))

cached_train = train.shuffle(100_000).batch(8192).cache()
cached_test = test.batch(4096).cache()

model.fit(cached_train, epochs=3)
metrics = model.evaluate(cached_test, return_dict=True)

print(f"Retrieval top-100 accuracy: {metrics[‘factorized_top_k/top_100_categorical_accuracy’]:.3f}.")
print(f"Ranking RMSE: {metrics[‘root_mean_squared_error’]:.3f}.")

“”"The model does OK on predicting ratings (with an RMSE of around 1.11), but performs poorly at predicting which movies will be watched or not: its accuracy at 100 is almost 4 times worse than a model trained solely to predict watches.

Retrieval-specialized model

Let’s now try a model that focuses on retrieval only.
“”"

model = MovielensModel(rating_weight=0.0, retrieval_weight=1.0)
model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1))

model.fit(cached_train, epochs=3)
metrics = model.evaluate(cached_test, return_dict=True)

print(f"Retrieval top-100 accuracy: {metrics[‘factorized_top_k/top_100_categorical_accuracy’]:.3f}.")
print(f"Ranking RMSE: {metrics[‘root_mean_squared_error’]:.3f}.")

“”"We get the opposite result: a model that does well on retrieval, but poorly on predicting ratings.

Joint model

Let’s now train a model that assigns positive weights to both tasks.
“”"

#model = MovielensModel(rating_weight=1.0, retrieval_weight=1.0)
#model.compile(optimizer=tf.keras.optimizers.Adagrad(0.1))
import pandas as pd
import shap
model.summary()
class SHAPPredictor:

  def __init__(
      self,
      model: tfrs.models.Model,
      tensor_slice_specs: dict,
      target_name: str):

    self.model = model
    self.tensor_slice_specs = tensor_slice_specs
    self.target_name = target_name

  # Convert typed model inputs to the uniformly-typed input SHAP expects.
  def convert_to_shap_input(
      self,
      ds: tf.data.Dataset,
      sample_size: int):

    sample_ds = ds.unbatch().take(sample_size)
    x_sample = pd.DataFrame(sample_ds.as_numpy_iterator()).drop(self.target_name, axis=1)
    input_df = x_sample.applymap(lambda x: str(x) if isinstance(x, int) else x.decode("utf-8", "ignore"))

    return input_df

  # Convert uniformly-typed input to the typed input the model expects.
  def convert_to_model_input(
      self,
      X: np.ndarray):

    num_columns = X.shape[1]
    rejiggered = [X[:, i] for i in range(num_columns)]
    tensor_slices = {
      name: tf.convert_to_tensor(rejiggered[index], dtype=dtype)
      for index, (name, dtype) in enumerate(self.tensor_slice_specs.items())
      if index < num_columns
    }
    input_ds = tf.data.Dataset.from_tensor_slices(tensor_slices)
    # print(list(input_ds.batch(5).as_numpy_iterator()))

    return input_ds

  # Adapt input and invoke the model to make predictions.
  def predict(
      self,
      X: np.ndarray):

    input_ds = self.convert_to_model_input(X)
    # The model's call() returns (user_embeddings, movie_embeddings, rating_predictions).
    # _, _, predictions = model.predict(input_ds.batch(50))
    predictions = self.model.predict(input_ds.batch(50))

    return predictions


train_sample_size = 10000
test_sample_size = 100
background_sample_size = 50
shap_sample_size = 100
print(train)
tensor_slice_specs = {'movie_title': tf.string, 'user_id': tf.string, 'user_rating': tf.float32}

shap_predictor = SHAPPredictor(model, tensor_slice_specs, target_name="user_rating")

x_train = shap_predictor.convert_to_shap_input(cached_train, sample_size = train_sample_size)

x_test = shap_predictor.convert_to_shap_input(cached_test, sample_size = test_sample_size)

background = shap.sample(x_train, background_sample_size)

explainer = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), background)
print("Done.")

print("Using explainer to determine SHAP values.")
shap_values = explainer.shap_values(x_test, nsamples=shap_sample_size)
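
# Workaround sketch (my suggestion, untested): shap.DeepExplainer needs
# symbolic model inputs/outputs, but a subclassed tfrs.models.Model never
# builds a static graph, so model.layers[-1].output raises "no inbound
# nodes". The model-agnostic shap.KernelExplainer only needs a plain
# predict function, so the SHAPPredictor wrapper above could be reused,
# keeping only the rating head as the output to explain:
#
#   def predict_ratings(X):
#       # model.predict returns (user_embeddings, movie_embeddings, ratings).
#       _, _, ratings = shap_predictor.predict(X)
#       return ratings
#
#   explainer = shap.KernelExplainer(predict_ratings, background)
#   shap_values = explainer.shap_values(x_test, nsamples=shap_sample_size)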

model.fit(cached_train, epochs=3)
metrics = model.evaluate(cached_test, return_dict=True)

print(f"Retrieval top-100 accuracy: {metrics[‘factorized_top_k/top_100_categorical_accuracy’]:.3f}.")
print(f"Ranking RMSE: {metrics[‘root_mean_squared_error’]:.3f}.")

“”"The result is a model that performs roughly as well on both tasks as each specialized model.

Making prediction

We can use the trained multitask model to get trained user and movie embeddings, as well as the predicted rating:
“”"

trained_movie_embeddings, trained_user_embeddings, predicted_rating = model({
    "user_id": np.array(["42"]),
    "movie_title": np.array(["Dances with Wolves (1990)"])
})
print("Predicted rating:")
print(predicted_rating)

"""While the results here do not show a clear accuracy benefit from a joint model in this case, multi-task learning is in general an extremely useful tool. We can expect better results when we can transfer knowledge from a data-abundant task (such as clicks) to a closely related data-sparse task (such as purchases)."""