How to correct a model?

Hi everyone, I'm writing a program and a model so that a PC or a Raspberry Pi can carry out commands later.

I made a YAML file with sentences, entities and actions (the training script below reads it as train.yml). I can use the model, but it sometimes outputs a different entity & action even when the input is identical to one of the training sentences. Is there a way to correct the model?

I build the YAML up like this:

commands:
  - input: Who is stronger
    entity: search
    action: searchwiki
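
What I'd like is that an input which is literally one of these training sentences always returns that sentence's entity and action. One workaround I'm considering is to short-circuit exact matches with a plain lookup before calling the model. A minimal sketch (classify_or_lookup is just a name I made up; it assumes the train.yml above and the classify() from the first script below):

import yaml

data = yaml.safe_load(open('train.yml').read())

# Training sentences never need the network: build an exact-match table once.
exact = {c['input']: {"entity": '{}\\{}'.format(c['entity'], c['action']), "conf": 1.0}
         for c in data['commands']}

def classify_or_lookup(text):
    # Fall back to the model only for inputs not seen in the training data.
    return exact.get(text) or classify(text)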

The code for classifying:

import numpy as np
from tensorflow.keras.models import load_model

labels = open('nlu/entities.txt', 'r', encoding='utf-8').read().splitlines()
model = load_model('nlu/model.h5')

label2idx = {}
idx2label = {}

for k, label in enumerate(labels):
    label2idx[label] = k
    idx2label[k] = label

# Classify any given text into a category of our NLU framework
def classify(text):
    # Sequence length must match max_sent used at training time
    max_len = 57
    x = np.zeros((1, max_len, 256), dtype='float32')

    # Truncate in bytes (not characters) to the model's input length
    encoded = text.encode('utf-8')[:max_len]
    # One-hot encode each byte of the input text
    for k, ch in enumerate(encoded):
        x[0, k, int(ch)] = 1.0

    out = model.predict(x)
    idx = int(out[0].argmax())

    return {"entity": idx2label[idx], "conf": float(out[0].max())}

The code for making the model:

import numpy as np
import tensorflow as tf
import yaml
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
epochs = 1000

print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))

data = yaml.safe_load(open('train.yml').read())

# Reading the data

inputs, outputs = [], []

for command in data['commands']:
    inputs.append(command['input'])
    # entity and action are combined into one class label, e.g. 'search\searchwiki'
    outputs.append('{}\\{}'.format(command['entity'], command['action']))

# Create a dataset
# Choose a level of tokenization: byte-level -> static vocab, handles out-of-vocabulary words
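# For example, encoding is per byte, so the vocabulary is fixed at 256 and
# unseen words can never fall outside it:
#   list('Who'.encode('utf-8'))  ->  [87, 104, 111]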

# Create input data

max_sent = max(len(x.encode('utf-8')) for x in inputs)
# classify() pads to this same length (57 in my data), so keep the two in sync

# Create arrays one-hot encoding (number of examples, seq length,  vocab_size)
# Create arrays sparse encoding (number of examples, seq length)

input_data = np.zeros((len(inputs), max_sent, 256), dtype='float32')

for i, inp in enumerate(inputs):
    for k, ch in enumerate(bytes(inp.encode('utf-8'))):
        input_data[i, k, int(ch)] = 1.0

labels = sorted(set(outputs))

# Persist the label list so classify() can rebuild the same index mapping;
# sorting makes the order deterministic across runs
with open('entities.txt', 'w', encoding='utf-8') as fwrite:
    for label in labels:
        fwrite.write(label + '\n')

labels = open('entities.txt', 'r', encoding='utf-8').read().splitlines()

label2idx = {}
idx2label = {}

for k, label in enumerate(labels):
    label2idx[label] = k
    idx2label[k] = label

output_data = []

# Map each combined label to its index
for output in outputs:
    output_data.append(label2idx[output])

output_data = to_categorical(output_data, len(labels))

model = Sequential()
# An LSTM layer reduces the (max_sent, 256) byte sequence to a single vector;
# the Dense layer alone cannot consume the 3-D input (128 units chosen arbitrarily)
model.add(LSTM(128, input_shape=(max_sent, 256)))
model.add(Dense(len(labels), activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
model.fit(input_data, output_data, epochs=epochs)
model.save('model.h5')
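
To see how often the wrong label comes back, a quick check would be to run every training sentence through the model again (this sketch reuses inputs/outputs from above and assumes classify() from the first script, pointed at this model.h5 and entities.txt):

mismatches = 0
for inp, out in zip(inputs, outputs):
    pred = classify(inp)
    if pred['entity'] != out:
        mismatches += 1
        print('Mismatch: "{}" -> "{}" (expected "{}")'.format(inp, pred['entity'], out))
print('{} of {} training sentences misclassified'.format(mismatches, len(inputs)))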