fixed an issue where the chat interface would only show the final token of a model answer

faraphel 2025-01-18 18:28:31 +01:00
parent 0034c7b31a
commit 156db5d6a1


@@ -20,7 +20,7 @@ class ChatInterface(base.BaseInterface):
     async def send_message(self, user_message, old_messages: list[dict], system_message: str):
         # normalize the user message (the type can be wrong, especially when "edited")
         if isinstance(user_message, str):
-            user_message: dict = {"files": [], "text": user_message}
+            user_message: dict = {"text": user_message}
 
         # copy the history to avoid modifying it
         messages: list[dict] = old_messages.copy()
@@ -41,8 +41,10 @@ class ChatInterface(base.BaseInterface):
         })
 
         # infer the message through the model
+        assistant_message = ""
         async for chunk in self.model.infer(messages=messages):
-            yield chunk.decode("utf-8")
+            assistant_message += " " + chunk.decode("utf-8")
+            yield assistant_message
 
     def get_application(self):
         # create a gradio interface
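Why the fix works: a Gradio chat handler written as a generator replaces the displayed assistant message with every value it yields rather than appending to it, so yielding each decoded chunk on its own left only the last token visible. Yielding the running concatenation instead streams the whole answer as it grows. Below is a minimal, self-contained sketch of that streaming contract; the hard-coded chunk list stands in for self.model.infer, and the respond function name is illustrative, not part of this repository.

import gradio as gr

def respond(message: str, history: list):
    # stand-in for the model's byte-chunk stream (assumption for illustration)
    chunks = [b"Hello", b" there", b"!"]
    answer = ""
    for chunk in chunks:
        answer += chunk.decode("utf-8")
        # each yield *replaces* what the UI currently shows,
        # so always yield the accumulated answer, not the latest chunk
        yield answer

demo = gr.ChatInterface(respond)

if __name__ == "__main__":
    demo.launch()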