replaced the previous venv system with a conda one, allowing for better dependency management

faraphel 2025-01-18 14:44:46 +01:00
parent 8bf28e4c48
commit 0034c7b31a
17 changed files with 313 additions and 230 deletions


@@ -30,19 +30,19 @@ class ChatInterface(base.BaseInterface):
         messages.insert(0, {"role": "system", "content": system_message})
 
         # add the user message
-        # NOTE: gradio.ChatInterface add our message and the assistant message
-        # TODO(Faraphel): add support for files
+        # NOTE: gradio.ChatInterface add our message and the assistant message automatically
+        # TODO(Faraphel): add support for files - directory use user_message ? apparently, field "image" is supported.
+        #  check "https://huggingface.co/docs/transformers/main_classes/pipelines" at "ImageTextToTextPipeline"
+        # TODO(Faraphel): add a "MultimodalChatInterface" to support images
         messages.append({
             "role": "user",
             "content": user_message["text"],
         })
 
         # infer the message through the model
-        chunks = [chunk async for chunk in await self.model.infer(messages=messages)]
-        assistant_message: str = b"".join(chunks).decode("utf-8")
-        # send back the messages, clear the user prompt, disable the system prompt
-        return assistant_message
+        async for chunk in self.model.infer(messages=messages):
+            yield chunk.decode("utf-8")
 
     def get_application(self):
         # create a gradio interface
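The hunk above switches send_message from collecting every chunk before returning to yielding them as they arrive, which gradio.ChatInterface can consume to update the reply incrementally. The following is a minimal, self-contained sketch of that streaming pattern, with a hypothetical EchoModel standing in for the repository's model class; note that gradio treats each yielded value as the full message so far, so the sketch accumulates chunks before yielding.

import gradio


class EchoModel:
    """Stand-in for the project's model: streams a reply as raw byte chunks."""

    async def infer(self, messages: list[dict]):
        reply = f"you said: {messages[-1]['content']}"
        for index in range(0, len(reply), 4):
            # simulate token-by-token generation with small byte chunks
            yield reply[index:index + 4].encode("utf-8")


model = EchoModel()


async def send_message(user_message: str, history: list[dict]):
    messages = history + [{"role": "user", "content": user_message}]

    partial = ""
    async for chunk in model.infer(messages=messages):
        partial += chunk.decode("utf-8")
        # each yield replaces the assistant bubble with the text accumulated so far
        yield partial


demo = gradio.ChatInterface(fn=send_message, type="messages")

if __name__ == "__main__":
    demo.launch()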
@@ -65,7 +65,7 @@ class ChatInterface(base.BaseInterface):
         gradio.ChatInterface(
             fn=self.send_message,
             type="messages",
-            multimodal=True,
+            multimodal=False,  # TODO(Faraphel): should handle at least image and text files
             editable=True,
             save_history=True,
             additional_inputs=[system_prompt],
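The TODOs above point toward multimodal support: with multimodal=True, gradio passes send_message a dict containing "text" and "files", and transformers' ImageTextToTextPipeline accepts chat messages whose content mixes image and text entries. The sketch below shows how the attached files could be folded into the messages; build_messages is a hypothetical helper, not code from this commit, and it assumes every attachment is an image the pipeline can load from its path.

def build_messages(user_message: dict, history: list[dict]) -> list[dict]:
    """Turn a gradio multimodal message ({"text": ..., "files": [...]}) into chat messages."""
    content: list[dict] = [{"type": "text", "text": user_message["text"]}]
    for path in user_message.get("files", []):
        # assumption: every attached file is an image readable from a local path
        content.append({"type": "image", "image": path})
    return history + [{"role": "user", "content": content}]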