"""Streamlit chat UI for the `tbboukhari/chatbot-produit-fr` LoRA model.

The user types a French sentence; the model answers with the associated
product domain. Streamlit reruns this whole script on every interaction,
so the conversation lives in `st.session_state` and the model load is
cached as a process-wide resource.
"""

import streamlit as st
from streamlit_chat import message as st_message
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
from peft import PeftModel, PeftConfig

st.title("Chatbot Produit")

# Seed the conversation with a greeting on the first run only; on later
# reruns the history already exists and must not be re-seeded.
if "history" not in st.session_state:
    st.session_state.history = [
        {
            # NOTE: fixed mojibake ("associƩ" -> "associé") from the original.
            "message": "Bonjour ! Donnez moi une phrase et j'essaierai de trouver le domaine associé",
            "is_user": False,
        }
    ]


@st.cache_resource
def get_models():
    """Load tokenizer + base seq2seq model and apply the LoRA adapter.

    Returns:
        (tokenizer, model): the tokenizer of the base model and the base
        model wrapped with the PEFT/LoRA weights.

    Cached with ``st.cache_resource`` so the expensive download/load runs
    once per server process instead of on every Streamlit rerun.
    """
    peft_model_id = "tbboukhari/chatbot-produit-fr"
    config = PeftConfig.from_pretrained(peft_model_id)
    model = AutoModelForSeq2SeqLM.from_pretrained(
        config.base_model_name_or_path,
        torch_dtype="auto",
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
    # Wrap the base model with the LoRA adapter weights.
    model = PeftModel.from_pretrained(model, peft_model_id)
    return tokenizer, model


tokenizer, model = get_models()


def generate_answer():
    """on_change callback for the text input.

    Runs the model on the user's message and appends both sides of the
    exchange (user message, then bot reply) to the chat history.
    """
    user_message = st.session_state.input_text
    inputs = tokenizer(user_message, return_tensors="pt")
    result = model.generate(**inputs)
    message_bot = tokenizer.decode(result[0], skip_special_tokens=True)
    st.session_state.history.append({"message": user_message, "is_user": True})
    st.session_state.history.append({"message": message_bot, "is_user": False})


st.text_input("Response", key="input_text", on_change=generate_answer)

# A unique widget key per message avoids DuplicateWidgetID errors when the
# same text appears more than once in the conversation.
for i, chat in enumerate(st.session_state.history):
    st_message(**chat, key=f"msg_{i}")