import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned model and its tokenizer from the Hugging Face Hub.
# AutoTokenizer is used here because the repository ships a standard
# Hugging Face tokenizer; mistral_common's MistralTokenizer does not
# support the encode(..., return_tensors="pt") interface used below.
model_name = "GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def get_bot_response(user_input):
    # Tokenize user input (returns input_ids and attention_mask tensors)
    inputs = tokenizer(user_input, return_tensors="pt")

    # Generate a response, guarding against runtime errors (e.g. out of memory).
    # max_new_tokens bounds the generated continuation; the original
    # max_length=100 would count the prompt tokens as well and could
    # truncate the reply almost immediately.
    try:
        with torch.no_grad():
            output_ids = model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                max_new_tokens=100,
                num_return_sequences=1,
                pad_token_id=tokenizer.eos_token_id,
            )
    except Exception as e:
        return f"Sorry, an error occurred while generating a response: {e}"

    # Decode the generated token IDs back into text and return it
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)


def main():
    st.title("Mental Health Chatbot")
    st.write(
        "Welcome to the mental health chatbot. You can ask questions or "
        "share your thoughts, and I'll try to provide helpful responses."
    )

    user_input = st.text_input("Type your message here:", value="", key="user_input")

    if user_input:
        st.write(f"You: {user_input}")
        bot_response = get_bot_response(user_input)
        st.write(f"Chatbot: {bot_response}")


if __name__ == "__main__":
    main()
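
# Usage note (a minimal sketch; the filename app.py is an assumption,
# not taken from the original script):
#
#   pip install streamlit torch transformers
#   streamlit run app.py
#
# Streamlit serves the app locally (by default at http://localhost:8501).
# Loading a 7B-parameter model requires substantial RAM/VRAM, so a GPU
# or a quantized variant of the model may be needed in practice.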