amitpandey8 committed
Commit
874ec99
1 Parent(s): d6cc604

Update app.py

Files changed (1)
  1. app.py +25 -31
app.py CHANGED
@@ -1,38 +1,32 @@
-import streamlit as st
-from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
-
-# Load the model and tokenizer
-model_name = "GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
-
-def get_bot_response(user_input):
-    # Tokenize user input
-    inputs = tokenizer.encode(user_input, return_tensors="pt")
-
-    # Generate response
-    try:
-        bot_response = model.generate(inputs, max_length=100, num_return_sequences=1)
-    except Exception as e:
-        return f"Sorry, an error occurred while generating a response: {e}"
-
-    # Decode and return response
-    return tokenizer.decode(bot_response[0], skip_special_tokens=True)
-
-def main():
-    st.title("Mental Health Chatbot")
-
-    st.write("Welcome to the mental health chatbot. You can ask questions or share your thoughts, and I'll try to provide helpful responses.")
-
-    user_input = st.text_input("Type your message here:", value="", key="user_input")
-
-    if user_input:
-        st.write(f"You: {user_input}")
-
-        bot_response = get_bot_response(user_input)
-
-        st.write(f"Chatbot: {bot_response}")
-
-if __name__ == "__main__":
-    main()
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_path = "GRMenon/mental-mistral-7b-instruct-autotrain"
+
+tokenizer = AutoTokenizer.from_pretrained(model_path)
+model = AutoModelForCausalLM.from_pretrained(
+    model_path,
+    device_map="auto",
+    torch_dtype='auto'
+).eval()
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+# Prompt content:
+messages = [
+    {"role": "user", "content": "Hey Connor! I have been feeling a bit down lately. I could really use some advice on how to feel better?"}
+]
+
+input_ids = tokenizer.apply_chat_template(conversation=messages,
+                                          tokenize=True,
+                                          add_generation_prompt=True,
+                                          return_tensors='pt').to(device)
+output_ids = model.generate(input_ids=input_ids,
+                            max_new_tokens=512,
+                            do_sample=True,
+                            pad_token_id=2)
+response = tokenizer.batch_decode(output_ids.detach().cpu().numpy(),
+                                  skip_special_tokens=True)
+
+# Model response:
+print(response[0])
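
Note on the updated script: the `generate` call hardcodes `pad_token_id=2` and passes only `input_ids` with no attention mask, which recent transformers releases warn about. Below is a minimal sketch of a more defensive variant, assuming a transformers version where `apply_chat_template` supports `return_dict=True`; the `chat` helper is hypothetical and not part of this commit.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "GRMenon/mental-mistral-7b-instruct-autotrain"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    device_map="auto",
    torch_dtype="auto",
).eval()

def chat(messages, max_new_tokens=512):
    # Hypothetical helper (not in the commit): build the prompt with the
    # model's chat template, generate, and return only the new tokens.
    enc = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_dict=True,              # also returns attention_mask
        return_tensors="pt",
    ).to(model.device)
    with torch.no_grad():
        output_ids = model.generate(
            **enc,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # instead of a literal 2
        )
    # Slice off the prompt so only the model's reply is decoded
    reply_ids = output_ids[0, enc["input_ids"].shape[-1]:]
    return tokenizer.decode(reply_ids, skip_special_tokens=True)

print(chat([{"role": "user", "content": "I have been feeling a bit down lately."}]))

Deriving `pad_token_id` from `tokenizer.eos_token_id` keeps the call correct even if the checkpoint's special-token ids ever differ from Mistral's usual `</s>` id of 2.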