amitpandey8 committed
Commit
ac3e0e8
1 Parent(s): 84bfc91

Update app.py

Files changed (1)
  app.py +5 -1
app.py CHANGED
@@ -1,5 +1,6 @@
 import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
 # Load the model and tokenizer
 model_name = "GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
@@ -11,7 +12,10 @@ def get_bot_response(user_input):
     inputs = tokenizer.encode(user_input, return_tensors="pt")
 
     # Generate response
-    bot_response = model.generate(inputs, max_length=100, num_return_sequences=1)
+    try:
+        bot_response = model.generate(inputs, max_length=100, num_return_sequences=1)
+    except Exception as e:
+        return f"Sorry, an error occurred while generating a response: {e}"
 
     # Decode and return response
     return tokenizer.decode(bot_response[0], skip_special_tokens=True)
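
For context, here is a minimal sketch of the patched get_bot_response as it would read after this commit. The diff elides the lines that load the weights (new-file lines 7-11), so the two from_pretrained calls below are an assumption based on the imports, not part of the diff; likewise the comment on the torch import, which is added by the commit but not used in any line the diff shows.

    import streamlit as st
    from transformers import AutoModelForCausalLM, AutoTokenizer
    import torch  # added by this commit; unused in the lines the diff shows

    # Load the model and tokenizer
    model_name = "GRMenon/mental-health-mistral-7b-instructv0.2-finetuned-V2"
    # Assumption: the elided lines load the weights with the standard calls.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    def get_bot_response(user_input):
        # Tokenize the user's message into input IDs
        inputs = tokenizer.encode(user_input, return_tensors="pt")

        # Generate response; on failure, return the error message
        # to the UI instead of crashing the Streamlit app
        try:
            bot_response = model.generate(inputs, max_length=100, num_return_sequences=1)
        except Exception as e:
            return f"Sorry, an error occurred while generating a response: {e}"

        # Decode and return response
        return tokenizer.decode(bot_response[0], skip_special_tokens=True)

Note that bot_response is only read outside the try block when generation succeeded, since the except branch returns early, so the final decode is safe.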