sushruthsam committed
Commit 47d4754
Parent(s): 2e22707
Create app.py
app.py
ADDED
@@ -0,0 +1,39 @@
+import streamlit as st
+from ctransformers import AutoModelForCausalLM
+
+# Load the model
+llm = AutoModelForCausalLM.from_pretrained(
+    model_path_or_repo_id="mistral-7b-instruct-v0.2.Q2_K.gguf",
+    model_type="mistral",
+)
+
+st.title("Conversational Chat with Mistral 🦙🗨️")
+
+
+# Function to generate response
+def generate_response(user_query):
+    prompt = f"""The user query is '{user_query}'"""
+    args = {
+        "prompt": prompt,
+        "stream": True,
+        "max_new_tokens": 2048,
+        "temperature": 0,
+    }
+
+    response_placeholder = st.empty()  # Placeholder for displaying response chunks
+
+    response_so_far = ""  # Initialize empty string to store cumulative response
+
+    for chunk in llm(**args):
+        response_so_far += chunk  # Append current chunk to cumulative response
+        response_placeholder.write(response_so_far)  # Display cumulative response
+
+    return  # No need to return anything
+
+
+# User input
+user_query = st.text_input("Enter your query:", "")
+
+if user_query:
+    # Generate and display response
+    generate_response(user_query)
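
Note that model_path_or_repo_id here points at a local GGUF file, so the app only starts if mistral-7b-instruct-v0.2.Q2_K.gguf is already present in the working directory. A minimal sketch of fetching it first with huggingface_hub, assuming the quantized weights live in TheBloke/Mistral-7B-Instruct-v0.2-GGUF (that repo id is an assumption, not part of this commit):

from huggingface_hub import hf_hub_download

# Assumed repo id, not taken from app.py. hf_hub_download caches the file
# locally and returns its path, which can be passed to from_pretrained.
model_path = hf_hub_download(
    repo_id="TheBloke/Mistral-7B-Instruct-v0.2-GGUF",
    filename="mistral-7b-instruct-v0.2.Q2_K.gguf",
)

Alternatively, ctransformers can resolve the file itself if from_pretrained is given the repo id together with model_file="mistral-7b-instruct-v0.2.Q2_K.gguf".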
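
Streamlit re-executes the whole script on every widget interaction, so as written the 7B model is reloaded each time the text box changes. One way to avoid that, sketched under the assumption of a Streamlit version that ships st.cache_resource (1.18 or later):

import streamlit as st
from ctransformers import AutoModelForCausalLM

@st.cache_resource  # keep a single model instance across reruns and sessions
def load_llm():
    return AutoModelForCausalLM.from_pretrained(
        model_path_or_repo_id="mistral-7b-instruct-v0.2.Q2_K.gguf",
        model_type="mistral",
    )

llm = load_llm()

With or without caching, the app is launched the usual way: streamlit run app.py.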