Spaces: Running
pragneshbarik committed
Commit 2006c2b
Parent(s): c5bc833

added mistralAI
Browse files:
- __pycache__/mistral7b.cpython-311.pyc +0 -0
- app.py +62 -13
- chat_log.txt +0 -0
- id_log.txt +0 -0
- ikigai.svg +13 -0
- mistral7b.py +47 -0
- requirements.txt +2 -1
- utils.py +0 -2
__pycache__/mistral7b.cpython-311.pyc
ADDED
Binary file (1.79 kB)
app.py
CHANGED
@@ -1,21 +1,58 @@
 import streamlit as st
 from utils import generate_text_embeddings
+from mistral7b import mistral
+
+import time
+
-st.title("Echo Bot")
 
 if "messages" not in st.session_state:
     st.session_state.messages = []
-
-
-st.
+
+if "tokens_used" not in st.session_state:
+    st.session_state.tokens_used = 0
+
+if "inference_time" not in st.session_state:
+    st.session_state.inference_time = [0.00]
+
+if "model_settings" not in st.session_state:
+    st.session_state.model_settings = {
+        "temp": 0.9,
+        "max_tokens": 512,
+    }
+
+if "history" not in st.session_state:
+    st.session_state.history = []
+
+if "top_k" not in st.session_state:
+    st.session_state.top_k = 5
+
+with st.sidebar:
+    st.markdown("# Model Analytics")
+    st.write("Tokens used :", st.session_state['tokens_used'])
+
+    st.write("Average Inference Time: ", round(sum(st.session_state["inference_time"]) / len(st.session_state["inference_time"]), 3))
+    st.write("Cost Incurred :", round(0.033 * st.session_state['tokens_used'] / 1000, 3), "INR")
+
     st.markdown("---")
-st.markdown("
-st.
-st.markdown("
-st.markdown("
+    st.markdown("# Retrieval Settings")
+    st.slider(label="Documents to retrieve", min_value=1, max_value=10, value=3)
+    st.markdown("---")
+    st.markdown("# Model Settings")
+    selected_model = st.sidebar.radio('Select one:', ["Mistral 7B", "GPT 3.5 Turbo", "GPT 4", "Llama 7B"])
+    selected_temperature = st.slider(label="Temperature", min_value=0.0, max_value=1.0, step=0.1, value=0.5)
+    st.write(" ")
+    st.info("**2023 ©️ Pragnesh Barik**")
+
+
 
+st.image("ikigai.svg")
+st.title("Ikigai Chat")
 
+with st.expander("What is Ikigai Chat ?"):
+    st.info("""Ikigai Chat is a vector database powered chat agent. It works on the principle
+of Retrieval Augmented Generation (RAG): its primary function revolves around maintaining an extensive repository of Ikigai Docs and providing users with answers that align with their queries.
+This approach ensures a more refined and tailored response to user inquiries.""")
 
 
 for message in st.session_state.messages:
@@ -23,12 +60,24 @@ for message in st.session_state.messages:
         st.markdown(message["content"])
 
 
-if prompt := st.chat_input("
-    query_embeddings = generate_text_embeddings(prompt)
+if prompt := st.chat_input("Chat with Ikigai Docs?"):
     st.chat_message("user").markdown(prompt)
     st.session_state.messages.append({"role": "user", "content": prompt})
-
-
+
+    tick = time.time()
+    response = mistral(prompt, st.session_state.history, temperature=st.session_state.model_settings["temp"], max_new_tokens=st.session_state.model_settings["max_tokens"])
+    tock = time.time()
+
+    st.session_state.inference_time.append(tock - tick)
+    response = response.replace("</s>", "")
+    len_response = len(response.split())
+
+    st.session_state["tokens_used"] = len_response + st.session_state["tokens_used"]
+
+    with st.chat_message("assistant"):
         st.markdown(response)
-    st.session_state.
+    st.session_state.history.append([prompt, response])
+    st.session_state.messages.append({"role": "assistant", "content": response})
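A note on the sidebar analytics in app.py: token usage is approximated as whitespace-separated words (len(response.split())), and cost is priced at 0.033 INR per 1,000 tokens. A minimal sketch of that bookkeeping, with a hypothetical running total:

    tokens_used = 2000                               # hypothetical running total of "tokens"
    cost_inr = round(0.033 * tokens_used / 1000, 3)  # cost formula from the sidebar
    print(cost_inr)                                  # 0.066 (INR)

Word count is only a rough proxy; a real tokenizer would usually report more tokens than words.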
chat_log.txt
ADDED
File without changes

id_log.txt
ADDED
File without changes

ikigai.svg
ADDED
mistral7b.py
ADDED
@@ -0,0 +1,47 @@
+from huggingface_hub import InferenceClient
+import os
+from dotenv import load_dotenv
+load_dotenv()
+
+API_TOKEN = os.getenv('HF_TOKEN')
+client = InferenceClient(
+    "mistralai/Mistral-7B-Instruct-v0.1",
+    token=API_TOKEN
+)
+
+
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+def mistral(
+    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+):
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=42,
+    )
+
+    formatted_prompt = format_prompt(prompt, history)
+
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        # print(response)
+        output += response.token["text"]
+        # yield output
+    return output
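For reference, format_prompt assembles the Mistral-7B-Instruct [INST] chat template from the running history. A quick illustration with hypothetical turns:

    history = [("What is Ikigai Chat?", "A RAG-powered chat agent.")]  # hypothetical prior turn
    print(format_prompt("How does it retrieve documents?", history))
    # <s>[INST] What is Ikigai Chat? [/INST] A RAG-powered chat agent.</s> [INST] How does it retrieve documents? [/INST]

Note that client.text_generation is called with stream=True, but mistral() concatenates the streamed tokens and returns the complete string; the commented-out "yield output" would be the incremental-display variant.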
requirements.txt
CHANGED
@@ -4,6 +4,7 @@ av==10.0.0
 bitarray==2.8.1
 blinker==1.6.3
 cachetools==5.3.1
+huggingface-hub==0.16.4
 certifi==2023.7.22
 charset-normalizer==3.2.0
 click==8.1.7
@@ -53,7 +54,7 @@ PyPDF2==3.0.1
 pyreadline3==3.4.1
 python-dotenv==1.0.0
 pytz==2023.3.post1
-PyYAML==6.0
+PyYAML==6.0.1
 readme-renderer==42.0
 referencing==0.30.2
 regex==2023.8.8
utils.py
CHANGED
@@ -1,5 +1,3 @@
-import json
-import requests
 import os
 from dotenv import load_dotenv
 from sentence_transformers import SentenceTransformer
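app.py imports generate_text_embeddings from utils.py, but its body is not part of this diff. A hypothetical minimal sketch, assuming a SentenceTransformer encoder (the model name below is an assumption, not from the commit):

    import os
    from dotenv import load_dotenv
    from sentence_transformers import SentenceTransformer

    load_dotenv()
    # Assumed checkpoint; the model actually used by the Space is not shown in this diff.
    model = SentenceTransformer("all-MiniLM-L6-v2")

    def generate_text_embeddings(text):
        # Encode the query into a dense vector for retrieval.
        return model.encode(text)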