Update app.py
app.py
CHANGED
@@ -1,10 +1,11 @@
 import os
 import logging
-import
+import gradio as gr
+from huggingface_hub import hf_hub_download
 
 # Install necessary libraries using os.system
 os.system("pip install --upgrade pip")
-os.system("pip install
+os.system("pip install llama-cpp-agent huggingface_hub trafilatura beautifulsoup4 requests duckduckgo-search googlesearch-python")
 
 # Attempt to import all required modules
 try:
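Note: os.system discards pip's exit status, so a failed install here only surfaces later as an ImportError. A minimal sketch of a fail-fast equivalent using only the standard library (package list copied from the added line above):

import subprocess
import sys

def pip_install(*packages: str) -> None:
    # Run pip through the current interpreter so the packages land in the
    # environment the app itself runs in; raises CalledProcessError on failure.
    subprocess.check_call([sys.executable, "-m", "pip", "install", *packages])

pip_install(
    "llama-cpp-agent", "huggingface_hub", "trafilatura",
    "beautifulsoup4", "requests", "duckduckgo-search", "googlesearch-python",
)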
@@ -22,22 +23,7 @@ try:
     from utils import CitingSources
     from settings import get_context_by_model, get_messages_formatter_type
 except ImportError as e:
-
-    if 'utils' in str(e):
-        st.warning("Mocking utils.CitingSources")
-        class CitingSources:
-            sources = []
-
-    if 'settings' in str(e):
-        st.warning("Mocking settings functions")
-        def get_context_by_model(model):
-            return 4096
-
-        def get_messages_formatter_type(model):
-            return MessagesFormatterType.BASIC
-
-import logging
-from huggingface_hub import hf_hub_download
+    raise ImportError(f"Error importing modules: {e}")
 
 # Download the models
 hf_hub_download(
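Note: the removed fallback called st.warning (a Streamlit API, though this app is Gradio-based) and referenced MessagesFormatterType before any import guaranteed it existed, so failing fast with raise ImportError is the safer choice. If graceful degradation were ever wanted again, a sketch without the Streamlit dependency (the stub mirrors the removed mock and is illustrative, not the app's code):

import logging

try:
    from utils import CitingSources
except ImportError:
    logging.warning("utils.CitingSources unavailable; using an empty stub")

    class CitingSources:
        # Hypothetical stand-in mirroring the removed mock.
        sources = []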
@@ -57,7 +43,7 @@ hf_hub_download(
 )
 
 # Function to respond to user messages
-def respond(message, history, temperature, top_p, top_k, repeat_penalty):
+def respond(message, temperature, top_p, top_k, repeat_penalty):
     try:
         model = "mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf"
         max_tokens = 3000
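Note: dropping history from the signature matches the UI change at the bottom of this diff, where a plain gr.Interface replaces a chat-style callback. The hf_hub_download arguments themselves are elided by the hunk context; a sketch of what such a call looks like (the repo_id and local_dir are assumptions inferred from the GGUF filename used in respond(), not values taken from this file):

from huggingface_hub import hf_hub_download

hf_hub_download(
    repo_id="TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF",  # assumed repository
    filename="mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf",   # filename used in respond()
    local_dir="./models",                                # assumed download target
)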
@@ -106,12 +92,6 @@ def respond(message, history, temperature, top_p, top_k, repeat_penalty):
 
         messages = BasicChatHistory()
 
-        for msn in history:
-            user = {"role": Roles.user, "content": msn[0]}
-            assistant = {"role": Roles.assistant, "content": msn[1]}
-            messages.add_message(user)
-            messages.add_message(assistant)
-
         result = web_search_agent.get_chat_response(
             message,
             llm_sampling_settings=settings,
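Note: with the replay loop gone, every request is single-turn and the agent no longer sees prior exchanges. For reference, this is what the removed code did, expressed as a helper (the import paths follow llama-cpp-agent's published examples; verify them against the installed version):

from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles

def replay_history(messages: BasicChatHistory, history) -> None:
    # Gradio chat history arrives as (user, assistant) string pairs.
    for user_text, assistant_text in history:
        messages.add_message({"role": Roles.user, "content": user_text})
        messages.add_message({"role": Roles.assistant, "content": assistant_text})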
@@ -136,7 +116,6 @@ def respond(message, history, temperature, top_p, top_k, repeat_penalty):
 
         for text in response_text:
             outputs += text
-            yield outputs
 
         output_settings = LlmStructuredOutputSettings.from_pydantic_models(
             [CitingSources], LlmStructuredOutputType.object_instance
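Note: removing yield outputs turns respond from a streaming generator into a plain function that returns the full string once, so the gr.Interface added below renders a single final answer instead of progressive updates. The trade-off in miniature:

def stream(chunks):
    outputs = ""
    for text in chunks:
        outputs += text
        yield outputs  # UI can repaint after every chunk

def collect(chunks):
    outputs = ""
    for text in chunks:
        outputs += text
    return outputs  # UI repaints once, after generation finishes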
@@ -153,23 +132,24 @@ def respond(message, history, temperature, top_p, top_k, repeat_penalty):
         )
         outputs += "\n\nSources:\n"
         outputs += "\n".join(citing_sources.sources)
-
+        return outputs
 
     except Exception as e:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return f"An error occurred: {e}"
+
+# Gradio interface
+demo = gr.Interface(
+    fn=respond,
+    inputs=[
+        gr.Textbox(label="Enter your message:"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.45, step=0.1, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
+        gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k"),
+        gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
+    ],
+    outputs="text",
+    title="Novav2 Web Engine"
+)
+
+if __name__ == "__main__":
+    demo.launch()
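Note: gr.Interface passes its input components to fn positionally, so the five inputs map onto respond(message, temperature, top_p, top_k, repeat_penalty) in order; reordering the list would silently swap slider meanings. On Spaces the bare demo.launch() is sufficient; a sketch of a launch configured for self-hosting (host and port are assumptions, 7860 being Gradio's conventional default):

if __name__ == "__main__":
    # Queue requests so long generations don't drop connections, then bind
    # to all interfaces for access from outside the container.
    demo.queue().launch(server_name="0.0.0.0", server_port=7860)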