Add radio for model selection
app.py
CHANGED
@@ -6,7 +6,7 @@ import openai
 from dotenv import load_dotenv, find_dotenv
 from simpleaichat import AIChat
 
-from main import weather, search
+from main import weather, search
 from utils.tts import TTS, voices
 
 load_dotenv(find_dotenv())
@@ -30,7 +30,7 @@ def transcribe(audio_file, state=""):
     return state, state
 
 
-def chat_with_gpt(prompt, ai_state):
+def chat_with_gpt(prompt, ai_state, model):
     if ai_state is None:
         params = {"temperature": 0.0, "max_tokens": 200}
         system_prompt = (
@@ -39,12 +39,9 @@ def chat_with_gpt(prompt, ai_state):
             "Answer all questions empathetically, and ALWAYS ask follow-up questions."
             "Do NOT say Confidante in any response."
             "You must TRUST the provided context to inform your response."
-            # "If a question does not make any sense, or is not factually coherent, explain why "
-            # "instead of answering something not correct. If you don't know the answer to a question, "
-            # "please don't share false information."
         )
         ai = AIChat(
-            params=params, model=
+            params=params, model=model, system=system_prompt, save_messages=True
         )
     else:
         ai = ai_state
@@ -63,13 +60,13 @@ def tts(text, voice_id):
     return audio_data
 
 
-def transcribe_and_chat(audio_file, voice, history, ai_state):
+def transcribe_and_chat(audio_file, voice, history, ai_state, model):
     if audio_file is None:
         raise gr.Error("Empty audio file.")
     voice_id = voices[voice]
 
     text, text_state = transcribe(audio_file)
-    gpt_response, ai_state = chat_with_gpt(text, ai_state)
+    gpt_response, ai_state = chat_with_gpt(text, ai_state, model)
     audio_data = tts(gpt_response, voice_id)
 
     # Update the history with the new messages
@@ -94,6 +91,15 @@ with gr.Blocks(title="JARVIS") as demo:
     )
     audio_input = gr.Audio(source="microphone", type="filepath", visible=True)
     gr.ClearButton(audio_input)
+    gr.Markdown(
+        "Choose the AI model to use for generating responses. "
+        "GPT-4 is slower but more accurate, while GPT-3.5-turbo-16k is faster but less accurate."
+    )
+    model_select = gr.Radio(
+        choices=["gpt-4", "gpt-3.5-turbo-16k"],
+        label="Model",
+        value="gpt-3.5-turbo-16k",
+    )
 
     voice_select = gr.Radio(choices=list(voices.keys()), label="Voice", value="Bella")
     history = gr.State(label="History", value=[])
@@ -106,7 +112,7 @@ with gr.Blocks(title="JARVIS") as demo:
 
     audio_input.stop_recording(
         transcribe_and_chat,
-        inputs=[audio_input, voice_select, history, ai_state],
+        inputs=[audio_input, voice_select, history, ai_state, model_select],
        outputs=[chat_box, response_audio, history, ai_state],
     )
     audio_input.clear()
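How the new radio reaches the model: Gradio passes each component listed in inputs to the callback as one positional argument, so the string selected in model_select arrives as the new model parameter of transcribe_and_chat and is forwarded to chat_with_gpt, which uses it when constructing a fresh AIChat. A minimal, standalone sketch of the same pattern, with a hypothetical echo handler standing in for transcribe_and_chat (only gr.Radio, gr.Textbox, and .submit are assumed, all part of the Blocks API the app already uses):

    import gradio as gr

    # Hypothetical stand-in for transcribe_and_chat: echoes the message and
    # shows which model string the radio passed in.
    def respond(message, model):
        return f"[{model}] {message}"

    with gr.Blocks() as demo:
        model_select = gr.Radio(
            choices=["gpt-4", "gpt-3.5-turbo-16k"],
            label="Model",
            value="gpt-3.5-turbo-16k",
        )
        msg = gr.Textbox(label="Message")
        out = gr.Textbox(label="Response")
        # Each component in `inputs` becomes one positional argument of `respond`.
        msg.submit(respond, inputs=[msg, model_select], outputs=out)

    demo.launch()

One caveat worth noting: chat_with_gpt reads model only inside the ai_state is None branch, so switching the radio mid-conversation has no effect until the cached AIChat in ai_state is discarded and a new session starts.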