DHEIVER committed on
Commit
99a97dd
1 Parent(s): 254aed0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -39
app.py CHANGED
@@ -3,47 +3,51 @@ from groq import Groq
3
  import gradio as gr
4
  from config import GROQ_API_KEY
5
 
6
# Export the project-level key so the Groq client can read it from the
# environment (Groq() picks up GROQ_API_KEY automatically).
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
client = Groq()

# System message prepended to every conversation.
system_prompt = {
    "role": "system",
    "content": "You are a useful assistant. You reply with efficient answers. "
}


async def chat_groq(message, history):
    """Stream a chat completion for *message*, replaying *history* first.

    Parameters:
        message: the new user message (stringified before sending).
        history: iterable of (user, assistant) turn pairs from Gradio.

    Yields the accumulated response text after each streamed chunk so the
    Gradio ChatInterface can render partial output.
    """
    messages = [system_prompt]

    # Replay prior (user, assistant) turns so the model has full context.
    for msg in history:
        messages.append({"role": "user", "content": str(msg[0])})
        messages.append({"role": "assistant", "content": str(msg[1])})

    # Fixed: `str (message)` -> `str(message)` (stray space in call).
    messages.append({"role": "user", "content": str(message)})

    response_content = ''

    stream = client.chat.completions.create(
        model="llama3-70b-8192",
        messages=messages,
        max_tokens=1024,
        temperature=1.3,
        stream=True
    )

    # `content` may be None on control chunks; accumulate only real deltas.
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content:
            # Reuse the value already bound above instead of re-indexing
            # `chunk.choices[0].delta.content` a second time.
            response_content += content
            yield response_content


# Fixed: `gr. Blocks` -> `gr.Blocks` (stray space in attribute access).
with gr.Blocks(theme=gr.themes.Monochrome(), fill_height=True) as demo:
    gr.ChatInterface(chat_groq,
                     clear_btn=None,
                     undo_btn=None,
                     retry_btn=None,
                     )

demo.queue()
demo.launch()
 
 
 
 
3
  import gradio as gr
4
  from config import GROQ_API_KEY
5
 
6
class ConversationalAI:
    """Groq-backed chat assistant exposed through a Gradio ChatInterface."""

    def __init__(self):
        # Export the key so Groq() can read GROQ_API_KEY from the environment.
        os.environ["GROQ_API_KEY"] = GROQ_API_KEY
        self.client = Groq()
        # System message prepended to every conversation.
        self.system_prompt = {
            "role": "system",
            "content": "You are a useful assistant. You reply with efficient answers. "
        }

    async def chat_groq(self, message, history):
        """Stream a chat completion for *message*, replaying *history* first.

        Parameters:
            message: the new user message (stringified before sending).
            history: iterable of (user, assistant) turn pairs from Gradio.

        Yields the accumulated response text after each streamed chunk so
        the Gradio ChatInterface can render partial output.
        """
        messages = [self.system_prompt]

        # Replay prior (user, assistant) turns so the model has full context.
        for msg in history:
            messages.append({"role": "user", "content": str(msg[0])})
            messages.append({"role": "assistant", "content": str(msg[1])})

        messages.append({"role": "user", "content": str(message)})

        response_content = ''

        stream = self.client.chat.completions.create(
            model="llama3-70b-8192",
            messages=messages,
            max_tokens=1024,
            temperature=1.3,
            stream=True
        )

        # `content` may be None on control chunks; accumulate only real deltas.
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                # Reuse the value already bound above instead of re-indexing
                # `chunk.choices[0].delta.content` a second time.
                response_content += content
                yield response_content

    def create_chat_interface(self):
        """Build and return the Gradio Blocks UI wrapping chat_groq."""
        with gr.Blocks(theme=gr.themes.Monochrome(), fill_height=True) as demo:
            gr.ChatInterface(self.chat_groq,
                             clear_btn=None,
                             undo_btn=None,
                             retry_btn=None,
                             )
        return demo
48
 
49
if __name__ == "__main__":
    # Build the assistant, wire up its UI, and serve it.
    assistant = ConversationalAI()
    interface = assistant.create_chat_interface()
    interface.queue()
    interface.launch()