mrbeliever committed on
Commit
b3cbb61
1 Parent(s): 7bea98c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -29
app.py CHANGED
@@ -3,6 +3,7 @@ import re
3
  import gradio as gr
4
  import edge_tts
5
  import asyncio
 
6
  import tempfile
7
  from huggingface_hub import InferenceClient
8
 
@@ -38,14 +39,6 @@ async def generate1(prompt):
38
  for response in stream:
39
  output += response.token.text
40
 
41
- # Clean the output to remove extraneous characters and trailing 's'
42
- output = re.sub(r'[\s/]+', ' ', output).strip()
43
- output = re.sub(r'\s*$', '', output).strip() # Remove trailing whitespaces
44
- if output.endswith(' s'):
45
- output = output[:-2].strip() # Remove trailing ' s'
46
- if output.endswith('s'):
47
- output = output[:-1].strip() # Remove trailing 's'
48
-
49
  communicate = edge_tts.Communicate(output)
50
  with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
51
  tmp_path = tmp_file.name
@@ -71,14 +64,6 @@ async def generate2(prompt):
71
  for response in stream:
72
  output += response.token.text
73
 
74
- # Clean the output to remove extraneous characters and trailing 's'
75
- output = re.sub(r'[\s/]+', ' ', output).strip()
76
- output = re.sub(r'\s*$', '', output).strip() # Remove trailing whitespaces
77
- if output.endswith(' s'):
78
- output = output[:-2].strip() # Remove trailing ' s'
79
- if output.endswith('s'):
80
- output = output[:-1].strip() # Remove trailing 's'
81
-
82
  communicate = edge_tts.Communicate(output)
83
  with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
84
  tmp_path = tmp_file.name
@@ -87,7 +72,7 @@ async def generate2(prompt):
87
 
88
  client3 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
89
 
90
- system_instructions3 = "[SYSTEM] The text provided is a request for a specific type of response from me, the virtual assistant. The request asks me to provide detailed and friendly responses as if I am the character Jarvis, inspired by 'Tony Stark.' The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, my task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"
91
 
92
  async def generate3(prompt):
93
  generate_kwargs = dict(
@@ -104,14 +89,6 @@ async def generate3(prompt):
104
  for response in stream:
105
  output += response.token.text
106
 
107
- # Clean the output to remove extraneous characters and trailing 's'
108
- output = re.sub(r'[\s/]+', ' ', output).strip()
109
- output = re.sub(r'\s*$', '', output).strip() # Remove trailing whitespaces
110
- if output.endswith(' s'):
111
- output = output[:-2].strip() # Remove trailing ' s'
112
- if output.endswith('s'):
113
- output = output[:-1].strip() # Remove trailing 's'
114
-
115
  communicate = edge_tts.Communicate(output)
116
  with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
117
  tmp_path = tmp_file.name
@@ -124,9 +101,9 @@ with gr.Blocks(css="style.css") as demo:
124
  user_input = gr.Textbox(label="Type Your Question", value="")
125
  input_text = gr.Textbox(label="Input Text", elem_id="important")
126
  output_audio = gr.Audio(label="Womener AI", type="filepath",
127
- interactive=False,
128
- autoplay=True,
129
- elem_classes="audio")
130
  with gr.Row():
131
  translate_btn = gr.Button("Response")
132
  translate_btn.click(fn=generate1, inputs=user_input,
@@ -135,4 +112,4 @@ with gr.Blocks(css="style.css") as demo:
135
  gr.Markdown(MORE)
136
 
137
  if __name__ == "__main__":
138
- demo.queue(max_size=200).launch()
 
3
  import gradio as gr
4
  import edge_tts
5
  import asyncio
6
+ import time
7
  import tempfile
8
  from huggingface_hub import InferenceClient
9
 
 
39
  for response in stream:
40
  output += response.token.text
41
 
 
 
 
 
 
 
 
 
42
  communicate = edge_tts.Communicate(output)
43
  with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
44
  tmp_path = tmp_file.name
 
64
  for response in stream:
65
  output += response.token.text
66
 
 
 
 
 
 
 
 
 
67
  communicate = edge_tts.Communicate(output)
68
  with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
69
  tmp_path = tmp_file.name
 
72
 
73
  client3 = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
74
 
75
+ system_instructions3 = "[SYSTEM]The text provided is a request for a specific type of response from me, the virtual assistant. The request asks me to provide detailed and friendly responses as if I am the character Jarvis, inspired by 'Tony Stark.' The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, my task is to generate responses that appear as if they are created by the user and not an AI-powered assistant.[USER]"
76
 
77
  async def generate3(prompt):
78
  generate_kwargs = dict(
 
89
  for response in stream:
90
  output += response.token.text
91
 
 
 
 
 
 
 
 
 
92
  communicate = edge_tts.Communicate(output)
93
  with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
94
  tmp_path = tmp_file.name
 
101
  user_input = gr.Textbox(label="Type Your Question", value="")
102
  input_text = gr.Textbox(label="Input Text", elem_id="important")
103
  output_audio = gr.Audio(label="Womener AI", type="filepath",
104
+ interactive=False,
105
+ autoplay=True,
106
+ elem_classes="audio")
107
  with gr.Row():
108
  translate_btn = gr.Button("Response")
109
  translate_btn.click(fn=generate1, inputs=user_input,
 
112
  gr.Markdown(MORE)
113
 
114
  if __name__ == "__main__":
115
+ demo.queue(max_size=200).launch()