import os
import gradio as gr
import subprocess
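# Unofficial Gradio demo for FullSubNet+ speech enhancement (denoising).
# At startup: fetch the FullSubNet+ sources and download the pretrained checkpoint.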
os.system("git clone https://github.com/doevent/FullSubNet-plus")
os.system("mv FullSubNet-plus/speech_enhance .")
os.system("mv FullSubNet-plus/config .")
os.system("gdown https://drive.google.com/uc?id=1UJSt1G0P_aXry-u79LLU_l9tCnNa2u7C -O best_model.tar")
from speech_enhance.tools.denoise_hf_clone_voice import start
# Measure clip length (in whole seconds) so overly long files can be rejected before inference
def duration(input_audio) -> int:
    command = [
        "ffprobe", "-v", "error",
        "-show_entries", "format=duration",
        "-of", "default=noprint_wrappers=1:nokey=1",
        "-i", input_audio,
    ]
    result = subprocess.run(command, stdout=subprocess.PIPE)
    return int(float(result.stdout.decode("ascii").rstrip()))
def inference(audio):
    try:
        # Enforce the 150-second limit before running the enhancement model
        if audio.find("audio") < 0:
            if duration(audio) >= 150:
                return "error.wav"
        result = start(to_list_files=[audio])
        return result[0]
    except Exception as e:
        raise gr.Error(f"Maximum duration 150 sec\n{str(e)}")
title = """
DeNoise Speech Enhancement
"""
description = """
This is an unofficial demo for FullSubNet+ (FullSubNet-plus): DeNoise Speech Enhancement. To use it, simply upload your audio, or click one of the examples to load it. Read more at the link below.
Link to GitHub:
- [FullSubNet+](https://github.com/hit-thusz-RookieCJ/FullSubNet-plus)
"""
twitter_link = "[![](https://img.shields.io/twitter/follow/DoEvent?label=@DoEvent&style=social)](https://twitter.com/DoEvent)"
css = '''
h1#title {
text-align: center;
}
'''
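# Two-tab UI (file upload and microphone recording), both wired to the same inference function.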
with gr.Blocks(css=css) as demo:
gr.Markdown(title)
gr.Markdown(description)
gr.Markdown(twitter_link)
with gr.Tab("Upload audio"):
u_audio = gr.Audio(type="filepath", source="upload", label="Input audio")
u_output = gr.Audio(type="filepath", label="Output audio")
u_button = gr.Button("Submit")
with gr.Tab("Record your voice"):
        m_audio = gr.Audio(type="filepath", source="microphone", label="Record yourself reading something out loud")
m_output = gr.Audio(type="filepath", label="Output audio")
m_button = gr.Button("Submit")
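    # Example clips are denoised once at startup and cached (cache_examples=True)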
gr.Examples(examples=["man.wav", "woman.wav"], inputs=u_audio, outputs=u_output, fn=inference, cache_examples=True)
u_button.click(inference, inputs=u_audio, outputs=u_output)
m_button.click(inference, inputs=m_audio, outputs=m_output)
gr.Markdown("")
demo.queue(concurrency_count=1, api_open=False).launch(show_api=False, show_error=True)