# Run with: uvicorn app:app --host 0.0.0.0 --port 8000 --reload
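"""Minimal speech-to-text web service: upload an MP3/WAV file and receive a
transcript produced by openai/whisper-medium, with long audio split into
overlapping 30-second windows."""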

from fastapi import FastAPI, File, UploadFile
from fastapi.responses import HTMLResponse
from transformers import WhisperProcessor, WhisperForConditionalGeneration
import librosa
import io
import re

# Strip anything that looks like an HTML/XML tag from the model output.
html_tag_remover = re.compile(r'<[^>]+>')

def remove_tags(text):
    return html_tag_remover.sub('', text)

app = FastAPI()

processor = WhisperProcessor.from_pretrained("openai/whisper-medium")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-medium")
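# Clearing forced_decoder_ids below lets generate() auto-detect the spoken
# language and task, rather than forcing the language/task tokens baked into
# the default generation config.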
model.config.forced_decoder_ids = None

chunk_duration = 30   # seconds of audio per window
overlap_duration = 5  # seconds shared between consecutive windows
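# Whisper models expect 16 kHz audio, so every upload is resampled to this
# rate. Each window is then chunk_duration * sample_rate = 480,000 samples,
# and the loop advances chunk_duration - overlap_duration = 25 s per step.
sample_rate = 16000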

@app.get("/")
def read_root():
    html_form = """
    <html>
        <body>
            <h2>ASR Transcription</h2>
            <form action="/transcribe" method="post" enctype="multipart/form-data">
                <label for="audio_file">Upload an audio file (MP3 or WAV):</label>
                <input type="file" id="audio_file" name="audio_file" accept=".mp3, .wav" required><br><br>
                <input type="submit" value="Transcribe">
            </form>
        </body>
    </html>
    """
    return HTMLResponse(content=html_form, status_code=200)

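# /transcribe expects multipart/form-data with an "audio_file" field,
# matching the upload form served at the root path above.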
@app.post("/transcribe")
async def transcribe_audio(audio_file: UploadFile):
        audio_data = await audio_file.read()
        audio_data, _ = librosa.load(io.BytesIO(audio_data), sr=16000)
        
        transcription = []
        
        start = 0
        while start < len(audio_data):
            end = start + chunk_duration * 16000
            audio_chunk = audio_data[start:end]
            
            input_features = processor(audio_chunk.tolist(), return_tensors="pt").input_features
            predicted_ids = model.generate(input_features, max_length=1000)
            chunk_transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)

            transcription.extend(chunk_transcription) 

            start = end - overlap_duration * 16000
        
        final_transcription = " ".join(transcription)
        final_transcription = remove_tags(final_transcription)

        return final_transcription
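
# Example request against a local server (sample.wav is a placeholder name):
#   curl -F "audio_file=@sample.wav" http://localhost:8000/transcribe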