seikin_alexey committed on
Commit
afc83c1
1 Parent(s): 5cbf6e6
Files changed (2)
  1. README.md +1 -1
  2. app4.py +0 -56
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: green
 colorTo: indigo
 sdk: gradio
 sdk_version: 3.0.19
-app_file: app4.py
+app_file: app3.py
 pinned: false
 duplicated_from: harish3110/emotion_detection
 ---
app4.py DELETED
@@ -1,56 +0,0 @@
-from speechbrain.pretrained.interfaces import foreign_class
-import gradio as gr
-import os
-import warnings
-warnings.filterwarnings("ignore")
-
-# Function to get the list of audio files in the 'rec/' directory
-def get_audio_files_list(directory="rec"):
-    try:
-        return [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
-    except FileNotFoundError:
-        print("The 'rec' directory does not exist. Please make sure it is the correct path.")
-        return []
-
-# Loading the speechbrain emotion detection model
-learner = foreign_class(
-    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
-    pymodule_file="custom_interface.py",
-    classname="CustomEncoderWav2vec2Classifier"
-)
-
-# Building prediction function for Gradio
-emotion_dict = {
-    'sad': 'Sad',
-    'hap': 'Happy',
-    'ang': 'Anger',
-    'fea': 'Fear',
-    'sur': 'Surprised',
-    'neu': 'Neutral'
-}
-
-def predict_emotion(selected_audio):
-    file_path = os.path.join("rec", selected_audio)
-    out_prob, score, index, text_lab = learner.classify_file(file_path)
-    emotion = emotion_dict[text_lab[0]]
-    return emotion, file_path  # Return both emotion and file path
-
-def button_click(selected_audio):
-    emotion, file_path = predict_emotion(selected_audio)
-    return emotion, gr.Interface.Play("rec/" + selected_audio)
-
-# Get the list of audio files for the dropdown
-audio_files_list = get_audio_files_list()
-
-# Loading Gradio interface
-inputs = gr.Dropdown(label="Select Audio", choices=audio_files_list)
-outputs = [gr.outputs.Textbox(label="Predicted Emotion"), gr.outputs.Audio(label="Play Audio")]
-
-# Create the button
-sub_btn = gr.Interface.Button(label="Detect Emotion", elem_id="btn", onclick=button_click)
-
-title = "ML Speech Emotion Detection3"
-description = "Speechbrain powered wav2vec 2.0 pretrained model on IEMOCAP dataset using Gradio."
-
-interface = gr.Interface(fn=predict_emotion, inputs=[inputs, sub_btn], outputs=outputs, title=title, description=description)
-interface.launch()
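
Note: the deleted app4.py calls Gradio APIs that do not exist (gr.Interface.Play, gr.Interface.Button with an onclick argument, and passing a button as an input to gr.Interface), which is presumably why the Space entry point was switched to a different file. The sketch below shows how the same dropdown → button → (label, audio) flow could be written with the Gradio 3.x Blocks API. It is an illustrative assumption only, not the contents of app3.py.

```python
# Illustrative sketch only -- assumed replacement flow, not the actual app3.py.
from speechbrain.pretrained.interfaces import foreign_class
import gradio as gr
import os
import warnings

warnings.filterwarnings("ignore")

# List the audio files bundled with the Space (assumes a 'rec/' directory, as in app4.py).
def get_audio_files_list(directory="rec"):
    try:
        return [f for f in os.listdir(directory)
                if os.path.isfile(os.path.join(directory, f))]
    except FileNotFoundError:
        return []

# Load the SpeechBrain emotion-recognition model (same checkpoint as the deleted file).
learner = foreign_class(
    source="speechbrain/emotion-recognition-wav2vec2-IEMOCAP",
    pymodule_file="custom_interface.py",
    classname="CustomEncoderWav2vec2Classifier",
)

emotion_dict = {
    "sad": "Sad", "hap": "Happy", "ang": "Anger",
    "fea": "Fear", "sur": "Surprised", "neu": "Neutral",
}

def predict_emotion(selected_audio):
    file_path = os.path.join("rec", selected_audio)
    out_prob, score, index, text_lab = learner.classify_file(file_path)
    # Return the readable label and the file path so the audio can be played back.
    return emotion_dict[text_lab[0]], file_path

# Blocks UI: dropdown -> button click -> (text label, audio player) outputs.
with gr.Blocks() as demo:
    gr.Markdown("Speechbrain-powered wav2vec 2.0 model pretrained on IEMOCAP, served with Gradio.")
    audio_choice = gr.Dropdown(label="Select Audio", choices=get_audio_files_list())
    detect_btn = gr.Button("Detect Emotion")
    emotion_out = gr.Textbox(label="Predicted Emotion")
    audio_out = gr.Audio(label="Play Audio")
    detect_btn.click(fn=predict_emotion, inputs=audio_choice,
                     outputs=[emotion_out, audio_out])

demo.launch()
```

The key design difference from the deleted file is that the button and its callback are wired with Button.click inside a Blocks context, rather than being passed as inputs to gr.Interface.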