padmalcom committed on
Commit
115b3b4
1 Parent(s): bf5c155

Upload 2 files

Browse files
Files changed (2) hide show
  1. inference.py +39 -0
  2. test.wav +0 -0
inference.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+ import torchaudio
4
+ from transformers import AutoConfig, Wav2Vec2Processor
5
+
6
+ from Wav2Vec2ForSpeechClassification import Wav2Vec2ForSpeechClassification
7
+
8
# Local directory (or hub id) of the fine-tuned speech-emotion classifier.
MY_MODEL = "myrun3"

# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Model config carries the id2label mapping used to name the emotions.
config = AutoConfig.from_pretrained(MY_MODEL)
processor = Wav2Vec2Processor.from_pretrained(MY_MODEL)
# Sampling rate the feature extractor (and therefore the model) expects.
sampling_rate = processor.feature_extractor.sampling_rate
model = Wav2Vec2ForSpeechClassification.from_pretrained(MY_MODEL).to(device)
15
+
16
def speech_file_to_array_fn(path, sampling_rate):
    """Load an audio file and resample it to ``sampling_rate``.

    Parameters
    ----------
    path : str
        Path to an audio file readable by torchaudio.
    sampling_rate : int
        Target sampling rate expected by the processor/model.

    Returns
    -------
    numpy.ndarray
        Waveform resampled to ``sampling_rate`` (1-D for mono input;
        multi-channel files keep their channel dimension after squeeze).
    """
    speech_array, orig_sampling_rate = torchaudio.load(path)
    # BUG FIX: the original called Resample(_sampling_rate), which passes the
    # source rate as orig_freq and silently relies on the default
    # new_freq=16000 — the ``sampling_rate`` argument was ignored.
    resampler = torchaudio.transforms.Resample(
        orig_freq=orig_sampling_rate, new_freq=sampling_rate
    )
    return resampler(speech_array).squeeze().numpy()
21
+
22
+
23
def predict(path, sampling_rate):
    """Classify the emotion of a speech recording.

    Uses the module-level ``processor``, ``model``, ``config`` and ``device``.

    Parameters
    ----------
    path : str
        Path to the audio file to classify.
    sampling_rate : int
        Sampling rate to resample the audio to before feature extraction.

    Returns
    -------
    list[dict]
        One entry per label: ``{"Emotion": <label>, "Score": "<xx.x>%"}``.
    """
    speech = speech_file_to_array_fn(path, sampling_rate)
    features = processor(
        speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True
    )

    input_values = features.input_values.to(device)
    attention_mask = features.attention_mask.to(device)

    # Inference only — no gradients needed.
    with torch.no_grad():
        logits = model(input_values, attention_mask=attention_mask).logits

    # Softmax over the label dimension; take the first (only) batch element.
    # ``detach()`` is unnecessary under no_grad(), so it is dropped.
    scores = F.softmax(logits, dim=1).cpu().numpy()[0]

    # FIX: the original rounded to 3 decimals and then re-formatted with
    # ``:.1f`` — a redundant double rounding. Format once instead.
    return [
        {"Emotion": config.id2label[i], "Score": f"{score * 100:.1f}%"}
        for i, score in enumerate(scores)
    ]
36
+
37
# Run the classifier on the bundled sample and report the top prediction.
res = predict("test.wav", 16000)
# BUG FIX: the original did ``max = max(res, key=lambda x: x['Score'])``,
# which (a) shadowed the builtin ``max`` and (b) compared the formatted
# "xx.x%" strings lexicographically — so "9.0%" would outrank "85.0%".
# Compare the numeric percentage instead.
top = max(res, key=lambda entry: float(entry["Score"].rstrip("%")))
print("Expected anger:", top)
test.wav ADDED
Binary file (60.1 kB). View file