Delete app.py
app.py
DELETED
@@ -1,115 +0,0 @@
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from collections import Counter
from scipy.special import softmax

article_string = "Author: <a href=\"https://huggingface.co/knowhate\">kNOwHATE</a>. Read more about our <a href=\"https://knowhate.eu/pt-pt\">research on the evaluation of Portuguese language models</a>."

app_title = "Portuguese Hate Speech Detection"

app_description = """
This app detects hate speech on Portuguese text using multiple models. You can either introduce your own sentences by filling in "Text" or click on one of the examples provided below.
"""

app_examples = [
    ["as pessoas tem que perceber que ser 'panasca' não é deixar de ser homem, é deixar de ser humano kkk"],
    ["ontem encontrei-me com um amigo meu e tivemos uma conversa agradável"],
]

output_textbox_component_description = """
This box will display the hate speech detection results based on the average score of multiple models.
"""

output_json_component_description = { "breakdown": """
This box presents a detailed breakdown of the evaluation for each model.
"""}

short_score_descriptions = {
    0: "Non Hate Speech",
    1: "Hate Speech"
}

score_descriptions = {
    0: "This text is not Hate Speech.",
    1: "This text is Hate Speech.",
}
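
# The predict() function below concatenates an English and a Portuguese label
# description, but this file never defines score_descriptions_pt, so calling it
# would raise a NameError. A minimal assumed definition, mirroring
# score_descriptions, is sketched here:
score_descriptions_pt = {
    0: "Este texto não é discurso de ódio.",
    1: "Este texto é discurso de ódio.",
}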

model_list = [
    "knowhate/HateBERTimbau",
    "knowhate/HateBERTimbau-youtube",
    "knowhate/HateBERTimbau-twitter",
    "knowhate/HateBERTimbau-yt-tt",
]

user_friendly_name = {
    "knowhate/HateBERTimbau": "HateBERTimbau (Original)",
    "knowhate/HateBERTimbau-youtube": "HateBERTimbau (YouTube)",
    "knowhate/HateBERTimbau-twitter": "HateBERTimbau (Twitter)",
    "knowhate/HateBERTimbau-yt-tt": "HateBERTimbau (YouTube + Twitter)",
}

reverse_user_friendly_name = { v: k for k, v in user_friendly_name.items() }

user_friendly_name_list = list(user_friendly_name.values())

model_array = []

for model_name in model_list:
    row = {}
    row["name"] = model_name
    row["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
    row["model"] = AutoModelForSequenceClassification.from_pretrained(model_name)
    model_array.append(row)


def most_frequent(array):
    occurence_count = Counter(array)
    return occurence_count.most_common(1)[0][0]


def predict(s1, chosen_model):
    if not chosen_model:
        chosen_model = user_friendly_name_list[0]
    scores = {}
    full_chosen_model_name = reverse_user_friendly_name[chosen_model]
    for row in model_array:
        name = row["name"]
        if name != full_chosen_model_name:
            continue
        else:
            tokenizer = row["tokenizer"]
            model = row["model"]
            model_input = tokenizer(*([s1],), padding=True, return_tensors="pt")
            with torch.no_grad():
                output = model(**model_input)
            logits = output[0][0].detach().numpy()
            logits = softmax(logits).tolist()
            break

    def get_description(idx):
        description = score_descriptions[idx]
        description_pt = score_descriptions_pt[idx]
        final_description = description + "\n \n" + description_pt
        return final_description

    max_pos = logits.index(max(logits))
    markdown_description = get_description(max_pos)
    scores = { short_score_descriptions[k]: v for k, v in enumerate(logits) }

    return scores, markdown_description


inputs = [
    gr.Textbox(label="Text", value=app_examples[0][0]),
    gr.Dropdown(label="Model", choices=user_friendly_name_list, value=user_friendly_name_list[0]),
]

outputs = [
    gr.Label(label="Result"),
    gr.Markdown(),
]


gr.Interface(fn=predict, inputs=inputs, outputs=outputs, title=app_title,
             description=app_description,
             examples=app_examples,
             article=article_string).launch()
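
For reference, the core inference path of the deleted app can be exercised outside Gradio. The sketch below is a minimal reconstruction, assuming the knowhate/HateBERTimbau checkpoint is still downloadable from the Hub; variable names are illustrative rather than taken from app.py. It tokenizes one sentence, runs the sequence-classification head, applies softmax, and maps the two logits to the same labels used in short_score_descriptions.

# Standalone sketch (assumes network access to the knowhate/HateBERTimbau checkpoint).
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from scipy.special import softmax
import torch

checkpoint = "knowhate/HateBERTimbau"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

text = "ontem encontrei-me com um amigo meu e tivemos uma conversa agradável"
encoded = tokenizer([text], padding=True, return_tensors="pt")
with torch.no_grad():
    # Shape (1, 2): one row of logits for the two classes.
    logits = model(**encoded).logits[0].numpy()

probs = softmax(logits)
labels = {0: "Non Hate Speech", 1: "Hate Speech"}
print({labels[i]: float(p) for i, p in enumerate(probs)})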