AlexWortega committed on
Commit
036acc1
1 Parent(s): 2690cb8

Create new file

Browse files
Files changed (1) hide show
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Dependencies: random for per-request seeds, torch for seeding/inference,
# gradio for the web UI, transformers for the model and tokenizer.
import random

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# All inference runs on CPU — no GPU is assumed in the deployment target.
device = 'cpu'
7
def ans(question):
    """Generate an answer to *question* with the ruGPT-3 mail-QA model.

    Builds the Russian prompt ``"Вопрос: ...\\nОписание: ...\\nОтвет:"``,
    samples a completion from the module-level ``model`` / ``tokenizer``,
    and returns only the text that follows the ``"Ответ:"`` marker.

    Args:
        question: Free-form question text (expected in Russian).

    Returns:
        The generated answer as a string.
    """
    description = ''  # optional context slot in the prompt; currently always empty
    # Re-seed torch on every call so identical questions can produce varied
    # answers; print the seed so any particular answer can be reproduced.
    seed = random.randint(1, 10000000)
    print(f'Seed: {seed}')
    torch.manual_seed(seed)

    inp = tokenizer.encode(f'Вопрос: {question}\nОписание: {description}\nОтвет:', return_tensors="pt").to(device)
    print('question', question)
    gen = model.generate(inp, do_sample=True, top_p=0.9, temperature=0.86,
                         max_new_tokens=100, repetition_penalty=1.2)

    gen = tokenizer.decode(gen[0])
    # Truncate at the first '<eos>' marker, if the model emitted one.
    gen = gen[:gen.index('<eos>') if '<eos>' in gen else len(gen)]
    # decode() echoes the prompt, which always contains 'Ответ:', so index 1
    # exists; keep only the generated text after the marker.
    gen = gen.split('Ответ:')[1]
    return gen
22
+
23
+
24
+
25
+
26
+
27
+
28
+
29
+ # Download checkpoint:
30
+ checkpoint = "its5Q/rugpt3large_mailqa"
31
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint)
32
+ model = AutoModelForCausalLM.from_pretrained(checkpoint)
33
+ model = model.eval()
34
+
35
+ # Gradio
36
+
37
+ title = "Ответы на главные вопросы жизни, вселенной и вообще"
38
+ description = "t5 large predict activity "
39
+ article = "<p style='text-align: center'><a href='https://github.com/NeuralPushkin/MailRu_Q-A'>Github with fine-tuning ruGPT3large on QA</a></p> Cозданно при поддержке <p style='text-align: center'><a href='https://t.me/lovedeathtransformers'>Love Death Transformers</a></p>"
40
+
41
+
42
+ iface = gr.Interface(fn=ans, title=title, description=description, article=article, inputs="text", outputs="text")
43
+
44
+ if __name__ == "__main__":
45
+ iface.launch()