import json

import streamlit as st
import torch
from transformers import DistilBertModel, DistilBertTokenizer

def get_top95(y_predict, convert_target):
    """Return the smallest set of labels whose cumulative probability exceeds 0.95."""
    lst_labels = []
    tuple_arr = tuple((idx, val) for idx, val in enumerate(y_predict))
    sort_y = sorted(tuple_arr, key=lambda x: x[1], reverse=True)
    cumsum = 0
    for key, prob in sort_y:
        cumsum += prob
        lst_labels.append(convert_target[str(key)])
        if cumsum > 0.95:
            break
    return lst_labels
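
# Illustration with made-up numbers: for y_predict = [0.6, 0.25, 0.12, 0.03] and
# convert_target = {"0": "cs.CV", "1": "cs.LG", "2": "stat.ML", "3": "cs.CL"},
# the cumulative probability passes 0.95 on the third class, so the function
# returns ["cs.CV", "cs.LG", "stat.ML"].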
class DistillBERTClass(torch.nn.Module):
    """DistilBERT encoder with a small classification head over the [CLS] token."""

    def __init__(self):
        super(DistillBERTClass, self).__init__()
        self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
        self.pre_classifier = torch.nn.Linear(768, 768)
        self.dropout = torch.nn.Dropout(0.3)
        self.classifier = torch.nn.Linear(768, 8)

    def forward(self, input_ids, attention_mask):
        output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
        hidden_state = output_1[0]
        pooler = hidden_state[:, 0]  # representation of the [CLS] token
        pooler = self.pre_classifier(pooler)
        pooler = torch.nn.ReLU()(pooler)
        pooler = self.dropout(pooler)
        output = self.classifier(pooler)  # logits for the 8 classes
        return output
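
# Rough sanity check (hypothetical snippet, not executed here): a single tokenized
# title/abstract pair should produce logits of shape (1, 8), one score per class.
#   tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
#   enc = tok("some abstract", "some title", return_tensors="pt")
#   DistillBERTClass()(enc["input_ids"], enc["attention_mask"]).shape  # torch.Size([1, 8])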
# Load the fine-tuned model (the whole pickled DistillBERTClass object, mapped to CPU).
model = torch.load("bert_distilbert.bin", map_location=torch.device('cpu'))
model.eval()  # disable dropout for inference
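# Note: this assumes bert_distilbert.bin stores the whole pickled model object.
# If it held only a state_dict (saved via torch.save(model.state_dict(), ...)),
# the loading step would instead be:
#   model = DistillBERTClass()
#   model.load_state_dict(torch.load("bert_distilbert.bin", map_location="cpu"))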
def get_predict(title, abstract):
    # The tokenizer should match the encoder checkpoint ("distilbert-base-uncased" above).
    tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
    inputs = tokenizer(abstract, title, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(
            input_ids=inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
        )
    y_predict = torch.nn.functional.softmax(logits, dim=1)[0].cpu().numpy()
    file_path = "sample.json"
    with open(file_path, 'r') as json_file:
        decode_target = json.load(json_file)
    return get_top95(y_predict, decode_target)
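
# sample.json is assumed to map class indices (as strings) to human-readable labels,
# e.g. {"0": "cs.CV", "1": "cs.LG", ...}; get_top95 looks labels up by str(index).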
st.markdown("Article classifier")
# ^-- you can show the user text, images, and a limited subset of HTML, just like in Jupyter
title = st.text_area("Title", key=1)
abstract = st.text_area("Abstract", key=2)
# ^-- render a text box; the variable holds whatever string is currently in the field
# from transformers import pipeline
# pipe = pipeline("ner", "Davlan/distilbert-base-multilingual-cased-ner-hrl")
# raw_predictions = pipe(text)
# ^-- the familiar huggingface.transformers code; it can be swapped for anything from fairseq to catboost
st.markdown(f"Prediction: {get_predict(title, abstract)}")