from transformers import TokenClassificationPipeline

class UniversalDependenciesPipeline(TokenClassificationPipeline):
  def _forward(self,model_inputs):
    import torch
    # Keep only sub-tokens that cover actual text (s<e), wrapped in CLS...SEP.
    v=[self.tokenizer.cls_token_id]+[t for t,(s,e) in zip(model_inputs["input_ids"][0].tolist(),model_inputs["offset_mapping"][0].tolist()) if s<e]+[self.tokenizer.sep_token_id]
    # One input per token i: mask position i and append candidate token j, so
    # a single batched pass scores every head/dependent pair of the sentence.
    with torch.no_grad():
      e=self.model(input_ids=torch.tensor([v[0:i]+[self.tokenizer.mask_token_id]+v[i+1:]+[j] for i,j in enumerate(v[1:-1],1)],device=self.device))
    return {"logits":e.logits[:,1:-2,:],**model_inputs}
  def postprocess(self,model_outputs,**kwargs):
    import numpy
    if "logits" not in model_outputs:
      model_outputs=model_outputs[0]
    e=model_outputs["logits"].numpy()
    # Allow root labels only on the diagonal and non-root labels only off it.
    r=[1 if i==0 else -1 if j.endswith("|root") else 0 for i,j in sorted(self.model.config.id2label.items())]
    e+=numpy.where(numpy.add.outer(numpy.identity(e.shape[0]),r)==0,0,numpy.nan)
    # m[j,i] is the best score of head j over dependent i; p[j,i] its label id.
    m,p=numpy.nanmax(e,axis=2),numpy.nanargmax(e,axis=2)
    h=self.chu_liu_edmonds(m)
    # If several tokens chose themselves (multiple roots), keep only the
    # best-scoring one (k), penalize the other candidates, and re-parse.
    z=[i for i,j in enumerate(h) if i==j]
    if len(z)>1:
      k,h=z[numpy.nanargmax(m[z,z])],numpy.nanmin(m)-numpy.nanmax(m)
      m[:,z]+=[[0 if j in z and (i!=j or i==k) else h for i in z] for j in range(m.shape[0])]
      h=self.chu_liu_edmonds(m)
    # Character spans of the kept sub-tokens and their UPOS|FEATS|DEPREL labels.
    v=[(s,e) for s,e in model_outputs["offset_mapping"][0].tolist() if s<e]
    q=[self.model.config.id2label[p[j,i]].split("|") for i,j in enumerate(h)]
    if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"]!="none":
      # Merge "goeswith" tokens into the preceding word, re-indexing the heads.
      for i,j in reversed(list(enumerate(q[1:],1))):
        if j[-1]=="goeswith":
          h=[b if i>b else b-1 for a,b in enumerate(h) if i!=a]
          v[i-1]=(v[i-1][0],v.pop(i)[1])
          q.pop(i)
    # Emit CoNLL-U: ID FORM LEMMA UPOS XPOS FEATS HEAD DEPREL DEPS MISC.
    t=model_outputs["sentence"].replace("\n"," ")
    u="# text = "+t+"\n"
    for i,(s,e) in enumerate(v):
      u+="\t".join([str(i+1),t[s:e],"_",q[i][0],"_","|".join(q[i][1:-1]),str(0 if h[i]==i else h[i]+1),q[i][-1],"_","_" if i+1<len(v) and e<v[i+1][0] else "SpaceAfter=No"])+"\n"
    return u+"\n"
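  # The original text breaks off before defining chu_liu_edmonds(), which is
  # called twice above.  The method below is a hedged sketch, not the original
  # implementation.  It assumes m[j,i] holds the score of head j for dependent
  # i, m[i,i] the score of i being a root, numpy.nan marks forbidden arcs, and
  # every column has at least one finite entry; it returns h with h[i] = head
  # of i and h[i]==i for a root.  Rather than the full contraction-based
  # Chu-Liu/Edmonds algorithm it uses a greedy variant: pick the best head for
  # every token, then repeatedly break each cycle by re-attaching the member
  # whose head switch loses the least score.
  def chu_liu_edmonds(self,m):
    import numpy
    n=m.shape[0]
    h=[int(j) for j in numpy.nanargmax(m,axis=0)]
    def find_cycle():
      # Walk i -> h[i]; a walk that re-enters its own path has found a cycle.
      color=[0]*n  # 0 unvisited, 1 on current path, 2 finished
      for s in range(n):
        path,u=[],s
        while color[u]==0 and h[u]!=u:
          color[u]=1
          path.append(u)
          u=h[u]
        if color[u]==1:
          return path[path.index(u):]
        for w in path:
          color[w]=2
      return None
    def reaches(j,i):
      # True if following heads upward from j hits i; the walk is bounded
      # because the graph may still contain other cycles.
      u,steps=j,0
      while u!=i and h[u]!=u and steps<n:
        u,steps=h[u],steps+1
      return u==i
    while True:
      c=find_cycle()
      if c is None:
        return h
      best=None  # (score loss, dependent, new head)
      for i in c:
        for j in range(n):
          # A legal repair is a finite arc that leaves the cycle (or makes i
          # a root) and does not close a new cycle through i.
          if numpy.isnan(m[j,i]) or (j!=i and j in c) or (j!=i and reaches(j,i)):
            continue
          loss=m[h[i],i]-m[j,i]
          if best is None or loss<best[0]:
            best=(loss,i,j)
      if best is None:
        return h  # no legal repair left; give up rather than loop forever
      h[best[1]]=best[2]

# Hypothetical usage sketch (the checkpoint name is a placeholder, not taken
# from the original text):
#   from transformers import AutoTokenizer,AutoModelForTokenClassification
#   tokenizer=AutoTokenizer.from_pretrained("your-ud-checkpoint")
#   model=AutoModelForTokenClassification.from_pretrained("your-ud-checkpoint")
#   nlp=UniversalDependenciesPipeline(model=model,tokenizer=tokenizer,aggregation_strategy="simple")
#   print(nlp("Hello world"))  # prints one CoNLL-U block per call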