Upload metrics.py with huggingface_hub
metrics.py +141 -0
metrics.py
ADDED
@@ -0,0 +1,141 @@
from .stream import Stream
from .operator import SingleStreamOperator, StreamInstanceOperator
from dataclasses import dataclass, field
from abc import abstractmethod, ABC

from typing import List, Dict, Any


def abstract_factory():
    return {}


def abstract_field():
    return field(default_factory=abstract_factory)


class UpdateStream(StreamInstanceOperator):
    # Merges a fixed dict into every instance passing through the stream.
    update: dict

    def process(self, instance: Dict[str, Any], stream_name: str = None) -> Dict[str, Any]:
        instance.update(self.update)
        return instance


class Metric(ABC):
    @property
    @abstractmethod
    def main_score(self):
        pass


class GlobalMetric(SingleStreamOperator, Metric):
    # Computes a corpus-level score over all references/predictions in the stream,
    # while also attaching a per-instance score to each instance.
    def process(self, stream: Stream):
        references = []
        predictions = []
        global_score = {}

        instances = []

        for instance in stream:
            if "score" not in instance:
                instance["score"] = {"global": global_score, "instance": {}}
            else:
                global_score = instance["score"]["global"]

            refs, pred = instance["references"], instance["prediction"]

            instance_score = self._compute([refs], [pred])
            instance["score"]["instance"].update(instance_score)

            references.append(refs)
            predictions.append(pred)
            instances.append(instance)

        result = self._compute(references, predictions)

        global_score.update(result)

        for instance in instances:
            instance["score"]["global"] = global_score
            yield instance

    def _compute(self, references: List[List[str]], predictions: List[str]) -> dict:
        result = self.compute(references, predictions)
        result["score"] = result[self.main_score]
        return result

    @abstractmethod
    def compute(self, references: List[List[str]], predictions: List[str]) -> dict:
        pass


class InstanceMetric(SingleStreamOperator, Metric):
    # Scores each instance independently, then aggregates per-field scores into a
    # global score according to reduction_map (currently only "mean" is supported).
    implemented_reductions: List[str] = field(default_factory=lambda: ["mean"])

    @property
    @abstractmethod
    def reduction_map(self) -> dict:
        pass

    def process(self, stream: Stream):
        global_score = {}
        instances = []

        for instance in stream:
            refs, pred = instance["references"], instance["prediction"]

            instance_score = self._compute(refs, pred)

            if "score" not in instance:
                instance["score"] = {"global": global_score, "instance": {}}
            else:
                global_score = instance["score"]["global"]

            instance["score"]["instance"].update(instance_score)

            instances.append(instance)

        for reduction, fields in self.reduction_map.items():
            assert (
                reduction in self.implemented_reductions
            ), f"Reduction {reduction} is not implemented, use one of {self.implemented_reductions}"

            if reduction == "mean":
                from statistics import mean

                for field_name in fields:
                    global_score[field_name] = mean([instance["score"]["instance"][field_name] for instance in instances])
                    if field_name == self.main_score:
                        global_score["score"] = global_score[field_name]

        for instance in instances:
            yield instance

    def _compute(self, references: List[str], prediction: str) -> dict:
        result = self.compute(references, prediction)
        result["score"] = result[self.main_score]
        return result

    @abstractmethod
    def compute(self, references: List[str], prediction: str) -> dict:
        pass


class SingleReferenceInstanceMetric(InstanceMetric):
    # Convenience base class for metrics that compare the prediction against the
    # first reference only.
    def _compute(self, references: List[str], prediction: str) -> dict:
        result = self.compute(references[0], prediction)
        result["score"] = result[self.main_score]
        return result

    @abstractmethod
    def compute(self, reference, prediction: str) -> dict:
        pass


class Accuracy(SingleReferenceInstanceMetric):
    reduction_map = {"mean": ["accuracy"]}
    main_score = "accuracy"

    def compute(self, reference, prediction: str) -> dict:
        # Exact string match between the (first) reference and the prediction.
        return {"accuracy": float(str(reference) == str(prediction))}
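
For orientation, here is a minimal, self-contained sketch (not part of the uploaded file) of the score layout these operators attach to each instance. It assumes instances shaped like {"references": [...], "prediction": ...} and mirrors Accuracy plus the "mean" reduction from InstanceMetric.process, without using the Stream/operator machinery from this repository:

from statistics import mean

# Hypothetical toy data, in the same shape the metrics above expect.
instances = [
    {"references": ["cat"], "prediction": "cat"},
    {"references": ["dog"], "prediction": "cow"},
]

global_score = {}
for instance in instances:
    # Same rule as Accuracy.compute: exact string match against the first reference.
    acc = float(str(instance["references"][0]) == str(instance["prediction"]))
    # Same layout as InstanceMetric.process: {"score": {"global": ..., "instance": ...}}.
    instance["score"] = {"global": global_score, "instance": {"accuracy": acc, "score": acc}}

# "mean" reduction over the instance scores, as in reduction_map = {"mean": ["accuracy"]}.
global_score["accuracy"] = mean(i["score"]["instance"]["accuracy"] for i in instances)
global_score["score"] = global_score["accuracy"]

print(global_score)  # {'accuracy': 0.5, 'score': 0.5}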