File size: 2,041 Bytes
6b23642 33a5854 6b23642 33a5854 6b23642 33a5854 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
from dataclasses import field
from typing import Any, Dict, List
from datasets import Features, Sequence, Value
from .operator import StreamInstanceOperatorValidator
# Canonical column layout every unitxt dataset instance must conform to,
# expressed as HuggingFace `datasets` feature types: plain strings for the
# scalar fields, string sequences for the list-valued ones.
UNITXT_DATASET_SCHEMA = Features(
    dict(
        source=Value("string"),
        target=Value("string"),
        references=Sequence(Value("string")),
        metrics=Sequence(Value("string")),
        group=Value("string"),
        postprocessors=Sequence(Value("string")),
    )
)
# UNITXT_METRIC_SCHEMA = Features({
# "predictions": Value("string", id="sequence"),
# "target": Value("string", id="sequence"),
# "references": Value("string", id="sequence"),
# "metrics": Value("string", id="sequence"),
# 'group': Value('string'),
# 'postprocessors': Value("string", id="sequence"),
# })
class ToUnitxtGroup(StreamInstanceOperatorValidator):
    """Stamp each streamed instance with group/metric/postprocessor metadata
    and prune any fields outside :data:`UNITXT_DATASET_SCHEMA`.

    Attributes:
        group: Group label written into every instance.
        metrics: Metric names to attach; ``None`` leaves the instance's
            existing ``metrics`` field untouched.
        postprocessors: Postprocessor names to attach (defaults to
            ``["to_string"]``); ``None`` leaves the field untouched.
        remove_unnecessary_fields: When True, drop every key that is not part
            of the unitxt dataset schema before stamping.
    """

    group: str
    metrics: List[str] = None
    postprocessors: List[str] = field(default_factory=lambda: ["to_string"])
    remove_unnecessary_fields: bool = True

    def process(self, instance: Dict[str, Any], stream_name: str = None) -> Dict[str, Any]:
        """Mutate and return ``instance`` so it matches the unitxt schema."""
        if self.remove_unnecessary_fields:
            # Snapshot the keys first: deleting from a dict while iterating
            # its live keys() view raises RuntimeError in Python 3.
            for key in list(instance.keys()):
                if key not in UNITXT_DATASET_SCHEMA:
                    del instance[key]
        instance["group"] = self.group
        if self.metrics is not None:
            instance["metrics"] = self.metrics
        if self.postprocessors is not None:
            instance["postprocessors"] = self.postprocessors
        return instance

    def validate(self, instance: Dict[str, Any], stream_name: str = None):
        """Assert that ``instance`` is a dict carrying every schema key and
        that it encodes cleanly under the HuggingFace schema.

        Raises:
            AssertionError: On a None/non-dict instance or a missing key.
        """
        assert instance is not None, "Instance is None"
        assert isinstance(instance, dict), f"Instance should be a dict, got {type(instance)}"
        assert all(
            key in instance for key in UNITXT_DATASET_SCHEMA
        ), f"Instance should have the following keys: {UNITXT_DATASET_SCHEMA}"
        # encode_example raises if any value does not fit its declared feature type.
        UNITXT_DATASET_SCHEMA.encode_example(instance)
|