si_nli / si_nli.py
"""SI-NLI is a Slovene natural language inference dataset."""
import csv
import logging
import os
import datasets
_CITATION = """\
@misc{sinli,
title = {Slovene Natural Language Inference Dataset {SI}-{NLI}},
author = {Klemen, Matej and {\v Z}agar, Ale{\v s} and {\v C}ibej, Jaka and Robnik-{\v S}ikonja, Marko},
url = {http://hdl.handle.net/11356/1707},
note = {Slovenian language resource repository {CLARIN}.{SI}},
year = {2022}
}
"""
_DESCRIPTION = """\
SI-NLI (Slovene Natural Language Inference Dataset) contains 5,937 human-created Slovene sentence pairs
(premise and hypothesis) that are manually labeled with the labels "entailment", "contradiction", and "neutral".
The dataset was created using sentences that appear in the Slovenian reference corpus ccKres.
Annotators were tasked to modify the hypothesis in a candidate pair in a way that reflects one of the labels.
The dataset is balanced since the annotators created three modifications (entailment, contradiction, neutral)
for each candidate sentence pair.
"""
_HOMEPAGE = "http://hdl.handle.net/11356/1707"
_LICENSE = "Creative Commons - Attribution 4.0 International (CC BY 4.0)"
_URLS = {
    "si-nli": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1707/SI-NLI.zip"
}
NA_STR = ""
UNIFIED_LABELS = {"E": "entailment", "N": "neutral", "C": "contradiction"}
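# Per-annotator labels in the raw TSV files may appear as single-letter codes ("E", "N", "C");
# _generate_examples maps these to full label names via UNIFIED_LABELS (e.g. "E" -> "entailment"),
# leaving any other value unchanged. Fields missing from a split (e.g. annotations in the public
# test set) are filled with NA_STR.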
class SINLI(datasets.GeneratorBasedBuilder):
    """SI-NLI is a Slovene natural language inference dataset."""
    VERSION = datasets.Version("1.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="public", version=VERSION,
                               description="Load the publicly available dataset (without test labels)."),
        datasets.BuilderConfig(name="private", version=VERSION,
                               description="Load the privately available dataset by manually providing the path to the data."),
    ]

    DEFAULT_CONFIG_NAME = "public"
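    # Configuration selection (a usage sketch, not part of the loading logic): the public data is
    # loaded with `datasets.load_dataset("cjvt/si_nli", "public")`, while the private variant with
    # test labels expects a local copy, e.g.
    # `datasets.load_dataset("cjvt/si_nli", "private", data_dir="path/to/dir/containing/SI-NLI")`.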
    def _info(self):
        features = datasets.Features({
            "pair_id": datasets.Value("string"),
            "premise": datasets.Value("string"),
            "hypothesis": datasets.Value("string"),
            "annotation1": datasets.Value("string"),
            "annotator1_id": datasets.Value("string"),
            "annotation2": datasets.Value("string"),
            "annotator2_id": datasets.Value("string"),
            "annotation3": datasets.Value("string"),
            "annotator3_id": datasets.Value("string"),
            "annotation_final": datasets.Value("string"),
            "label": datasets.Value("string")
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )
    def _split_generators(self, dl_manager):
        split_prefix = ""
        if self.config.name == "public":
            urls = _URLS["si-nli"]
            data_dir = dl_manager.download_and_extract(urls)
        else:
            # `data_dir` must contain the `SI-NLI` directory, which in turn holds train.tsv, dev.tsv and test.tsv
            if dl_manager.manual_dir is None or not os.path.exists(dl_manager.manual_dir):
                logging.warning("data_dir does not point to a valid directory")

            # Allow the user to specify the path to the private data directory: `load_dataset(..., data_dir=...)`
            data_dir = dl_manager.manual_dir
            if data_dir is None:
                # No path was provided: fall back to a placeholder so os.path.join below does not fail on None,
                # and mark the splits as dummy so _generate_examples yields nothing
                data_dir = ""
                split_prefix = "dummy_"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "SI-NLI", "train.tsv"),
                    "split": f"{split_prefix}train"
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "SI-NLI", "dev.tsv"),
                    "split": f"{split_prefix}dev"
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_path": os.path.join(data_dir, "SI-NLI", "test.tsv"),
                    "split": f"{split_prefix}test"
                }
            )
        ]
    def _generate_examples(self, file_path, split):
        # Dummy splits are produced when the private config is selected without a valid `data_dir`
        if split.startswith("dummy"):
            return None

        with open(file_path, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t", quotechar='"')
            header = next(reader)  # Skip the header row

            for i, row in enumerate(reader):
                pair_id = annotation1 = annotator1_id = annotation2 = annotator2_id = annotation3 = annotator3_id = \
                    annotation_final = label = NA_STR

                # The public test set only contains the premise and the hypothesis
                if len(row) == 2:
                    premise, hypothesis = row
                # The public train/validation sets and the private test set contain additional annotation data;
                # unused columns are discarded (`_`)
                else:
                    pair_id, premise, hypothesis, annotation1, _, annotator1_id, annotation2, _, annotator2_id, \
                        annotation3, _, annotator3_id, annotation_final, label = row

                yield i, {
                    "pair_id": pair_id,
                    "premise": premise, "hypothesis": hypothesis,
                    "annotation1": UNIFIED_LABELS.get(annotation1, annotation1), "annotator1_id": annotator1_id,
                    "annotation2": UNIFIED_LABELS.get(annotation2, annotation2), "annotator2_id": annotator2_id,
                    "annotation3": UNIFIED_LABELS.get(annotation3, annotation3), "annotator3_id": annotator3_id,
                    "annotation_final": UNIFIED_LABELS.get(annotation_final, annotation_final),
                    "label": label
                }
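
# Minimal usage sketch (an illustration, not part of the loading script itself). It assumes a
# `datasets` version that still supports dataset loading scripts; newer releases may additionally
# require `trust_remote_code=True` or drop script support entirely. The script can be loaded
# directly from this local file, or from the Hub as "cjvt/si_nli".
if __name__ == "__main__":
    # Load the public configuration (test labels are withheld there).
    sinli = datasets.load_dataset(__file__, "public")
    print(sinli)

    # Inspect one training example: premise/hypothesis pair plus annotation fields.
    example = sinli["train"][0]
    print(example["premise"])
    print(example["hypothesis"])
    print(example["label"])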