File size: 4,489 Bytes
40b58f1 736c175 40b58f1 736c175 40b58f1 736c175 40b58f1 736c175 40b58f1 736c175 40b58f1 736c175 40b58f1 736c175 40b58f1 736c175 40b58f1 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 |
from pathlib import Path
from typing import List
import datasets
import pandas as pd
from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks, DEFAULT_SOURCE_VIEW_NAME, DEFAULT_SEACROWD_VIEW_NAME
# Canonical dataset identifier used by the SeaCrowd loader registry.
_DATASETNAME = "emotcmt"
# Default view names come from SeaCrowd constants so this loader matches the
# naming convention of the rest of the catalogue.
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME
_LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
# False: the data is fetched from a public URL, not a local path.
_LOCAL = False
# BibTeX entries for the two papers behind this dataset (normalization
# pipeline paper + emotion-classification paper).
_CITATION = """\
@inproceedings{barik-etal-2019-normalization,
title = "Normalization of {I}ndonesian-{E}nglish Code-Mixed {T}witter Data",
author = "Barik, Anab Maulana and
Mahendra, Rahmad and
Adriani, Mirna",
booktitle = "Proceedings of the 5th Workshop on Noisy User-generated Text (W-NUT 2019)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D19-5554",
doi = "10.18653/v1/D19-5554",
pages = "417--424"
}
@article{Yulianti2021NormalisationOI,
title={Normalisation of Indonesian-English Code-Mixed Text and its Effect on Emotion Classification},
author={Evi Yulianti and Ajmal Kurnia and Mirna Adriani and Yoppy Setyo Duto},
journal={International Journal of Advanced Computer Science and Applications},
year={2021}
}
"""
_DESCRIPTION = """\
EmotCMT is an emotion classification Indonesian-English code-mixing dataset created through an Indonesian-English code-mixed Twitter data pipeline consisting of 4 processing steps, i.e., tokenization, language identification, lexical normalization, and translation. The dataset consists of 825 tweets, 22.736 tokens with 11.204 Indonesian tokens and 5.613 English tokens. Each tweet is labelled with an emotion, i.e., cinta (love), takut (fear), sedih (sadness), senang (joy), or marah (anger).
"""
_HOMEPAGE = "https://github.com/ir-nlp-csui/emotcmt"
_LICENSE = "MIT"
# Raw CSV served straight from the upstream GitHub repository; the dataset
# ships only a single split, exposed here as "test".
_URLs = {
    "test": "https://raw.githubusercontent.com/ir-nlp-csui/emotcmt/main/codeswitch_emotion.csv"
}
_SUPPORTED_TASKS = [Tasks.EMOTION_CLASSIFICATION]
# Version of the upstream data vs. version of the SeaCrowd packaging.
_SOURCE_VERSION = "1.0.0"
_SEACROWD_VERSION = "2024.06.20"
class EmotCMT(datasets.GeneratorBasedBuilder):
    """EmotCMT: Indonesian-English code-mixed emotion classification.

    825 tweets, each labelled with one of five emotions
    (cinta, takut, sedih, senang, marah). Exposes two configs:
    the raw "source" schema and the unified "seacrowd_text" schema.
    """

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="emotcmt_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="EmotCMT source schema",
            schema="source",
            subset_id="emotcmt",
        ),
        SEACrowdConfig(
            name="emotcmt_seacrowd_text",
            version=datasets.Version(_SEACROWD_VERSION),
            description="EmotCMT Nusantara schema",
            schema="seacrowd_text",
            subset_id="emotcmt",
        ),
    ]
    DEFAULT_CONFIG_NAME = "emotcmt_source"

    def _info(self):
        """Build the DatasetInfo whose features depend on the active schema.

        Raises:
            ValueError: if the config carries an unknown schema (previously
                this fell through and crashed with a NameError on `features`).
        """
        if self.config.schema == "source":
            features = datasets.Features({"tweet": datasets.Value("string"), "label": datasets.Value("string")})
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(["cinta", "takut", "sedih", "senang", "marah"])
        else:
            # Fail loudly and consistently with _generate_examples instead of
            # letting an unbound `features` surface as a NameError.
            raise ValueError(f"Invalid config: {self.config.name}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the single upstream CSV and expose it as the TEST split."""
        test_csv_path = Path(dl_manager.download_and_extract(_URLs["test"]))
        data_files = {
            "test": test_csv_path,
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_files["test"]},
            )
        ]

    def _generate_examples(self, filepath: Path):
        """Yield (id, example) pairs from the CSV, shaped by the active schema.

        The CSV has two columns (label, sentence); reset_index() adds the
        positional row id used as the example key.
        """
        df = pd.read_csv(filepath).reset_index()
        df.columns = ["id", "label", "sentence"]
        if self.config.schema == "source":
            for row in df.itertuples():
                ex = {"tweet": row.sentence, "label": row.label}
                yield row.id, ex
        elif self.config.schema == "seacrowd_text":
            for row in df.itertuples():
                ex = {
                    "id": str(row.id),
                    "text": row.sentence,
                    "label": row.label
                }
                yield row.id, ex
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
|