Datasets:

Modalities:
Tabular
Text
Libraries:
Datasets
License:
kleinay committed on
Commit
34b2b51
1 Parent(s): e7205eb
Files changed (1) hide show
  1. qa_adj.py +213 -0
qa_adj.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """A Dataset loading script for the QA-Adj dataset."""
16
+
17
+
18
+ from dataclasses import dataclass
19
+ from typing import Optional, Tuple, Union, Iterable, Set
20
+ from pathlib import Path
21
+ import itertools
22
+ import pandas as pd
23
+ import datasets
24
+
25
+
# Human-readable summary shown on the dataset's Hub page.
_DESCRIPTION = """\
The dataset contains question-answer pairs to capture adjectival semantics.
This dataset was annotated by selected workers from Amazon Mechanical Turk.
"""

# Full MIT license text, embedded verbatim in the DatasetInfo.
_LICENSE = """MIT License

Copyright (c) 2022 Ayal Klein (kleinay)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""

# Zip archive holding the train/dev/test (and comparison) CSV files.
URL = "https://github.com/kleinay/QA-Adj-Dataset/raw/main/QAADJ_Dataset.zip"

# Domains the loader accepts in the `domains` config option.
# NOTE(review): name is misspelled ("SUPPOERTED"); kept as-is because the
# builder class below references it by this exact name.
SUPPOERTED_DOMAINS = {"wikinews", "wikipedia"}
@dataclass
class QAAdjBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for the QA-Adj dataset loader."""
    # "all", one supported domain name, or an iterable subset of the
    # acceptable domains (see SUPPOERTED_DOMAINS).
    domains: Union[str, Iterable[str]] = "all" # can provide also a subset of acceptable domains.
    # When True, additional comparison splits (e.g. "propbank") are exposed.
    full_dataset: bool = False
+
class QaAdj(datasets.GeneratorBasedBuilder):
    """QAAdj: Question-Answer based semantics for adjectives.

    Each example pairs an adjectival predicate in a sentence with up to four
    question-answer slots (object / domain / reference / extent); a missing
    slot is represented by an empty question string and an empty answer list.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = QAAdjBuilderConfig

    BUILDER_CONFIGS = [
        QAAdjBuilderConfig(
            name="default", version=VERSION,
            description="This provides the QAAdj dataset - train, dev and test",
        ),
        QAAdjBuilderConfig(
            name="full", version=VERSION, full_dataset=True,
            description="""This provides the QAAdj dataset including gold reference
            (300 expert-annotated instances) and propbank comparison instances""",
        ),
    ]

    DEFAULT_CONFIG_NAME = (
        "default"  # It's not mandatory to have a default configuration. Just use one if it make sense.
    )

    def _info(self):
        """Return the DatasetInfo: feature schema, description and license."""
        features = datasets.Features(
            {
                "sentence": datasets.Value("string"),
                "sent_id": datasets.Value("string"),
                # Token span of the adjectival predicate within the sentence.
                "predicate_idx": datasets.Value("int32"),
                "predicate_idx_end": datasets.Value("int32"),
                "predicate": datasets.Value("string"),
                "object_question": datasets.Value("string"),
                "object_answer": datasets.Sequence(datasets.Value("string")),
                "domain_question": datasets.Value("string"),
                "domain_answer": datasets.Sequence(datasets.Value("string")),
                "reference_question": datasets.Value("string"),
                "reference_answer": datasets.Sequence(datasets.Value("string")),
                "extent_question": datasets.Value("string"),
                "extent_answer": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No canonical (input, target) pair for as_supervised=True.
            supervised_keys=None,
            license=_LICENSE,
        )

    def _resolve_domains(self) -> Set[str]:
        """Normalize ``self.config.domains`` into a validated set of domain names.

        Accepts "all", a single supported domain string, or an iterable of
        domain names (silently intersected with the supported set).

        Raises:
            ValueError: if no requested domain is supported.
        """
        requested = self.config.domains
        if requested == "all":
            return set(SUPPOERTED_DOMAINS)
        if isinstance(requested, str):
            if requested in SUPPOERTED_DOMAINS:
                return {requested}
            raise ValueError(f"Unrecognized domain '{requested}'; only {SUPPOERTED_DOMAINS} are supported")
        domains = set(requested) & SUPPOERTED_DOMAINS
        if len(domains) == 0:
            raise ValueError(f"Unrecognized domains '{requested}'; only {SUPPOERTED_DOMAINS} are supported")
        return domains

    def _split_generators(self, dl_manager: datasets.utils.download_manager.DownloadManager):
        """Download and extract the corpus zip; return one SplitGenerator per CSV."""
        # Fix: the original initialized `domains: Set[str] = []` — a *list*
        # despite the Set annotation. Domain resolution now lives in
        # _resolve_domains(), which always returns a set.
        self.config.domains = self._resolve_domains()
        # NOTE(review): the resolved domains are stored on the config but
        # _generate_examples does not filter rows by domain — confirm whether
        # per-domain filtering was intended downstream.

        self.corpus_base_path = Path(dl_manager.download_and_extract(URL))

        # (split name, CSV filename) pairs; each CSV lives in the extracted zip.
        split_csvs = [
            (datasets.Split.TRAIN, "train.csv"),
            (datasets.Split.VALIDATION, "dev.csv"),
            (datasets.Split.TEST, "test.csv"),
        ]
        if self.config.full_dataset:
            # TODO: change "reference_data.csv" to be in the same format and add
            # it to the zip file, then expose a "gold_reference" split here too.
            split_csvs.append(("propbank", "propbank_comparison_data.csv"))

        return [
            datasets.SplitGenerator(
                name=split_name,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"csv_fn": self.corpus_base_path / csv_name},
            )
            for split_name, csv_name in split_csvs
        ]

    def _generate_examples(self, csv_fn):
        """Yield (key, example) pairs read from one split's CSV file."""
        df = pd.read_csv(csv_fn)
        for counter, row in df.iterrows():
            yield counter, {
                "sentence": row["Input.sentence"],
                "sent_id": row["Input.qasrl_id"],
                "predicate_idx": row["Input.adj_index_start"],
                "predicate_idx_end": row["Input.adj_index_end"],
                "predicate": row["Input.target"],
                "object_question": self._get_optional_question(row.object_q),
                "object_answer": self._get_optional_answer(row["Answer.answer1"]),
                "domain_question": self._get_optional_question(row.domain_q),
                "domain_answer": self._get_optional_answer(row["Answer.answer3"]),
                "reference_question": self._get_optional_question(row.comparison_q),
                "reference_answer": self._get_optional_answer(row["Answer.answer2"]),
                "extent_question": self._get_optional_question(row.degree_q),
                "extent_answer": self._get_optional_answer(row["Answer.answer4"]),
            }

    def _get_optional_answer(self, val):
        """Split a '+'-joined answer cell into a list; NaN means no answer."""
        if pd.isnull(val):  # no answer
            return []
        return val.split("+")

    def _get_optional_question(self, val):
        """Return the question string, or "" when the cell is NaN (no question)."""
        return "" if pd.isnull(val) else val