smallv0221 committed on
Commit
9208677
1 Parent(s): 51625df

Create dureader_robust.py

Browse files

Add dureader robust dataset

Files changed (1) hide show
  1. dureader_robust.py +118 -0
dureader_robust.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
3
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Lint as: python3
18
+
19
+
20
+ import json
21
+ import os
22
+
23
+ import datasets
24
+ from datasets.tasks import QuestionAnsweringExtractive
25
+
26
+
27
# Module-level logger namespaced to this dataset script.
logger = datasets.logging.get_logger(__name__)


# Archive with the train/dev/test JSON splits (SQuAD format), hosted on Baidu BOS.
_URL = "https://bj.bcebos.com/paddlenlp/datasets/dureader_robust-data.tar.gz"
31
+
32
+
33
class DureaderRobustConfig(datasets.BuilderConfig):
    """Configuration for the DuReader-robust dataset builder."""

    def __init__(self, **kwargs):
        """Create a DureaderRobustConfig.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
43
+
44
+
45
# NOTE(review): _DESCRIPTION and _CITATION were referenced in _info() but never
# defined anywhere in this file, so building the dataset raised NameError.
# Defined here from the paper linked as homepage (arXiv:2004.11142).
_DESCRIPTION = """\
DuReader_robust is a Chinese extractive machine reading comprehension dataset
in SQuAD format. It is designed to evaluate the robustness of MRC models,
covering over-sensitivity, over-stability and generalization challenges.
See https://arxiv.org/abs/2004.11142 for details.
"""

_CITATION = """\
@article{tang2020dureaderrobust,
  title={DuReader_robust: A Chinese Dataset Towards Evaluating the Robustness and Generalization of Machine Reading Comprehension in Real-World Applications},
  author={Tang, Hongxuan and Li, Hongyu and Liu, Jing and Hong, Yu and Wu, Hua and Wang, Haifeng},
  journal={arXiv preprint arXiv:2004.11142},
  year={2020}
}
"""


class DureaderRobust(datasets.GeneratorBasedBuilder):
    """DuReader_robust: Chinese extractive question answering (SQuAD-style JSON)."""

    BUILDER_CONFIGS = [
        DureaderRobustConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
        ),
    ]

    def _info(self):
        """Return dataset metadata: features, homepage, citation, task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.features.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            homepage="https://arxiv.org/abs/2004.11142",
            citation=_CITATION,
            task_templates=[
                QuestionAnsweringExtractive(
                    question_column="question", context_column="context", answers_column="answers"
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and map each split to its JSON file."""
        dl_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(dl_dir, "dureader_robust-data")
        split_files = [
            (datasets.Split.TRAIN, "train.json"),
            (datasets.Split.VALIDATION, "dev.json"),
            (datasets.Split.TEST, "test.json"),
        ]
        return [
            datasets.SplitGenerator(
                name=split, gen_kwargs={"filepath": os.path.join(data_dir, filename)}
            )
            for split, filename in split_files
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a SQuAD-format JSON file.

        Args:
            filepath: path to a JSON file with the SQuAD layout
                (``data`` -> ``paragraphs`` -> ``qas``).
        """
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8") as f:
            durobust = json.load(f)
            for article in durobust["data"]:
                title = article.get("title", "")
                for paragraph in article["paragraphs"]:
                    context = paragraph["context"]  # do not strip leading blank spaces GH-2585
                    for qa in paragraph["qas"]:
                        # The test split carries no gold answers; default to an
                        # empty list (original used '' — same iteration result,
                        # but the wrong type for a list-of-dicts field).
                        qa_answers = qa.get("answers", [])
                        answer_starts = [answer["answer_start"] for answer in qa_answers]
                        answers = [answer["text"] for answer in qa_answers]
                        # Features currently used are "context", "question", and "answers".
                        # Others are extracted here for the ease of future expansions.
                        yield key, {
                            "title": title,
                            "context": context,
                            "question": qa["question"],
                            "id": qa["id"],
                            "answers": {
                                "answer_start": answer_starts,
                                "text": answers,
                            },
                        }
                        key += 1