system (HF staff) committed
Commit 580b51a
1 Parent(s): 1edcd4d

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
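
In short, this update makes the WikiMovies loading script archive-aware: instead of downloading and extracting the MovieQA archive to disk (`download_and_extract` plus `os.path.join`), the script now downloads the raw archive and reads the split files directly out of it with `dl_manager.iter_archive`. This matches the streaming-oriented changes described in the 1.16.0 release notes linked above. A minimal usage sketch, assuming `datasets >= 1.16.0` is installed:

```python
from datasets import load_dataset

# Regular loading: the archive is downloaded and cached as before.
ds = load_dataset("wiki_movies", split="train")

# Streaming: examples are read straight out of the archive without
# extracting it, which this commit's iter_archive change enables.
stream = load_dataset("wiki_movies", split="train", streaming=True)
print(next(iter(stream)))  # {"question": "...", "answer": "..."}
```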

Files changed (2):
  1. README.md +1 -0
  2. wiki_movies.py +18 -24
README.md CHANGED

```diff
@@ -1,4 +1,5 @@
 ---
+pretty_name: WikiMovies
 annotations_creators:
 - crowdsourced
 language_creators:
```
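
The README change adds a single `pretty_name` field to the dataset card's YAML front matter, which the Hub can use as a human-readable display name. Such front matter is also easy to read programmatically; a minimal sketch using PyYAML (the local file path is an assumption, not part of this commit):

```python
import yaml  # PyYAML

# The dataset card starts with a YAML block delimited by "---" lines;
# split the file on the first two delimiters and parse the block.
with open("README.md", encoding="utf-8") as f:
    _, front_matter, _ = f.read().split("---", 2)

metadata = yaml.safe_load(front_matter)
print(metadata["pretty_name"])  # -> WikiMovies
```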
wiki_movies.py CHANGED

```diff
@@ -17,8 +17,6 @@ It was built with the following goals in mind: (i) machine learning techniques s
 """
 
 
-import os
-
 import datasets
 
 
@@ -85,49 +83,45 @@ class WikiMovies(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        archive = dl_manager.download(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "movieqa", "questions", "wiki_entities", "wiki-entities_qa_train.txt"
-                    ),
-                    "split": "train",
+                    "filepath": "/".join(["movieqa", "questions", "wiki_entities", "wiki-entities_qa_train.txt"]),
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "movieqa", "questions", "wiki_entities", "wiki-entities_qa_test.txt"
-                    ),
-                    "split": "test",
+                    "filepath": "/".join(["movieqa", "questions", "wiki_entities", "wiki-entities_qa_test.txt"]),
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "movieqa", "questions", "wiki_entities", "wiki-entities_qa_dev.txt"
-                    ),
-                    "split": "dev",
+                    "filepath": "/".join(["movieqa", "questions", "wiki_entities", "wiki-entities_qa_dev.txt"]),
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, files):
         # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
         # The key is not important, it's more here for legacy reason (legacy from tfds)
 
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                tmp_data = row.split("\t")
-                tmp_question = tmp_data[0][1:]
-                yield id_, {
-                    "question": tmp_question,
-                    "answer": tmp_data[1],
-                }
+        for path, f in files:
+            if path == filepath:
+                for id_, row in enumerate(f):
+                    tmp_data = row.decode("utf-8").split("\t")
+                    tmp_question = tmp_data[0][1:]
+                    yield id_, {
+                        "question": tmp_question,
+                        "answer": tmp_data[1],
+                    }
+                break
```
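
Two details of the new code are worth calling out: member paths inside an archive always use forward slashes regardless of the host OS, which is why the split paths are now built with `"/".join([...])` rather than `os.path.join`, and `iter_archive` yields binary file objects, hence the added `row.decode("utf-8")`. For readers unfamiliar with `dl_manager.iter_archive`: it yields `(path, file_object)` pairs straight from the downloaded archive, with no extraction step. A minimal standalone sketch of the same pattern built on the standard-library `tarfile` module (the local archive filename is a placeholder assumption):

```python
import tarfile

def iter_archive(archive_path):
    # Yield (member_path, binary_file_object) pairs from a tar archive
    # without extracting anything to disk -- the same contract the
    # updated _generate_examples consumes.
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

train_file = "movieqa/questions/wiki_entities/wiki-entities_qa_train.txt"
for path, f in iter_archive("movieqa.tar.gz"):  # placeholder filename
    if path == train_file:
        for row in f:
            tmp_data = row.decode("utf-8").split("\t")
            print({"question": tmp_data[0][1:], "answer": tmp_data[1]})
        break
```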