Datasets:
Update dataset file.
Browse files — nist_lpbf_scan_tracks.py (+26, −19)
nist_lpbf_scan_tracks.py
CHANGED
@@ -17,6 +17,7 @@
|
|
17 |
import os
|
18 |
|
19 |
import datasets
|
|
|
20 |
|
21 |
|
22 |
# # TODO: Add BibTeX citation
|
@@ -174,22 +175,28 @@ class AMDataset(datasets.GeneratorBasedBuilder):
|
|
174 |
]
|
175 |
|
176 |
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
185 |
-
|
186 |
-
|
187 |
-
|
188 |
-
|
189 |
-
|
190 |
-
|
191 |
-
|
192 |
-
|
193 |
-
|
194 |
-
|
195 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
import os
|
18 |
|
19 |
import datasets
|
20 |
+
import pickle
|
21 |
|
22 |
|
23 |
# # TODO: Add BibTeX citation
|
|
|
175 |
]
|
176 |
|
177 |
# method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
|
178 |
+
def _generate_examples(self, files):
|
179 |
+
# TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
|
180 |
+
# The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
|
181 |
+
|
182 |
+
for index, file in enumerate(files):
|
183 |
+
with open(file, "rb") as f:
|
184 |
+
track = pickle.load(f)
|
185 |
+
yield index, track
|
186 |
+
|
187 |
+
# with open(filepath, encoding="utf-8") as f:
|
188 |
+
# for key, row in enumerate(f):
|
189 |
+
# data = json.loads(row)
|
190 |
+
# if self.config.name == "raw":
|
191 |
+
# # Yields examples as (key, example) tuples
|
192 |
+
# yield key, {
|
193 |
+
# "sentence": data["sentence"],
|
194 |
+
# "option1": data["option1"],
|
195 |
+
# "answer": "" if split == "test" else data["answer"],
|
196 |
+
# }
|
197 |
+
# else:
|
198 |
+
# yield key, {
|
199 |
+
# "sentence": data["sentence"],
|
200 |
+
# "option2": data["option2"],
|
201 |
+
# "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
|
202 |
+
# }
|