Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Lm1b
 paperswithcode_id: billion-word-benchmark
 ---
 
lm1b.py CHANGED
@@ -17,8 +17,8 @@
 """The Language Model 1 Billion dataset."""
 
 
-import glob
 import os
+from fnmatch import fnmatch
 
 import datasets
 
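This import swap is the crux of the sync: the 1.16.0 release line of `datasets` pushed dataset scripts toward streaming, where files are read out of the downloaded archive rather than from an extracted directory. `glob.glob` can only enumerate real files on disk, while `fnmatch` matches a pattern against any string, including tar member names. A minimal sketch of the difference (the member path is illustrative, modeled on the archive layout used below in the script; it is not from the diff):

```python
# Sketch only: fnmatch filters archive member names as plain strings,
# with no filesystem access, which is exactly what streaming needs.
from fnmatch import fnmatch

member = (
    "1-billion-word-language-modeling-benchmark-r13output/"
    "training-monolingual.tokenized.shuffled/news.en-00042-of-00100"
)
pattern = (
    "1-billion-word-language-modeling-benchmark-r13output/"
    "training-monolingual.tokenized.shuffled/news.en-*"
)
print(fnmatch(member, pattern))  # True
```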
@@ -55,8 +55,8 @@ modeling. This has almost one billion words in the training data.
 
 _DOWNLOAD_URL = "http://www.statmt.org/lm-benchmark/" "1-billion-word-language-modeling-benchmark-r13output.tar.gz"
 _TOP_LEVEL_DIR = "1-billion-word-language-modeling-benchmark-r13output"
-_TRAIN_FILE_FORMAT = os.path.join(_TOP_LEVEL_DIR, "training-monolingual.tokenized.shuffled", "news.en-*")
-_HELDOUT_FILE_FORMAT = os.path.join(_TOP_LEVEL_DIR, "heldout-monolingual.tokenized.shuffled", "news.en.heldout-*")
+_TRAIN_FILE_FORMAT = "/".join([_TOP_LEVEL_DIR, "training-monolingual.tokenized.shuffled", "news.en-*"])
+_HELDOUT_FILE_FORMAT = "/".join([_TOP_LEVEL_DIR, "heldout-monolingual.tokenized.shuffled", "news.en.heldout-*"])
 
 
 class Lm1bConfig(datasets.BuilderConfig):
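The two pattern constants change in the same spirit: `os.path.join` builds a local filesystem path whose separator depends on the host OS, while tar members are always named with forward slashes, so the patterns are now assembled with `"/".join`. A small illustration (my snippet, not part of the diff):

```python
import os

parts = [
    "1-billion-word-language-modeling-benchmark-r13output",
    "heldout-monolingual.tokenized.shuffled",
    "news.en.heldout-*",
]
print("/".join(parts))       # forward slashes: always matches tar member names
print(os.path.join(*parts))  # uses "\\" on Windows, so it could never match
```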
@@ -71,14 +71,6 @@ class Lm1bConfig(datasets.BuilderConfig):
         super(Lm1bConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
 
 
-def _train_data_filenames(tmp_dir):
-    return sorted(glob.glob(os.path.join(tmp_dir, _TRAIN_FILE_FORMAT)))
-
-
-def _test_data_filenames(tmp_dir):
-    return sorted(glob.glob(os.path.join(tmp_dir, _HELDOUT_FILE_FORMAT)))
-
-
 class Lm1b(datasets.GeneratorBasedBuilder):
     """1 Billion Word Language Model Benchmark dataset."""
 
@@ -99,21 +91,23 @@ class Lm1b(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        lm1b_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
-
-        train_files = _train_data_filenames(lm1b_path)
-        test_files = _test_data_filenames(lm1b_path)
+        archive = dl_manager.download(_DOWNLOAD_URL)
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_files}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"files": test_files}),
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"files": dl_manager.iter_archive(archive), "pattern": _TRAIN_FILE_FORMAT},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"files": dl_manager.iter_archive(archive), "pattern": _HELDOUT_FILE_FORMAT},
+            ),
         ]
 
-    def _generate_examples(self, files):
-        for filepath in files:
-            logger.info("generating examples from = %s", filepath)
-            with open(filepath, encoding="utf-8") as f:
+    def _generate_examples(self, files, pattern):
+        for path, f in files:
+            if fnmatch(path, pattern):
                 for idx, line in enumerate(f):
-                    yield "%s_%d" % (os.path.basename(filepath), idx), {
-                        "text": line.strip(),
+                    yield "%s_%d" % (os.path.basename(path), idx), {
+                        "text": line.decode("utf-8").strip(),
                     }
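Taken together, the rewrite downloads the tarball once and iterates over it in place: `dl_manager.iter_archive` yields `(member_name, file_object)` pairs, the new `pattern` kwarg routes members to the train or test split via `fnmatch`, and because the yielded file objects are binary, each line is decoded explicitly with `line.decode("utf-8")` rather than opening files with `encoding="utf-8"`. The practical payoff is that the dataset becomes streamable; a hedged usage sketch, assuming the updated script is what the Hub serves for `lm1b`:

```python
from datasets import load_dataset

# Stream examples straight from the remote archive, without first
# downloading and extracting the full benchmark to local disk.
stream = load_dataset("lm1b", split="train", streaming=True)
print(next(iter(stream)))  # {'text': '...'}
```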
|