parallel decompress from olm/
wikipedia.py
CHANGED  +56 -46
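Because the change below swaps datasets.BeamBasedBuilder for datasets.GeneratorBasedBuilder, the script no longer needs an Apache Beam runner to build the dataset. A minimal usage sketch, assuming the edited script is saved locally as wikipedia.py and keeps the WikipediaConfig language/date parameters (the path and config values here are placeholders, not part of the commit):

    from datasets import load_dataset

    # Hypothetical local path to the edited loading script; language/date are
    # placeholder config values passed through to WikipediaConfig.
    ds = load_dataset("./wikipedia.py", language="en", date="20220301", split="train")
    print(ds[0]["title"])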
@@ -23,7 +23,10 @@ import json
 import re
 import xml.etree.cElementTree as etree
 from urllib.parse import quote
+import mwparserfromhell
 from mwparserfromhell.wikicode import Wikicode, Heading
+from multiprocess import Process, Manager
+from tqdm import tqdm
 
 import datasets
 
@@ -905,7 +908,7 @@ class WikipediaConfig(datasets.BuilderConfig):
 _DATE = "20220301"
 
 
-class Wikipedia(datasets.BeamBasedBuilder):
+class Wikipedia(datasets.GeneratorBasedBuilder):
     """Wikipedia dataset."""
 
     # Use mirror (your.org) to avoid download caps.
@@ -935,7 +938,7 @@ class Wikipedia(datasets.BeamBasedBuilder):
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager, pipeline):
+    def _split_generators(self, dl_manager):
         def _base_url(lang):
             return _BASE_URL_TMPL.format(lang=lang.replace("-", "_"), date=self.config.date)
 
@@ -964,9 +967,10 @@ class Wikipedia(datasets.BeamBasedBuilder):
                 xml_urls.append(_base_url(lang) + fname)
 
         # Use dictionary since testing mock always returns the same result.
+
+        print("Dowloading Wikipedia dump")
         downloaded_files = dl_manager.download({"xml": xml_urls})
-        if not pipeline.is_local():
-            downloaded_files = dl_manager.ship_files_with_pipeline(downloaded_files, pipeline)
+        print("Finished downloading Wikipedia dump")
 
         return [
             datasets.SplitGenerator(  # pylint:disable=g-complex-comprehension
@@ -974,43 +978,40 @@ class Wikipedia(datasets.BeamBasedBuilder):
             )
         ]
 
-    def _build_pcollection(self, pipeline, filepaths, language):
-        """Build PCollection of examples in the raw (text) form."""
-        import apache_beam as beam
-        import mwparserfromhell
+    def _generate_examples(self, filepaths, language, no_labels=False):
+
 
         def _extract_content(filepath):
             """Extracts article content from a single WikiMedia XML file."""
             logger.info("generating examples from = %s", filepath)
-            with beam.io.filesystems.FileSystems.open(filepath) as f:
-                f = bz2.BZ2File(filename=f)
-                # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
-                utf_f = codecs.getreader("utf-8")(f)
-                context = etree.iterparse(utf_f, events=("end",))
-                for unused_event, elem in context:
-                    if not elem.tag.endswith("page"):
-                        continue
-                    namespace = elem.tag[:-4]
-                    title = elem.find(f"./{namespace}title").text
-                    ns = elem.find(f"./{namespace}ns").text
-                    id_ = elem.find(f"./{namespace}id").text
-                    red_ = elem.find(f"./{namespace}redirect")
-
-                    # Filter pages that are not in the "main" namespace.
-                    if ns != "0":
-                        elem.clear()
-                        continue
-
-                    raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
+            content = []
+            f = bz2.BZ2File(filename=filepath)
+            # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
+            utf_f = codecs.getreader("utf-8")(f)
+            context = etree.iterparse(utf_f, events=("end",))
+            for unused_event, elem in context:
+                if not elem.tag.endswith("page"):
+                    continue
+                namespace = elem.tag[:-4]
+                title = elem.find(f"./{namespace}title").text
+                ns = elem.find(f"./{namespace}ns").text
+                id_ = elem.find(f"./{namespace}id").text
+                red_ = elem.find(f"./{namespace}redirect")
+
+                # Filter pages that are not in the "main" namespace.
+                if ns != "0":
                     elem.clear()
+                    continue
+
+                raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
+                elem.clear()
 
-                    # Filter redirects.
-                    if raw_content is None or red_ is not None:
-                        beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
-                        continue
+                # Filter redirects.
+                if raw_content is None or red_ is not None:
+                    continue
 
-                    beam.metrics.Metrics.counter(language, "extracted-examples").inc()
-                    yield (id_, title, raw_content)
+                content.append((id_, title, raw_content))
+            return content
 
         def _clean_content(inputs, language):
             """Cleans raw wikicode to extract text."""
@@ -1018,27 +1019,37 @@ class Wikipedia(datasets.BeamBasedBuilder):
             try:
                 text = _parse_and_clean_wikicode(raw_content, parser=mwparserfromhell, language=language)
             except (mwparserfromhell.parser.ParserError) as e:
-                beam.metrics.Metrics.counter(language, "parser-error").inc()
                 logger.error("mwparserfromhell ParseError: %s", e)
                 return
 
             if not text:
-                beam.metrics.Metrics.counter(language, "empty-clean-examples").inc()
                 return
 
             url = _construct_url(title, language)
 
-            beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
+            return id_, {"id": id_, "url": url, "title": title, "text": text}
 
-            yield id_, {"id": id_, "url": url, "title": title, "text": text}
+        print("Parsing and cleaning Wikipedia examples")
+        with Manager() as manager:
+            examples = manager.list()
+            processes = []
+            for filepath in filepaths:
+                def parse_and_clean(examples):
+                    content = _extract_content(filepath)
+                    for obj in tqdm(content):
+                        examples.append(_clean_content(obj, language=language))
+                p = Process(target=parse_and_clean, args=(examples,))
+                p.start()
+                processes.append(p)
 
-        return (
-            pipeline
-            | "Initialize" >> beam.Create(filepaths)
-            | "Extract content" >> beam.FlatMap(_extract_content)
-            | "Distribute" >> beam.transforms.Reshuffle()
-            | "Clean content" >> beam.FlatMap(_clean_content, language=language)
-        )
+            for p in processes:
+                p.join()
+
+            print("Parsed and cleaned Wikipedia examples")
+
+            for example in examples:
+                if example is not None:
+                    yield example
 
 
 def _parse_and_clean_wikicode(raw_content, parser, language):
@@ -1097,7 +1108,6 @@ def _parse_and_clean_wikicode(raw_content, parser, language):
             try_replace_obj(obj)
         for obj in section.ifilter_tags(matches=rm_tag, recursive=True):
             try_remove_obj(obj, section)
-        section.ifilter()
 
         striped_text = _custom_strip_code(section, hierarchical_headings).strip()
         section_text.append(re.sub(re_rm_magic, "", striped_text))
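The new _generate_examples parallelizes extraction with the multiprocess package (a dill-based fork of the standard multiprocessing module that exposes the same Process/Manager API): one worker process per downloaded dump file, with results collected in a Manager().list() proxy shared across processes. A minimal, self-contained sketch of that pattern, separate from the dataset script (the parse_one worker and the input paths below are illustrative only):

    from multiprocess import Manager, Process  # same API as the stdlib multiprocessing

    def parse_one(path, results):
        # Stand-in for _extract_content + _clean_content on a single dump file.
        results.append(f"parsed {path}")

    if __name__ == "__main__":
        filepaths = ["dump-part1.xml.bz2", "dump-part2.xml.bz2"]  # placeholder inputs
        with Manager() as manager:
            results = manager.list()  # proxy list shared across worker processes
            workers = [Process(target=parse_one, args=(fp, results)) for fp in filepaths]
            for w in workers:
                w.start()
            for w in workers:
                w.join()
            # Copy out of the proxy before the manager shuts down.
            print(list(results))

Each append on the proxy list is routed through the manager process, which is convenient for gathering per-file results but adds IPC overhead when the result set is very large.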