# SuperWikiNEXT-32B / scripts / wikipedia_soup.py
import concurrent.futures as conc
import pathlib
import re
import string
import traceback
import typing
import unicodedata
import orjson
import tqdm
import typer
from bs4 import BeautifulSoup, NavigableString
from markdownify import MarkdownConverter
from mediawiki_soup import MediaWikiSoup
from wikipedia_template import citations, stubs, section_reference_list, redirects_list
app = typer.Typer()
def is_stub(soup: BeautifulSoup, meta: dict):
"""Checks if the articles is a stub
Stub detection can be done by checking if any `mw:WikiLink` href links to a "Wikipedia:Stub".
If it's a stub, it drops the article.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
for wikilink in soup.find_all("a", attrs={"rel": "mw:WikiLink"}):
href = wikilink.get("href")
if href and href.lstrip("./").lower().replace("_", " ") in stubs:
# print("Drop due to stub")
return soup, {"_drop": True}
# return soup, {**meta, "stub": True}
return soup, meta
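# Illustrative (hypothetical) Parsoid markup matched by is_stub:
#   <a rel="mw:WikiLink" href="./Wikipedia:Stub">stub</a>
# "./Wikipedia:Stub".lstrip("./").lower().replace("_", " ") -> "wikipedia:stub",
# which is then looked up in `stubs`.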
def style_merge(soup: BeautifulSoup, meta: dict):
"""Collapses <style> tags with `data-mw` attribute into the next sibling element
    Some templates have <style> tags which can be annoying to process, so we remove them at this stage.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
for style in soup.find_all("style", attrs={"data-mw": True}):
if style.next_sibling:
# print(style.parent)
# print("STYLE>>>>>>>",style)
has_correct_sib = False
for sibling in style.next_siblings:
if sibling == "\n" or not sibling:
continue
elif isinstance(sibling, NavigableString):
continue
else:
has_correct_sib = True
break
# print(">>>>>>>",list(style.next_siblings))
if not has_correct_sib:
print("Incorrect sibling?", meta["title"])
else:
sibling["data-mw"] = style["data-mw"]
style.decompose()
for style in soup.find_all("style"):
style.decompose()
# print(soup, meta)
return soup, meta
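# Illustrative (hypothetical) before/after for style_merge:
#   <style data-mw='{"parts": [...]}'>.box{...}</style><div>box</div>
#   -> <div data-mw='{"parts": [...]}'>box</div>
# The data-mw payload is moved onto the next element sibling before the
# <style> tag itself is decomposed.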
rgx = re.compile(r"\|Lsjbot\|")
def is_lsjbot(soup: BeautifulSoup, meta: dict):
"""Detects lsjbot article.
    A SuperWiki 1.5 filter makes its return. Refer to https://en.wikipedia.org/wiki/Lsjbot
    TL;DR: We remove all Lsjbot-generated articles because they are low-quality bot-generated content.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
if rgx.findall(meta.get("wikitext", "")):
return soup, {"_drop": True}
return soup, meta
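# The regex matches a literal "|Lsjbot|" anywhere in the raw wikitext,
# e.g. (hypothetical) a bot-creation template like "{{Bot created|Lsjbot|...}}".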
def filter_href(soup: BeautifulSoup, meta: dict):
"""Removes all <a> where the text matches the href.
    There was an article where the link looked like this:
    <a href="https://example.com">https://example.com</a>
    That isn't exactly ideal, so we have this filter.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
for href in soup.find_all("a"):
if href.get_text(strip=True).lower() == href.get("href", "").lower():
href.decompose()
return soup, meta
def pull_title(soup: BeautifulSoup, meta: dict):
"""Extracts out the <title> element
    When converting to markdown (markdownify), <title> elements get converted to text too, which is not ideal.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
title = soup.find("title")
if title:
title = title.extract()
return soup, {**meta, "title": title.get_text() if title else None}
def filter_redirect(soup: BeautifulSoup, meta: dict):
"""Removes... Redirect icons?
Kinda unsure now that I think about it. But I'm sure it's for a good reason!
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
for wikilink in soup.find_all("div", attrs={"rel": "mw:WikiLink"}):
if wikilink is None:
continue
if wikilink.attrs is None:
print("NoneWikilink?", wikilink.parent)
continue
        data_mw_raw = wikilink.attrs.get("data-mw")
        if not data_mw_raw:
            continue
        try:
            data_mw = orjson.loads(data_mw_raw)
        except orjson.JSONDecodeError:
            continue
        parts = data_mw.get("parts")
        if not parts:
            continue
        template = parts[0].get("template") or {}
        href = template.get("target", {}).get("href", "")
        if href.lstrip("./").lower() in redirects_list:
            wikilink.decompose()
return soup, meta
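# Sketch of the `data-mw` shape filter_redirect expects (hypothetical values):
#   {"parts": [{"template": {"target": {"wt": "Soft redirect",
#                                       "href": "./Template:Soft_redirect"}}}]}
# The template href, stripped of "./" and lowercased, is looked up in
# `redirects_list`.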
def filter_cite_needed(soup: BeautifulSoup, meta: dict):
"""Filters out [citation needed] tags.
Similar to citations, we also have to test for complete removal of "[]",
since there are some articles where it's only "citation needed" as a superscript html tag.
Args:
        soup (BeautifulSoup): The BeautifulSoup4 article
        meta (dict): Metadata
    Returns:
        BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
for wikilink in soup.find_all("a", attrs={"rel": "mw:WikiLink"}):
if wikilink is None:
continue
if wikilink.attrs is None:
print("NoneWikilink?", meta["title"])
continue
href = wikilink.get("href")
if href:
href = href.lstrip("./").lower().replace("_", " ")
if href in citations:
if "[" in wikilink.get_text():
# print("Decompose_raw_wikilink",meta["title"])
# the bracket is included. So we don't have to do anything.
wikilink.decompose()
else:
                    # Backtrack up to two parent levels looking for a "[" bracket.
                    backtrack = 2
                    parent = wikilink
                    while backtrack > 0:
                        if parent is None:
                            break
                        if "[" in parent.get_text(strip=True):
                            # print("Decompose_parented_wikilink",meta["title"])
                            parent.decompose()
                            break
                        parent = parent.parent
                        backtrack -= 1
                    if parent is None or not parent.decomposed:
                        # Nothing matched while backtracking; blindly remove the link itself.
                        wikilink.decompose()
return soup, meta
def filter_cite(soup: BeautifulSoup, meta: dict):
"""Filters out citations.
    Citations look trivial to handle, but there are a lot of "gotchas".
Though in general, anything with `sup.reference` is likely a reference that can be removed.
Additionally, we do a test for [] because they *should* be included when selecting `sup.reference`.
...Else, we try to remove the "[]" surrounding the `sup.reference` tag (While being very cautious in doing so.)
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
for superscript in soup.select("sup.reference"):
if "[" in superscript.get_text():
# the bracket is included. So we don't have to do anything.
superscript.decompose()
else:
if superscript is None or superscript.name is None:
continue
if superscript.get_text().lower().startswith("note"):
superscript.decompose()
print("Citation: Note X. UnsureRemoval")
elif superscript.attrs and superscript.attrs.get("data-mw"):
try:
data_mw = orjson.loads(superscript.attrs.get("data-mw"))
except orjson.JSONDecodeError:
print("Citation with datamw-decode?", meta["title"], superscript)
superscript.decompose()
continue
if not data_mw.get("parts"):
print(
"Citation with datamw-decode parts missing?",
meta["title"],
superscript,
)
superscript.decompose()
continue
# print(data_mw)
                parts = data_mw.get("parts")
                template = parts[0].get("template") or {}
                href = template.get("target", {}).get("href", "")
                if href.lstrip("./").lower() == "template:rp":
                    superscript.decompose()
else:
                # This might happen. Around 90% of the time, it's fine to remove it.
# print("Citation with no bracket. BlindRemoval", meta["title"], superscript)
superscript.decompose()
# backtrack = 3
# parent = superscript
# while backtrack > 0:
# parent = superscript.parent
# if parent and "[" in parent.get_text():
# parent.decompose()
# break
# if not parent.decomposed:
# # blindly remove it.
# superscript.decompose()
return soup, meta
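# Typical citation markup targeted by filter_cite (illustrative):
#   <sup class="reference"><a href="#cite_note-1">[1]</a></sup>
# The "[1]" text contains the bracket, so the whole <sup> is decomposed outright.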
def remove_msg_boxes(soup: BeautifulSoup, meta: dict):
"""Removes amboxes/omboxes
    Previously this was known to be a "ritual", but given that almost all amboxes and omboxes are notifications,
    it's quite likely a safe assumption to remove them like so.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
selects = soup.select('table[class~="ambox"], table[class~="ombox"]')
for msg_box in selects:
        if msg_box is None:
            continue
msg_box.decompose()
return soup, meta
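# Note: the CSS selector [class~="ambox"] matches when "ambox" is one of the
# space-separated class tokens, e.g. (illustrative) <table class="ambox ambox-content">.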
def remove_infobox(soup: BeautifulSoup, meta: dict):
"""Removes infoboxes.
    This is pretty self-explanatory: the table at the right-hand side of some articles, listing a person's roles or a company's information.
    As those tables are complex, we extract them for others to examine and work out how to reliably extract the data.
    (It's really dense but very useful data.)
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
selects = ", ".join(
["table.infobox", "table.sidebar.vcard.hlist", "table.infobox.vcard"]
)
selects = soup.select(selects)
extracted = []
    for infobox in selects:
        extracted.append(str(infobox.extract()))
return soup, {**meta, "infobox": extracted}
def only_tables_list(soup: BeautifulSoup, meta: dict):
"""Filters out article where they are listicles.
"Listicles" isn't the exact term here since it includes tables too but in general,
this filter drops articles where the content mostly consists of tables or lists.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
tablelists_count = 0
selects = soup.select("table, ul, ol")
for tablelist in selects:
if not tablelist.find_parents(tablelist.name) and not tablelist.find_parents(
["table", "ul", "ol"]
):
tablelists_count += get_raw_text_length(
tablelist.get_text().replace("\n", "").replace(" ", "")
)
all_text = get_raw_text_length(soup.get_text().replace("\n", "").replace(" ", ""))
if all_text == 0:
tablelist_ratio = 1
else:
tablelist_ratio = tablelists_count / all_text
if tablelist_ratio > 0.5: # Now that it actually works...
# print("Drop due to tablelist", tablelist_ratio, tablelists_count, all_text, meta["title"])
return soup, {
"_drop": True, # NOTE: Trial test.
**meta,
"mostly_tablelist": True,
"tablelist_ratio": [tablelists_count, all_text, tablelist_ratio],
}
return soup, {
**meta,
"mostly_tablelist": False,
"tablelist_ratio": [tablelists_count, all_text, tablelist_ratio],
}
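# Worked example: if tables and lists account for 600 of an article's 1000
# counted characters, tablelist_ratio = 0.6 > 0.5 and the article is dropped
# (the numbers here are hypothetical).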
def remove_tables(soup: BeautifulSoup, meta: dict):
"""Removes tables.
    This filter mainly removes tables that have more <td> elements than characters of text.
    In 1.5 or the older superWIKI, this was known as "Excessive TD elements" or something similar.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
tables = []
for table in soup.select("table"):
        tds = len(table.find_all("td"))
        text_size = len(table.get_text().replace(" ", ""))
        if tds >= text_size and text_size < 50:
# print(table.get_text().replace(" ", ""))
tables.append(str(table.extract()))
return soup, {**meta, "td_tables": tables}
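# Worked example (hypothetical): a navigation grid with 12 <td> cells but only
# 10 non-space characters of text has tds (12) >= text_size (10) and
# text_size < 50, so it is extracted into meta["td_tables"].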
def wikipedia_figures(soup: BeautifulSoup, meta: dict):
"""Removes figure html elements.
Self-explanatory. Remove <figure> html elements.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
for figure_element in soup.select('figure[type~="mw:File"]'):
if figure_element:
            figure_element.decompose()
    return soup, meta
def wikipedia_latex(soup: BeautifulSoup, meta: dict):
"""Cleans up wikipedia latex.
Cleans wikipedia latex stuff. Mainly stripping out multiple math representations.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
    for math_element in soup.select("span.mwe-math-element"):
        img = math_element.find("img")
        if not img:
            math = math_element.find("math")
            if not math:
                # No usable representation; drop the element and move on.
                math_element.decompose()
                continue
            math_text = math.get_text()
        else:
            math_text = img.get("alt", "")
        math_element.string = math_text
return soup, meta
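# Illustrative (hypothetical) markup: wikipedia_latex collapses the whole math
# span to a single text node, preferring the <img> alt text:
#   <span class="mwe-math-element"><img alt="E = mc^2" src="..."/></span> -> "E = mc^2"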
def section_converter(soup: BeautifulSoup, converter: MarkdownConverter):
"""Removes Sections
Contrary to it's name, it removes sections that matches a list found in wikipedia_template.py
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
sections = []
    for section in soup.find_all("section"):
        section_title = section.find(["h1", "h2", "h3", "h4"])
        if section_title:
            section_title = section_title.get_text().lower()
            if section_title in section_reference_list:
                section.extract()
                # print("Section skip:", section_title)
                continue
text_section = converter.convert_soup(section)
text_section = text_section.rstrip()
if text_section:
sections.append(text_section)
# print(sections)
if sections:
return "\n\n".join(sections)
return ""
def final_pass(soup: BeautifulSoup, meta: dict):
"""Final cleanup pass
    Remove data-mw and class attributes since those seem to get included in the markdown, which is not what we want.
Args:
soup (BeautifulSoup): The BeautifulSoup4 article
meta (dict): Metadata
Returns:
BeautifulSoup4, dict: Standard filter chain response (Soup, Metadata)
"""
for elem in soup.find_all(attrs={"class": True}):
elem["class"] = ""
for elem in soup.find_all(attrs={"data-mw": True}):
elem["data-mw"] = ""
return soup, {**meta}
# unicodedata.east_asian_width()
# str.translate() expects a mapping of character *ordinals*, so build the
# table with str.maketrans rather than a plain str-keyed dict.
spaces = str.maketrans(
    {
        "\n": "",
        " ": "",
        "\xa0": "",
        "#": "",
        "[": "",
        "]": "",
        "\u2002": "",
        "\u2003": "",
        **{i: "" for i in string.punctuation},
        **{i: "" for i in string.whitespace},
    }
)
def get_raw_text_length(text: str):
"""Counts text but with smarts
    Double-counts CJK characters, normalizes text, and removes punctuation and whitespace.
Args:
text (str): Text string
Returns:
int: Text length
"""
    text = unicodedata.normalize("NFKC", text)
    cleaned = text.translate(spaces)
    text_length = 0
    for char in cleaned:
        text_length += 2 if unicodedata.east_asian_width(char) in ["W", "F", "A"] else 1
return text_length
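# Worked example: get_raw_text_length("ab, 漢字") strips the ", " and counts
# 'a' + 'b' as 1 each and '漢' + '字' as 2 each (east-asian Wide), giving 6.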
def get_text_length(markdown: str, meta: dict):
"""Counts text but with smarts
Same as get_raw_text_length but in a filter chain format.
Args:
markdown (str): The markdown article
meta (dict): Metadata
Returns:
str, dict: Standard filter chain response (markdown, Metadata)
"""
text_length_final = get_raw_text_length(markdown)
return markdown, {**meta, "text_length": text_length_final}
def sanitize_punctuations(markdown: str, meta: dict):
"""Cleans punctuations
Converts unusual punctuations into standard ones,
remove excessive new lines, fix some weird punctuations.
This mostly affects latin based languagues. full width characters are not touched.
Args:
markdown (str): The markdown article
meta (dict): Metadata
Returns:
str, dict: Standard filter chain response (markdown, Metadata)
"""
markdown = (
markdown.replace(" . ", ". ")
.replace("“", '"')
.replace("”", '"')
.replace("\n\n\n\n", "\n\n")
.replace("\n\n\n", "\n\n")
.replace(' " ', ' "')
.replace(", , ", ", ")
.replace(".,", ".")
)
return markdown, meta
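# Illustrative examples of the substitutions above:
#   'End . Next' -> 'End. Next'
#   '“quoted”'   -> '"quoted"'
#   'a, , b'     -> 'a, b'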
def markdown_size_filter(markdown: str, meta: dict):
"""Removes articles where the text length (from get_text_length) is less than 1000.
Args:
markdown (str): The markdown article
meta (dict): Metadata
Returns:
str, dict: Standard filter chain response (markdown, Metadata)
"""
if meta["text_length"] < 1000:
# print(meta["text_length"],"Too small for", meta["title"])
return markdown, {**meta, "_drop": True}
if "wikitext" in meta:
del meta["wikitext"]
return markdown, meta
@app.command()
def process_root(folder: pathlib.Path, output_folder: pathlib.Path):
futures = []
with conc.ProcessPoolExecutor(max_workers=180) as executor:
for root_folder in folder.iterdir():
if root_folder.is_dir():
processed_root = (output_folder / root_folder.name).resolve()
print("Processing Root", root_folder, processed_root)
                processed_root.mkdir(exist_ok=True, parents=True)
# process_folder(root_folder, output_folder / root_folder.name)
for root_file in root_folder.glob("*.ndjson"):
futures.append(
executor.submit(
process_file,
root_file,
processed_root / root_file.name,
progress=False,
)
)
for future in conc.as_completed(futures):
try:
future.result()
            except Exception as e:
                traceback.print_exception(e)
@app.command()
def process_folder(folder: pathlib.Path, output_folder: pathlib.Path):
if output_folder is not None and not output_folder.is_dir():
output_folder.mkdir(exist_ok=True, parents=True)
with conc.ProcessPoolExecutor(max_workers=180) as executor:
futures = []
for file in folder.glob("*.ndjson"):
futures.append(
executor.submit(
process_file, file, output_folder / file.name, progress=False
)
)
for future in conc.as_completed(futures):
future.result()
@app.command()
def process_file(
file: pathlib.Path,
output_file: typing.Optional[pathlib.Path] = None,
progress: bool = True,
):
soup_instance = MediaWikiSoup()
filter_chain = [
pull_title,
is_stub, # Mark Stubs
is_lsjbot, # Drop Lsjbot articles (Swedish, Cebuano, Waray)
style_merge,
filter_href,
filter_cite_needed,
filter_cite,
filter_redirect,
only_tables_list,
remove_msg_boxes,
remove_infobox,
remove_tables,
wikipedia_latex,
final_pass,
]
markdown_chain = [sanitize_punctuations, get_text_length, markdown_size_filter]
for chain in filter_chain:
soup_instance.add_soup_filter(chain)
for chain in markdown_chain:
soup_instance.add_markdown_filter(chain)
fout = None
if output_file:
fout = open(output_file, "wb")
pbar = None
if progress:
pbar = tqdm.tqdm()
with open(file, "rb") as f:
for line in f:
            try:
                wiki_data = orjson.loads(line)
            except orjson.JSONDecodeError:
                # Skip lines that fail to parse instead of reusing stale data.
                continue
if not wiki_data["article_body"].get("wikitext"):
continue
meta = {"wikitext": wiki_data["article_body"]["wikitext"]}
response = soup_instance.soup_filter(
wiki_data["article_body"]["html"], meta=meta
)
# print(response)
if not response:
continue
markdown = section_converter(response[0], soup_instance.converter)
response = soup_instance.markdown_filter(markdown, response[1])
# print(response)
if response and fout:
fout.write(orjson.dumps({"text": response[0], "meta": response[1]}))
fout.write(b"\n")
fout.flush()
if pbar is not None:
pbar.update(1)
if fout:
fout.close()
if pbar is not None:
pbar.close()
if __name__ == "__main__":
app()
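# Example invocations (typer exposes commands in kebab-case by default; the
# file and folder names below are hypothetical):
#   python wikipedia_soup.py process-file enwiki_0.ndjson out/enwiki_0.ndjson
#   python wikipedia_soup.py process-folder dumps/enwiki out/enwiki
#   python wikipedia_soup.py process-root dumps out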