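"""List sizeable Wikipedias from a wikistats-style JSON export and, optionally,
filter a list of dump URLs down to those wikis.

Example invocation (file names are hypothetical):
    python list_wikis.py wikistats.json --urls dump_urls.txt
"""
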
import pathlib
import typing

import orjson
import typer

app = typer.Typer()


@app.command()
def get_list_wikis(stats: pathlib.Path, urls: typing.Optional[pathlib.Path] = None):
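    """Print Wikipedias that clear the article-count and depth thresholds.

    Collects their site codes and, if ``urls`` is given, writes a sibling file
    (stem suffixed with ``_filtered``) containing only the URLs for those wikis.
    """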
    if urls:
        texturls = urls.read_text("utf-8").split("\n")
    sites = []
    for wiki in orjson.loads(stats.read_bytes())["data"]:
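        # Each wikistats row is assumed to be:
        # [sitename, active users, admins, articles, edits, files, pages, users]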
        sitename, activeusers, admins, articles, edits, files, pages, users = wiki

        sitecode = sitename.split(".")[0]
        if sitecode in [
            "aa",
            "ak",
            "cho",
            "ho",
            "hz",
            "ii",
            "kr",
            "lrc",
            "mh",
            "mus",
            "na",
            "ng",
        ]:
            # Closed, read-only wikis.
            continue
        elif sitecode in ["tok", "tlh", "ru-sib", "mo"]:
            # Deleted or hosted elsewhere.
            continue
        elif (
            articles == 0
            or sitecode.startswith("total")
            or not sitename.endswith("wikipedia")
        ):
            continue
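        # Wikipedia "depth" heuristic: edit intensity (edits per page) scaled by the
        # squared ratio of non-article pages to articles.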
        depth = (edits / pages) * ((pages - articles) / articles) ** 2
        if articles < 100_000 or depth < 5.1:
            continue
        print(
            sitename, "depth", depth, "articles", articles, "active_users", activeusers
        )
        # Underscores match the dump naming convention (e.g. "zh-min-nan" -> "zh_min_nan").
        sites.append(sitecode.replace("-", "_"))
    if urls:
        filtered_urls = []
        for url in texturls:
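            # Assumes dump-style file names such as "<code>wiki-<date>-pages-articles.xml.bz2",
            # so the wiki code is everything before "wiki" in the last path segment.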
            url_wikicode = url.split("/")[-1].split("-")[0].split("wiki")[0]
            if url_wikicode in sites:
                filtered_urls.append(url)
        urls.with_stem(urls.stem + "_filtered").write_text("\n".join(filtered_urls), encoding="utf-8")


if __name__ == "__main__":
    app()