voxpopuli / voxpopuli.py
from collections import defaultdict
import os
import glob
import csv

from tqdm.auto import tqdm

import datasets


_DESCRIPTION = """
A large-scale multilingual speech corpus for representation learning, semi-supervised learning and interpretation.
"""
_CITATION = """
@inproceedings{wang-etal-2021-voxpopuli,
    title = "{V}ox{P}opuli: A Large-Scale Multilingual Speech Corpus for Representation Learning,
    Semi-Supervised Learning and Interpretation",
    author = "Wang, Changhan and
      Riviere, Morgane and
      Lee, Ann and
      Wu, Anne and
      Talnikar, Chaitanya and
      Haziza, Daniel and
      Williamson, Mary and
      Pino, Juan and
      Dupoux, Emmanuel",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics
    and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
    month = aug,
    year = "2021",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-long.80",
    doi = "10.18653/v1/2021.acl-long.80",
    pages = "993--1003",
}
"""
_HOMEPAGE = "https://github.com/facebookresearch/voxpopuli"
_LICENSE = "CC0, also see https://www.europarl.europa.eu/legal-notice/en/"
_LANGUAGES = sorted(
    [
        "en", "de", "fr", "es", "pl", "it", "ro", "hu", "cs", "nl", "fi", "hr",
        "sk", "sl", "et", "lt", "pt", "bg", "el", "lv", "mt", "sv", "da"
    ]
)
_LANGUAGES_V2 = [f"{x}_v2" for x in _LANGUAGES]

_ASR_LANGUAGES = [
    "en", "de", "fr", "es", "pl", "it", "ro", "hu", "cs", "nl", "fi", "hr",
    "sk", "sl", "et", "lt"
]
_ASR_ACCENTED_LANGUAGES = [
    "en_accented"
]

_YEARS = list(range(2009, 2020 + 1))
_CONFIG_TO_LANGS = {
    "400k": _LANGUAGES,
    "100k": _LANGUAGES,
    "10k": _LANGUAGES,
    "asr": _ASR_LANGUAGES,  # + _ASR_ACCENTED_LANGUAGES
}

_CONFIG_TO_YEARS = {
    "400k": _YEARS + [f"{y}_2" for y in _YEARS],
    "100k": _YEARS,
    "10k": [2019, 2020],
    "asr": _YEARS,
}
for lang in _LANGUAGES:
    _CONFIG_TO_YEARS[lang] = _YEARS

for lang in _LANGUAGES_V2:
    _CONFIG_TO_YEARS[lang] = _YEARS + [f"{y}_2" for y in _YEARS]

_BASE_URL = "https://dl.fbaipublicfiles.com/voxpopuli/"
_DATA_URL = _BASE_URL + "audios/{lang}_{year}.tar"
_ASR_DATA_URL = _BASE_URL + "audios/original_{year}.tar"
_UNLABELLED_META_URL = _BASE_URL + "annotations/unlabelled_v2.tsv.gz"
_ASR_META_URL = _BASE_URL + "annotations/asr/asr_{lang}.tsv.gz"
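
# For illustration, the templates above resolve to URLs like the following
# (example values only; whether a given archive actually exists is not checked here):
#   _DATA_URL.format(lang="hr", year=2020) -> "https://dl.fbaipublicfiles.com/voxpopuli/audios/hr_2020.tar"
#   _ASR_META_URL.format(lang="hr")        -> "https://dl.fbaipublicfiles.com/voxpopuli/annotations/asr/asr_hr.tsv.gz"
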
class VoxpopuliConfig(datasets.BuilderConfig):
    """BuilderConfig for VoxPopuli."""

    def __init__(self, name, **kwargs):
        """
        Args:
            name: `string`, name of dataset config
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(name=name, **kwargs)
        self.years = _CONFIG_TO_YEARS[name]  # look up years before stripping any "_v2" suffix
        name = name.split("_")[0]  # e.g. "en_v2" -> "en"
        self.languages = [name] if name in _LANGUAGES else _CONFIG_TO_LANGS[name]
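
# Example of how a config name resolves (illustration only; config names are taken
# from BUILDER_CONFIGS below): "fr_v2" gives languages=["fr"] and the extended
# _CONFIG_TO_YEARS["fr_v2"] year list, while "10k" gives languages=_LANGUAGES and
# years=[2019, 2020].
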
class Voxpopuli(datasets.GeneratorBasedBuilder):
    """The VoxPopuli dataset."""

    VERSION = datasets.Version("1.3.0")
    BUILDER_CONFIGS = [
        VoxpopuliConfig(
            name=name,
            version=datasets.Version("1.3.0"),
        )
        for name in _LANGUAGES + _LANGUAGES_V2 + ["10k", "100k", "400k"]
    ]
    # DEFAULT_CONFIG_NAME = "400k"
    DEFAULT_WRITER_BATCH_SIZE = 256  # lower this value if dataset generation uses too much RAM
    def _info(self):
        # torchaudio is imported lazily and promoted to a module-level global so that
        # _generate_examples can use it for decoding the downloaded .ogg files
        global torchaudio
        try:
            import torch
            import torchaudio
        except ImportError as e:
            raise ValueError(
                f"{str(e)}.\n"
                "Loading voxpopuli requires `torchaudio` to be installed. "
                "You can install torchaudio with `pip install torchaudio`."
            )

        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "language": datasets.ClassLabel(names=_LANGUAGES),
                "year": datasets.Value("int16"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "segment_id": datasets.Value("int16"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _read_metadata_unlabelled(self, metadata_path):
        # from https://github.com/facebookresearch/voxpopuli/blob/main/voxpopuli/get_unlabelled_data.py#L34
        def predicate(id_):
            is_plenary = id_.find("PLENARY") > -1
            if self.config.name == "10k":  # in {"10k", "10k_sd"}
                return is_plenary and 20190101 <= int(id_[:8]) < 20200801
            elif self.config.name == "100k":
                return is_plenary
            elif self.config.name in _LANGUAGES:
                return is_plenary and id_.endswith(self.config.name)
            elif self.config.name in _LANGUAGES_V2:
                return id_.endswith(self.config.name.split("_")[0])
            return True
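
        # Hypothetical example of one data row in unlabelled_v2.tsv, assumed from the
        # column unpacking below (not taken from the actual file):
        #   event_id                    segment_id  start    end
        #   20200115-0900-PLENARY_fr    23          4104.40  4109.10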
        metadata = defaultdict(list)
        with open(metadata_path, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter="\t")
            for i, row in tqdm(enumerate(csv_reader)):
                if i == 0:
                    # skip the header row
                    continue
                event_id, segment_id, start, end = row
                _, lang = event_id.rsplit("_", 1)[-2:]
                if lang in self.config.languages and predicate(event_id):
                    metadata[event_id].append((float(start), float(end)))
        return metadata
    def _read_metadata_asr(self, metadata_paths):
        # reading the ASR annotations is not implemented in this version of the script
        pass
    def _split_generators(self, dl_manager):
        metadata_path = dl_manager.download_and_extract(_UNLABELLED_META_URL)
        urls = [
            _DATA_URL.format(lang=language, year=year)
            for language in self.config.languages
            for year in self.config.years
        ]
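        # Rough size note, derived from the constants above for illustration: a
        # single-language config such as "hr" requests len(_YEARS) == 12 yearly
        # archives, while "400k" requests 23 languages * 24 year entries == 552 archives.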
        dl_manager.download_config.num_proc = len(urls)
        data_dirs = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dirs": data_dirs,
                    "metadata_path": metadata_path,
                }
            ),
        ]
    def _generate_examples(self, data_dirs, metadata_path):
        metadata = self._read_metadata_unlabelled(metadata_path)
        for data_dir in data_dirs:
            for file in glob.glob(f"{data_dir}/**/*.ogg", recursive=True):
                path_components = file.split(os.sep)
                language, year, audio_filename = path_components[-3:]
                audio_id, _ = os.path.splitext(audio_filename)
                if audio_id not in metadata:
                    continue
                timestamps = metadata[audio_id]
                waveform, sr = torchaudio.load(file)
                duration = waveform.size(1)
                # split audio on the fly and yield segments as arrays - they are converted to bytes by the Audio feature
                for segment_id, (start, stop) in enumerate(timestamps):
                    segment = waveform[:, int(start * sr): min(int(stop * sr), duration)]
                    yield f"{audio_filename}_{segment_id}", {
                        "path": file,
                        "language": language,
                        "year": year,
                        "audio": {
                            "array": segment[0],  # segment is a 2-dim (channels, samples) tensor
                            "sampling_rate": 16_000,
                        },
                        "segment_id": segment_id,
                    }
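
# Usage sketch (not part of the loading script; the local path and config name are
# assumptions for illustration, and depending on your `datasets` version
# `trust_remote_code=True` may also be required):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("./voxpopuli.py", "hr", split="train")
#     sample = ds[0]
#     print(sample["segment_id"], sample["audio"]["sampling_rate"])  # sampling rate is 16000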