Datasets:
import os
from pandas import read_csv, NA
from datasets import GeneratorBasedBuilder, Value, Version, BuilderConfig, Features, DatasetInfo, SplitGenerator, Split, Audio
_DESCRIPTION = '''
This dataset contains transcribed sounds emitted by characters of the League of Legends game.
The data can be useful for building text classification models, fine-tuning language generation models, and training speech synthesis and speech recognition models.
The underlying web dump used to construct the dataset was last refreshed on **20.10.2023**
'''
_HOMEPAGE = 'https://huggingface.co/datasets/zeio/pale'
_LICENSE = 'Apache License Version 2.0'
_URLS = {
    'vanilla': 'https://huggingface.co/datasets/zeio/pale/resolve/main/pale.tsv',
    'quotes': 'https://huggingface.co/datasets/zeio/pale/resolve/main/quotes.tsv',
    'annotated': 'https://huggingface.co/datasets/zeio/pale/resolve/main/annotated.tsv',
    'pulled': 'https://huggingface.co/datasets/zeio/pale/resolve/main/pulled.tsv'
}
_SOUND_URL = 'https://huggingface.co/datasets/zeio/pale/resolve/main/sound.tar.xz'
class Pale(GeneratorBasedBuilder):
    VERSION = Version('30.10.2023')

    BUILDER_CONFIGS = [
        BuilderConfig(name = 'quotes', version = VERSION, description = 'Truncated version of the corpus, which doesn\'t contain sound effects'),
        BuilderConfig(name = 'annotated', version = VERSION, description = 'An extended version of the full configuration with a couple of additional label columns'),
        BuilderConfig(name = 'vanilla', version = VERSION, description = 'All data pulled from the website without significant modifications apart from the web page structure parsing'),
        BuilderConfig(name = 'pulled', version = VERSION, description = 'Same as vanilla, but sound files have been pulled from the website, and the "source" column is replaced with a "sound" column')
    ]

    DEFAULT_CONFIG_NAME = 'quotes'
    def _info(self):
        if self.config.name == 'vanilla':
            features = Features({
                'header': Value('string'),
                'subheader': Value('string'),
                'text': Value('string'),
                'source': Value('string'),
                'champion': Value('string')
            })
        elif self.config.name == 'annotated':
            features = Features({
                'header': Value('string'),
                'subheader': Value('string'),
                'text': Value('string'),
                'source': Value('string'),
                'champion': Value('string'),
                'quote': Value('bool')
            })
        elif self.config.name == 'quotes':
            features = Features({
                'header': Value('string'),
                'subheader': Value('string'),
                'text': Value('string'),
                'champion': Value('string')
            })
        elif self.config.name == 'pulled':
            features = Features({
                'header': Value('string'),
                'subheader': Value('string'),
                'text': Value('string'),
                'sound': Audio(sampling_rate = 44_100),
                'champion': Value('string')
            })
        else:
            raise ValueError(f'Unknown config: {self.config.name}')

        return DatasetInfo(
            description = _DESCRIPTION,
            features = features,
            homepage = _HOMEPAGE,
            license = _LICENSE
        )
    def _split_generators(self, dl_manager):
        name = self.config.name
        url = _URLS[name]

        return [
            SplitGenerator(
                name = Split.TRAIN,
                gen_kwargs = {
                    'path': dl_manager.download_and_extract(url),
                    'sound': dl_manager.download_and_extract(_SOUND_URL) if name == 'pulled' else None
                }
            )
        ]
    def _generate_examples(self, path: str, sound: str = None):
        for i, row in read_csv(path, sep = '\t').iterrows():
            if sound is None:
                yield i, dict(row)
            else:
                data = dict(row)

                folder = data['folder']
                filename = data['filename']

                # NaN != NaN, so this check passes only when both folder and filename are present
                if folder == folder and filename == filename:
                    data['sound'] = os.path.join(sound, folder, f'{filename}.ogg')
                else:
                    data['sound'] = NA

                data.pop('folder')
                data.pop('filename')

                yield i, data
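
For reference, the resulting configurations can be loaded through the standard `datasets.load_dataset` API. The snippet below is a minimal sketch; recent versions of the `datasets` library may additionally require passing `trust_remote_code = True` for script-based datasets such as this one.

from datasets import load_dataset

# load the default 'quotes' configuration and inspect the first record
quotes = load_dataset('zeio/pale', name = 'quotes', split = 'train')
print(quotes[0])

# the 'pulled' configuration additionally resolves the referenced .ogg files through the Audio feature
pulled = load_dataset('zeio/pale', name = 'pulled', split = 'train')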