|
import re

import inflect
import torch
from tokenizers import Tokenizer
from unidecode import unidecode

# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
|
|
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile(r'\b%s\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]
|
|
def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text
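
# For example: expand_abbreviations('Dr. Smith lives on St. John St.')
# should yield 'doctor Smith lives on saint John saint'.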
|
|
|
|
|
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
|
|
def _remove_commas(m):
    return m.group(1).replace(',', '')
|
|
def _expand_decimal_point(m):
    return m.group(1).replace('.', ' point ')
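
# For example, '3.14' becomes '3 point 14' here; the later _number_re pass
# should then produce 'three point fourteen' (the fractional digits are read
# as a whole number rather than digit by digit).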
|
|
|
|
|
def _expand_dollars(m):
    match = m.group(1)
    parts = match.split('.')
    if len(parts) > 2:
        return match + ' dollars'  # unexpected format
    dollars = int(parts[0]) if parts[0] else 0
    cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
    if dollars and cents:
        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
        cent_unit = 'cent' if cents == 1 else 'cents'
        return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
    elif dollars:
        dollar_unit = 'dollar' if dollars == 1 else 'dollars'
        return '%s %s' % (dollars, dollar_unit)
    elif cents:
        cent_unit = 'cent' if cents == 1 else 'cents'
        return '%s %s' % (cents, cent_unit)
    else:
        return 'zero dollars'
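
# For example, a '$1.50' match should expand to '1 dollar, 50 cents' and
# '$0.05' to '5 cents'; the digits are spelled out later by _expand_number.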
|
|
|
|
|
def _expand_ordinal(m):
    return _inflect.number_to_words(m.group(0))
|
|
def _expand_number(m):
    num = int(m.group(0))
    if 1000 < num < 3000:
        if num == 2000:
            return 'two thousand'
        elif 2000 < num < 2010:
            return 'two thousand ' + _inflect.number_to_words(num % 100)
        elif num % 100 == 0:
            return _inflect.number_to_words(num // 100) + ' hundred'
        else:
            return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
    else:
        return _inflect.number_to_words(num, andword='')
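
# The 1000-3000 branch keeps year-like readings: 2023 should come out as
# 'twenty twenty-three', 2007 as 'two thousand seven', and 1500 as
# 'fifteen hundred'; other values fall through to plain number_to_words.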
|
|
|
|
|
def normalize_numbers(text):
    text = re.sub(_comma_number_re, _remove_commas, text)
    text = re.sub(_pounds_re, r'\1 pounds', text)
    text = re.sub(_dollars_re, _expand_dollars, text)
    text = re.sub(_decimal_number_re, _expand_decimal_point, text)
    text = re.sub(_ordinal_re, _expand_ordinal, text)
    text = re.sub(_number_re, _expand_number, text)
    return text
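
# Order matters here: commas are stripped first so '$1,500' can match
# _dollars_re, and currency runs before _decimal_number_re so '$1.50' is
# read as dollars and cents rather than 'one point fifty'. For example,
# normalize_numbers('I owe $1,500') should give 'I owe fifteen hundred dollars'.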
|
|
|
|
|
def expand_numbers(text):
    return normalize_numbers(text)
|
|
def lowercase(text):
    return text.lower()
|
|
def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)
|
|
def convert_to_ascii(text):
    return unidecode(text)
|
|
def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text
|
|
def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text
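
# For example, transliteration_cleaners('Crème Brûlée!') should give
# 'creme brulee!' via unidecode's best-effort ASCII transliteration.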
|
|
|
|
|
def english_cleaners(text):
    '''Pipeline for English text, including number and abbreviation expansion.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = expand_numbers(text)
    text = expand_abbreviations(text)
    text = collapse_whitespace(text)
    text = text.replace('"', '')
    return text
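
# End to end, english_cleaners('Dr. Jones owes $1,500.') should return
# 'doctor jones owes fifteen hundred dollars.'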
|
|
|
|
|
class VoiceBpeTokenizer:
    '''BPE tokenizer that applies english_cleaners and maps spaces to a [SPACE] token.'''

    def __init__(self, vocab_file='data/tokenizer.json'):
        if vocab_file is not None:
            self.tokenizer = Tokenizer.from_file(vocab_file)

    def preprocess_text(self, txt):
        return english_cleaners(txt)

    def encode(self, txt):
        txt = self.preprocess_text(txt)
        # Spaces are represented by an explicit [SPACE] token in the vocab.
        txt = txt.replace(' ', '[SPACE]')
        return self.tokenizer.encode(txt).ids

    def decode(self, seq):
        if isinstance(seq, torch.Tensor):
            seq = seq.cpu().numpy()
        # Undo BPE spacing, then restore real spaces and drop special tokens.
        txt = self.tokenizer.decode(seq, skip_special_tokens=False).replace(' ', '')
        txt = txt.replace('[SPACE]', ' ')
        txt = txt.replace('[STOP]', '')
        txt = txt.replace('[UNK]', '')
        return txt
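
# Minimal smoke test, assuming a trained vocab exists at the default
# 'data/tokenizer.json' path; the printed ids depend on that vocab file.
if __name__ == '__main__':
    tok = VoiceBpeTokenizer()
    ids = tok.encode('Dr. Smith paid $1,500 on Dec 1st.')
    print(ids)
    # decode() strips the [SPACE]/[STOP]/[UNK] markers back out, so a round
    # trip should return the cleaned text (not the raw input).
    print(tok.decode(ids))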