# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team and Jangwon Park
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for KoBERT model """ | |
import logging | |
import os | |
import unicodedata | |
from shutil import copyfile | |
from transformers import PreTrainedTokenizer | |
logger = logging.getLogger(__name__) | |
VOCAB_FILES_NAMES = {
    "vocab_file": "tokenizer_78b3253a26.model",
    "vocab_txt": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "monologg/kobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert/tokenizer_78b3253a26.model",
        "monologg/kobert-lm": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert-lm/tokenizer_78b3253a26.model",
        "monologg/distilkobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/distilkobert/tokenizer_78b3253a26.model",
    },
    "vocab_txt": {
        "monologg/kobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert/vocab.txt",
        "monologg/kobert-lm": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert-lm/vocab.txt",
        "monologg/distilkobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/distilkobert/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "monologg/kobert": 512,
    "monologg/kobert-lm": 512,
    "monologg/distilkobert": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "monologg/kobert": {"do_lower_case": False},
    "monologg/kobert-lm": {"do_lower_case": False},
    "monologg/distilkobert": {"do_lower_case": False},
}

SPIECE_UNDERLINE = "▁"


class KoBertTokenizer(PreTrainedTokenizer):
    """
    SentencePiece based tokenizer. Peculiarities:
        - requires `SentencePiece <https://github.com/google/sentencepiece>`_
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        vocab_txt,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Build the token <-> id lookup tables from vocab.txt before calling the base
        # constructor: recent transformers versions resolve the special tokens inside
        # PreTrainedTokenizer.__init__, which needs the vocab to already exist.
        self.token2idx = dict()
        self.idx2token = []
        with open(vocab_txt, "r", encoding="utf-8") as f:
            for idx, token in enumerate(f):
                token = token.strip()
                self.token2idx[token] = idx
                self.idx2token.append(token)

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use KoBertTokenizer "
                "(https://github.com/google/sentencepiece): pip install sentencepiece"
            )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.vocab_txt = vocab_txt

        # Load the SentencePiece model that performs the actual sub-word segmentation.
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

        super().__init__(
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.idx2token)

    def get_vocab(self):
        return dict(self.token2idx, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use KoBertTokenizer "
                "(https://github.com/google/sentencepiece): pip install sentencepiece"
            )
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
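
    # Illustrative behaviour with the default flags (remove_space=True,
    # keep_accents=False, do_lower_case=False); the sample input is made up:
    #   preprocess_text("  Héllo   ``world''  ")  ->  'Hello "world"'
    # (extra whitespace collapsed, LaTeX-style quotes normalized, accents stripped)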

    def _tokenize(self, text):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Pieces like "2000," (digits followed by a trailing comma) are split so the
            # comma becomes its own token and the digits are re-segmented by SentencePiece.
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.token2idx.get(token, self.token2idx[self.unk_token])

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.idx2token[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
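
    # Illustrative round trip (the pieces shown are hypothetical; the actual
    # segmentation depends entirely on the loaded SentencePiece model):
    #   _tokenize("안녕하세요")  might yield  ["▁안녕", "하세요"]
    #   convert_tokens_to_string(["▁안녕", "하세요"])  ->  "안녕하세요"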

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks
        by concatenating and adding special tokens.
        A KoBERT sequence has the following format:
            single sequence: [CLS] X [SEP]
            pair of sequences: [CLS] A [SEP] B [SEP]
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
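
    # Illustrative shapes (cls_id/sep_id stand for whatever ids the loaded vocab
    # assigns to [CLS] and [SEP]; the token ids 5, 6, 7 are made up):
    #   build_inputs_with_special_tokens([5, 6])       ->  [cls_id, 5, 6, sep_id]
    #   build_inputs_with_special_tokens([5, 6], [7])  ->  [cls_id, 5, 6, sep_id, 7, sep_id]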

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.

        Args:
            token_ids_0: list of ids (must not contain special tokens)
            token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
                for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formatted with
                special tokens for the model

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(
                    lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0,
                )
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
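
    # Illustrative output for unformatted id lists of lengths 2 and 3 (ids made up):
    #   get_special_tokens_mask([5, 6], [7, 8, 9])  ->  [1, 0, 0, 1, 0, 0, 0, 1]
    # i.e. 1 marks the positions [CLS]/[SEP] will occupy, 0 marks sequence tokens.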

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        A KoBERT sequence pair mask has the following format:
            0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence   |
        If token_ids_1 is None, only returns the first portion of the mask (0's).
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
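
    # Illustrative output for id lists of lengths 2 and 3 (ids made up):
    #   create_token_type_ids_from_sequences([5, 6], [7, 8, 9])  ->  [0, 0, 0, 0, 1, 1, 1, 1]
    # i.e. "[CLS] A A [SEP]" is segment 0 and "B B B [SEP]" is segment 1.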

    def save_vocabulary(self, save_directory):
        """Save the sentencepiece vocabulary (copy original file) and special tokens file
        to a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return

        # 1. Save the sentencepiece model
        out_vocab_model = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_model):
            copyfile(self.vocab_file, out_vocab_model)

        # 2. Save vocab.txt
        index = 0
        out_vocab_txt = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_txt"])
        with open(out_vocab_txt, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.token2idx.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        "Saving vocabulary to {}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!".format(out_vocab_txt)
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1

        return out_vocab_model, out_vocab_txt
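

# Minimal usage sketch, not part of the tokenizer itself. It assumes that
# `transformers` and `sentencepiece` are installed and that the `monologg/kobert`
# vocabulary files mapped above are reachable; the sample sentence is arbitrary.
if __name__ == "__main__":
    tokenizer = KoBertTokenizer.from_pretrained("monologg/kobert")
    print(tokenizer.tokenize("한국어 모델을 공유합니다."))  # sub-word pieces
    print(tokenizer.encode("한국어 모델을 공유합니다."))  # ids with [CLS]/[SEP] added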