Commit 7f9d720

Better loading of special tokens from jsons
1 parent 0c14627 commit 7f9d720

File tree

1 file changed: +32 -5 lines changed

convert.py

Lines changed: 32 additions & 5 deletions
@@ -243,7 +243,7 @@ def load(model_plus: 'ModelPlus') -> 'Params':
 
 
 class SentencePieceVocab:
-    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], fname_special_tokens: Optional[Path], vocabtype: Optional[str]) -> None:
+    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], fname_special_tokens: Optional[Path], fname_tokenizer_config: Optional[Path], vocabtype: Optional[str]) -> None:
         self.vocabtype = vocabtype
         if self.vocabtype == "bpe":
             self.sentencepiece_tokenizer = json.loads(open(str(fname_tokenizer)).read())
@@ -268,13 +268,40 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], fn
         self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
         self.fname_tokenizer = fname_tokenizer
         self.fname_added_tokens = fname_added_tokens
-        special_tokens: Dict[str, Dict[str, Any]]
+        self.special_tokens_map: Dict[int, str] = {}
+
+        TOKEN_NAME_TO_ID: Dict[str, int] = {
+            "unk_token": self.sentencepiece_tokenizer.unk_id(),
+            "bos_token": self.sentencepiece_tokenizer.bos_id(),
+            "eos_token": self.sentencepiece_tokenizer.eos_id(),
+            "pad_token": self.sentencepiece_tokenizer.pad_id()
+        }
+
+        tokenizer_config: Dict[str, Any]
+        if fname_tokenizer_config is not None:
+            tokenizer_config = json.load(open(fname_tokenizer_config))
+        else:
+            tokenizer_config = {}
+        for key, value in tokenizer_config.items():
+            assert isinstance(value, dict) or isinstance(value, str)
+            if key not in TOKEN_NAME_TO_ID or TOKEN_NAME_TO_ID[key] == -1:
+                continue
+            self.special_tokens_map[TOKEN_NAME_TO_ID[key]] = value["content"] if isinstance(value, dict) else value
+
+        special_tokens: Dict[str, Any]
         if fname_special_tokens is not None:
             special_tokens = json.load(open(fname_special_tokens))
         else:
             special_tokens = {}
-        token_name_to_id = {"unk_token": self.sentencepiece_tokenizer.unk_id(), "bos_token": self.sentencepiece_tokenizer.bos_id(), "eos_token": self.sentencepiece_tokenizer.eos_id(), "pad_token": self.sentencepiece_tokenizer.pad_id()}
-        self.special_tokens_map = {token_name_to_id[token_name]: info["content"] if isinstance(info, dict) else info for token_name, info in special_tokens.items() if token_name in token_name_to_id and token_name_to_id[token_name] != -1}
+        for key, value in special_tokens.items():
+            assert isinstance(value, dict) or isinstance(value, str)
+            if key not in TOKEN_NAME_TO_ID:
+                continue
+            token_id = TOKEN_NAME_TO_ID[key]
+            if token_id == -1 or token_id in self.special_tokens_map:
+                continue
+            self.special_tokens_map[token_id] = value["content"] if isinstance(value, dict) else value
+
         self.vocab_special_size: int = len(self.added_tokens_list) + len(self.special_tokens_map)
 
     def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
@@ -1282,7 +1309,7 @@ def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
     special_tokens_path = path.parent / "special_tokens_map.json"
     tokenizer_config_path = path.parent / "tokenizer_config.json"
     print(f"Loading vocab file {path}")
-    return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None, special_tokens_path if special_tokens_path.exists() else tokenizer_config_path if tokenizer_config_path.exists() else None,
+    return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None, special_tokens_path if special_tokens_path.exists() else None, tokenizer_config_path if tokenizer_config_path.exists() else None,
                               vocabtype)
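The net effect of the rewritten __init__ is a simple precedence rule: special-token names resolved from tokenizer_config.json win, special_tokens_map.json only fills token IDs that are still unassigned, and any ID the SentencePiece tokenizer reports as -1 is skipped. That is also why load_vocab now passes special_tokens_map.json and tokenizer_config.json as separate arguments instead of picking whichever file exists. Below is a minimal, self-contained sketch of that resolution order; the function name and the example IDs/JSON contents are hypothetical stand-ins for what convert.py reads from the tokenizer and the two JSON files.

from typing import Any, Dict


def resolve_special_tokens(token_name_to_id: Dict[str, int],
                           tokenizer_config: Dict[str, Any],
                           special_tokens: Dict[str, Any]) -> Dict[int, str]:
    """Mirror the precedence in the patched __init__: tokenizer_config.json
    first, then special_tokens_map.json for any IDs still unassigned."""
    special_tokens_map: Dict[int, str] = {}
    for key, value in tokenizer_config.items():
        if key not in token_name_to_id or token_name_to_id[key] == -1:
            continue  # unknown name, or the tokenizer has no such token
        special_tokens_map[token_name_to_id[key]] = value["content"] if isinstance(value, dict) else value
    for key, value in special_tokens.items():
        token_id = token_name_to_id.get(key, -1)
        if token_id == -1 or token_id in special_tokens_map:
            continue  # missing, or already set from tokenizer_config.json
        special_tokens_map[token_id] = value["content"] if isinstance(value, dict) else value
    return special_tokens_map


# Hypothetical inputs; convert.py gets the IDs from SentencePiece
# (unk_id()/bos_id()/eos_id()/pad_id()) and the dicts from the two JSON files.
ids = {"unk_token": 0, "bos_token": 1, "eos_token": 2, "pad_token": -1}
config = {"bos_token": {"content": "<s>"}, "eos_token": "</s>"}
fallback = {"unk_token": "<unk>", "eos_token": "<ignored>"}  # eos already set above
print(resolve_special_tokens(ids, config, fallback))  # {1: '<s>', 2: '</s>', 0: '<unk>'}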
