We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 3870164 · commit dcd8dfa — Copy full SHA for dcd8dfa
convert.py
@@ -463,7 +463,7 @@ def __init__(self, base_path: Path):
463
raise FileNotFoundError('Cannot find tokenizer.model')
464
465
self.sentencepiece_tokenizer = SentencePieceProcessor()
466
- self.sentencepiece_tokenizer.LoadFromFile(fname_tokenizer)
+ self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
467
vocab_size = self.sentencepiece_tokenizer.vocab_size()
468
469
new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
0 commit comments