Skip to content

Commit

Permalink
convert-hf : fix exception in sentencepiece with added tokens (ggerganov)
Browse files Browse the repository at this point in the history
  • Loading branch information
pcuenca authored and hodlen committed Apr 3, 2024
1 parent ca32cb9 commit ee9a19f
Showing 1 changed file with 8 additions and 4 deletions.
12 changes: 8 additions & 4 deletions convert-hf-to-gguf.py
Expand Up @@ -331,7 +331,7 @@ def _set_vocab_sentencepiece(self):
tokenizer = SentencePieceProcessor(str(tokenizer_path))
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())

for token_id in range(vocab_size):
for token_id in range(tokenizer.vocab_size()):
piece = tokenizer.id_to_piece(token_id)
text = piece.encode("utf-8")
score = tokenizer.get_score(token_id)
Expand All @@ -356,9 +356,13 @@ def _set_vocab_sentencepiece(self):
added_tokens_json = json.load(f)

for key in added_tokens_json:
tokens.append(key.encode("utf-8"))
scores.append(-1000.0)
toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
key = key.encode("utf-8")
if key not in tokens:
tokens.append(key)
scores.append(-1000.0)
toktypes.append(SentencePieceTokenTypes.USER_DEFINED)

assert len(tokens) == vocab_size

self.gguf_writer.add_tokenizer_model("llama")
self.gguf_writer.add_token_list(tokens)
Expand Down

0 comments on commit ee9a19f

Please sign in to comment.