From 7fca39e80c1caed5de8afc55d56e930f0aac5a8c Mon Sep 17 00:00:00 2001
From: Pedro Cuenca
Date: Tue, 26 Mar 2024 10:35:49 +0100
Subject: [PATCH] [convert-hf] Fix exception in sentencepiece

---
 convert-hf-to-gguf.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 723ea18e34c65..c5d2d0b7813d1 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -331,7 +331,7 @@ def _set_vocab_sentencepiece(self):
         tokenizer = SentencePieceProcessor(str(tokenizer_path))
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
 
-        for token_id in range(vocab_size):
+        for token_id in range(tokenizer.vocab_size()):
             piece = tokenizer.id_to_piece(token_id)
             text = piece.encode("utf-8")
             score = tokenizer.get_score(token_id)
@@ -356,9 +356,13 @@ def _set_vocab_sentencepiece(self):
                 added_tokens_json = json.load(f)
 
                 for key in added_tokens_json:
-                    tokens.append(key.encode("utf-8"))
-                    scores.append(-1000.0)
-                    toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+                    key = key.encode("utf-8")
+                    if key not in tokens:
+                        tokens.append(key)
+                        scores.append(-1000.0)
+                        toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+
+        assert len(tokens) == vocab_size
 
         self.gguf_writer.add_tokenizer_model("llama")
         self.gguf_writer.add_token_list(tokens)