
Commit 35c6887

optimize convert-hf-to-gguf.py for chatglm model
Signed-off-by: XingXing Qiao <[email protected]>
1 parent 8240833


convert-hf-to-gguf.py

Lines changed: 8 additions & 10 deletions
@@ -2752,13 +2752,15 @@ def set_vocab(self):
 
             text = piece.encode("utf-8")
             score = 0.0
-            if len(piece) != 0 and token_id < 64789:
+            # Referencing the tokenizer Python implementation (https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
+            # it is only valid if it is less than tokenizer.tokenizer.sp_model.vocab_size()
+            if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
                 score = tokenizer.tokenizer.sp_model.get_score(token_id)
 
             if len(piece) == 0:
                 text = f"[PAD{token_id}]".encode("utf-8")
 
-            if token_id >= 64789:
+            if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
                 toktype = SentencePieceTokenTypes.UNKNOWN
             tokens.append(text)
             scores.append(score)
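
The hardcoded 64789 was ChatGLM3's sentencepiece vocabulary size: ids below it are ordinary sentencepiece pieces with a real score, while ids at or above it are ChatGLM's added special tokens, which the converter tags as UNKNOWN. Reading the bound from sp_model.vocab_size() lets the same code handle checkpoints with a different split. A minimal sketch of the boundary logic, assuming a local chatglm3-6b download; the tokenizer.model path is a hypothetical example:

from sentencepiece import SentencePieceProcessor

# Path is a hypothetical example of a local chatglm3-6b checkout.
sp_model = SentencePieceProcessor(model_file="chatglm3-6b/tokenizer.model")

def token_score(token_id: int) -> float:
    # Mirrors the fixed condition: only ids inside the sentencepiece
    # vocabulary carry a real score; added special tokens above the
    # boundary fall back to the default 0.0 (and are tagged UNKNOWN
    # by the converter).
    if token_id < sp_model.vocab_size():
        return sp_model.get_score(token_id)
    return 0.0

print(sp_model.vocab_size())  # 64789 for chatglm3-6b, per the old hardcoded constant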
@@ -2788,7 +2790,7 @@ def set_vocab(self):
         special_vocab.add_to_gguf(self.gguf_writer)
 
     def set_gguf_parameters(self):
-        self.gguf_writer.add_name("ChatGLM-6b-chat")
+        self.gguf_writer.add_name(self.dir_model.name)
         n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
         n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
         n_head_kv = self.hparams.get("multi_query_group_num", n_head)
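
With add_name(self.dir_model.name), the GGUF model name now follows the directory the checkpoint was loaded from rather than the hardcoded "ChatGLM-6b-chat" string, so different ChatGLM variants get distinct names. A small illustration of what the new argument evaluates to; the path is a hypothetical example:

from pathlib import Path

# self.dir_model is the model directory passed to the converter;
# .name is simply its final path component.
dir_model = Path("models/chatglm3-6b")
print(dir_model.name)  # -> "chatglm3-6b", written as the GGUF model name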
@@ -2804,16 +2806,12 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_add_bos_token(False)
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        if name.endswith(".rotary_pos_emb.inv_freq"):
-            return []
-
         del bid  # unused
 
-        name = re.sub(r'transformer\.', '', name)
-
-        if name == "word_embeddings.weight":
-            assert self.tensor_names is not None
+        if name.endswith(".rotary_pos_emb.inv_freq"):
+            return []
 
+        name = name.removeprefix("transformer.")
         return [(self.map_tensor_name(name), data_torch)]
 
 
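modify_tensors now discards the unused bid first, then filters the inv_freq tensors, and strips the "transformer." prefix with str.removeprefix (Python 3.9+) instead of re.sub. The two renames agree for real ChatGLM tensor names, where the prefix occurs once at the start, but removeprefix is more precise: it never rewrites an interior occurrence. A quick comparison; the tensor names are hypothetical examples:

import re

name = "transformer.encoder.layers.0.self_attention.query_key_value.weight"
print(re.sub(r'transformer\.', '', name))  # old: encoder.layers.0.self_attention.query_key_value.weight
print(name.removeprefix("transformer."))   # new: same result for a leading prefix

# The two differ only when "transformer." shows up mid-name:
tricky = "transformer.vision.transformer.blocks.0.weight"
print(re.sub(r'transformer\.', '', tricky))  # vision.blocks.0.weight
print(tricky.removeprefix("transformer."))   # vision.transformer.blocks.0.weight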
