Skip to content

Commit 00d5380

Browse files
authored
llama-vocab : add SuperBPE pre-tokenizer (ggml-org#12532)
1 parent 7ea7503 commit 00d5380

File tree

4 files changed

+15
-0
lines changed

4 files changed

+15
-0
lines changed

convert_hf_to_gguf.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -705,6 +705,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
705705
if chkhsh == "ccc2ef013c104be7bae2965776d611e1d7a8a2a9c547dd93a682c9a9fc80352e":
706706
# ref: https://huggingface.co/Xenova/gpt-4o
707707
res = "gpt-4o"
708+
if chkhsh == "7dec86086fcc38b66b7bc1575a160ae21cf705be7718b9d5598190d7c12db76f":
709+
# ref: https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k
710+
res = "superbpe"
708711

709712
if res is None:
710713
logger.warning("\n")

convert_hf_to_gguf_update.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,7 @@ class TOKENIZER_TYPE(IntEnum):
110110
{"name": "deepseek-v3", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-V3"},
111111
{"name": "deepseek-r1-qwen", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"},
112112
{"name": "gpt-4o", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Xenova/gpt-4o", },
113+
{"name": "superbpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/UW/OLMo2-8B-SuperBPE-t180k", },
113114
]
114115

115116

include/llama.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,7 @@ extern "C" {
107107
LLAMA_VOCAB_PRE_TYPE_MINERVA = 27,
108108
LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM = 28,
109109
LLAMA_VOCAB_PRE_TYPE_GPT4O = 29,
110+
LLAMA_VOCAB_PRE_TYPE_SUPERBPE = 30,
110111
};
111112

112113
enum llama_rope_type {

src/llama-vocab.cpp

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -400,6 +400,12 @@ struct llm_tokenizer_bpe : llm_tokenizer {
400400
"[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])?|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])?|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
401401
};
402402
break;
403+
case LLAMA_VOCAB_PRE_TYPE_SUPERBPE:
404+
regex_exprs = {
405+
"\\p{N}+",
406+
"(?=(\\d{3})+(?!\\d))",
407+
};
408+
break;
403409
default:
404410
// default regex for BPE tokenization pre-processing
405411
regex_exprs = {
@@ -1604,6 +1610,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
16041610
tokenizer_pre == "gpt-4o") {
16051611
pre_type = LLAMA_VOCAB_PRE_TYPE_GPT4O;
16061612
clean_spaces = false;
1613+
} else if (
1614+
tokenizer_pre == "superbpe") {
1615+
pre_type = LLAMA_VOCAB_PRE_TYPE_SUPERBPE;
1616+
clean_spaces = false;
16071617
} else {
16081618
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
16091619
}

0 commit comments

Comments (0)