1 parent 3557b91 · commit 21790d6
src/mistral_inference/main.py
@@ -26,7 +26,7 @@ def is_torchrun() -> bool:
 
 
 def load_tokenizer(model_path: Path) -> MistralTokenizer:
-    tokenizer = [f for f in os.listdir(Path(model_path)) if is_tekken(f) or is_sentencepiece(f)]
+    tokenizer = [f for f in os.listdir(model_path) if is_tekken(model_path / f) or is_sentencepiece(model_path / f)]
     assert (
         len(tokenizer) > 0
     ), f"No tokenizer in {model_path}, place a `tokenizer.model.[v1,v2,v3]` or `tekken.json` file in {model_path}."