1 parent 696ebd7 commit 2601681
scripts/convert_gguf.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+MODEL_NAME=$1
+echo $MODEL_NAME
+
+git lfs install
+
+export CUDA_VISIBLE_DEVICES=-1
+
+REPO_URL=https://huggingface.co/yuiseki/$MODEL_NAME
+echo $REPO_URL
+
+status_code=$(curl --write-out %{http_code} --silent --output /dev/null $REPO_URL)
+
+if [[ "$status_code" -ne 200 ]] ; then
+  echo "!!! Model: ${MODEL_NAME} NOT FOUND !!!"
+  exit 1
+fi
+
+REPO_PATH=git@hf.co:yuiseki/$MODEL_NAME.git
+
+
+git clone $REPO_PATH
+python3 ~/llama.cpp/convert.py $MODEL_NAME
+~/llama.cpp/quantize $MODEL_NAME/ggml-model-f16.gguf $MODEL_NAME/$MODEL_NAME-Q4_K_M.gguf Q4_K_M
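
For context, a minimal usage sketch of the script above, not part of the commit. It assumes llama.cpp is checked out at ~/llama.cpp with convert.py and a built quantize binary, git-lfs is installed, SSH access to Hugging Face is configured, and the model repository exists under the yuiseki namespace; the model name below is a made-up placeholder.

# Hypothetical invocation (model name "tinyllama-example" is illustrative only)
bash scripts/convert_gguf.sh tinyllama-example
# Expected outputs on success:
#   tinyllama-example/ggml-model-f16.gguf             (f16 GGUF produced by convert.py)
#   tinyllama-example/tinyllama-example-Q4_K_M.gguf   (4-bit Q4_K_M quantization)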