iandennismiller committed
Commit 48b1acd
Parent: 09d7308

add script that converts HF torch models to gguf

Files changed (3):
  1. Readme.md → README.md +8 -0
  2. bin/llama-hf-to-q6_k.sh +69 -0
  3. install.sh +3 -0
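
For context, the new script takes a single Hugging Face `account/model` argument, downloads the torch weights, converts them to an f16 GGUF, and quantizes that to Q6_K. A hypothetical invocation (the repository name below is illustrative, not part of this commit):

```bash
# hypothetical repo name; any Hugging Face account/model identifier works
llama-hf-to-q6_k.sh TheBloke/Llama-2-7B-fp16
```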
Readme.md → README.md RENAMED
@@ -1,3 +1,11 @@
 # llama.cpp scripts
 
 These are scripts that have helped me to manage llama.cpp, llama models, etc.
+
+## Install
+
+Scripts are installed to `~/.local/bin`.
+
+```bash
+bash install.sh
+```
bin/llama-hf-to-q6_k.sh ADDED
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+if [ $# -ne 1 ]; then
+    echo "Usage: $0 <hf_name>"
+    exit 1
+fi
+
+# if ~/.config/llama-hf-to-q6_k.conf does not exist, create it with defaults
+if [ ! -f ~/.config/llama-hf-to-q6_k.conf ]; then
+    cat <<EOF > ~/.config/llama-hf-to-q6_k.conf
+MODELS_ROOT=~/.ai/models/llama/
+HF_DOWNLOADER=~/.ai/bin/hfdownloader
+STORAGE_PATH=~/scratch/hfdownloader
+PYTHON3_EXEC=~/.virtualenvs/llama.cpp/bin/python3
+QUANTIZE_EXEC=~/Work/llama.cpp/build/bin/quantize
+CONVERT_PY=~/Work/llama.cpp/convert.py
+EOF
+fi
+
+source ~/.config/llama-hf-to-q6_k.conf
+
+HF_NAME=$1
+ACCOUNT_NAME=$(echo "$HF_NAME" | cut -d '/' -f 1)
+MODEL_NAME=$(echo "$HF_NAME" | cut -d '/' -f 2)
+MODEL_NAME_LOWER=$(echo "$MODEL_NAME" | tr '[:upper:]' '[:lower:]')
+MODEL_F16="$MODELS_ROOT/$ACCOUNT_NAME/$MODEL_NAME/${MODEL_NAME_LOWER}-f16.gguf"
+MODEL_Q6_K="$MODELS_ROOT/$ACCOUNT_NAME/$MODEL_NAME/${MODEL_NAME_LOWER}-q6_k.gguf"
+
+cat <<EOF
+HF_NAME: $HF_NAME
+ACCOUNT_NAME: $ACCOUNT_NAME
+MODELS_ROOT: $MODELS_ROOT
+MODEL_NAME: $MODEL_NAME
+MODEL_NAME_LOWER: $MODEL_NAME_LOWER
+MODEL_F16: $MODEL_F16
+MODEL_Q6_K: $MODEL_Q6_K
+STORAGE_PATH: $STORAGE_PATH
+EOF
+
+"$HF_DOWNLOADER" \
+    --model "$HF_NAME" \
+    --storage "$STORAGE_PATH"
+
+mkdir -p "$MODELS_ROOT/$HF_NAME"
+
+HF_TORCH_MODEL=$(ls "$STORAGE_PATH"/"${ACCOUNT_NAME}_${MODEL_NAME}"/*00001*)
+
+cat <<EOF
+HF_TORCH_MODEL: $HF_TORCH_MODEL
+EOF
+
+ls -alFh "$HF_TORCH_MODEL"
+
+"$PYTHON3_EXEC" \
+    "$CONVERT_PY" \
+    --outtype f16 \
+    --outfile "$MODEL_F16" \
+    "$HF_TORCH_MODEL"
+
+ls -alFh "$MODEL_F16"
+
+"$QUANTIZE_EXEC" \
+    "$MODEL_F16" \
+    "$MODEL_Q6_K" Q6_K
+
+ls -alFh "$MODEL_Q6_K"
+
+# re 'Exception: Expected added token IDs to be sequential'
+# https://github.com/ggerganov/llama.cpp/issues/3583
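
As a usage note, the script writes the defaults above to `~/.config/llama-hf-to-q6_k.conf` on first run and sources that file on every run, so the tool paths can be retargeted by editing it. A minimal sketch, assuming a system-wide llama.cpp checkout (all paths below are hypothetical):

```bash
# ~/.config/llama-hf-to-q6_k.conf -- hypothetical paths; adjust to your machine
MODELS_ROOT=~/models/llama
HF_DOWNLOADER=/usr/local/bin/hfdownloader
STORAGE_PATH=/tmp/hfdownloader
PYTHON3_EXEC=/usr/bin/python3
QUANTIZE_EXEC=/opt/llama.cpp/build/bin/quantize
CONVERT_PY=/opt/llama.cpp/convert.py
```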
install.sh ADDED
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+install -d ~/.local/bin && install ./bin/llama-hf-to-q6_k.sh ~/.local/bin
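
The installer creates `~/.local/bin` if it does not exist and copies the script there with execute permission. Assuming `~/.local/bin` is on `PATH`, a quick post-install check might look like:

```bash
bash install.sh
command -v llama-hf-to-q6_k.sh   # should print ~/.local/bin/llama-hf-to-q6_k.sh
```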