File size: 1,818 Bytes
48b1acd
 
 
 
 
 
 
d6ab723
0c7ad1d
 
 
35da1d1
 
 
 
 
 
48b1acd
 
 
0c7ad1d
48b1acd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10aeac7
48b1acd
 
 
 
 
 
 
 
 
 
0c7ad1d
48b1acd
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
#!/bin/bash
#
# llama-hf-to-q6_k — download a Hugging Face model and convert it to a
# Q6_K-quantized GGUF using llama.cpp's convert.py and quantize.
#
# Usage: llama-hf-to-q6_k <account/model>
#
# Paths to the downloader, python env, and llama.cpp tools are read from
# ~/.config/llama/llama-hf-to-q6_k.conf; a default config is created on
# first run so it can be edited afterwards.

set -euo pipefail

if [ $# -ne 1 ]; then
    echo "Usage: $0 <hf_name>" >&2
    exit 1
fi

CONF="$HOME/.config/llama/llama-hf-to-q6_k.conf"

# If the config does not exist yet, create it with default paths.
if [ ! -f "$CONF" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$CONF"
MODELS_ROOT=$HOME/.ai/models/llama/
HF_DOWNLOADER=$HOME/.ai/bin/hfdownloader
STORAGE_PATH=$HOME/scratch/hfdownloader
PYTHON3_EXEC=$HOME/.virtualenvs/llama.cpp/bin/python3
QUANTIZE_EXEC=$HOME/Work/llama.cpp/build/bin/quantize
CONVERT_PY=$HOME/Work/llama.cpp/convert.py
EOF
fi

# shellcheck source=/dev/null
source "$CONF"

HF_NAME=$1
# HF names are "<account>/<model>"; split with parameter expansion
# instead of spawning cut twice.
ACCOUNT_NAME=${HF_NAME%%/*}
MODEL_NAME=${HF_NAME#*/}
MODEL_NAME_LOWER=$(echo "$MODEL_NAME" | tr '[:upper:]' '[:lower:]')
MODEL_F16="$MODELS_ROOT/$ACCOUNT_NAME/$MODEL_NAME/${MODEL_NAME_LOWER}-f16.gguf"
MODEL_Q6_K="$MODELS_ROOT/$ACCOUNT_NAME/$MODEL_NAME/${MODEL_NAME_LOWER}-q6_k.gguf"

cat <<EOF
HF_NAME:            $HF_NAME
ACCOUNT_NAME:       $ACCOUNT_NAME
MODELS_ROOT:        $MODELS_ROOT
MODEL_NAME:         $MODEL_NAME
MODEL_NAME_LOWER:   $MODEL_NAME_LOWER
MODEL_F16:          $MODEL_F16
MODEL_Q6_K:         $MODEL_Q6_K
STORAGE_PATH:       $STORAGE_PATH
EOF

"$HF_DOWNLOADER" \
    --model "$HF_NAME" \
    --storage "$STORAGE_PATH"

mkdir -p "$MODELS_ROOT/$HF_NAME"

# hfdownloader stores the model under "<account>_<model>"; convert.py only
# needs the first shard (*00001*) and locates the rest itself.  Use a glob
# instead of parsing ls output, and fail early if no shard is present.
shards=("$STORAGE_PATH/${ACCOUNT_NAME}_${MODEL_NAME}"/*00001*)
HF_TORCH_MODEL=${shards[0]}
if [ ! -e "$HF_TORCH_MODEL" ]; then
    echo "error: no *00001* shard found under $STORAGE_PATH/${ACCOUNT_NAME}_${MODEL_NAME}" >&2
    exit 1
fi

cat <<EOF
HF_TORCH_MODEL:     $HF_TORCH_MODEL
EOF

ls -alFh "$HF_TORCH_MODEL"

# Convert the torch checkpoint to an f16 GGUF.
"$PYTHON3_EXEC" \
    "$CONVERT_PY" \
    --outtype f16 \
    --outfile "$MODEL_F16" \
    "$HF_TORCH_MODEL"

ls -alFh "$MODEL_F16"

# Quantize f16 -> Q6_K.
"$QUANTIZE_EXEC" \
    "$MODEL_F16" \
    "$MODEL_Q6_K" Q6_K

ls -alFh "$MODEL_Q6_K"

# re 'Exception: Expected added token IDs to be sequential'
# https://github.com/ggerganov/llama.cpp/issues/3583