Commit 2b59313 by iandennismiller
Parent(s): 0c687a2

koboldcpp launch script

update llama-cpp-python and koboldcpp
llama.sh supports min_p sampling
llama.sh supports capybara

Files changed:
- bin/llama-koboldcpp.sh +39 -0
- bin/llama-update.sh +22 -1
- bin/llama.sh +10 -4
- install.sh +1 -0
bin/llama-koboldcpp.sh
ADDED
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+# if conf does not exist, create it
+if [ ! -f "$HOME/.config/llama/llama-koboldcpp.conf" ]; then
+    mkdir -p "$HOME/.config/llama"
+    cat <<EOF > "$HOME/.config/llama/llama-koboldcpp.conf"
+LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
+LLAMA_CONTEXT_SIZE=8192
+
+KOBOLD_PORT=5000
+KOBOLD_LOG=$HOME/.local/var/llama-koboldcpp.log
+PYTHON_EXEC=$HOME/.virtualenvs/koboldcpp/bin/python
+EOF
+fi
+
+source "$HOME/.config/llama/llama-koboldcpp.conf"
+
+# if arg1 is "stop" then pkill the koboldcpp.py server
+if [[ $# -eq 1 ]] && [[ $1 == "stop" ]]; then
+    echo "Stopping koboldcpp server..."
+    pkill -f "koboldcpp.py"
+    echo "ok"
+    exit
+fi
+
+# start server and pipe stdout+stderr to log file
+cd ~/Work/koboldcpp || exit
+"$PYTHON_EXEC" koboldcpp.py \
+    --gpulayers 1 \
+    --model "$LLAMA_MODEL_NAME" \
+    --contextsize "$LLAMA_CONTEXT_SIZE" \
+    --threads 6 \
+    --skiplauncher \
+    --smartcontext \
+    --noblas \
+    --host "0.0.0.0" \
+    --port "$KOBOLD_PORT" \
+    > "$KOBOLD_LOG" 2>&1 &
+echo "Started koboldcpp server on port $KOBOLD_PORT"
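The launcher backgrounds koboldcpp.py and serves KoboldCpp's KoboldAI-compatible HTTP API on the configured port. A minimal smoke test, assuming the default conf written above (port 5000, log at ~/.local/var/llama-koboldcpp.log) and that your KoboldCpp checkout exposes the usual /api/v1 endpoints:

# start the server, then watch the log until the model finishes loading
llama-koboldcpp.sh
tail -f "$HOME/.local/var/llama-koboldcpp.log"

# ask which model is loaded, then request a short ChatML-formatted completion
curl -s http://localhost:5000/api/v1/model
curl -s http://localhost:5000/api/v1/generate \
    -H 'Content-Type: application/json' \
    -d '{"prompt": "<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n", "max_length": 64}'

# shut it down again
llama-koboldcpp.sh stop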
bin/llama-update.sh
CHANGED
@@ -5,11 +5,12 @@ if [ ! -f "$HOME/.config/llama/llama-update.conf" ]; then
     mkdir -p "$HOME/.config/llama"
     cat <<EOF > "$HOME/.config/llama/llama-update.conf"
 LLAMA_CPP_GIT=$HOME/Work/llama.cpp
+KOBOLDCPP_GIT=$HOME/Work/koboldcpp
 BIN_PATH=$HOME/.local/bin
 EOF
 fi
 
-source $HOME/.config/llama/llama-update.conf
+source "$HOME"/.config/llama/llama-update.conf
 
 # if llama.cpp directory does not exist, clone it
 if [ ! -d "$LLAMA_CPP_GIT" ]; then
@@ -33,3 +34,23 @@ install -c -v bin/ggml-metal.metal "$BIN_PATH"
 install -c -v -m 755 bin/llava-cli "$BIN_PATH"/llava
 install -c -v -m 755 bin/finetune "$BIN_PATH/llama-finetune"
 install -c -v -m 755 bin/speculative "$BIN_PATH/llama-speculative"
+
+# configured for Metal
+CMAKE_ARGS="-DLLAMA_METAL=on" "$HOME/.virtualenvs/llama.cpp/bin/pip" install \
+    'llama-cpp-python[server]' \
+    --force-reinstall \
+    --upgrade \
+    --no-cache-dir
+
+# if koboldcpp directory does not exist, clone it
+if [ ! -d "$KOBOLDCPP_GIT" ]; then
+    git clone https://github.com/LostRuins/koboldcpp "$KOBOLDCPP_GIT"
+else
+    cd "$KOBOLDCPP_GIT" || exit
+    git pull
+fi
+
+# update koboldcpp
+cd "$KOBOLDCPP_GIT" || exit
+make clean
+make LLAMA_METAL=1  # LLAMA_CLBLAST=1
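After an update run, both the Metal-enabled llama-cpp-python wheel and the koboldcpp checkout should be usable. A quick sanity check, assuming the virtualenv and clone paths from the script above:

# the forced reinstall should leave an importable llama_cpp module behind
"$HOME/.virtualenvs/llama.cpp/bin/python" -c 'import llama_cpp; print(llama_cpp.__version__)'

# the launcher script expects to run koboldcpp.py from this directory
ls "$HOME/Work/koboldcpp/koboldcpp.py"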
bin/llama.sh
CHANGED
@@ -8,8 +8,9 @@ function llama_init_environment {
     LLAMA_TEMPERATURE=0.1
     LLAMA_CONTEXT_SIZE=4096
     LLAMA_REPETITION_PENALTY=1.15
-    LLAMA_TOP_P=0
-
+    LLAMA_TOP_P=1.0
+    LLAMA_MIN_P=0.3
+    LLAMA_TOP_K=0
     LLAMA_TEMPLATE=chatml
     LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
     LLAMA_MODELS_PATH=$HOME/.ai/models/llama
@@ -52,10 +53,12 @@ function llama_interactive {
         --in-suffix "$(get_model_suffix $LLAMA_TEMPLATE)" \
         --reverse-prompt "$(get_model_prefix $LLAMA_TEMPLATE)" \
         --reverse-prompt "<|im_end|>" \
+        --reverse-prompt "</s>" \
         --threads "6" \
         --temp "$LLAMA_TEMPERATURE" \
         --top-p "$LLAMA_TOP_P" \
         --top-k "$LLAMA_TOP_K" \
+        --min-p "$LLAMA_MIN_P" \
         --repeat-penalty "$LLAMA_REPETITION_PENALTY" \
         --ctx-size "$LLAMA_CONTEXT_SIZE" \
         --batch-size 1024 \
@@ -79,7 +82,7 @@ function get_model_prefix {
         *alpaca*|*upstage*|*airoboros*|*hermes*)
             printf "### Instruction: "
             ;;
-        *vicuna*|*wizardlm*|*samantha*|*scarlett*)
+        *vicuna*|*wizardlm*|*samantha*|*scarlett*|*capybara*)
             printf "USER: "
             ;;
         *based*|*yi*)
@@ -105,7 +108,7 @@ function get_model_suffix {
         *alpaca*|*upstage*|*airoboros*|*hermes*)
             printf "### Response:"
             ;;
-        *vicuna*|*wizardlm*|*samantha*|*scarlett*)
+        *vicuna*|*wizardlm*|*samantha*|*scarlett*|*capybara*)
            printf "ASSISTANT:"
             ;;
         *based*|*yi*)
@@ -149,6 +152,9 @@ function get_model_prompt {
         *orca*)
             echo "$LLAMA_PROMPT_PATH/orca.txt"
             ;;
+        *capybara*)
+            echo "$LLAMA_PROMPT_PATH/idm-gpt-lite.txt"
+            ;;
         *)
             echo "$LLAMA_PROMPT_PATH/idm-gpt-lite.txt"
             ;;
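The sampler change effectively switches llama.sh from nucleus/top-k truncation to min-p: with top-p at 1.0 and top-k at 0 those filters pass everything through, while --min-p 0.3 discards any token whose probability falls below 30% of the most probable token's probability. A sketch of an equivalent direct llama.cpp invocation with these defaults; the binary name "llama" is an assumption here, so substitute whatever name your llama.cpp main binary is installed under:

# hypothetical direct call mirroring llama.sh's new sampler defaults
llama \
    --model "$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf" \
    --ctx-size 4096 \
    --temp 0.1 \
    --top-p 1.0 \
    --top-k 0 \
    --min-p 0.3 \
    --repeat-penalty 1.15 \
    --interactive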
install.sh
CHANGED
@@ -13,6 +13,7 @@ install -C -v ./bin/llama-finetune.sh ~/.local/bin
 install -C -v ./bin/llama.sh ~/.local/bin
 install -C -v ./bin/llama-menu.sh ~/.local/bin
 install -C -v ./bin/llama-server.sh ~/.local/bin
+install -C -v ./bin/llama-koboldcpp.sh ~/.local/bin
 install -C -v ./bin/llava-describe.sh ~/.local/bin
 install -C -v ./bin/llama-prompt.sh ~/.local/bin
 
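With llama-koboldcpp.sh added to install.sh, the whole update-and-run loop can be driven from the repository root. A sketch of that flow, assuming ~/.local/bin is on PATH:

# copy the helper scripts into ~/.local/bin (install -C only overwrites files whose contents changed)
./install.sh

# rebuild llama.cpp, llama-cpp-python, and koboldcpp, then launch the server
llama-update.sh
llama-koboldcpp.sh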