iandennismiller
committed
Commit a7e8a57
Parent(s): 2b59313
integrate Chat-UI
prompting with min-p
- bin/llama-chat-ui.sh +24 -0
- bin/llama-prompt.sh +10 -2
- bin/llama-server.sh +9 -0
- bin/llama.sh +3 -3
- install.sh +1 -0
bin/llama-chat-ui.sh
ADDED
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# if conf does not exist, create it
+if [ ! -f "$HOME/.config/llama/llama-chat-ui.conf" ]; then
+    mkdir -p "$HOME/.config/llama"
+    cat <<EOF > "$HOME/.config/llama/llama-chat-ui.conf"
+CHAT_UI_LOG=$HOME/.local/var/llama-chat-ui.log
+EOF
+fi
+
+source "$HOME/.config/llama/llama-chat-ui.conf"
+
+# if arg1 is "stop" then pkill the chat-ui dev server
+if [[ $# -eq 1 ]] && [[ $1 == "stop" ]]; then
+    echo "Stopping chat-ui server..."
+    pkill -f "chat-ui/node_modules/.bin/vite"
+    echo "ok"
+    exit
+fi
+
+# start server and pipe stdout+stderr to log file
+cd ~/Work/chat-ui || exit
+npm run dev > "$CHAT_UI_LOG" 2>&1 &
+echo "Started chat-ui server"
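Usage follows the same pattern as the other launcher scripts in this repo. A minimal sketch, assuming install.sh has put llama-chat-ui.sh on the PATH and the default conf is in place:

# start the Chat-UI dev server in the background; output goes to $CHAT_UI_LOG
llama-chat-ui.sh

# follow the dev-server log (default path written by the script on first run)
tail -f ~/.local/var/llama-chat-ui.log

# stop the background vite process again
llama-chat-ui.sh stop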
bin/llama-prompt.sh
CHANGED
@@ -6,6 +6,10 @@ if [ ! -f "$HOME/.config/llama/llama-prompt.conf" ]; then
 cat <<EOF > "$HOME/.config/llama/llama-prompt.conf"
 LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
 LLAMA_CONTEXT_SIZE=8192
+LLAMA_TEMPERATURE=1.0
+LLAMA_TOP_P=1.0
+LLAMA_MIN_P=0.3
+LLAMA_TOP_K=0
 EOF
 fi
 
@@ -31,6 +35,10 @@ llama \
 --file "$1" \
 --model "$LLAMA_MODEL_NAME" \
 --ctx-size "$LLAMA_CONTEXT_SIZE" \
---…
---…
+--temp "$LLAMA_TEMPERATURE" \
+--min-p "$LLAMA_MIN_P" \
+--top-p "$LLAMA_TOP_P" \
+--top-k "$LLAMA_TOP_K" \
+--min-p "$LLAMA_MIN_P" \
+--threads 6 \
 --log-disable 2> /dev/null | fmt -w 80
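The new defaults shift sampling toward min-p: --top-k 0 and --top-p 1.0 effectively disable top-k and nucleus filtering, so at --temp 1.0 only --min-p 0.3 prunes the distribution (tokens whose probability is below 30% of the top token's are dropped). --min-p appears twice in the hunk, but both occurrences expand to the same $LLAMA_MIN_P, so the duplicate is harmless. A sketch of the command the script now assembles, with the default conf values substituted and a made-up prompt file standing in for $1:

llama \
    --file prompt.txt \
    --model "$LLAMA_MODEL_NAME" \
    --ctx-size 8192 \
    --temp 1.0 \
    --min-p 0.3 \
    --top-p 1.0 \
    --top-k 0 \
    --threads 6 \
    --log-disable 2> /dev/null | fmt -w 80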
bin/llama-server.sh
CHANGED
@@ -19,6 +19,14 @@ fi
 
 source "$HOME/.config/llama/llama-server.conf"
 
+# if arg1 is "stop" then pkill the llama_cpp.server process
+if [[ $# -eq 1 ]] && [[ $1 == "stop" ]]; then
+    echo "Stopping llama_cpp.server"
+    pkill -f "llama_cpp.server"
+    echo "ok"
+    exit
+fi
+
 # start server and pipe stdout+stderr to log file
 $PYTHON_EXEC -m llama_cpp.server \
 --model "$LLAMA_MODEL_NAME" \
@@ -26,3 +34,4 @@ $PYTHON_EXEC -m llama_cpp.server \
 --n_gpu_layers 1 \
 --port "$LLAMA_PORT" \
 > "$LLAMA_LOG" 2>&1 &
+echo "Started llama-cpp-python server"
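With this change the llama-cpp-python server gets the same stop convention as the new Chat-UI launcher. A quick sketch (model, port, and log paths come from llama-server.conf, which this commit leaves untouched):

# launch llama_cpp.server in the background, logging to $LLAMA_LOG
llama-server.sh

# later, kill the background server process
llama-server.sh stop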
bin/llama.sh
CHANGED
@@ -146,15 +146,15 @@ function get_model_prompt {
     *based*)
         echo "$LLAMA_PROMPT_PATH/based.txt"
         ;;
+    *capybara*)
+        echo "$LLAMA_PROMPT_PATH/capybara.txt"
+        ;;
     *yi*)
         echo "$LLAMA_PROMPT_PATH/yi.txt"
         ;;
     *orca*)
         echo "$LLAMA_PROMPT_PATH/orca.txt"
         ;;
-    *capybara*)
-        echo "$LLAMA_PROMPT_PATH/idm-gpt-lite.txt"
-        ;;
     *)
         echo "$LLAMA_PROMPT_PATH/idm-gpt-lite.txt"
         ;;
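get_model_prompt picks a prompt template by globbing against the model name, and the first matching branch wins; capybara models now get their own template instead of falling through to the idm-gpt-lite default. An isolated sketch of the new behaviour with a hypothetical model filename (capybara.txt is the prompt file this branch expects under $LLAMA_PROMPT_PATH):

model_name="nous-capybara-34b.Q4_K_M.gguf"   # hypothetical filename, for illustration only
case "$model_name" in
    *capybara*) echo "$LLAMA_PROMPT_PATH/capybara.txt" ;;      # branch added by this commit
    *)          echo "$LLAMA_PROMPT_PATH/idm-gpt-lite.txt" ;;  # previous fallback behaviour
esac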
install.sh
CHANGED
@@ -14,6 +14,7 @@ install -C -v ./bin/llama.sh ~/.local/bin
 install -C -v ./bin/llama-menu.sh ~/.local/bin
 install -C -v ./bin/llama-server.sh ~/.local/bin
 install -C -v ./bin/llama-koboldcpp.sh ~/.local/bin
+install -C -v ./bin/llama-chat-ui.sh ~/.local/bin
 install -C -v ./bin/llava-describe.sh ~/.local/bin
 install -C -v ./bin/llama-prompt.sh ~/.local/bin
 
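The new launcher only becomes available on the PATH after re-running the installer; a sketch, assuming the repo checkout is the current directory:

./install.sh        # install -C -v copies each script into ~/.local/bin, skipping files whose contents are unchanged
llama-chat-ui.sh    # now resolvable from anywhere on the PATH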