iandennismiller committed
Commit 22e19ef
Parent: a7e8a57

use llama.cpp server instead of llama-cpp-python

Files changed (2):
  1. bin/llama-server.sh +6 -12
  2. bin/llama-update.sh +1 -7
bin/llama-server.sh CHANGED
@@ -1,19 +1,13 @@
 #!/bin/bash
 
-# pip uninstall llama-cpp-python -y
-# CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
-# pip install 'llama-cpp-python[server]'
-
 # if conf does not exist, create it
 if [ ! -f "$HOME/.config/llama/llama-server.conf" ]; then
     mkdir -p "$HOME/.config/llama"
     cat <<EOF > "$HOME/.config/llama/llama-server.conf"
 LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
 LLAMA_CONTEXT_SIZE=8192
-
 LLAMA_PORT=8000
 LLAMA_LOG=$HOME/.local/var/llama-server.log
-PYTHON_EXEC=$HOME/.virtualenvs/llama.cpp/bin/python
 EOF
 fi
 
@@ -21,17 +15,17 @@ source "$HOME/.config/llama/llama-server.conf"
 
 # if arg1 is "stop" then pkill the koboldcpp.py server
 if [[ $# -eq 1 ]] && [[ $1 == "stop" ]]; then
-    echo "Stopping llama_cpp.server"
-    pkill -f "llama_cpp.server"
+    echo "Stopping llama server"
+    pkill -f "llama-server"
     echo "ok"
     exit
 fi
 
 # start server and pipe stdout+stderr to log file
-$PYTHON_EXEC -m llama_cpp.server \
+llama-server \
     --model "$LLAMA_MODEL_NAME" \
-    --n_ctx "$LLAMA_CONTEXT_SIZE" \
-    --n_gpu_layers 1 \
+    --ctx-size "$LLAMA_CONTEXT_SIZE" \
+    --n-gpu-layers 1 \
     --port "$LLAMA_PORT" \
     > "$LLAMA_LOG" 2>&1 &
-echo "Started llama-cpp-python server"
+echo "Started llama.cpp server"
bin/llama-update.sh CHANGED
@@ -34,13 +34,7 @@ install -c -v bin/ggml-metal.metal "$BIN_PATH"
 install -c -v -m 755 bin/llava-cli "$BIN_PATH"/llava
 install -c -v -m 755 bin/finetune "$BIN_PATH/llama-finetune"
 install -c -v -m 755 bin/speculative "$BIN_PATH/llama-speculative"
-
-# configured for Metal
-CMAKE_ARGS="-DLLAMA_METAL=on" "$HOME/.virtualenvs/llama.cpp/bin/pip" install \
-    'llama-cpp-python[server]' \
-    --force-reinstall \
-    --upgrade \
-    --no-cache-dir
+install -c -v -m 755 bin/server "$BIN_PATH/llama-server"
 
 # if koboldcpp directory does not exist, clone it
 if [ ! -d "$KOBOLDCPP_GIT" ]; then
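The added install line presumes the llama.cpp server binary has already been built earlier in llama-update.sh; that build step is outside this hunk. As a rough sketch only (the checkout path and build layout are assumptions, not taken from the script), a Metal-enabled CMake build that produces the server binary could look like:

# Sketch only: the real build commands in llama-update.sh are not shown in this hunk.
# Assumes a llama.cpp checkout at a hypothetical path and a CMake build with Metal enabled,
# which places example binaries such as server under build/bin/.
cd "$HOME/src/llama.cpp"          # hypothetical checkout location
cmake -B build -DLLAMA_METAL=on
cmake --build build --config Release -j
ls build/bin/server               # the binary installed above as llama-server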