iandennismiller committed on
Commit
5796f82
1 Parent(s): 5c8acab

openai-compatible server

Browse files
Files changed (2) hide show
  1. bin/llama-server.sh +24 -0
  2. install.sh +1 -0
bin/llama-server.sh ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#
# llama-server.sh — launch an OpenAI-compatible llama.cpp server.
#
# On first run, writes a default config to ~/.config/llama/llama-server.conf;
# edit that file to change the model path, context size, port, or interpreter.
#
# One-time environment setup (kept for reference):
#   pip uninstall llama-cpp-python -y
#   CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
#   pip install 'llama-cpp-python[server]'

set -euo pipefail

readonly CONF="$HOME/.config/llama/llama-server.conf"

# If the config does not exist, create it with defaults.
# NOTE: the heredoc delimiter is deliberately unquoted so $HOME expands NOW,
# baking absolute paths into the generated config file.
if [ ! -f "$CONF" ]; then
  mkdir -p "${CONF%/*}"
  cat <<EOF > "$CONF"
LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_CONTEXT_SIZE=8192
LLAMA_PORT=8000
PYTHON_EXEC=$HOME/.virtualenvs/llama.cpp/bin/python
EOF
fi

# shellcheck source=/dev/null
source "$CONF"

# Quote PYTHON_EXEC so interpreter paths containing spaces still work.
"$PYTHON_EXEC" -m llama_cpp.server \
  --model "$LLAMA_MODEL_NAME" \
  --n_ctx "$LLAMA_CONTEXT_SIZE" \
  --n_gpu_layers 1 \
  --port "$LLAMA_PORT"
install.sh CHANGED
@@ -9,5 +9,6 @@ install -C -v ./bin/llama-update.sh ~/.local/bin
9
  install -C -v ./bin/llama-finetune.sh ~/.local/bin
10
  install -C -v ./bin/llama.sh ~/.local/bin
11
  install -C -v ./bin/llama-menu.sh ~/.local/bin
 
12
 
13
  echo "done"
 
9
  install -C -v ./bin/llama-finetune.sh ~/.local/bin
10
  install -C -v ./bin/llama.sh ~/.local/bin
11
  install -C -v ./bin/llama-menu.sh ~/.local/bin
12
+ install -C -v ./bin/llama-server.sh ~/.local/bin
13
 
14
  echo "done"