iandennismiller committed on
Commit 0c687a2
1 Parent(s): 904eb75

llama-server.sh logs to file

Files changed (1)
  1. bin/llama-server.sh +5 -1
bin/llama-server.sh CHANGED
@@ -10,15 +10,19 @@ if [ ! -f "$HOME/.config/llama/llama-server.conf" ]; then
 cat <<EOF > "$HOME/.config/llama/llama-server.conf"
 LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
 LLAMA_CONTEXT_SIZE=8192
+
 LLAMA_PORT=8000
+LLAMA_LOG=$HOME/.local/var/llama-server.log
 PYTHON_EXEC=$HOME/.virtualenvs/llama.cpp/bin/python
 EOF
 fi
 
 source "$HOME/.config/llama/llama-server.conf"
 
+# start server and pipe stdout+stderr to log file
 $PYTHON_EXEC -m llama_cpp.server \
     --model "$LLAMA_MODEL_NAME" \
     --n_ctx "$LLAMA_CONTEXT_SIZE" \
     --n_gpu_layers 1 \
-    --port "$LLAMA_PORT"
+    --port "$LLAMA_PORT" \
+    > "$LLAMA_LOG" 2>&1 &