#!/bin/bash
set -euo pipefail

# One-time setup (macOS, Metal backend):
# pip uninstall llama-cpp-python -y
# CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
# pip install 'llama-cpp-python[server]'

# If the config file does not exist, create it with defaults.
if [ ! -f "$HOME/.config/llama/llama-server.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-server.conf"
LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_CONTEXT_SIZE=8192
LLAMA_PORT=8000
PYTHON_EXEC=$HOME/.virtualenvs/llama.cpp/bin/python
EOF
fi
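
# Note: the heredoc above is unquoted, so $HOME expands when the file is
# written and the generated conf contains absolute paths. Edit the conf
# afterwards to point at a different model, context size, or port.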

source "$HOME/.config/llama/llama-server.conf"

# Launch the OpenAI-compatible server. --n_gpu_layers 1 enables GPU (Metal)
# offload, matching the install notes above.
"$PYTHON_EXEC" -m llama_cpp.server \
    --model "$LLAMA_MODEL_NAME" \
    --n_ctx "$LLAMA_CONTEXT_SIZE" \
    --n_gpu_layers 1 \
    --port "$LLAMA_PORT"
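
# Once the server is up, it can be smoke-tested from another shell. A minimal
# sketch, assuming the OpenAI-compatible /v1/completions route that
# llama_cpp.server exposes and the default port from the conf above (the
# prompt and max_tokens values are illustrative):
#
#   curl "http://localhost:8000/v1/completions" \
#       -H "Content-Type: application/json" \
#       -d '{"prompt": "The capital of France is", "max_tokens": 16}'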