#!/bin/bash
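
# Start or stop a local llama.cpp server.
# Settings are read from ~/.config/llama/llama-server.conf; a default config
# is written on first run. Run with no arguments to start the server, or with
# "stop" to shut it down.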

# Write a default config on first run. The unquoted EOF delimiter lets $HOME
# expand now, so the config file stores absolute paths.
if [ ! -f "$HOME/.config/llama/llama-server.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-server.conf"
LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_CONTEXT_SIZE=8192
LLAMA_PORT=8000
LLAMA_LOG=$HOME/.local/var/llama-server.log
EOF
fi
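
# The config can be edited afterwards, e.g. to point LLAMA_MODEL_NAME at a
# different GGUF file or to move the server to another port.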

source "$HOME/.config/llama/llama-server.conf"

# "stop" subcommand: kill a running server and exit.
if [[ $# -eq 1 && $1 == "stop" ]]; then
    echo "Stopping llama server"
    # Match on the full command line so only our llama-server process is killed.
    pkill -f "llama-server --model"
    echo "ok"
    exit 0
fi

# Make sure the log directory exists, then start the server in the background.
# --n-gpu-layers sets how many model layers are offloaded to the GPU.
mkdir -p "$(dirname "$LLAMA_LOG")"

llama-server \
    --model "$LLAMA_MODEL_NAME" \
    --ctx-size "$LLAMA_CONTEXT_SIZE" \
    --n-gpu-layers 1 \
    --port "$LLAMA_PORT" \
    > "$LLAMA_LOG" 2>&1 &

echo "Started llama.cpp server. Logging to $LLAMA_LOG"