function llama_init_environment {
    # Create a default config on first run. The heredoc delimiter is
    # unquoted, so $HOME expands now and the file stores absolute paths.
    if [ ! -f "$HOME/.config/llama/llama-assistant.conf" ]; then
        mkdir -p "$HOME/.config/llama"
        cat <<EOF > "$HOME/.config/llama/llama-assistant.conf"
LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_TEMPLATE=chatml
LLAMA_MODELS_PATH=$HOME/.ai/models/llama
LLAMA_PROMPT_PATH=$HOME/.local/share/llama/prompts
LLAMA_CACHE_PATH=$HOME/.cache/llama
LLAMA_CONTEXT_SIZE=4096
LLAMA_TEMPERATURE=1.6
LLAMA_TOP_P=1.0
LLAMA_MIN_P=0.1
LLAMA_TOP_K=0
LLAMA_REPETITION_PENALTY=1.15
EOF
    fi
    source "$HOME/.config/llama/llama-assistant.conf"
}
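
# A small guard, not part of the original script: fail fast if the configured
# model file is missing. This assumes llama.sh resolves LLAMA_MODEL_NAME
# relative to LLAMA_MODELS_PATH, which is what the config layout suggests.
# If used, call it after llama_init_environment and before llama.sh.
function llama_check_model {
    local model_file="$LLAMA_MODELS_PATH/$LLAMA_MODEL_NAME"
    if [ ! -f "$model_file" ]; then
        echo "llama-assistant: model file not found: $model_file" >&2
        return 1
    fi
}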
# Load (and, on first run, create) the config before using its variables.
llama_init_environment
llama.sh "$LLAMA_MODEL_NAME" "$LLAMA_TEMPLATE" "$LLAMA_CONTEXT_SIZE" "$LLAMA_TEMPERATURE"
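
# llama.sh itself is not shown here. As an illustrative sketch only, a wrapper
# like it could forward the sourced values to llama.cpp's llama-cli roughly as
# below; the flag names are llama-cli's, but the mapping is my assumption, and
# prompt/cache handling (LLAMA_PROMPT_PATH, LLAMA_CACHE_PATH) is omitted.
llama-cli \
    -m "$LLAMA_MODELS_PATH/$LLAMA_MODEL_NAME" \
    --chat-template "$LLAMA_TEMPLATE" \
    -c "$LLAMA_CONTEXT_SIZE" \
    --temp "$LLAMA_TEMPERATURE" \
    --top-p "$LLAMA_TOP_P" \
    --min-p "$LLAMA_MIN_P" \
    --top-k "$LLAMA_TOP_K" \
    --repeat-penalty "$LLAMA_REPETITION_PENALTY"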