#!/bin/bash
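#
# llama-main: a small wrapper around the `llama` CLI. It loads defaults from
# ~/.config/llama/llama-main.conf, picks a chat template and prompt file
# based on the model name, and starts an interactive session.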
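# Create a default config file on first run, source it, and make sure the
# cache directory exists.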
function llama_init_environment {
  if [ ! -f "$HOME/.config/llama/llama-main.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-main.conf"
LLAMA_MODEL_NAME=teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_TEMPLATE=chatml
LLAMA_MODELS_PATH=$HOME/.ai/models/llama
LLAMA_PROMPT_PATH=$HOME/.local/share/llama/prompts
LLAMA_CACHE_PATH=$HOME/.cache/llama
LLAMA_CONTEXT_SIZE=4096
LLAMA_TEMPERATURE=1.6
LLAMA_TOP_P=1.0
LLAMA_MIN_P=0.1
LLAMA_TOP_K=0
LLAMA_REPETITION_PENALTY=1.15
EOF
  fi

  source "$HOME/.config/llama/llama-main.conf"

  if [ ! -d "$LLAMA_CACHE_PATH" ]; then
    mkdir -p "$LLAMA_CACHE_PATH"
  fi
}
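# Start an interactive chat session. The model name and context size come
# from the config, or can be overridden on the command line:
#   llama <model_name> <context_size>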
function llama_interactive {
  if [[ $# -eq 2 ]]; then
    LLAMA_MODEL_NAME=$1
    LLAMA_CONTEXT_SIZE=$2
  fi

  if [[ $# -lt 2 ]] && [[ $# -gt 0 ]]; then
    echo "Error: 2 arguments are required."
    echo "Usage: llama <model_name> <context_size>"
    echo "Example: llama teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf 4096"
    return 1
  fi

  LLAMA_TEMPLATE=$(get_template_for_model "$LLAMA_MODEL_NAME")

  llama \
    --n-gpu-layers 1 \
    --model "$LLAMA_MODELS_PATH/$LLAMA_MODEL_NAME" \
    --file "$(get_model_prompt "$LLAMA_MODEL_NAME")" \
    --in-prefix "$(get_model_prefix "$LLAMA_TEMPLATE")" \
    --in-suffix "$(get_model_suffix "$LLAMA_TEMPLATE")" \
    --reverse-prompt "$(get_model_prefix "$LLAMA_TEMPLATE")" \
    --reverse-prompt "<|im_end|>" \
    --reverse-prompt "</s>" \
    --threads 1 \
    --temp "$LLAMA_TEMPERATURE" \
    --top-p "$LLAMA_TOP_P" \
    --top-k "$LLAMA_TOP_K" \
    --min-p "$LLAMA_MIN_P" \
    --repeat-penalty "$LLAMA_REPETITION_PENALTY" \
    --ctx-size "$LLAMA_CONTEXT_SIZE" \
    --batch-size 1024 \
    --n-predict -1 \
    --keep -1 \
    --instruct \
    --no-mmap \
    --color \
    --escape \
    --log-disable
}
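# Print the user-turn prompt prefix for the given chat template; unknown
# templates fall back to a plain "Input: " prefix.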
function get_model_prefix {
  case $1 in
    chatml)
      printf "<|im_start|>user\\\n"
      ;;
    instruct)
      printf "[INST] "
      ;;
    alpaca)
      printf "### Instruction: "
      ;;
    vicuna)
      printf "USER: "
      ;;
    *)
      printf "Input: "
      ;;
  esac
}
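# Print the matching assistant-turn prompt suffix for the given chat template.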
function get_model_suffix {
  case $1 in
    chatml)
      printf "<|im_end|>\n<|im_start|>assistant\\\n"
      ;;
    instruct)
      printf " [/INST]"
      ;;
    alpaca)
      printf "### Response:"
      ;;
    vicuna)
      printf "ASSISTANT:"
      ;;
    *)
      printf "Output:"
      ;;
  esac
}
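# Guess the chat template from the model name; unrecognized models default
# to chatml.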
function get_template_for_model {
  case $1 in
    *dolphin*)
      echo "chatml"
      ;;
    *mixtral-8x7b-instruct*)
      echo "instruct"
      ;;
    *upstage*|*airoboros*|*hermes*)
      echo "alpaca"
      ;;
    *wizardlm*|*samantha*|*scarlett*|*capybara*)
      echo "vicuna"
      ;;
    *)
      echo "chatml"
      ;;
  esac
}
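# Pick a system prompt file under $LLAMA_PROMPT_PATH based on the model name.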
function get_model_prompt {
  case $1 in
    *guanaco*)
      echo "$LLAMA_PROMPT_PATH/guanaco.txt"
      ;;
    *alpaca*|*upstage*|*airoboros*|*hermes*)
      echo "$LLAMA_PROMPT_PATH/alpaca.txt"
      ;;
    *dolphin*)
      echo "$LLAMA_PROMPT_PATH/chatml-gpt.txt"
      ;;
    *vicuna*|*wizardlm*)
      echo "$LLAMA_PROMPT_PATH/vicuna-v11.txt"
      ;;
    *scarlett*)
      echo "$LLAMA_PROMPT_PATH/scarlett.txt"
      ;;
    *samantha*)
      echo "$LLAMA_PROMPT_PATH/samantha.txt"
      ;;
    *based*)
      echo "$LLAMA_PROMPT_PATH/based.txt"
      ;;
    *capybara*)
      echo "$LLAMA_PROMPT_PATH/capybara.txt"
      ;;
    *yi*)
      echo "$LLAMA_PROMPT_PATH/yi.txt"
      ;;
    *orca*)
      echo "$LLAMA_PROMPT_PATH/orca.txt"
      ;;
    *)
      echo "$LLAMA_PROMPT_PATH/idm-gpt-lite.txt"
      ;;
  esac
}
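# Entry point: load the configuration, then start the interactive session,
# passing through any command-line arguments.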
llama_init_environment
llama_interactive "$@"