#!/usr/bin/env bash
# Run llama.cpp speculative decoding: a small draft model proposes tokens,
# the big model verifies them. The prompt is read from stdin and a story
# template is appended after it.
#
# Usage: some-prompt-producer | this-script
# Requires: llama-speculative on PATH; model files under $HOME/.ai/models.
set -euo pipefail

# Entire prompt body arrives on stdin.
input="$(< /dev/stdin)"
# Template text appended after the input (swap the comment to summarize instead).
template="$(< "$HOME/Work/ai-strategy/template-story.txt")"
# template="$(< "$HOME/Work/ai-strategy/template-summary.txt")"

readonly BIG_MODEL="$HOME/.ai/models/llama/idm/Nous-Hermes-13B-GGUF/nous-hermes-llama2-13b.gguf.q6_K.bin"
readonly SMALL_MODEL="$HOME/.ai/models/llama/TheBloke/orca_mini_v3_7B-GGUF/orca_mini_v3_7b.Q4_K_M.gguf"

# Fail fast with a clear message rather than letting llama-speculative
# error out after startup.
[[ -f "$BIG_MODEL" ]]   || { printf 'missing target model: %s\n' "$BIG_MODEL" >&2; exit 1; }
[[ -f "$SMALL_MODEL" ]] || { printf 'missing draft model: %s\n' "$SMALL_MODEL" >&2; exit 1; }

# NOTE(review): grammar path is relative to the current working directory —
# confirm this script is always invoked from the expected location.
llama-speculative \
  --model "$BIG_MODEL" \
  --model-draft "$SMALL_MODEL" \
  --grammar-file ../../grammars/json_arr.gbnf \
  --escape \
  --n-gpu-layers 1 \
  --threads 6 \
  --n-predict 2048 \
  --ctx-size 4096 \
  --draft 16 \
  --temp -1 \
  --prompt "${input}\n\n${template}\n\n"
# The \n sequences above are passed literally by the shell; --escape tells
# llama.cpp to process them into real newlines.
# --temp -1: presumably selects greedy/deterministic sampling (llama.cpp
# treats non-positive temperature specially) — TODO confirm against the
# installed llama.cpp version.