#!/bin/bash
# from-scratch-training.sh
# Train a small LLaMA-style model from scratch on a Shakespeare corpus
# using llama.cpp's train-text-from-scratch example.
# Training data and vocabulary
MODEL_NAME=shakespeare
TRAIN_DATA=$HOME/scratch/shakespeare.txt
VOCAB=$HOME/.ai/models/ggml-vocab.bin

# Model hyperparameters: context length, embedding size, attention heads,
# layer count, and training batch size
LLAMA_CTX=256
LLAMA_EMBD=256
LLAMA_HEAD=32
LLAMA_LAYER=24
LLAMA_BATCH=16

# Output model and training checkpoint, named after the hyperparameters
MODEL_OUT=$HOME/.ai/models/llama/ggml-${MODEL_NAME}-${LLAMA_CTX}-${LLAMA_EMBD}x${LLAMA_HEAD}x${LLAMA_LAYER}-f32.bin
CHECKPOINT=$HOME/.ai/training/chk-${MODEL_NAME}-${LLAMA_CTX}-${LLAMA_EMBD}x${LLAMA_HEAD}x${LLAMA_LAYER}.bin
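
# Safeguard (an addition, assuming the paths above): writes to MODEL_OUT and
# CHECKPOINT fail if their parent directories are missing, so create them
# up front.
mkdir -p "$(dirname "$MODEL_OUT")" "$(dirname "$CHECKPOINT")"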
# Run training, resuming from the checkpoint if one exists;
# --checkpoint-out overwrites the same file, so the script can be
# interrupted and re-run to continue where it left off.
function train() {
    time "$HOME/Work/llama.cpp/build/bin/train-text-from-scratch" \
        --train-data "$TRAIN_DATA" \
        --model-out "$MODEL_OUT" \
        --checkpoint-in "$CHECKPOINT" \
        --checkpoint-out "$CHECKPOINT" \
        --vocab-model "$VOCAB" \
        --ctx $LLAMA_CTX \
        --embd $LLAMA_EMBD \
        --head $LLAMA_HEAD \
        --layer $LLAMA_LAYER \
        --batch $LLAMA_BATCH \
        --examples 128 \
        --rotmax 128 \
        --seed 1 \
        --adam-iter 16 \
        --print-details-interval 0 \
        --predict 16 \
        --use-flash \
        --threads 6
}
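
# Hypothetical convenience helper (not in the original script): since
# train() resumes from CHECKPOINT when it exists, deleting that file makes
# the next run start from freshly initialized weights.
function fresh_start() {
    rm -f "$CHECKPOINT"
}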
# Print the training configuration and a ready-to-paste sampling command
# for trying out the resulting model with llama.cpp's main binary.
function print_params() {
    echo "TRAIN_DATA: $TRAIN_DATA"
    echo "CTX: $LLAMA_CTX"
    echo "EMBD: $LLAMA_EMBD"
    echo "HEAD: $LLAMA_HEAD"
    echo "LAYER: $LLAMA_LAYER"
    echo "BATCH: $LLAMA_BATCH"
    echo
    echo "$HOME/Work/llama.cpp/build/bin/main --temp 0.05 --top_k 70 --top_p 0.79 -m $MODEL_OUT --repeat-penalty 1.3"
}
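
# A sketch of running the echoed sampling command directly instead of
# pasting it; assumes the same llama.cpp main binary, and the default
# prompt "ROMEO:" is an illustrative placeholder, not from the original.
function generate() {
    local prompt="${1:-ROMEO:}"  # caller-supplied prompt, placeholder default
    "$HOME/Work/llama.cpp/build/bin/main" \
        --temp 0.05 --top_k 70 --top_p 0.79 \
        --repeat-penalty 1.3 \
        -m "$MODEL_OUT" \
        -p "$prompt"
}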
function main() {
    print_params
    train
    print_params
}

main