#!/bin/bash

# One-time setup: fetch the example training data into ~/.local/share:
# cd $HOME/.local/share
# wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/shakespeare.txt

# If the config file does not exist, create it with default values.
# The unquoted heredoc expands $HOME now, so the conf stores absolute paths.
if [ ! -f "$HOME/.config/llama/llama-finetune.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-finetune.conf"
MODEL=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
DATA=$HOME/.local/share/shakespeare.txt
TRAINING_PATH=$HOME/.ai/training
LORA_RANK=64
CONTEXT_SIZE=2048
ITERATIONS=64
LEARNING_RATE=0.001
EOF
fi

source "$HOME/.config/llama/llama-finetune.conf"

cd "$TRAINING_PATH" || exit

# Train a LoRA adapter against the base model with llama.cpp's finetune example.
~/Work/llama.cpp/build/bin/finetune \
  --model-base "$MODEL" \
  --train-data "$DATA" \
  --lora-out "lora-$(basename "$DATA").gguf" \
  --ctx "$CONTEXT_SIZE" \
  --adam-iter "$ITERATIONS" \
  --adam-alpha "$LEARNING_RATE" \
  --lora-r "$LORA_RANK" \
  --lora-alpha "$LORA_RANK" \
  --threads 6 \
  --use-checkpointing \
  --use-flash \
  --save-every 1 \
  --rope-freq-base 10000 \
  --rope-freq-scale 1.0 \
  --batch 1 \
  --grad-acc 1 \
  --escape \
  --sample-start "\n" \
  --overlapping-samples \
  --fill-with-next-samples \
  --sample-random-offsets \
  --seed 1
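
# Once training finishes, the resulting adapter can be applied at inference time.
# A rough sketch, assuming a llama.cpp main binary from the same build as the
# finetune example above (the prompt and token count are only illustrative):
#
#   ~/Work/llama.cpp/build/bin/main \
#     --model "$MODEL" \
#     --lora "lora-$(basename "$DATA").gguf" \
#     --prompt "ROMEO:" \
#     --n-predict 128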

# https://rentry.org/cpu-lora

# --batch N: Larger batch sizes improve training quality at the cost of more RAM. A common recommendation is to set this as large as your hardware can support, although plenty of published setups simply use a batch size of 1.

# --grad-acc N: A multiplier for the effective batch size. Gradient accumulation runs extra batches in series (instead of in parallel), which gives the same quality benefit as a larger batch size, but more slowly and with far less RAM.
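#
# Rough worked example of the relationship (values are illustrative, not taken
# from this script): effective batch size = --batch * --grad-acc, so --batch 4
# with --grad-acc 8 trains with an effective batch of 32 while only holding
# 4 samples in memory at a time.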

# --lora-r N: Sets the LoRA rank (dimension count). Higher values produce a larger adapter file with more control over the model's content. Small values like 4 or 8 work well for stylistic guidance; higher values like 128 or 256 are better for teaching the model new content.

# --lora-alpha N: Set this to the same value as --lora-r.

# --adam-alpha N: The learning rate. "The learning rate is perhaps the most important hyperparameter. If you have time to tune only one hyperparameter, tune the learning rate." It controls how much the LoRA weights are adjusted on each training step; lower values learn more slowly but more stably.
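
# To try a different learning rate, edit the generated conf rather than this
# script; e.g. (0.0001 is an illustrative value, not a recommendation):
#
#   sed -i 's/^LEARNING_RATE=.*/LEARNING_RATE=0.0001/' "$HOME/.config/llama/llama-finetune.conf"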