iandennismiller committed on
Commit
d6ab723
1 Parent(s): d91e7cf

finetuning script

Browse files
bin/llama-finetune.sh ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash

# Fine-tune a llama.cpp base model with a LoRA adapter.
# Configuration is read from ~/.config/llama/llama-finetune.conf,
# which is created with sensible defaults on first run.
#
# To fetch the sample training data referenced by the default config:
#   cd $HOME/.local/share
#   wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/shakespeare.txt

set -euo pipefail

CONF="$HOME/.config/llama/llama-finetune.conf"

# if conf does not exist, create it
if [ ! -f "$CONF" ]; then
    mkdir -p "$HOME/.config/llama"
    # Unquoted EOF is intentional: $HOME expands NOW, so the conf file
    # contains absolute paths rather than a literal "$HOME".
    cat <<EOF > "$CONF"
MODEL=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
DATA=$HOME/.local/share/shakespeare.txt
TRAINING_PATH=$HOME/.ai/training
LORA_RANK=64
CONTEXT_SIZE=2048
ITERATIONS=64
LEARNING_RATE=0.001
EOF
fi

# shellcheck source=/dev/null
source "$CONF"

cd "$TRAINING_PATH" || exit 1

# BUG FIX: DATA is an absolute path, so the previous output name
# "lora-${DATA}.gguf" expanded to e.g. "lora-/home/user/.local/share/
# shakespeare.txt.gguf" — an invalid filename. Use only the basename.
DATA_NAME=$(basename "$DATA")

~/Work/llama.cpp/build/bin/finetune \
    --model-base "$MODEL" \
    --train-data "$DATA" \
    --lora-out "lora-${DATA_NAME}.gguf" \
    --ctx "$CONTEXT_SIZE" \
    --adam-iter "$ITERATIONS" \
    --adam-alpha "$LEARNING_RATE" \
    --lora-r "$LORA_RANK" \
    --lora-alpha "$LORA_RANK" \
    --threads 6 \
    --use-checkpointing \
    --use-flash \
    --save-every 1 \
    --rope-freq-base 10000 \
    --rope-freq-scale 1.0 \
    --batch 1 \
    --grad-acc 1 \
    --escape \
    --sample-start "\n" \
    --overlapping-samples \
    --fill-with-next-samples \
    --sample-random-offsets \
    --seed 1

# Parameter notes (see https://rentry.org/cpu-lora):

# --batch N: Larger batch sizes lead to better quality training at the expense of more RAM. Some recommendations say to set this as large as your hardware can support. I've seen a few different data sets that just use a size of 1.

# --grad-acc N: This is an artificial multiplier for the batch size. Using gradient accumulation basically runs more batches in series (instead of in parallel), which provides the same quality benefit as increasing the batch size. This process is slower, but uses much less RAM.

# --lora-r N: Sets the default LoRA Rank, or dimension count. Higher values produce a larger file with better control over the model's content. Small values like 4 or 8 are great for stylistic guidance. Higher values like 128 or 256 are good for teaching content upgrades.

# --lora-alpha N: set to same value as lora-r.

# --adam-alpha N: "The learning rate is perhaps the most important hyperparameter. If you have time to tune only one hyperparameter, tune the learning rate." This is how much the LoRA learns from each training run. Think of this as how slowly the dataset is being read during the training process.
bin/llama-hf-to-q6_k.sh CHANGED
@@ -5,7 +5,7 @@ if [ $# -ne 1 ]; then
5
  exit 1
6
  fi
7
 
8
- # if $HOME/.config/llama/llama-hf-to-q6_k.conf does not exist, create it
9
  if [ ! -f "$HOME/.config/llama/llama-hf-to-q6_k.conf" ]; then
10
  mkdir -p "$HOME/.config/llama"
11
  cat <<EOF > "$HOME/.config/llama/llama-hf-to-q6_k.conf"
 
5
  exit 1
6
  fi
7
 
8
+ # if conf does not exist, create it
9
  if [ ! -f "$HOME/.config/llama/llama-hf-to-q6_k.conf" ]; then
10
  mkdir -p "$HOME/.config/llama"
11
  cat <<EOF > "$HOME/.config/llama/llama-hf-to-q6_k.conf"
bin/llama-update.sh CHANGED
@@ -1,6 +1,6 @@
1
  #!/bin/bash
2
 
3
- # if $HOME/.config/llama/llama-hf-to-q6_k.conf does not exist, create it
4
  if [ ! -f "$HOME/.config/llama/llama-update.conf" ]; then
5
  mkdir -p "$HOME/.config/llama"
6
  cat <<EOF > "$HOME/.config/llama/llama-update.conf"
@@ -31,3 +31,5 @@ mkdir -p "$BIN_PATH"
31
  install -c -v -m 755 bin/main "$BIN_PATH/llama"
32
  install -c -v bin/ggml-metal.metal "$BIN_PATH"
33
  install -c -v -m 755 bin/llava "$BIN_PATH"
 
 
 
1
  #!/bin/bash
2
 
3
+ # if conf does not exist, create it
4
  if [ ! -f "$HOME/.config/llama/llama-update.conf" ]; then
5
  mkdir -p "$HOME/.config/llama"
6
  cat <<EOF > "$HOME/.config/llama/llama-update.conf"
 
31
  install -c -v -m 755 bin/main "$BIN_PATH/llama"
32
  install -c -v bin/ggml-metal.metal "$BIN_PATH"
33
  install -c -v -m 755 bin/llava "$BIN_PATH"
34
+ install -c -v -m 755 bin/finetune "$BIN_PATH/llama-finetune"
35
+ install -c -v -m 755 bin/speculative "$BIN_PATH/llama-speculative"
install.sh CHANGED
@@ -6,5 +6,6 @@ mkdir -p ~/.local/bin
6
 
7
  install -C -v ./bin/llama-hf-to-q6_k.sh ~/.local/bin
8
  install -C -v ./bin/llama-update.sh ~/.local/bin
 
9
 
10
  echo "done"
 
6
 
7
  install -C -v ./bin/llama-hf-to-q6_k.sh ~/.local/bin
8
  install -C -v ./bin/llama-update.sh ~/.local/bin
9
+ install -C -v ./bin/llama-finetune.sh ~/.local/bin
10
 
11
  echo "done"