Create run.sh
run.sh
ADDED
@@ -0,0 +1,35 @@
+#!/bin/bash
+# To run in the background: ./run.sh > logs.txt 2>&1 &
+MODEL_KEY=meta-llama/Meta-Llama-3-8B
+MODEL_PATH=llama3-8B
+LR=3e-6
+EPOCH=4
+SEQ_LEN=1280
+WARMUP_RATIO=0.05
+OUTPUT_DIR=results
+DATASET_FILE=sanitized.jsonl
+accelerate launch --config_file config.yaml -m star_align.train \
+    --model_key $MODEL_KEY \
+    --model_name_or_path $MODEL_PATH \
+    --use_flash_attention True \
+    --datafile_paths $DATASET_FILE \
+    --output_dir $OUTPUT_DIR \
+    --num_train_epochs $EPOCH \
+    --max_training_seq_length $SEQ_LEN \
+    --gradient_checkpointing \
+    --pad_to_max_length False \
+    --per_device_train_batch_size 2 \
+    --gradient_accumulation_steps 16 \
+    --group_by_length False \
+    --logging_steps 1 \
+    --log_level info \
+    --optim adafactor \
+    --max_grad_norm -1 \
+    --warmup_ratio $WARMUP_RATIO \
+    --learning_rate $LR \
+    --ddp_find_unused_parameters False \
+    --bf16 True \
+    --lr_scheduler_type linear \
+    --report_to wandb \
+    --save_steps 16 \
+    --save_total_limit 30