batch_size_training: '32'
checkpoint_type: StateDictType.FULL_STATE_DICT
dataset: foundational_dataset
dist_checkpoint_folder: fine-tuned
dist_checkpoint_root_folder: test_run_save
enable_fsdp: 'True'
freeze_layers: 'False'
fsdp_activation_checkpointing: 'True'
gamma: '0.9'
load_peft_model: 'False'
low_cpu_fsdp: 'False'
lr: '0.0001'
micro_batch_size: '32'
mixed_precision: 'True'
model_name: models/v3/llama7b-full-1e-4_low-chunk1024-009-017
num_epochs: '1'
num_freeze_layers: '1'
num_workers_dataloader: '2'
one_gpu: 'False'
optimizer: AdamW
output_dir: PATH/to/save/PEFT/model
peft_method: lora
pure_bf16: 'True'
quantization: 'False'
run_validation: 'True'
save_model: 'True'
save_optimizer: 'False'
seed: '42'
sharding_strategy: ShardingStrategy.FULL_SHARD
type_of_model: foundational
use_fast_kernels: 'False'
use_fp16: 'False'
use_peft: 'False'
val_batch_size: '64'
weight_decay: '0.0'
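
Note that the values above were dumped as quoted strings, so booleans and numbers come back as `str` when the file is re-read. Below is a minimal sketch of re-loading such a dump and coercing the values to native Python types; the file name `train_params.yaml` and the `coerce` helper are illustrative assumptions, not part of the original run.

```python
# Sketch: re-load the stringly-typed config dump shown above.
# Assumptions: the dump lives in "train_params.yaml"; `coerce` is a
# hypothetical helper, not part of the original training code.
import yaml

def coerce(value: str):
    """Best-effort conversion of quoted dump values back to Python types."""
    if value in ("True", "False"):
        return value == "True"
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value  # enum names, paths, and model names stay as strings

with open("train_params.yaml") as f:
    raw = yaml.safe_load(f)

config = {k: coerce(v) if isinstance(v, str) else v for k, v in raw.items()}
assert config["batch_size_training"] == 32 and config["pure_bf16"] is True
```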
|
|
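The two enum-valued entries (`checkpoint_type`, `sharding_strategy`) match PyTorch FSDP type names, consistent with `enable_fsdp: 'True'`. Assuming that is the intended framework, they would map onto the following types; this is a sketch of the correspondence, not a confirmed detail of the original setup.

```python
# Sketch (assumption: the run used PyTorch FSDP, as the enum names suggest).
from torch.distributed.fsdp import ShardingStrategy, StateDictType

# FULL_SHARD shards parameters, gradients, and optimizer state across ranks.
sharding = ShardingStrategy.FULL_SHARD
# FULL_STATE_DICT gathers a complete, unsharded state dict when saving.
state_dict_type = StateDictType.FULL_STATE_DICT
```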