#!/bin/bash
|
# Model and dataset paths
pretrained_model="/content/lora-scripts/sd-models/Animefull-final-pruned.ckpt"  # base checkpoint to train on
train_data_dir="/content/lora-scripts/train/aki/"                               # folder with training images and captions
reg_data_dir=""                                                                  # regularisation image folder; leave empty to skip
|
# Network settings
network_module="lycoris.kohya"  # use the LyCORIS implementation instead of the built-in LoRA module
network_weights=""              # existing weights to continue training from; leave empty to start fresh
network_dim=32                  # dimension for linear (attention) layers
network_alpha=16                # alpha for linear (attention) layers
|
# Training parameters
resolution="512,768"  # width,height; the largest bucket size when bucketing is enabled
batch_size=3
max_train_epoches=15
save_every_n_epochs=1
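# Note (rough guide, not computed by this script): kohya's trainer works through
# approximately (image count x per-folder repeat count) / batch_size optimizer
# steps per epoch, for max_train_epoches epochs in total.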
|
|
|
train_unet_only=0          # 1 = train only the U-Net
train_text_encoder_only=0  # 1 = train only the text encoder

noise_offset=0             # only passed to the trainer when nonzero
keep_tokens=1              # keep the first N comma-separated tags in place when captions are shuffled
|
# Learning rate settings
lr="1.5e-4"
unet_lr="1.5e-4"
text_encoder_lr="1e-5"
lr_scheduler="cosine_with_restarts"
lr_warmup_steps=0
lr_restart_cycles=1  # restart count for cosine_with_restarts (passed as --lr_scheduler_num_cycles)
|
# Output settings
output_name="bailin_loha"
save_model_as="safetensors"
|
# Bucketing and misc settings
min_bucket_reso=256               # smallest bucket resolution
max_bucket_reso=1024              # largest bucket resolution
persistent_data_loader_workers=0  # 1 = keep DataLoader workers alive between epochs (faster, uses more RAM)
clip_skip=2
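# clip_skip=2 follows the convention commonly used with NAI-derived anime
# checkpoints such as Animefull; models trained against the default CLIP output
# layer are usually trained and used with clip_skip=1.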
|
|
|
|
|
# Optimizer
use_8bit_adam=0  # 1 = AdamW8bit via bitsandbytes
use_lion=1       # 1 = Lion optimizer
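# At most one of the two optimizer toggles above should be set to 1. When
# use_lion=1 the script passes --use_lion_optimizer, which in sd-scripts relies
# on the lion-pytorch package being installed in the training environment.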
|
|
|
|
|
# LyCORIS-specific settings
algo="loha"   # LyCORIS algorithm
conv_dim=8    # dimension for convolution layers
conv_alpha=4  # alpha for convolution layers
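# These values only take effect when network_module is "lycoris.kohya": the block
# further down forwards them to the trainer via --network_args, so the chosen
# algorithm (LoHa here) is also applied to convolution layers, with conv_dim and
# conv_alpha playing the same role there that network_dim and network_alpha play
# for the attention layers.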
|
|
|
|
|
export HF_HOME="huggingface"   # keep the Hugging Face cache inside the working directory
export TF_CPP_MIN_LOG_LEVEL=3  # silence TensorFlow log output

# Optional arguments assembled from the toggles above
extArgs=()
|
if [ "$train_unet_only" == 1 ]; then extArgs+=("--network_train_unet_only"); fi

if [ "$train_text_encoder_only" == 1 ]; then extArgs+=("--network_train_text_encoder_only"); fi

if [ -n "$network_weights" ]; then extArgs+=("--network_weights=$network_weights"); fi

if [ -n "$reg_data_dir" ]; then extArgs+=("--reg_data_dir=$reg_data_dir"); fi

if [ "$use_8bit_adam" == 1 ]; then extArgs+=("--use_8bit_adam"); fi

if [ "$use_lion" == 1 ]; then extArgs+=("--use_lion_optimizer"); fi

if [ "$persistent_data_loader_workers" == 1 ]; then extArgs+=("--persistent_data_loader_workers"); fi

if [ "$network_module" == "lycoris.kohya" ]; then
  # forward the LyCORIS-specific options as separate --network_args values
  extArgs+=("--network_args" "conv_dim=$conv_dim" "conv_alpha=$conv_alpha" "algo=$algo")
fi

if [ "$noise_offset" != 0 ]; then extArgs+=("--noise_offset=$noise_offset"); fi
|
accelerate launch --num_cpu_threads_per_process=8 "./sd-scripts/train_network.py" \
  --enable_bucket \
  --pretrained_model_name_or_path="$pretrained_model" \
  --train_data_dir="$train_data_dir" \
  --output_dir="/content/drive/MyDrive/Lora/output/bailin_loha" \
  --logging_dir="/content/drive/MyDrive/Lora/output/bailin_loha/logs" \
  --log_prefix="$output_name" \
  --resolution="$resolution" \
  --network_module="$network_module" \
  --max_train_epochs="$max_train_epoches" \
  --learning_rate="$lr" \
  --unet_lr="$unet_lr" \
  --text_encoder_lr="$text_encoder_lr" \
  --lr_scheduler="$lr_scheduler" \
  --lr_warmup_steps="$lr_warmup_steps" \
  --lr_scheduler_num_cycles="$lr_restart_cycles" \
  --network_dim="$network_dim" \
  --network_alpha="$network_alpha" \
  --output_name="$output_name" \
  --train_batch_size="$batch_size" \
  --save_every_n_epochs="$save_every_n_epochs" \
  --mixed_precision="fp16" \
  --save_precision="fp16" \
  --seed="1337" \
  --cache_latents \
  --clip_skip="$clip_skip" \
  --prior_loss_weight=0.3 \
  --max_token_length=225 \
  --caption_extension=".txt" \
  --save_model_as="$save_model_as" \
  --min_bucket_reso="$min_bucket_reso" \
  --max_bucket_reso="$max_bucket_reso" \
  --keep_tokens="$keep_tokens" \
  --xformers --shuffle_caption "${extArgs[@]}"
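
# Optional: training curves are written to the logging_dir above and can be
# inspected with TensorBoard if it is installed in the environment, e.g.
#   tensorboard --logdir="/content/drive/MyDrive/Lora/output/bailin_loha/logs"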
|
|