apply dadaptation

unknown
2023-02-19 18:37:07 +09:00
parent 08ae46b163
commit 045a3dbe48
3 changed files with 27 additions and 0 deletions


@@ -1391,6 +1391,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
                         help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う（bitsandbytesのインストールが必要）")
     parser.add_argument("--use_lion_optimizer", action="store_true",
                         help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う（lion-pytorch のインストールが必要）")
+    parser.add_argument("--use_dadaptation_optimizer", action="store_true",
+                        help="use dadaptation optimizer (requires dadaptation) / dadaptationオプティマイザを使う（dadaptation のインストールが必要）")
     parser.add_argument("--mem_eff_attn", action="store_true",
                         help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う")
     parser.add_argument("--xformers", action="store_true",

test.bat Normal file

@@ -0,0 +1,10 @@
+@echo off
+set VENV_DIR=.\venv
+set PYTHON=python
+call %VENV_DIR%\Scripts\activate.bat
+accelerate launch --num_cpu_threads_per_process=2 "train_network.py" --enable_bucket --pretrained_model_name_or_path="D:\NovelAI\stable-diffusion-webui\models\Stable-diffusion\anime/animefull-final-pruned.safetensors" --train_data_dir="D:\NovelAI\additinal pt\Train\다이타쿠 헬리오스 v2/img" --resolution=768,768 --output_dir="D:\NovelAI\additinal pt\Train\다이타쿠 헬리오스 v2/model" --logging_dir="D:\NovelAI\additinal pt\Train\다이타쿠 헬리오스 v2/logs" --network_alpha="16" --training_comment="trigger word : daitaku helios \(umamusume\)" --save_model_as=safetensors --network_module=networks.lora --text_encoder_lr=0.5 --unet_lr=1.0 --network_dim=16 --output_name="daitaku_helios_lora" --lr_scheduler_num_cycles="10" --learning_rate="1.0" --lr_scheduler="constant_with_warmup" --lr_warmup_steps="156" --train_batch_size="4" --max_train_steps="1560" --save_every_n_epochs="1" --mixed_precision="fp16" --save_precision="fp16" --seed="1234" --caption_extension=".txt" --max_token_length=150 --bucket_reso_steps=64 --shuffle_caption --gradient_checkpointing --xformers --use_dadaptation --persistent_data_loader_workers --bucket_no_upscale --random_crop
+pause
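
Note that this command passes --use_dadaptation while the parser above registers --use_dadaptation_optimizer. That still works because argparse accepts any unambiguous prefix of a registered long option by default. A minimal sketch (not part of the commit) demonstrating this behavior:

    # argparse matches unambiguous option prefixes (allow_abbrev=True by default),
    # so "--use_dadaptation" resolves to "--use_dadaptation_optimizer".
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--use_dadaptation_optimizer", action="store_true")

    args = parser.parse_args(["--use_dadaptation"])  # unambiguous prefix
    assert args.use_dadaptation_optimizer is True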


@@ -37,6 +37,9 @@ def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_scheduler):
         logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0]
         logs["lr/unet"] = lr_scheduler.get_last_lr()[-1]  # may be the same as textencoder
+    if args.use_dadaptation_optimizer:  # track the d*lr value of the unet
+        logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]['d'] * lr_scheduler.optimizers[-1].param_groups[0]['lr']
     return logs
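
For context: D-Adaptation keeps its current step-size estimate d in each param group, and the effective learning rate is d * lr, which is the product logged above. A minimal sketch of reading that value straight from the optimizer (assumes the dadaptation package is installed; the model is a toy stand-in):

    import torch
    import dadaptation

    model = torch.nn.Linear(4, 1)
    optimizer = dadaptation.DAdaptAdam(model.parameters(), lr=1.0)

    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    optimizer.step()

    group = optimizer.param_groups[0]
    print(group['d'] * group['lr'])  # the quantity logged above as "lr/d*lr"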
@@ -223,6 +226,18 @@ def train(args):
             raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
         print("use Lion optimizer")
         optimizer_class = lion_pytorch.Lion
+    elif args.use_dadaptation_optimizer:
+        try:
+            import dadaptation
+        except ImportError:
+            raise ImportError("No dadaptation / dadaptation がインストールされていないようです")
+        print("use dadaptation optimizer")
+        optimizer_class = dadaptation.DAdaptAdam
+        if args.network_dim > args.network_alpha:
+            print('network dimension is greater than network alpha. This may cause the network to blow up.')
+        if args.learning_rate <= 0.1 or args.text_encoder_lr <= 0.1 or args.unet_lr <= 0.1:
+            print('learning rate is too low. If using dadaptation, set the learning rate around 1.0.')
+            print('recommended options: lr=1.0, unet_lr=1.0, text_encoder_lr=0.5')
     else:
         optimizer_class = torch.optim.AdamW
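
The warnings above exist because D-Adaptation treats lr as a multiplier on its internally estimated step size d rather than as an absolute rate, so a typical Adam value like 1e-4 would shrink the adapted step by orders of magnitude. A simplified, self-contained sketch of the selection logic (not the repo's exact code; the real train() passes more arguments when constructing the optimizer):

    import torch

    def select_optimizer_class(use_dadaptation: bool):
        if use_dadaptation:
            try:
                import dadaptation
            except ImportError:
                raise ImportError("dadaptation is not installed")
            return dadaptation.DAdaptAdam  # expects lr near 1.0 (lr multiplies d)
        return torch.optim.AdamW           # expects an absolute lr, e.g. 1e-4

    optimizer_class = select_optimizer_class(use_dadaptation=False)
    optimizer = optimizer_class(torch.nn.Linear(2, 2).parameters(), lr=1e-4)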