From 045a3dbe489ef731e593ebbed088d065e77ed87d Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 19 Feb 2023 18:37:07 +0900 Subject: [PATCH 01/10] apply dadaptation --- library/train_util.py | 2 ++ test.bat | 10 ++++++++++ train_network.py | 15 +++++++++++++++ 3 files changed, 27 insertions(+) create mode 100644 test.bat diff --git a/library/train_util.py b/library/train_util.py index 63868f98..2a15fa05 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1391,6 +1391,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)") parser.add_argument("--use_lion_optimizer", action="store_true", help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う( lion-pytorch のインストールが必要)") + parser.add_argument("--use_dadaptation_optimizer", action="store_true", + help="use dadaptation optimizer (requires dadaptation) / dadaptaionオプティマイザを使う( dadaptation のインストールが必要)") parser.add_argument("--mem_eff_attn", action="store_true", help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う") parser.add_argument("--xformers", action="store_true", diff --git a/test.bat b/test.bat new file mode 100644 index 00000000..de7fa5da --- /dev/null +++ b/test.bat @@ -0,0 +1,10 @@ +@echo off + +set VENV_DIR=.\venv +set PYTHON=python + +call %VENV_DIR%\Scripts\activate.bat + +accelerate launch --num_cpu_threads_per_process=2 "train_network.py" --enable_bucket --pretrained_model_name_or_path="D:\NovelAI\stable-diffusion-webui\models\Stable-diffusion\anime/animefull-final-pruned.safetensors" --train_data_dir="D:\NovelAI\additinal pt\Train\Ÿ ︮ v2/img" --resolution=768,768 --output_dir="D:\NovelAI\additinal pt\Train\Ÿ ︮ v2/model" --logging_dir="D:\NovelAI\additinal pt\Train\Ÿ ︮ v2/logs" --network_alpha="16" --training_comment="trigger word : daitaku helios \(umamusume\)" --save_model_as=safetensors --network_module=networks.lora --text_encoder_lr=0.5 --unet_lr=1.0 --network_dim=16 --output_name="daitaku_helios_lora" --lr_scheduler_num_cycles="10" --learning_rate="1.0" --lr_scheduler="constant_with_warmup" --lr_warmup_steps="156" --train_batch_size="4" --max_train_steps="1560" --save_every_n_epochs="1" --mixed_precision="fp16" --save_precision="fp16" --seed="1234" --caption_extension=".txt" --max_token_length=150 --bucket_reso_steps=64 --shuffle_caption --gradient_checkpointing --xformers --use_dadaptation --persistent_data_loader_workers --bucket_no_upscale --random_crop + +pause \ No newline at end of file diff --git a/train_network.py b/train_network.py index 1489691d..9bf07d84 100644 --- a/train_network.py +++ b/train_network.py @@ -37,6 +37,9 @@ def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_sche logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] logs["lr/unet"] = lr_scheduler.get_last_lr()[-1] # may be same to textencoder + if args.use_dadaptation_optimizer: # tracking d*lr value of unet. 
+ logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]['d']*lr_scheduler.optimizers[-1].param_groups[0]['lr'] + return logs @@ -223,6 +226,18 @@ def train(args): raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです") print("use Lion optimizer") optimizer_class = lion_pytorch.Lion + elif args.use_dadaptation_optimizer: + try: + import dadaptation + except ImportError: + raise ImportError("No dadaptation / dadaptation がインストールされていないようです") + print("use dadaptation optimizer") + optimizer_class = dadaptation.DAdaptAdam + if args.network_dim > args.network_alpha: + print('network dimension is greater than network alpha. It possibly makes network blow up.') + if args.learning_rate <= 0.1 or args.text_encoder_lr <= 0.1 or args.unet_lr <= 0.1: + print('learning rate is too low. If using dadaptaion, set learning rate around 1.0.') + print('recommend option: lr=1.0, unet_lr=1.0, txtencoder_lr=0.5') else: optimizer_class = torch.optim.AdamW From b612d0b091213f39f4864b4cfe63a44f1e1974d7 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 19 Feb 2023 18:37:36 +0900 Subject: [PATCH 02/10] apply dadaptation --- fine_tune.py | 10 ++++++++++ test.bat | 10 ---------- 2 files changed, 10 insertions(+), 10 deletions(-) delete mode 100644 test.bat diff --git a/fine_tune.py b/fine_tune.py index 13241bc6..b368d1ea 100644 --- a/fine_tune.py +++ b/fine_tune.py @@ -165,6 +165,16 @@ def train(args): raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです") print("use Lion optimizer") optimizer_class = lion_pytorch.Lion + elif args.use_dadaptation_optimizer: + try: + import dadaptation + except ImportError: + raise ImportError("No dadaptation / dadaptation がインストールされていないようです") + print("use dadaptation optimizer") + optimizer_class = dadaptation.DAdaptAdam + if args.learning_rate <= 0.1: + print('learning rate is too low. 
If using dadaptaion, set learning rate around 1.0.') + print('recommend option: lr=1.0') else: optimizer_class = torch.optim.AdamW diff --git a/test.bat b/test.bat deleted file mode 100644 index de7fa5da..00000000 --- a/test.bat +++ /dev/null @@ -1,10 +0,0 @@ -@echo off - -set VENV_DIR=.\venv -set PYTHON=python - -call %VENV_DIR%\Scripts\activate.bat - -accelerate launch --num_cpu_threads_per_process=2 "train_network.py" --enable_bucket --pretrained_model_name_or_path="D:\NovelAI\stable-diffusion-webui\models\Stable-diffusion\anime/animefull-final-pruned.safetensors" --train_data_dir="D:\NovelAI\additinal pt\Train\Ÿ ︮ v2/img" --resolution=768,768 --output_dir="D:\NovelAI\additinal pt\Train\Ÿ ︮ v2/model" --logging_dir="D:\NovelAI\additinal pt\Train\Ÿ ︮ v2/logs" --network_alpha="16" --training_comment="trigger word : daitaku helios \(umamusume\)" --save_model_as=safetensors --network_module=networks.lora --text_encoder_lr=0.5 --unet_lr=1.0 --network_dim=16 --output_name="daitaku_helios_lora" --lr_scheduler_num_cycles="10" --learning_rate="1.0" --lr_scheduler="constant_with_warmup" --lr_warmup_steps="156" --train_batch_size="4" --max_train_steps="1560" --save_every_n_epochs="1" --mixed_precision="fp16" --save_precision="fp16" --seed="1234" --caption_extension=".txt" --max_token_length=150 --bucket_reso_steps=64 --shuffle_caption --gradient_checkpointing --xformers --use_dadaptation --persistent_data_loader_workers --bucket_no_upscale --random_crop - -pause \ No newline at end of file From b29c5a750cc76960c4f684c41cad12132d2cf52f Mon Sep 17 00:00:00 2001 From: mgz-dev <49577754+mgz-dev@users.noreply.github.com> Date: Sun, 19 Feb 2023 17:45:09 -0600 Subject: [PATCH 03/10] expand optimizer options and refactor Refactor code to make it easier to add new optimizers, and support alternate optimizer parameters -move redundant code to train_util for initializing optimizers - add SGD Nesterov optimizers as option (since they are already available) - add new parameters which may be helpful for tuning existing and new optimizers --- fine_tune.py | 23 +---------- library/train_util.py | 81 +++++++++++++++++++++++++++++++++----- train_db.py | 23 +---------- train_network.py | 25 +----------- train_textual_inversion.py | 24 +---------- 5 files changed, 80 insertions(+), 96 deletions(-) diff --git a/fine_tune.py b/fine_tune.py index 13241bc6..a3588c37 100644 --- a/fine_tune.py +++ b/fine_tune.py @@ -149,27 +149,7 @@ def train(args): # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") - - # 8-bit Adamを使う - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") - print("use 8-bit Adam optimizer") - optimizer_class = bnb.optim.AdamW8bit - elif args.use_lion_optimizer: - try: - import lion_pytorch - except ImportError: - raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです") - print("use Lion optimizer") - optimizer_class = lion_pytorch.Lion - else: - optimizer_class = torch.optim.AdamW - - # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略 - optimizer = optimizer_class(params_to_optimize, lr=args.learning_rate) + optimizer_name, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -351,6 +331,7 @@ if __name__ == '__main__': train_util.add_dataset_arguments(parser, False, True, True) train_util.add_training_arguments(parser, False) train_util.add_sd_saving_arguments(parser) + 
train_util.add_optimizer_arguments(parser) parser.add_argument("--diffusers_xformers", action='store_true', help='use xformers by diffusers / Diffusersでxformersを使用する') diff --git a/library/train_util.py b/library/train_util.py index 63868f98..581ad77f 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1366,6 +1366,26 @@ def add_sd_models_arguments(parser: argparse.ArgumentParser): help="pretrained model to train, directory to Diffusers model or StableDiffusion checkpoint / 学習元モデル、Diffusers形式モデルのディレクトリまたはStableDiffusionのckptファイル") +def add_optimizer_arguments(parser: argparse.ArgumentParser): + parser.add_argument("--optimizer_type", type=str, default="AdamW", + help="Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit") + + parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率") + parser.add_argument("--optimizer_momentum", type=float, default=0.9, + help="Momentum value for optimizers") + parser.add_argument("--optimizer_weightdecay", type=float, default=0.01, + help="Weight decay for optimizers") + parser.add_argument("--optimizer_beta1", type=float, default=0.9, + help="beta1 parameter for Adam optimizers") + parser.add_argument("--optimizer_beta2", type=float, default=0.999, + help="beta2 parameter for Adam optimizers") + + parser.add_argument("--lr_scheduler", type=str, default="constant", + help="scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup") + parser.add_argument("--lr_warmup_steps", type=int, default=0, + help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)") + + def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: bool): parser.add_argument("--output_dir", type=str, default=None, help="directory to output trained model / 学習後のモデル出力先ディレクトリ") @@ -1387,10 +1407,6 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--train_batch_size", type=int, default=1, help="batch size for training / 学習時のバッチサイズ") parser.add_argument("--max_token_length", type=int, default=None, choices=[None, 150, 225], help="max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)") - parser.add_argument("--use_8bit_adam", action="store_true", - help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)") - parser.add_argument("--use_lion_optimizer", action="store_true", - help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う( lion-pytorch のインストールが必要)") parser.add_argument("--mem_eff_attn", action="store_true", help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う") parser.add_argument("--xformers", action="store_true", @@ -1398,7 +1414,6 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--vae", type=str, default=None, help="path to checkpoint of vae to replace / VAEを入れ替える場合、VAEのcheckpointファイルまたはディレクトリ") - parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率") parser.add_argument("--max_train_steps", type=int, default=1600, help="training steps / 学習ステップ数") parser.add_argument("--max_train_epochs", type=int, default=None, help="training epochs (overrides max_train_steps) / 学習エポック数(max_train_stepsを上書きします)") @@ -1419,10 +1434,6 @@ def add_training_arguments(parser: argparse.ArgumentParser, 
support_dreambooth: parser.add_argument("--logging_dir", type=str, default=None, help="enable logging and output TensorBoard log to this directory / ログ出力を有効にしてこのディレクトリにTensorBoard用のログを出力する") parser.add_argument("--log_prefix", type=str, default=None, help="add prefix for each log directory / ログディレクトリ名の先頭に追加する文字列") - parser.add_argument("--lr_scheduler", type=str, default="constant", - help="scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup") - parser.add_argument("--lr_warmup_steps", type=int, default=0, - help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)") parser.add_argument("--noise_offset", type=float, default=None, help="enable noise offset with this value (if enabled, around 0.1 is recommended) / Noise offsetを有効にしてこの値を設定する(有効にする場合は0.1程度を推奨)") parser.add_argument("--lowram", action="store_true", @@ -1503,6 +1514,58 @@ def add_sd_saving_arguments(parser: argparse.ArgumentParser): # region utils +# "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit" + +def get_optimizer(args, trainable_params): + # Prepare optimizer/学習に必要なクラスを準備する + optimizer_type = args.optimizer_type.lower() + + betas = (args.optimizer_beta1, args.optimizer_beta2) + weight_decay = args.optimizer_weightdecay + momentum = args.optimizer_momentum + lr = args.learning_rate + + if optimizer_type == "AdamW8bit".lower(): + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") + print(f"use 8-bit AdamW optimizer | betas: {betas}, Weight Decay: {weight_decay}") + optimizer_class = bnb.optim.AdamW8bit + optimizer = optimizer_class(trainable_params, lr=lr, betas=betas, weight_decay=weight_decay) + + elif optimizer_type == "SGDNesterov8bit".lower(): + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") + print(f"use 8-bit SGD with Nesterov optimizer | Momentum: {momentum}, Weight Decay: {weight_decay}") + optimizer_class = bnb.optim.SGD8bit + optimizer = optimizer_class(trainable_params, lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=True) + + elif optimizer_type == "Lion".lower(): + try: + import lion_pytorch + except ImportError: + raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです") + print(f"use Lion optimizer | betas: {betas}, Weight Decay: {weight_decay}") + optimizer_class = lion_pytorch.Lion + optimizer = optimizer_class(trainable_params, lr=lr, betas=betas, weight_decay=weight_decay) + + elif optimizer_type == "SGDNesterov".lower(): + print(f"use SGD with Nesterov optimizer | Momentum: {momentum}, Weight Decay: {weight_decay}") + optimizer_class = torch.optim.SGD + optimizer = optimizer_class(trainable_params, lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=True) + + else: + print(f"use AdamW optimizer | betas: {betas}, Weight Decay: {weight_decay}") + optimizer_class = torch.optim.AdamW + optimizer = optimizer_class(trainable_params, lr=lr, betas=betas, weight_decay=weight_decay) + + optimizer_name = optimizer_class.__module__ + "." 
+ optimizer_class.__name__ + + return optimizer_name, optimizer + def prepare_dataset_args(args: argparse.Namespace, support_metadata: bool): # backward compatibility diff --git a/train_db.py b/train_db.py index 1903c4c4..51e588fc 100644 --- a/train_db.py +++ b/train_db.py @@ -115,32 +115,12 @@ def train(args): # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") - - # 8-bit Adamを使う - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") - print("use 8-bit Adam optimizer") - optimizer_class = bnb.optim.AdamW8bit - elif args.use_lion_optimizer: - try: - import lion_pytorch - except ImportError: - raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです") - print("use Lion optimizer") - optimizer_class = lion_pytorch.Lion - else: - optimizer_class = torch.optim.AdamW - if train_text_encoder: trainable_params = (itertools.chain(unet.parameters(), text_encoder.parameters())) else: trainable_params = unet.parameters() - # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略 - optimizer = optimizer_class(trainable_params, lr=args.learning_rate) + optimizer_name, optimizer = train_util.get_optimizer(args, trainable_params) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -352,6 +332,7 @@ if __name__ == '__main__': train_util.add_dataset_arguments(parser, True, False, True) train_util.add_training_arguments(parser, True) train_util.add_sd_saving_arguments(parser) + train_util.add_optimizer_arguments(parser) parser.add_argument("--no_token_padding", action="store_true", help="disable token padding (same as Diffuser's DreamBooth) / トークンのpaddingを無効にする(Diffusers版DreamBoothと同じ動作)") diff --git a/train_network.py b/train_network.py index 1489691d..b030391c 100644 --- a/train_network.py +++ b/train_network.py @@ -208,30 +208,8 @@ def train(args): # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") - # 8-bit Adamを使う - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") - print("use 8-bit Adam optimizer") - optimizer_class = bnb.optim.AdamW8bit - elif args.use_lion_optimizer: - try: - import lion_pytorch - except ImportError: - raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです") - print("use Lion optimizer") - optimizer_class = lion_pytorch.Lion - else: - optimizer_class = torch.optim.AdamW - - optimizer_name = optimizer_class.__module__ + "." 
+ optimizer_class.__name__ - trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr) - - # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略 - optimizer = optimizer_class(trainable_params, lr=args.learning_rate) + optimizer_name, optimizer = train_util.get_optimizer(args, trainable_params) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -555,6 +533,7 @@ if __name__ == '__main__': train_util.add_sd_models_arguments(parser) train_util.add_dataset_arguments(parser, True, True, True) train_util.add_training_arguments(parser, True) + train_util.add_optimizer_arguments(parser) parser.add_argument("--no_metadata", action='store_true', help="do not save metadata in output model / メタデータを出力先モデルに保存しない") parser.add_argument("--save_model_as", type=str, default="safetensors", choices=[None, "ckpt", "pt", "safetensors"], diff --git a/train_textual_inversion.py b/train_textual_inversion.py index ffec0516..1913da7e 100644 --- a/train_textual_inversion.py +++ b/train_textual_inversion.py @@ -198,29 +198,8 @@ def train(args): # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") - - # 8-bit Adamを使う - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") - print("use 8-bit Adam optimizer") - optimizer_class = bnb.optim.AdamW8bit - elif args.use_lion_optimizer: - try: - import lion_pytorch - except ImportError: - raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです") - print("use Lion optimizer") - optimizer_class = lion_pytorch.Lion - else: - optimizer_class = torch.optim.AdamW - trainable_params = text_encoder.get_input_embeddings().parameters() - - # betaやweight decayはdiffusers DreamBoothもDreamBooth SDもデフォルト値のようなのでオプションはとりあえず省略 - optimizer = optimizer_class(trainable_params, lr=args.learning_rate) + optimizer_name, optimizer = train_util.get_optimizer(args, trainable_params) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -491,6 +470,7 @@ if __name__ == '__main__': train_util.add_sd_models_arguments(parser) train_util.add_dataset_arguments(parser, True, True, False) train_util.add_training_arguments(parser, True) + train_util.add_optimizer_arguments(parser) parser.add_argument("--save_model_as", type=str, default="pt", choices=[None, "ckpt", "pt", "safetensors"], help="format to save the model (default is .pt) / モデル保存時の形式(デフォルトはpt)") From 663aad2b0dfc1f2963f439a88ee18c42265c2eca Mon Sep 17 00:00:00 2001 From: Kohya S Date: Mon, 20 Feb 2023 22:47:43 +0900 Subject: [PATCH 04/10] refactor get_scheduler etc. --- fine_tune.py | 13 +++-- library/train_util.py | 110 ++++++++++++++++++++++++++++++------- train_db.py | 12 ++-- train_network.py | 74 ++----------------------- train_textual_inversion.py | 10 +++- 5 files changed, 119 insertions(+), 100 deletions(-) diff --git a/fine_tune.py b/fine_tune.py index a3588c37..96aa362b 100644 --- a/fine_tune.py +++ b/fine_tune.py @@ -149,7 +149,7 @@ def train(args): # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") - optimizer_name, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) + _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -163,8 +163,10 @@ def train(args): print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する - lr_scheduler = diffusers.optimization.get_scheduler( - args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps) + lr_scheduler = train_util.get_scheduler_fix( + args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする if args.full_fp16: @@ -284,8 +286,11 @@ def train(args): current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず if args.logging_dir is not None: logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]} + if args.optimizer_type == "DAdaptation".lower(): # tracking d*lr value + logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d']*lr_scheduler.optimizers[0].param_groups[0]['lr'] accelerator.log(logs, step=global_step) + # TODO moving averageにする loss_total += current_loss avr_loss = loss_total / (step+1) logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]} @@ -295,7 +300,7 @@ def train(args): break if args.logging_dir is not None: - logs = {"epoch_loss": loss_total / len(train_dataloader)} + logs = {"loss/epoch": loss_total / len(train_dataloader)} accelerator.log(logs, step=epoch+1) accelerator.wait_for_everyone() diff --git a/library/train_util.py b/library/train_util.py index 1a28c39a..329b27fc 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -5,6 +5,7 @@ import json import shutil import time from typing import Dict, List, NamedTuple, Tuple +from typing import Optional, Union from accelerate import Accelerator from torch.autograd.function import Function import glob @@ -17,9 +18,11 @@ from io import BytesIO from tqdm import tqdm import torch +from torch.optim import Optimizer from torchvision import transforms from transformers import CLIPTokenizer import diffusers +from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION from diffusers import DDPMScheduler, StableDiffusionPipeline import albumentations as albu import numpy as np @@ -1368,12 +1371,18 @@ def add_sd_models_arguments(parser: argparse.ArgumentParser): def add_optimizer_arguments(parser: argparse.ArgumentParser): parser.add_argument("--optimizer_type", type=str, default="AdamW", - help="Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit") + help="Optimizer to use / オプティマイザの種類: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation") + parser.add_argument("--use_8bit_adam", action="store_true", + help="use 8bit AdamW optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)") + parser.add_argument("--use_lion_optimizer", action="store_true", + help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う( lion-pytorch のインストールが必要)") + # parser.add_argument("--use_dadaptation_optimizer", action="store_true", + # help="use dadaptation optimizer (requires dadaptation) / dadaptaionオプティマイザを使う( dadaptation のインストールが必要)") parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率") parser.add_argument("--optimizer_momentum", type=float, default=0.9, - help="Momentum value for optimizers") - parser.add_argument("--optimizer_weightdecay", type=float, default=0.01, + help="Momentum value for optimizers for SGD optimizers") + 
parser.add_argument("--optimizer_weight_decay", type=float, default=0.01, help="Weight decay for optimizers") parser.add_argument("--optimizer_beta1", type=float, default=0.9, help="beta1 parameter for Adam optimizers") @@ -1407,12 +1416,6 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--train_batch_size", type=int, default=1, help="batch size for training / 学習時のバッチサイズ") parser.add_argument("--max_token_length", type=int, default=None, choices=[None, 150, 225], help="max token length of text encoder (default for 75, 150 or 225) / text encoderのトークンの最大長(未指定で75、150または225が指定可)") - # parser.add_argument("--use_8bit_adam", action="store_true", - # help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)") - # parser.add_argument("--use_lion_optimizer", action="store_true", - # help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う( lion-pytorch のインストールが必要)") - # parser.add_argument("--use_dadaptation_optimizer", action="store_true", - # help="use dadaptation optimizer (requires dadaptation) / dadaptaionオプティマイザを使う( dadaptation のインストールが必要)") parser.add_argument("--mem_eff_attn", action="store_true", help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う") parser.add_argument("--xformers", action="store_true", @@ -1520,14 +1523,19 @@ def add_sd_saving_arguments(parser: argparse.ArgumentParser): # region utils -# "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, dadaption" def get_optimizer(args, trainable_params): - # Prepare optimizer/学習に必要なクラスを準備する - optimizer_type = args.optimizer_type.lower() + # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, dadaptation" + + optimizer_type = args.optimizer_type + if args.use_8bit_adam: + optimizer_type = "AdamW8bit" + elif args.use_lion_optimizer: + optimizer_type = "Lion" + optimizer_type = optimizer_type.lower() betas = (args.optimizer_beta1, args.optimizer_beta2) - weight_decay = args.optimizer_weightdecay + weight_decay = args.optimizer_weight_decay momentum = args.optimizer_momentum lr = args.learning_rate @@ -1563,17 +1571,18 @@ def get_optimizer(args, trainable_params): optimizer_class = torch.optim.SGD optimizer = optimizer_class(trainable_params, lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=True) - elif optimizer_type == "dadaptation".lower(): + elif optimizer_type == "DAdaptation".lower(): try: import dadaptation except ImportError: raise ImportError("No dadaptation / dadaptation がインストールされていないようです") - print(f"use dadaptation optimizer") + print(f"use D-Adaptation Adam optimizer | betas: {betas}, Weight Decay: {weight_decay}") optimizer_class = dadaptation.DAdaptAdam - if args.learning_rate <= 0.1: - print('learning rate is too low. If using dadaptaion, set learning rate around 1.0.') - print('recommend option: lr=1.0') - optimizer = optimizer_class(trainable_params, lr=lr) + if lr <= 0.1: + print(f'learning rate is too low. 
If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: {lr}') + print('recommend option: lr=1.0 / 推奨は1.0です') + optimizer = optimizer_class(trainable_params, lr=lr, betas=betas, weight_decay=weight_decay) + else: print(f"use AdamW optimizer | betas: {betas}, Weight Decay: {weight_decay}") optimizer_class = torch.optim.AdamW @@ -1584,6 +1593,69 @@ def get_optimizer(args, trainable_params): return optimizer_name, optimizer +# Monkeypatch newer get_scheduler() function overridng current version of diffusers.optimizer.get_scheduler +# code is taken from https://github.com/huggingface/diffusers diffusers.optimizer, commit d87cc15977b87160c30abaace3894e802ad9e1e6 +# Which is a newer release of diffusers than currently packaged with sd-scripts +# This code can be removed when newer diffusers version (v0.12.1 or greater) is tested and implemented to sd-scripts + + +def get_scheduler_fix( + name: Union[str, SchedulerType], + optimizer: Optimizer, + num_warmup_steps: Optional[int] = None, + num_training_steps: Optional[int] = None, + num_cycles: int = 1, + power: float = 1.0, +): + """ + Unified API to get any scheduler from its name. + Args: + name (`str` or `SchedulerType`): + The name of the scheduler to use. + optimizer (`torch.optim.Optimizer`): + The optimizer that will be used during training. + num_warmup_steps (`int`, *optional*): + The number of warmup steps to do. This is not required by all schedulers (hence the argument being + optional), the function will raise an error if it's unset and the scheduler type requires it. + num_training_steps (`int``, *optional*): + The number of training steps to do. This is not required by all schedulers (hence the argument being + optional), the function will raise an error if it's unset and the scheduler type requires it. + num_cycles (`int`, *optional*): + The number of hard restarts used in `COSINE_WITH_RESTARTS` scheduler. + power (`float`, *optional*, defaults to 1.0): + Power factor. See `POLYNOMIAL` scheduler + last_epoch (`int`, *optional*, defaults to -1): + The index of the last epoch when resuming training. 
+ """ + name = SchedulerType(name) + schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] + if name == SchedulerType.CONSTANT: + return schedule_func(optimizer) + + # All other schedulers require `num_warmup_steps` + if num_warmup_steps is None: + raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") + + if name == SchedulerType.CONSTANT_WITH_WARMUP: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) + + # All other schedulers require `num_training_steps` + if num_training_steps is None: + raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") + + if name == SchedulerType.COSINE_WITH_RESTARTS: + return schedule_func( + optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles + ) + + if name == SchedulerType.POLYNOMIAL: + return schedule_func( + optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power + ) + + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) + + def prepare_dataset_args(args: argparse.Namespace, support_metadata: bool): # backward compatibility if args.caption_extention is not None: diff --git a/train_db.py b/train_db.py index 51e588fc..268d90a1 100644 --- a/train_db.py +++ b/train_db.py @@ -120,7 +120,7 @@ def train(args): else: trainable_params = unet.parameters() - optimizer_name, optimizer = train_util.get_optimizer(args, trainable_params) + _, optimizer = train_util.get_optimizer(args, trainable_params) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -136,9 +136,11 @@ def train(args): if args.stop_text_encoder_training is None: args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end - # lr schedulerを用意する - lr_scheduler = diffusers.optimization.get_scheduler( - args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps) + # lr schedulerを用意する TODO gradient_accumulation_stepsの扱いが何かおかしいかもしれない。後で確認する + lr_scheduler = train_util.get_scheduler_fix( + args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, + num_training_steps=args.max_train_steps, + num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする if args.full_fp16: @@ -280,6 +282,8 @@ def train(args): current_loss = loss.detach().item() if args.logging_dir is not None: logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]} + if args.optimizer_type == "DAdaptation".lower(): # tracking d*lr value + logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d']*lr_scheduler.optimizers[0].param_groups[0]['lr'] accelerator.log(logs, step=global_step) if epoch == 0: diff --git a/train_network.py b/train_network.py index df987325..3c4fb8d9 100644 --- a/train_network.py +++ b/train_network.py @@ -1,8 +1,5 @@ -from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION -from torch.optim import Optimizer from torch.cuda.amp import autocast from torch.nn.parallel import DistributedDataParallel as DDP -from typing import Optional, Union import importlib import argparse import gc @@ -26,6 +23,7 @@ def collate_fn(examples): return examples[0] +# TODO 他のスクリプトと共通化する def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_scheduler): logs = {"loss/current": current_loss, "loss/average": avr_loss} @@ -37,75 +35,12 @@ def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_sche 
logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] logs["lr/unet"] = lr_scheduler.get_last_lr()[-1] # may be same to textencoder - if args.use_dadaptation_optimizer: # tracking d*lr value of unet. + if args.optimizer_type == "DAdaptation".lower(): # tracking d*lr value of unet. logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]['d']*lr_scheduler.optimizers[-1].param_groups[0]['lr'] return logs -# Monkeypatch newer get_scheduler() function overridng current version of diffusers.optimizer.get_scheduler -# code is taken from https://github.com/huggingface/diffusers diffusers.optimizer, commit d87cc15977b87160c30abaace3894e802ad9e1e6 -# Which is a newer release of diffusers than currently packaged with sd-scripts -# This code can be removed when newer diffusers version (v0.12.1 or greater) is tested and implemented to sd-scripts - - -def get_scheduler_fix( - name: Union[str, SchedulerType], - optimizer: Optimizer, - num_warmup_steps: Optional[int] = None, - num_training_steps: Optional[int] = None, - num_cycles: int = 1, - power: float = 1.0, -): - """ - Unified API to get any scheduler from its name. - Args: - name (`str` or `SchedulerType`): - The name of the scheduler to use. - optimizer (`torch.optim.Optimizer`): - The optimizer that will be used during training. - num_warmup_steps (`int`, *optional*): - The number of warmup steps to do. This is not required by all schedulers (hence the argument being - optional), the function will raise an error if it's unset and the scheduler type requires it. - num_training_steps (`int``, *optional*): - The number of training steps to do. This is not required by all schedulers (hence the argument being - optional), the function will raise an error if it's unset and the scheduler type requires it. - num_cycles (`int`, *optional*): - The number of hard restarts used in `COSINE_WITH_RESTARTS` scheduler. - power (`float`, *optional*, defaults to 1.0): - Power factor. See `POLYNOMIAL` scheduler - last_epoch (`int`, *optional*, defaults to -1): - The index of the last epoch when resuming training. - """ - name = SchedulerType(name) - schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] - if name == SchedulerType.CONSTANT: - return schedule_func(optimizer) - - # All other schedulers require `num_warmup_steps` - if num_warmup_steps is None: - raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") - - if name == SchedulerType.CONSTANT_WITH_WARMUP: - return schedule_func(optimizer, num_warmup_steps=num_warmup_steps) - - # All other schedulers require `num_training_steps` - if num_training_steps is None: - raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") - - if name == SchedulerType.COSINE_WITH_RESTARTS: - return schedule_func( - optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles - ) - - if name == SchedulerType.POLYNOMIAL: - return schedule_func( - optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power - ) - - return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps) - - def train(args): session_id = random.randint(0, 2**32) training_started_at = time.time() @@ -164,7 +99,7 @@ def train(args): if args.lowram: text_encoder.to("cuda") unet.to("cuda") - + # モデルに xformers とか memory efficient attention を組み込む train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers) @@ -226,8 +161,7 @@ def train(args): print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する - # lr_scheduler = diffusers.optimization.get_scheduler( - lr_scheduler = get_scheduler_fix( + lr_scheduler = train_util.get_scheduler_fix( args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) diff --git a/train_textual_inversion.py b/train_textual_inversion.py index 1913da7e..07dcc199 100644 --- a/train_textual_inversion.py +++ b/train_textual_inversion.py @@ -199,7 +199,7 @@ def train(args): # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") trainable_params = text_encoder.get_input_embeddings().parameters() - optimizer_name, optimizer = train_util.get_optimizer(args, trainable_params) + _, optimizer = train_util.get_optimizer(args, trainable_params) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -213,8 +213,10 @@ def train(args): print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する - lr_scheduler = diffusers.optimization.get_scheduler( - args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps) + lr_scheduler = train_util.get_scheduler_fix( + args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) # acceleratorがなんかよろしくやってくれるらしい text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( @@ -356,6 +358,8 @@ def train(args): current_loss = loss.detach().item() if args.logging_dir is not None: logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]} + if args.optimizer_type == "DAdaptation".lower(): # tracking d*lr value + logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d']*lr_scheduler.optimizers[0].param_groups[0]['lr'] accelerator.log(logs, step=global_step) loss_total += current_loss From 9ab964d0b83a0c3d3936d40317cbcebc68111ee7 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Wed, 22 Feb 2023 21:09:47 +0900 Subject: [PATCH 05/10] Add Adafactor optimzier --- fine_tune.py | 17 ++-- library/train_util.py | 181 +++++++++++++++++++++++++++++-------- train_db.py | 17 ++-- train_network.py | 29 +++--- train_textual_inversion.py | 17 ++-- 5 files changed, 181 insertions(+), 80 deletions(-) diff --git a/fine_tune.py b/fine_tune.py index 96aa362b..80290e72 100644 --- a/fine_tune.py +++ b/fine_tune.py @@ -149,7 +149,7 @@ def train(args): # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") - _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) + _, _, optimizer = train_util.get_optimizer(args, trainable_params=params_to_optimize) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -163,10 +163,9 @@ def train(args): print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する - lr_scheduler = train_util.get_scheduler_fix( - args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) + lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする if args.full_fp16: @@ -268,11 +267,11 @@ def train(args): loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) - if accelerator.sync_gradients: + if accelerator.sync_gradients and args.max_grad_norm != 0.0: params_to_clip = [] for m in training_models: params_to_clip.extend(m.parameters()) - accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm) + accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() @@ -285,8 +284,8 @@ def train(args): current_loss = loss.detach().item() # 平均なのでbatch sizeは関係ないはず if args.logging_dir is not None: - logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]} - if args.optimizer_type == "DAdaptation".lower(): # tracking d*lr value + logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])} + if args.optimizer_type.lower() == "DAdaptation".lower(): # tracking d*lr value logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d']*lr_scheduler.optimizers[0].param_groups[0]['lr'] accelerator.log(logs, step=global_step) diff --git a/library/train_util.py b/library/train_util.py index 329b27fc..37642dd5 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1,6 +1,7 @@ # common functions for training import argparse +import importlib import json import shutil import time @@ -21,6 +22,7 @@ import torch from torch.optim import Optimizer from torchvision import transforms from transformers import CLIPTokenizer +import transformers import diffusers from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION from diffusers import DDPMScheduler, StableDiffusionPipeline @@ -1371,28 +1373,29 @@ def add_sd_models_arguments(parser: argparse.ArgumentParser): def add_optimizer_arguments(parser: argparse.ArgumentParser): parser.add_argument("--optimizer_type", type=str, default="AdamW", - help="Optimizer to use / オプティマイザの種類: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation") + help="Optimizer to use / オプティマイザの種類: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, AdaFactor") + + # backward compatibility parser.add_argument("--use_8bit_adam", action="store_true", help="use 8bit AdamW optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)") parser.add_argument("--use_lion_optimizer", action="store_true", help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う( lion-pytorch のインストールが必要)") - # parser.add_argument("--use_dadaptation_optimizer", action="store_true", - # help="use dadaptation optimizer (requires dadaptation) / dadaptaionオプティマイザを使う( dadaptation のインストールが必要)") parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率") - parser.add_argument("--optimizer_momentum", type=float, default=0.9, - help="Momentum value for 
optimizers for SGD optimizers") - parser.add_argument("--optimizer_weight_decay", type=float, default=0.01, - help="Weight decay for optimizers") - parser.add_argument("--optimizer_beta1", type=float, default=0.9, - help="beta1 parameter for Adam optimizers") - parser.add_argument("--optimizer_beta2", type=float, default=0.999, - help="beta2 parameter for Adam optimizers") + parser.add_argument("--max_grad_norm", default=1.0, type=float, + help="Max gradient norm, 0 for no clipping / 勾配正規化の最大norm、0でclippingを行わない") + + parser.add_argument("--optimizer_args", type=str, default=None, nargs='*', + help="additional arguments for optimizer (like \"weight_decay=0.01 betas=0.9,0.999 ...\") / オプティマイザの追加引数(例: \"weight_decay=0.01 betas=0.9,0.999 ...\")") parser.add_argument("--lr_scheduler", type=str, default="constant", - help="scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup") + help="scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup, adafactor") parser.add_argument("--lr_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)") + parser.add_argument("--lr_scheduler_num_cycles", type=int, default=1, + help="Number of restarts for cosine scheduler with restarts / cosine with restartsスケジューラでのリスタート回数") + parser.add_argument("--lr_scheduler_power", type=float, default=1, + help="Polynomial power for polynomial scheduler / polynomialスケジューラでのpolynomial power") def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: bool): @@ -1525,18 +1528,37 @@ def add_sd_saving_arguments(parser: argparse.ArgumentParser): def get_optimizer(args, trainable_params): - # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, dadaptation" + # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, Adafactor" optimizer_type = args.optimizer_type if args.use_8bit_adam: + print(f"*** use_8bit_adam option is specified. optimizer_type is ignored / use_8bit_adamオプションが指定されているためoptimizer_typeは無視されます") optimizer_type = "AdamW8bit" elif args.use_lion_optimizer: + print(f"*** use_lion_optimizer option is specified. 
optimizer_type is ignored / use_lion_optimizerオプションが指定されているためoptimizer_typeは無視されます") optimizer_type = "Lion" optimizer_type = optimizer_type.lower() - betas = (args.optimizer_beta1, args.optimizer_beta2) - weight_decay = args.optimizer_weight_decay - momentum = args.optimizer_momentum + # 引数を分解する:boolとfloat、tupleのみ対応 + optimizer_kwargs = {} + if args.optimizer_args is not None and len(args.optimizer_args) > 0: + for arg in args.optimizer_args: + key, value = arg.split('=') + + value = value.split(",") + for i in range(len(value)): + if value[i].lower() == "true" or value[i].lower() == "false": + value[i] = (value[i].lower() == "true") + else: + value[i] = float(value[i]) + if len(value) == 1: + value = value[0] + else: + value = tuple(value) + + optimizer_kwargs[key] = value + print("optkwargs:", optimizer_kwargs) + lr = args.learning_rate if optimizer_type == "AdamW8bit".lower(): @@ -1544,53 +1566,128 @@ def get_optimizer(args, trainable_params): import bitsandbytes as bnb except ImportError: raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") - print(f"use 8-bit AdamW optimizer | betas: {betas}, Weight Decay: {weight_decay}") + print(f"use 8-bit AdamW optimizer | {optimizer_kwargs}") optimizer_class = bnb.optim.AdamW8bit - optimizer = optimizer_class(trainable_params, lr=lr, betas=betas, weight_decay=weight_decay) + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) elif optimizer_type == "SGDNesterov8bit".lower(): try: import bitsandbytes as bnb except ImportError: raise ImportError("No bitsand bytes / bitsandbytesがインストールされていないようです") - print(f"use 8-bit SGD with Nesterov optimizer | Momentum: {momentum}, Weight Decay: {weight_decay}") + print(f"use 8-bit SGD with Nesterov optimizer | {optimizer_kwargs}") + if "momentum" not in optimizer_kwargs: + print(f"8-bit SGD with Nesterov must be with momentum, set momentum to 0.9 / 8-bit SGD with Nesterovはmomentum指定が必須のため0.9に設定します") + optimizer_kwargs["momentum"] = 0.9 + optimizer_class = bnb.optim.SGD8bit - optimizer = optimizer_class(trainable_params, lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=True) + optimizer = optimizer_class(trainable_params, lr=lr, nesterov=True, **optimizer_kwargs) elif optimizer_type == "Lion".lower(): try: import lion_pytorch except ImportError: raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです") - print(f"use Lion optimizer | betas: {betas}, Weight Decay: {weight_decay}") + print(f"use Lion optimizer | {optimizer_kwargs}") optimizer_class = lion_pytorch.Lion - optimizer = optimizer_class(trainable_params, lr=lr, betas=betas, weight_decay=weight_decay) + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) elif optimizer_type == "SGDNesterov".lower(): - print(f"use SGD with Nesterov optimizer | Momentum: {momentum}, Weight Decay: {weight_decay}") + print(f"use SGD with Nesterov optimizer | {optimizer_kwargs}") + if "momentum" not in optimizer_kwargs: + print(f"SGD with Nesterov must be with momentum, set momentum to 0.9 / SGD with Nesterovはmomentum指定が必須のため0.9に設定します") + optimizer_kwargs["momentum"] = 0.9 + optimizer_class = torch.optim.SGD - optimizer = optimizer_class(trainable_params, lr=lr, momentum=momentum, weight_decay=weight_decay, nesterov=True) + optimizer = optimizer_class(trainable_params, lr=lr, nesterov=True, **optimizer_kwargs) elif optimizer_type == "DAdaptation".lower(): try: import dadaptation except ImportError: raise ImportError("No dadaptation / dadaptation がインストールされていないようです") - print(f"use D-Adaptation 
Adam optimizer | betas: {betas}, Weight Decay: {weight_decay}") - optimizer_class = dadaptation.DAdaptAdam - if lr <= 0.1: - print(f'learning rate is too low. If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: {lr}') + print(f"use D-Adaptation Adam optimizer | {optimizer_kwargs}") + + min_lr = lr + if type(trainable_params) == list and type(trainable_params[0]) == dict: + for group in trainable_params: + min_lr = min(min_lr, group.get("lr", lr)) + + if min_lr <= 0.1: + print( + f'learning rate is too low. If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: {min_lr}') print('recommend option: lr=1.0 / 推奨は1.0です') - optimizer = optimizer_class(trainable_params, lr=lr, betas=betas, weight_decay=weight_decay) + + optimizer_class = dadaptation.DAdaptAdam + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + + elif optimizer_type == "Adafactor".lower(): + # 引数を確認して適宜補正する + if "relative_step" not in optimizer_kwargs: + optimizer_kwargs["relative_step"] = True # default + if not optimizer_kwargs["relative_step"] and optimizer_kwargs.get("warmup_init", False): + print(f"set relative_step to True because warmup_init is True / warmup_initがTrueのためrelative_stepをTrueにします") + optimizer_kwargs["relative_step"] = True + print(f"use Adafactor optimizer | {optimizer_kwargs}") + + if optimizer_kwargs["relative_step"]: + print(f"relative_step is true / relative_stepがtrueです") + if lr != 0.0: + print(f"learning rate is used as initial_lr / 指定したlearning rateはinitial_lrとして使用されます") + args.learning_rate = None + + # trainable_paramsがgroupだった時の処理:lrを削除する + if type(trainable_params) == list and type(trainable_params[0]) == dict: + has_group_lr = False + for group in trainable_params: + p = group.pop("lr", None) + has_group_lr = has_group_lr or (p is not None) + + if has_group_lr: + # 一応argsを無効にしておく TODO 依存関係が逆転してるのであまり望ましくない + print(f"unet_lr and text_encoder_lr are ignored / unet_lrとtext_encoder_lrは無視されます") + args.unet_lr = None + args.text_encoder_lr = None + + if args.lr_scheduler != "adafactor": + print(f"use adafactor_scheduler / スケジューラにadafactor_schedulerを使用します") + args.lr_scheduler = f"adafactor:{lr}" # ちょっと微妙だけど + + lr = None + else: + if args.max_grad_norm != 0.0: + print(f"because max_grad_norm is set, clip_grad_norm is enabled. consider set to 0 / max_grad_normが設定されているためclip_grad_normが有効になります。0に設定して無効にしたほうがいいかもしれません") + if args.lr_scheduler != "constant_with_warmup": + print(f"constant_with_warmup will be good / スケジューラはconstant_with_warmupが良いかもしれません") + if optimizer_kwargs.get("clip_threshold", 1.0) != 1.0: + print(f"clip_threshold=1.0 will be good / clip_thresholdは1.0が良いかもしれません") + + optimizer_class = transformers.optimization.Adafactor + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + + elif optimizer_type == "AdamW".lower(): + print(f"use AdamW optimizer | {optimizer_kwargs}") + optimizer_class = torch.optim.AdamW + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) else: - print(f"use AdamW optimizer | betas: {betas}, Weight Decay: {weight_decay}") - optimizer_class = torch.optim.AdamW - optimizer = optimizer_class(trainable_params, lr=lr, betas=betas, weight_decay=weight_decay) + # 任意のoptimizerを使う + optimizer_type = args.optimizer_type # lowerでないやつ(微妙) + print(f"use {optimizer_type} | {optimizer_kwargs}") + if "." 
not in optimizer_type: + optimizer_module = torch.optim + else: + values = optimizer_type.split(".") + optimizer_module = importlib.import_module(".".join(values[:-1])) + optimizer_type = values[-1] + + optimizer_class = getattr(optimizer_module, optimizer_type) + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__ + optimizer_args = ",".join([f"{k}={v}" for k, v in optimizer_kwargs.items()]) - return optimizer_name, optimizer + return optimizer_name, optimizer_args, optimizer # Monkeypatch newer get_scheduler() function overridng current version of diffusers.optimizer.get_scheduler @@ -1627,6 +1724,12 @@ def get_scheduler_fix( last_epoch (`int`, *optional*, defaults to -1): The index of the last epoch when resuming training. """ + if name.startswith("adafactor"): + assert type(optimizer) == transformers.optimization.Adafactor, f"adafactor scheduler must be used with Adafactor optimizer / adafactor schedulerはAdafactorオプティマイザと同時に使ってください" + initial_lr = float(name.split(':')[1]) + # print("adafactor scheduler init lr", initial_lr) + return transformers.optimization.AdafactorSchedule(optimizer, initial_lr) + name = SchedulerType(name) schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: @@ -1744,13 +1847,19 @@ def prepare_dtype(args: argparse.Namespace): def load_target_model(args: argparse.Namespace, weight_dtype): - load_stable_diffusion_format = os.path.isfile(args.pretrained_model_name_or_path) # determine SD or Diffusers + name_or_path = args.pretrained_model_name_or_path + name_or_path = os.readlink(name_or_path) if os.path.islink(name_or_path) else name_or_path + load_stable_diffusion_format = os.path.isfile(name_or_path) # determine SD or Diffusers if load_stable_diffusion_format: print("load StableDiffusion checkpoint") - text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.pretrained_model_name_or_path) + text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, name_or_path) else: print("load Diffusers pretrained models") - pipe = StableDiffusionPipeline.from_pretrained(args.pretrained_model_name_or_path, tokenizer=None, safety_checker=None) + try: + pipe = StableDiffusionPipeline.from_pretrained(name_or_path, tokenizer=None, safety_checker=None) + except EnvironmentError as ex: + print( + f"model is not found as a file or in Hugging Face, perhaps file name is wrong? 
/ 指定したモデル名のファイル、またはHugging Faceのモデルが見つかりません。ファイル名が誤っているかもしれません: {name_or_path}") text_encoder = pipe.text_encoder vae = pipe.vae unet = pipe.unet diff --git a/train_db.py b/train_db.py index 268d90a1..03fba1a6 100644 --- a/train_db.py +++ b/train_db.py @@ -120,7 +120,7 @@ def train(args): else: trainable_params = unet.parameters() - _, optimizer = train_util.get_optimizer(args, trainable_params) + _, _, optimizer = train_util.get_optimizer(args, trainable_params) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -137,10 +137,9 @@ def train(args): args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end # lr schedulerを用意する TODO gradient_accumulation_stepsの扱いが何かおかしいかもしれない。後で確認する - lr_scheduler = train_util.get_scheduler_fix( - args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, - num_training_steps=args.max_train_steps, - num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) + lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, + num_training_steps=args.max_train_steps, + num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする if args.full_fp16: @@ -263,12 +262,12 @@ def train(args): loss = loss.mean() # 平均なのでbatch_sizeで割る必要なし accelerator.backward(loss) - if accelerator.sync_gradients: + if accelerator.sync_gradients and args.max_grad_norm != 0.0: if train_text_encoder: params_to_clip = (itertools.chain(unet.parameters(), text_encoder.parameters())) else: params_to_clip = unet.parameters() - accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm) + accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() @@ -281,8 +280,8 @@ def train(args): current_loss = loss.detach().item() if args.logging_dir is not None: - logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]} - if args.optimizer_type == "DAdaptation".lower(): # tracking d*lr value + logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])} + if args.optimizer_type.lower() == "DAdaptation".lower(): # tracking d*lr value logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d']*lr_scheduler.optimizers[0].param_groups[0]['lr'] accelerator.log(logs, step=global_step) diff --git a/train_network.py b/train_network.py index 3c4fb8d9..b01ec117 100644 --- a/train_network.py +++ b/train_network.py @@ -28,14 +28,14 @@ def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_sche logs = {"loss/current": current_loss, "loss/average": avr_loss} if args.network_train_unet_only: - logs["lr/unet"] = lr_scheduler.get_last_lr()[0] + logs["lr/unet"] = float(lr_scheduler.get_last_lr()[0]) elif args.network_train_text_encoder_only: - logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] + logs["lr/textencoder"] = float(lr_scheduler.get_last_lr()[0]) else: - logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] - logs["lr/unet"] = lr_scheduler.get_last_lr()[-1] # may be same to textencoder + logs["lr/textencoder"] = float(lr_scheduler.get_last_lr()[0]) + logs["lr/unet"] = float(lr_scheduler.get_last_lr()[-1]) # may be same to textencoder - if args.optimizer_type == "DAdaptation".lower(): # tracking d*lr value of unet. + if args.optimizer_type.lower() == "DAdaptation".lower(): # tracking d*lr value of unet. 
logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]['d']*lr_scheduler.optimizers[-1].param_groups[0]['lr'] return logs @@ -147,7 +147,7 @@ def train(args): print("prepare optimizer, data loader etc.") trainable_params = network.prepare_optimizer_params(args.text_encoder_lr, args.unet_lr) - optimizer_name, optimizer = train_util.get_optimizer(args, trainable_params) + optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(args, trainable_params) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -161,10 +161,9 @@ def train(args): print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する - lr_scheduler = train_util.get_scheduler_fix( - args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) + lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする if args.full_fp16: @@ -287,7 +286,7 @@ def train(args): "ss_bucket_info": json.dumps(train_dataset.bucket_info), "ss_training_comment": args.training_comment, # will not be updated after training "ss_sd_scripts_commit_hash": train_util.get_git_revision_hash(), - "ss_optimizer": optimizer_name + "ss_optimizer": optimizer_name + (f"({optimizer_args})" if len(optimizer_args) > 0 else "") } # uncomment if another network is added @@ -380,9 +379,9 @@ def train(args): loss = loss.mean() # 平均なのでbatch_sizeで割る必要なし accelerator.backward(loss) - if accelerator.sync_gradients: + if accelerator.sync_gradients and args.max_grad_norm != 0.0: params_to_clip = network.get_trainable_params() - accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm) + accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() @@ -478,10 +477,6 @@ if __name__ == '__main__': parser.add_argument("--unet_lr", type=float, default=None, help="learning rate for U-Net / U-Netの学習率") parser.add_argument("--text_encoder_lr", type=float, default=None, help="learning rate for Text Encoder / Text Encoderの学習率") - parser.add_argument("--lr_scheduler_num_cycles", type=int, default=1, - help="Number of restarts for cosine scheduler with restarts / cosine with restartsスケジューラでのリスタート回数") - parser.add_argument("--lr_scheduler_power", type=float, default=1, - help="Polynomial power for polynomial scheduler / polynomialスケジューラでのpolynomial power") parser.add_argument("--network_weights", type=str, default=None, help="pretrained weights for network / 学習するネットワークの初期重み") diff --git a/train_textual_inversion.py b/train_textual_inversion.py index 07dcc199..b4ddd763 100644 --- a/train_textual_inversion.py +++ b/train_textual_inversion.py @@ -199,7 +199,7 @@ def train(args): # 学習に必要なクラスを準備する print("prepare optimizer, data loader etc.") trainable_params = text_encoder.get_input_embeddings().parameters() - _, optimizer = train_util.get_optimizer(args, trainable_params) + _, _, optimizer = train_util.get_optimizer(args, trainable_params) # dataloaderを準備する # DataLoaderのプロセス数:0はメインプロセスになる @@ -213,10 +213,9 @@ def train(args): print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する - lr_scheduler = train_util.get_scheduler_fix( - args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) + lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) # acceleratorがなんかよろしくやってくれるらしい text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( @@ -338,9 +337,9 @@ def train(args): loss = loss.mean() # 平均なのでbatch_sizeで割る必要なし accelerator.backward(loss) - if accelerator.sync_gradients: + if accelerator.sync_gradients and args.max_grad_norm != 0.0: params_to_clip = text_encoder.get_input_embeddings().parameters() - accelerator.clip_grad_norm_(params_to_clip, 1.0) # args.max_grad_norm) + accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() @@ -357,8 +356,8 @@ def train(args): current_loss = loss.detach().item() if args.logging_dir is not None: - logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]} - if args.optimizer_type == "DAdaptation".lower(): # tracking d*lr value + logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])} + if args.optimizer_type.lower() == "DAdaptation".lower(): # tracking d*lr value logs["lr/d*lr"] = lr_scheduler.optimizers[0].param_groups[0]['d']*lr_scheduler.optimizers[0].param_groups[0]['lr'] accelerator.log(logs, step=global_step) From f2b300a2218b95f0672aa27309e69063fe3dedf6 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Wed, 22 Feb 2023 22:04:53 +0900 Subject: [PATCH 06/10] Add about optimizer --- train_network_README-ja.md | 40 +++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/train_network_README-ja.md b/train_network_README-ja.md index e0ebaf76..4a507ffc 100644 --- a/train_network_README-ja.md +++ b/train_network_README-ja.md @@ -50,11 +50,13 @@ accelerate launch --num_cpu_threads_per_process 1 train_network.py --train_data_dir=..\data\db\char1 --output_dir=..\lora_train1 --reg_data_dir=..\data\db\reg1 --prior_loss_weight=1.0 --resolution=448,640 --train_batch_size=1 --learning_rate=1e-4 - --max_train_steps=400 --use_8bit_adam --xformers --mixed_precision=fp16 + --max_train_steps=400 --optimizer_type=AdamW8bit --xformers --mixed_precision=fp16 --save_every_n_epochs=1 --save_model_as=safetensors --clip_skip=2 --seed=42 --color_aug --network_module=networks.lora ``` +(2023/2/22:オプティマイザの指定方法が変わりました。[こちら](#オプティマイザの指定について)をご覧ください。) + --output_dirオプションで指定したフォルダに、LoRAのモデルが保存されます。 その他、以下のオプションが指定できます。 @@ -76,6 +78,42 @@ accelerate launch --num_cpu_threads_per_process 1 train_network.py --network_train_unet_onlyと--network_train_text_encoder_onlyの両方とも未指定時(デフォルト)はText EncoderとU-Netの両方のLoRAモジュールを有効にします。 +## オプティマイザの指定について + +--optimizer_type オプションでオプティマイザの種類を指定します。以下が指定できます。 + +- AdamW : [torch.optim.AdamW](https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html) + - 過去のバージョンのオプション未指定時と同じ +- AdamW8bit : 引数は同上 + - 過去のバージョンの--use_8bit_adam指定時と同じ +- Lion : https://github.com/lucidrains/lion-pytorch + - 過去のバージョンの--use_lion_optimizer指定時と同じ +- SGDNesterov : [torch.optim.SGD](https://pytorch.org/docs/stable/generated/torch.optim.SGD.html), 
nesterov=True +- SGDNesterov8bit : 引数は同上 +- DAdaptation : https://github.com/facebookresearch/dadaptation +- AdaFactor : [Transformers AdaFactor](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules) +- 任意のオプティマイザ + +オプティマイザのオプション引数は--optimizer_argsオプションで指定してください。key=valueの形式で、複数の値が指定できます。また、valueはカンマ区切りで複数の値が指定できます。たとえばAdamWオプティマイザに引数を指定する場合は、``--optimizer_args weight_decay=0.01 betas=.9,.999``のようになります。 + +オプション引数を指定する場合は、それぞれのオプティマイザの仕様をご確認ください。 + +一部のオプティマイザでは必須の引数があり、省略すると自動的に追加されます(SGDNesterovのmomentumなど)。コンソールの出力を確認してください。 + +D-Adaptationオプティマイザは学習率を自動調整します。学習率のオプションに指定した値は学習率そのものではなくD-Adaptationが決定した学習率の適用率になりますので、通常は1.0を指定してください。Text EncoderにU-Netの半分の学習率を指定したい場合は、``--text_encoder_lr=0.5 --unet_lr=1.0``と指定します。 + +AdaFactorオプティマイザはrelative_step=Trueを指定すると学習率を自動調整できます(省略時はデフォルトで追加されます)。自動調整する場合は学習率のスケジューラにはadafactor_schedulerが強制的に使用されます。またscale_parameterとwarmup_initを指定するとよいようです。 + +自動調整する場合のオプション指定はたとえば ``--optimizer_args "relative_step=True" "scale_parameter=True" "warmup_init=True"`` のようになります。 + +学習率を自動調整しない場合はオプション引数 ``relative_step=False`` を追加してください。その場合、学習率のスケジューラにはconstant_with_warmupが、また勾配のclip normをしないことが推奨されているようです。そのため引数は ``--optimizer_type=adafactor --optimizer_args "relative_step=False" --lr_scheduler="constant_with_warmup" --max_grad_norm=0.0`` のようになります。 + +### 任意のオプティマイザを使う + +``torch.optim`` のオプティマイザを使う場合にはクラス名のみを(``--optimizer_type=RMSprop``など)、他のモジュールのオプティマイザを使う時は「モジュール名.クラス名」を指定してください(``--optimizer_type=bitsandbytes.optim.lamb.LAMB``など)。 + +(内部でimportlibしているだけで動作は未確認です。必要ならパッケージをインストールしてください。) + ## マージスクリプトについて merge_lora.pyでStable DiffusionのモデルにLoRAの学習結果をマージしたり、複数のLoRAモデルをマージしたりできます。 From 125039f491d6aa52719efdc2b8ea239cf3e042dc Mon Sep 17 00:00:00 2001 From: Kohya S Date: Wed, 22 Feb 2023 22:06:47 +0900 Subject: [PATCH 07/10] update readme --- README.md | 121 +++++++----------------------------------------------- 1 file changed, 15 insertions(+), 106 deletions(-) diff --git a/README.md b/README.md index a1adcb27..835ebd81 100644 --- a/README.md +++ b/README.md @@ -124,112 +124,21 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser ## Change History -- 19 Feb. 2023, 2023/2/19: - - Add ``--use_lion_optimizer`` to each training script to use [Lion optimizer](https://github.com/lucidrains/lion-pytorch). - - Please install Lion optimizer with ``pip install lion-pytorch`` (it is not in ``requirements.txt`` currently.) - - Add ``--lowram`` option to ``train_network.py``. Load models to VRAM instead of VRAM (for machines which have bigger VRAM than RAM such as Colab and Kaggle). Thanks to Isotr0py! - - Default behavior (without lowram) has reverted to the same as before 14 Feb. - - Fixed git commit hash to be set correctly regardless of the working directory. Thanks to vladmandic! - - - ``--use_lion_optimizer`` オプションを各学習スクリプトに追加しました。 [Lion optimizer](https://github.com/lucidrains/lion-pytorch) を使用できます。 - - あらかじめ ``pip install lion-pytorch`` でインストールしてください(現在は ``requirements.txt`` に含まれていません)。 - - ``--lowram`` オプションを ``train_network.py`` に追加しました。モデルをRAMではなくVRAMに読み込みます(ColabやKaggleなど、VRAMがRAMに比べて多い環境で有効です)。 Isotr0py 氏に感謝します。 - - lowram オプションなしのデフォルト動作は2/14より前と同じに戻しました。 - - git commit hash を現在のフォルダ位置に関わらず正しく取得するように修正しました。vladmandic 氏に感謝します。 - -- 16 Feb. 2023, 2023/2/16: - - Noise offset is recorded to the metadata. Thanks to space-nuko! - - Show the moving average loss to prevent loss jumping in ``train_network.py`` and ``train_db.py``. Thanks to shirayu! 
- - Noise offsetがメタデータに記録されるようになりました。space-nuko氏に感謝します。 - - ``train_network.py``と``train_db.py``で学習中に表示されるlossの値が移動平均になりました。epochの先頭で表示されるlossが大きく変動する事象を解決します。shirayu氏に感謝します。 -- 14 Feb. 2023, 2023/2/14: - - Add support with multi-gpu trainining for ``train_network.py``. Thanks to Isotr0py! - - Add ``--verbose`` option for ``resize_lora.py``. For details, see [this PR](https://github.com/kohya-ss/sd-scripts/pull/179). Thanks to mgz-dev! - - Git commit hash is added to the metadata for LoRA. Thanks to space-nuko! - - Add ``--noise_offset`` option for each training scripts. - - Implementation of https://www.crosslabs.org//blog/diffusion-with-offset-noise - - This option may improve ability to generate darker/lighter images. May work with LoRA. - - ``train_network.py``でマルチGPU学習をサポートしました。Isotr0py氏に感謝します。 - - ``--verbose``オプションを ``resize_lora.py`` に追加しました。表示される情報の詳細は [こちらのPR](https://github.com/kohya-ss/sd-scripts/pull/179) をご参照ください。mgz-dev氏に感謝します。 - - LoRAのメタデータにgitのcommit hashを追加しました。space-nuko氏に感謝します。 - - ``--noise_offset`` オプションを各学習スクリプトに追加しました。 - - こちらの記事の実装になります: https://www.crosslabs.org//blog/diffusion-with-offset-noise - - 全体的に暗い、明るい画像の生成結果が良くなる可能性があるようです。LoRA学習でも有効なようです。 - -- 11 Feb. 2023, 2023/2/11: - - ``lora_interrogator.py`` is added in ``networks`` folder. See ``python networks\lora_interrogator.py -h`` for usage. - - For LoRAs where the activation word is unknown, this script compares the output of Text Encoder after applying LoRA to that of unapplied to find out which token is affected by LoRA. Hopefully you can figure out the activation word. LoRA trained with captions does not seem to be able to interrogate. - - Batch size can be large (like 64 or 128). - - ``train_textual_inversion.py`` now supports multiple init words. - - Following feature is reverted to be the same as before. Sorry for confusion: - > Now the number of data in each batch is limited to the number of actual images (not duplicated). Because a certain bucket may contain smaller number of actual images, so the batch may contain same (duplicated) images. - - - ``lora_interrogator.py`` を ``network``フォルダに追加しました。使用法は ``python networks\lora_interrogator.py -h`` でご確認ください。 - - このスクリプトは、起動promptがわからないLoRAについて、LoRA適用前後のText Encoderの出力を比較することで、どのtokenの出力が変化しているかを調べます。運が良ければ起動用の単語が分かります。キャプション付きで学習されたLoRAは影響が広範囲に及ぶため、調査は難しいようです。 - - バッチサイズはわりと大きくできます(64や128など)。 - - ``train_textual_inversion.py`` で複数のinit_word指定が可能になりました。 - - 次の機能を削除し元に戻しました。混乱を招き申し訳ありません。 - > これらのオプションによりbucketが細分化され、ひとつのバッチ内に同一画像が重複して存在することが増えたため、バッチサイズを``そのbucketの画像種類数``までに制限する機能を追加しました。 - -- 10 Feb. 2023, 2023/2/10: - - Updated ``requirements.txt`` to prevent upgrading with pip taking a long time or failure to upgrade. - - ``resize_lora.py`` keeps the metadata of the model. ``dimension is resized from ...`` is added to the top of ``ss_training_comment``. - - ``merge_lora.py`` supports models with different ``alpha``s. If there is a problem, old version is ``merge_lora_old.py``. - - ``svd_merge_lora.py`` is added. This script merges LoRA models with any rank (dim) and alpha, and approximate a new LoRA with svd for a specified rank (dim). - - Note: merging scripts erase the metadata currently. - - ``resize_images_to_resolution.py`` supports multibyte characters in filenames. 
- - pipでの更新が長時間掛かったり、更新に失敗したりするのを防ぐため、``requirements.txt``を更新しました。 - - ``resize_lora.py``がメタデータを保持するようになりました。 ``dimension is resized from ...`` という文字列が ``ss_training_comment`` の先頭に追加されます。 - - ``merge_lora.py``がalphaが異なるモデルをサポートしました。 何か問題がありましたら旧バージョン ``merge_lora_old.py`` をお使いください。 - - ``svd_merge_lora.py`` を追加しました。 複数の任意のdim (rank)、alphaのLoRAモデルをマージし、svdで任意dim(rank)のLoRAで近似します。 - - 注:マージ系のスクリプトは現時点ではメタデータを消去しますのでご注意ください。 - - ``resize_images_to_resolution.py``が日本語ファイル名をサポートしました。 - -- 9 Feb. 2023, 2023/2/9: - - Caption dropout is supported in ``train_db.py``, ``fine_tune.py`` and ``train_network.py``. Thanks to forestsource! - - ``--caption_dropout_rate`` option specifies the dropout rate for captions (0~1.0, 0.1 means 10% chance for dropout). If dropout occurs, the image is trained with the empty caption. Default is 0 (no dropout). - - ``--caption_dropout_every_n_epochs`` option specifies how many epochs to drop captions. If ``3`` is specified, in epoch 3, 6, 9 ..., images are trained with all captions empty. Default is None (no dropout). - - ``--caption_tag_dropout_rate`` option specified the dropout rate for tags (comma separated tokens) (0~1.0, 0.1 means 10% chance for dropout). If dropout occurs, the tag is removed from the caption. If ``--keep_tokens`` option is set, these tokens (tags) are not dropped. Default is 0 (no droupout). - - The bulk image downsampling script is added. Documentation is [here](https://github.com/kohya-ss/sd-scripts/blob/main/train_network_README-ja.md#%E7%94%BB%E5%83%8F%E3%83%AA%E3%82%B5%E3%82%A4%E3%82%BA%E3%82%B9%E3%82%AF%E3%83%AA%E3%83%97%E3%83%88) (in Jpanaese). Thanks to bmaltais! - - Typo check is added. Thanks to shirayu! - - キャプションのドロップアウトを``train_db.py``、``fine_tune.py``、``train_network.py``の各スクリプトに追加しました。forestsource氏に感謝します。 - - ``--caption_dropout_rate``オプションでキャプションのドロップアウト率を指定します(0~1.0、 0.1を指定すると10%の確率でドロップアウト)。ドロップアウトされた場合、画像は空のキャプションで学習されます。デフォルトは 0 (ドロップアウトなし)です。 - - ``--caption_dropout_every_n_epochs`` オプションで何エポックごとにキャプションを完全にドロップアウトするか指定します。たとえば``3``を指定すると、エポック3、6、9……で、すべての画像がキャプションなしで学習されます。デフォルトは None (ドロップアウトなし)です。 - - ``--caption_tag_dropout_rate`` オプションで各タグ(カンマ区切りの各部分)のドロップアウト率を指定します(0~1.0、 0.1を指定すると10%の確率でドロップアウト)。ドロップアウトが起きるとそのタグはそのときだけキャプションから取り除かれて学習されます。``--keep_tokens`` オプションを指定していると、シャッフルされない部分のタグはドロップアウトされません。デフォルトは 0 (ドロップアウトなし)です。 - - 画像の一括縮小スクリプトを追加しました。ドキュメントは [こちら](https://github.com/kohya-ss/sd-scripts/blob/main/train_network_README-ja.md#%E7%94%BB%E5%83%8F%E3%83%AA%E3%82%B5%E3%82%A4%E3%82%BA%E3%82%B9%E3%82%AF%E3%83%AA%E3%83%97%E3%83%88) です。bmaltais氏に感謝します。 - - 誤字チェッカが追加されました。shirayu氏に感謝します。 - -- 6 Feb. 2023, 2023/2/6: - - ``--bucket_reso_steps`` and ``--bucket_no_upscale`` options are added to training scripts (fine tuning, DreamBooth, LoRA and Textual Inversion) and ``prepare_buckets_latents.py``. - - ``--bucket_reso_steps`` takes the steps for buckets in aspect ratio bucketing. Default is 64, same as before. - - Any value greater than or equal to 1 can be specified; 64 is highly recommended and a value divisible by 8 is recommended. - - If less than 64 is specified, padding will occur within U-Net. The result is unknown. - - If you specify a value that is not divisible by 8, it will be truncated to divisible by 8 inside VAE, because the size of the latent is 1/8 of the image size. - - If ``--bucket_no_upscale`` option is specified, images smaller than the bucket size will be processed without upscaling. 
- - Internally, a bucket smaller than the image size is created (for example, if the image is 300x300 and ``bucket_reso_steps=64``, the bucket is 256x256). The image will be trimmed. - - Implementation of [#130](https://github.com/kohya-ss/sd-scripts/issues/130). - - Images with an area larger than the maximum size specified by ``--resolution`` are downsampled to the max bucket size. - - Now the number of data in each batch is limited to the number of actual images (not duplicated). Because a certain bucket may contain smaller number of actual images, so the batch may contain same (duplicated) images. - - ``--random_crop`` now also works with buckets enabled. - - Instead of always cropping the center of the image, the image is shifted left, right, up, and down to be used as the training data. This is expected to train to the edges of the image. - - Implementation of discussion [#34](https://github.com/kohya-ss/sd-scripts/discussions/34). - - - ``--bucket_reso_steps``および``--bucket_no_upscale``オプションを、学習スクリプトおよび``prepare_buckets_latents.py``に追加しました。 - - ``--bucket_reso_steps``オプションでは、bucketの解像度の単位を指定できます。デフォルトは64で、今までと同じ動作です。 - - 1以上の任意の値を指定できます。基本的には64を推奨します。64以外の値では、8で割り切れる値を推奨します。 - - 64未満を指定するとU-Netの内部でpaddingが発生します。どのような結果になるかは未知数です。 - - 8で割り切れない値を指定すると余りはVAE内部で切り捨てられます。 - - ``--bucket_no_upscale``オプションを指定すると、bucketサイズよりも小さい画像は拡大せずそのまま処理します。 - - 内部的には画像サイズ以下のサイズのbucketを作成します(たとえば画像が300x300で``bucket_reso_steps=64``の場合、256x256のbucket)。余りは都度trimmingされます。 - - [#130](https://github.com/kohya-ss/sd-scripts/issues/130) を実装したものです。 - - ``--resolution``で指定した最大サイズよりも面積が大きい画像は、最大サイズと同じ面積になるようアスペクト比を維持したまま縮小され、そのサイズを元にbucketが作られます。 - - これらのオプションによりbucketが細分化され、ひとつのバッチ内に同一画像が重複して存在することが増えたため、バッチサイズを``そのbucketの画像種類数``までに制限する機能を追加しました。 - - たとえば繰り返し回数10で、あるbucketに1枚しか画像がなく、バッチサイズが10以上のとき、今まではepoch内で、同一画像を10枚含むバッチが1回だけ使用されていました。 - - 機能追加後はepoch内にサイズ1のバッチが10回、使用されます。 - - ``--random_crop``がbucketを有効にした場合にも機能するようになりました。 - - 常に画像の中央を切り取るのではなく、左右、上下にずらして教師データにします。これにより画像端まで学習されることが期待されます。 - - discussionの[#34](https://github.com/kohya-ss/sd-scripts/discussions/34)を実装したものです。 - +- 22 Feb. 2023, 2023/2/22: + - Refactor optmizer options. Thanks to mgz-dev! + - Add ``--optimizer_type`` option for each training script. Please see help. Japanese documentation is here. + - ``--use_8bit_adam`` and ``--use_lion_optimizer`` options also work, but override above option. + - Add SGDNesterov and its 8bit. + - Add D-Adaptation optimizer. Thanks to BootsofLagrangian and all! + - Please see https://github.com/kohya-ss/sd-scripts/issues/181 for details. + - Add AdaFactor optimizer. Thanks to Toshiaki! + - オプティマイザ関連のオプションを見直しました。mgz-dev氏に感謝します。 + - ``--optimizer_type`` を各学習スクリプトに追加しました。ドキュメントはこちら。 + - ``--use_8bit_adam`` と ``--use_lion_optimizer`` のオプションは依然として動作しますがoptimizer_typeを上書きしますのでご注意ください。 + - SGDNesterov オプティマイザおよびその8bit版を追加しました。 + - D-Adaptation オプティマイザを追加しました。BootsofLagrangian 氏および諸氏に感謝します。 + - こちらのissueもあわせてご覧ください。 https://github.com/kohya-ss/sd-scripts/issues/181 + - AdaFactor オプティマイザを追加しました。Toshiaki氏に感謝します。 Please read [Releases](https://github.com/kohya-ss/sd-scripts/releases) for recent updates. 
 最近の更新情報は [Release](https://github.com/kohya-ss/sd-scripts/releases) をご覧ください。

From e13e503cbcca13cbb9965c717ef80d316c5862a1 Mon Sep 17 00:00:00 2001
From: Kohya S
Date: Wed, 22 Feb 2023 22:10:32 +0900
Subject: [PATCH 08/10] update readme

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 835ebd81..f9ebf840 100644
--- a/README.md
+++ b/README.md
@@ -132,6 +132,7 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser
   - Add D-Adaptation optimizer. Thanks to BootsofLagrangian and all!
     - Please see https://github.com/kohya-ss/sd-scripts/issues/181 for details.
   - Add AdaFactor optimizer. Thanks to Toshiaki!
+  - Extra lr scheduler settings (num_cycles etc.) are working in training scripts other than ``train_network.py``.
   - オプティマイザ関連のオプションを見直しました。mgz-dev氏に感謝します。
     - ``--optimizer_type`` を各学習スクリプトに追加しました。ドキュメントはこちら。
     - ``--use_8bit_adam`` と ``--use_lion_optimizer`` のオプションは依然として動作しますがoptimizer_typeを上書きしますのでご注意ください。
@@ -139,6 +140,7 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser
   - D-Adaptation オプティマイザを追加しました。BootsofLagrangian 氏および諸氏に感謝します。
     - こちらのissueもあわせてご覧ください。 https://github.com/kohya-ss/sd-scripts/issues/181
   - AdaFactor オプティマイザを追加しました。Toshiaki氏に感謝します。
+  - 追加のスケジューラ設定(num_cycles等)が ``train_network.py`` 以外の学習スクリプトでも使えるようになりました。

 Please read [Releases](https://github.com/kohya-ss/sd-scripts/releases) for recent updates.
 最近の更新情報は [Release](https://github.com/kohya-ss/sd-scripts/releases) をご覧ください。

From 7a0d2a2d454f42c5fd17fbe1ffb0b44c3b3d21bb Mon Sep 17 00:00:00 2001
From: Kohya S
Date: Wed, 22 Feb 2023 22:16:23 +0900
Subject: [PATCH 09/10] update readme

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index f9ebf840..595619b9 100644
--- a/README.md
+++ b/README.md
@@ -133,6 +133,7 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser
     - Please see https://github.com/kohya-ss/sd-scripts/issues/181 for details.
   - Add AdaFactor optimizer. Thanks to Toshiaki!
   - Extra lr scheduler settings (num_cycles etc.) are working in training scripts other than ``train_network.py``.
+  - Add ``--max_grad_norm`` option for each training script for gradient clipping. ``0.0`` disables clipping.
   - オプティマイザ関連のオプションを見直しました。mgz-dev氏に感謝します。
     - ``--optimizer_type`` を各学習スクリプトに追加しました。ドキュメントはこちら。
     - ``--use_8bit_adam`` と ``--use_lion_optimizer`` のオプションは依然として動作しますがoptimizer_typeを上書きしますのでご注意ください。
@@ -141,6 +142,7 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser
     - こちらのissueもあわせてご覧ください。 https://github.com/kohya-ss/sd-scripts/issues/181
   - AdaFactor オプティマイザを追加しました。Toshiaki氏に感謝します。
   - 追加のスケジューラ設定(num_cycles等)が ``train_network.py`` 以外の学習スクリプトでも使えるようになりました。
+  - 勾配クリップ時の最大normを指定する ``--max_grad_norm`` オプションを追加しました。``0.0``を指定するとクリップしなくなります。

 Please read [Releases](https://github.com/kohya-ss/sd-scripts/releases) for recent updates.
 最近の更新情報は [Release](https://github.com/kohya-ss/sd-scripts/releases) をご覧ください。

From f68a48b354572d4f046a41c67b69ff91004a77d5 Mon Sep 17 00:00:00 2001
From: Kohya S
Date: Wed, 22 Feb 2023 22:19:36 +0900
Subject: [PATCH 10/10] update readme

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 595619b9..927c4f70 100644
--- a/README.md
+++ b/README.md
@@ -134,6 +134,7 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser
   - Add AdaFactor optimizer. Thanks to Toshiaki!
   - Extra lr scheduler settings (num_cycles etc.) are working in training scripts other than ``train_network.py``.
   - Add ``--max_grad_norm`` option for each training script for gradient clipping. ``0.0`` disables clipping.
+  - Symbolic link can be loaded in each training script. Thanks to TkskKurumi!
   - オプティマイザ関連のオプションを見直しました。mgz-dev氏に感謝します。
     - ``--optimizer_type`` を各学習スクリプトに追加しました。ドキュメントはこちら。
     - ``--use_8bit_adam`` と ``--use_lion_optimizer`` のオプションは依然として動作しますがoptimizer_typeを上書きしますのでご注意ください。
@@ -143,6 +144,7 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser
   - AdaFactor オプティマイザを追加しました。Toshiaki氏に感謝します。
   - 追加のスケジューラ設定(num_cycles等)が ``train_network.py`` 以外の学習スクリプトでも使えるようになりました。
   - 勾配クリップ時の最大normを指定する ``--max_grad_norm`` オプションを追加しました。``0.0``を指定するとクリップしなくなります。
+  - 各学習スクリプトでシンボリックリンクが読み込めるようになりました。TkskKurumi氏に感謝します。

 Please read [Releases](https://github.com/kohya-ss/sd-scripts/releases) for recent updates.
 最近の更新情報は [Release](https://github.com/kohya-ss/sd-scripts/releases) をご覧ください。
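For reference, a minimal sketch of how the options introduced across this patch series can be combined in one command. It only illustrates the flags; the model and data paths, network dim/alpha, step counts, and warmup value are placeholders rather than values taken from the patches, while the learning rates follow the D-Adaptation guidance quoted in the README section above (around 1.0, with the Text Encoder at half the U-Net rate):

```
accelerate launch --num_cpu_threads_per_process 1 train_network.py
    --pretrained_model_name_or_path=<base model> --train_data_dir=<train images> --output_dir=<output dir>
    --network_module=networks.lora --network_dim=16 --network_alpha=16
    --optimizer_type=DAdaptation --learning_rate=1.0 --unet_lr=1.0 --text_encoder_lr=0.5
    --lr_scheduler=constant_with_warmup --lr_warmup_steps=100 --max_grad_norm=1.0
    --resolution=512,512 --train_batch_size=1 --max_train_steps=400
    --mixed_precision=fp16 --save_model_as=safetensors --xformers
```

If automatic learning-rate adjustment is not wanted, the AdaFactor combination quoted in the README above (``--optimizer_type=adafactor --optimizer_args "relative_step=False" --lr_scheduler="constant_with_warmup" --max_grad_norm=0.0``) can be substituted for the optimizer-related arguments.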