From 8544e219b087afd3aa883921d0c1adbabb003261 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Sun, 15 Jan 2023 17:29:42 +0900 Subject: [PATCH 01/44] Fix typos --- README-ja.md | 2 +- README.md | 4 ++-- gen_img_diffusers.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README-ja.md b/README-ja.md index 95327b36..f1106960 100644 --- a/README-ja.md +++ b/README-ja.md @@ -99,7 +99,7 @@ accelerate configの質問には以下のように答えてください。(bf1 ``` ※場合によって ``ValueError: fp16 mixed precision requires a GPU`` というエラーが出ることがあるようです。この場合、6番目の質問( -``What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:``)に「0」と答えてください。(id `0`のGPUが使われます。) +``What GPU(s) (by id) should be used for training on this machine as a comma-separated list? [all]:``)に「0」と答えてください。(id `0`のGPUが使われます。) ## アップグレード diff --git a/README.md b/README.md index 3f3ecf79..5f1b15ce 100644 --- a/README.md +++ b/README.md @@ -92,8 +92,8 @@ Answers to accelerate config: - fp16 ``` -note: Some user reports ``ValueError: fp16 mixed precision requires a GPU`` is occured in training. In this case, answer `0` for the 6th question: -``What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]:`` +note: Some user reports ``ValueError: fp16 mixed precision requires a GPU`` is occurred in training. In this case, answer `0` for the 6th question: +``What GPU(s) (by id) should be used for training on this machine as a comma-separated list? [all]:`` (Single GPU with id `0` will be used.) 
diff --git a/gen_img_diffusers.py b/gen_img_diffusers.py index 4edfe0b2..7b4ef2e5 100644 --- a/gen_img_diffusers.py +++ b/gen_img_diffusers.py @@ -2518,9 +2518,9 @@ if __name__ == '__main__': parser.add_argument("--bf16", action='store_true', help='use bfloat16 / bfloat16を指定し省メモリ化する') parser.add_argument("--xformers", action='store_true', help='use xformers / xformersを使用し高速化する') parser.add_argument("--diffusers_xformers", action='store_true', - help='use xformers by diffusers (Hypernetworks doen\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)') + help='use xformers by diffusers (Hypernetworks doesn\'t work) / Diffusersでxformersを使用する(Hypernetwork利用不可)') parser.add_argument("--opt_channels_last", action='store_true', - help='set channels last option to model / モデルにchannles lastを指定し最適化する') + help='set channels last option to model / モデルにchannels lastを指定し最適化する') parser.add_argument("--network_module", type=str, default=None, nargs='*', help='Hypernetwork module to use / Hypernetworkを使う時そのモジュール名') parser.add_argument("--network_weights", type=str, default=None, nargs='*', From df9cb2f11c4706558fe2aaa329cf70758e1e03e6 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Sun, 15 Jan 2023 17:52:22 +0900 Subject: [PATCH 02/44] Add --save_last_n_epochs_model and --save_last_n_epochs_state --- library/train_util.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 57ebf1b0..bd59d831 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1029,6 +1029,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--save_every_n_epochs", type=int, default=None, help="save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する") parser.add_argument("--save_last_n_epochs", type=int, default=None, help="save last N checkpoints / 最大Nエポック保存する") + parser.add_argument("--save_last_n_epochs_model", type=int, default=None, help="save last N 
checkpoints / 最大Nエポックモデル保存する") + parser.add_argument("--save_last_n_epochs_state", type=int, default=None, help="save last N checkpoints / 最大Nエポックstate保存する") parser.add_argument("--save_state", action="store_true", help="save training state additionally (including optimizer states etc.) / optimizerなど学習状態も含めたstateを追加で保存する") parser.add_argument("--resume", type=str, default=None, help="saved state to resume training / 学習再開するモデルのstate") @@ -1303,10 +1305,11 @@ def save_on_epoch_end(args: argparse.Namespace, save_func, remove_old_func, epoc os.makedirs(args.output_dir, exist_ok=True) save_func() - if args.save_last_n_epochs is not None: - remove_epoch_no = epoch_no - args.save_every_n_epochs * args.save_last_n_epochs + last_n_epoch = args.save_last_n_epochs_model if args.save_last_n_epochs_model else args.save_last_n_epochs + if last_n_epoch is not None: + remove_epoch_no = epoch_no - args.save_every_n_epochs * last_n_epoch remove_old_func(remove_epoch_no) - return saving, remove_epoch_no + return saving def save_sd_model_on_epoch_end(args: argparse.Namespace, accelerator, src_path: str, save_stable_diffusion_format: bool, use_safetensors: bool, save_dtype: torch.dtype, epoch: int, num_train_epochs: int, global_step: int, text_encoder, unet, vae): @@ -1346,14 +1349,20 @@ def save_sd_model_on_epoch_end(args: argparse.Namespace, accelerator, src_path: save_func = save_du remove_old_func = remove_du - saving, remove_epoch_no = save_on_epoch_end(args, save_func, remove_old_func, epoch_no, num_train_epochs) + saving = save_on_epoch_end(args, save_func, remove_old_func, epoch_no, num_train_epochs) if saving and args.save_state: - save_state_on_epoch_end(args, accelerator, model_name, epoch_no, remove_epoch_no) + save_state_on_epoch_end(args, accelerator, model_name, epoch_no) -def save_state_on_epoch_end(args: argparse.Namespace, accelerator, model_name, epoch_no, remove_epoch_no): +def save_state_on_epoch_end(args: argparse.Namespace, accelerator, model_name, epoch_no): 
print("saving state.") accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(model_name, epoch_no))) + + remove_epoch_no = None + last_n_epoch = args.save_last_n_epochs_model if args.save_last_n_epochs_state else args.save_last_n_epochs + if last_n_epoch is not None: + remove_epoch_no = epoch_no - args.save_every_n_epochs * last_n_epoch + if remove_epoch_no is not None: state_dir_old = os.path.join(args.output_dir, EPOCH_STATE_NAME.format(model_name, remove_epoch_no)) if os.path.exists(state_dir_old): From d30ea7966d5aaca379e0a00f3178fdcc62d8fe2c Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Sun, 15 Jan 2023 17:56:49 +0900 Subject: [PATCH 03/44] Updated help --- library/train_util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index bd59d831..6170782b 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1029,8 +1029,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--save_every_n_epochs", type=int, default=None, help="save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する") parser.add_argument("--save_last_n_epochs", type=int, default=None, help="save last N checkpoints / 最大Nエポック保存する") - parser.add_argument("--save_last_n_epochs_model", type=int, default=None, help="save last N checkpoints / 最大Nエポックモデル保存する") - parser.add_argument("--save_last_n_epochs_state", type=int, default=None, help="save last N checkpoints / 最大Nエポックstate保存する") + parser.add_argument("--save_last_n_epochs_model", type=int, default=None, help="save last N checkpoints of model (overrides the value of --save_last_n_epochs) / 最大Nエポックモデルを保存する(--save_last_n_epochsの指定を上書きします)") + parser.add_argument("--save_last_n_epochs_state", type=int, default=None, help="save last N checkpoints of state (overrides the value of --save_last_n_epochs)/ 最大Nエポックstateを保存する(--save_last_n_epochsの指定を上書きします)") parser.add_argument("--save_state", 
action="store_true", help="save training state additionally (including optimizer states etc.) / optimizerなど学習状態も含めたstateを追加で保存する") parser.add_argument("--resume", type=str, default=None, help="saved state to resume training / 学習再開するモデルのstate") From a8882238698f7b69e693ded173b31411f48c8034 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Sun, 15 Jan 2023 18:02:17 +0900 Subject: [PATCH 04/44] Fix a bug --- library/train_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/train_util.py b/library/train_util.py index 6170782b..b9c4199b 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1359,7 +1359,7 @@ def save_state_on_epoch_end(args: argparse.Namespace, accelerator, model_name, e accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(model_name, epoch_no))) remove_epoch_no = None - last_n_epoch = args.save_last_n_epochs_model if args.save_last_n_epochs_state else args.save_last_n_epochs + last_n_epoch = args.save_last_n_epochs_state if args.save_last_n_epochs_state else args.save_last_n_epochs if last_n_epoch is not None: remove_epoch_no = epoch_no - args.save_every_n_epochs * last_n_epoch From c6e28faa576701c6cd04e2abc2a356008f133997 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Sun, 15 Jan 2023 19:43:37 +0900 Subject: [PATCH 05/44] Save state when args.save_last_n_epochs_state is designated --- library/train_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/train_util.py b/library/train_util.py index b9c4199b..63444f00 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1350,7 +1350,7 @@ def save_sd_model_on_epoch_end(args: argparse.Namespace, accelerator, src_path: remove_old_func = remove_du saving = save_on_epoch_end(args, save_func, remove_old_func, epoch_no, num_train_epochs) - if saving and args.save_state: + if saving and args.save_state or args.save_last_n_epochs_state is not None: save_state_on_epoch_end(args, accelerator, model_name, 
epoch_no) From 3815b82bef06ff3015b11542bd672ad768378dc1 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Mon, 16 Jan 2023 21:02:27 +0900 Subject: [PATCH 06/44] Removed --save_last_n_epochs_model --- library/train_util.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 63444f00..aee762d5 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1029,7 +1029,6 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--save_every_n_epochs", type=int, default=None, help="save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する") parser.add_argument("--save_last_n_epochs", type=int, default=None, help="save last N checkpoints / 最大Nエポック保存する") - parser.add_argument("--save_last_n_epochs_model", type=int, default=None, help="save last N checkpoints of model (overrides the value of --save_last_n_epochs) / 最大Nエポックモデルを保存する(--save_last_n_epochsの指定を上書きします)") parser.add_argument("--save_last_n_epochs_state", type=int, default=None, help="save last N checkpoints of state (overrides the value of --save_last_n_epochs)/ 最大Nエポックstateを保存する(--save_last_n_epochsの指定を上書きします)") parser.add_argument("--save_state", action="store_true", help="save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する") @@ -1305,9 +1304,8 @@ def save_on_epoch_end(args: argparse.Namespace, save_func, remove_old_func, epoc os.makedirs(args.output_dir, exist_ok=True) save_func() - last_n_epoch = args.save_last_n_epochs_model if args.save_last_n_epochs_model else args.save_last_n_epochs - if last_n_epoch is not None: - remove_epoch_no = epoch_no - args.save_every_n_epochs * last_n_epoch + if args.save_last_n_epochs is not None: + remove_epoch_no = epoch_no - args.save_every_n_epochs * args.save_last_n_epochs remove_old_func(remove_epoch_no) return saving From fda66db0d8f50db398fef6badb8dce092398a2de Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Tue, 17 Jan 2023 22:05:39 +0900 Subject: [PATCH 07/44] Update README.md Add about gradient checkpointing --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 5f1b15ce..dcb5fc3b 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,11 @@ This repository contains training, generation and utility scripts for Stable Dif ## Updates +- 17 Jan. 2023, 2023/1/17 + - __Important Notice__ + It seems that only a part of LoRA modules are trained when ``gradient_checkpointing`` is enabled. The cause is under investigation, but for the time being, please train without ``gradient_checkpointing``. + - __重要なお知らせ__ + ``gradient_checkpointing`` を有効にすると LoRA モジュールの一部しか学習されないようです。原因は調査中ですが当面は ``gradient_checkpointing`` を指定せずに学習してください。 - 15 Jan. 2023, 2023/1/15 - Added ``--max_train_epochs`` and ``--max_data_loader_n_workers`` option for each training script. - If you specify the number of training epochs with ``--max_train_epochs``, the number of steps is calculated from the number of epochs automatically. 
From 3eb8fb187501352b54c4735b41c67128d3517ae3 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Wed, 18 Jan 2023 01:31:38 +0900 Subject: [PATCH 08/44] Make not to save state when args.save_state is False --- library/train_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/train_util.py b/library/train_util.py index aee762d5..3a7c2c8a 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1348,7 +1348,7 @@ def save_sd_model_on_epoch_end(args: argparse.Namespace, accelerator, src_path: remove_old_func = remove_du saving = save_on_epoch_end(args, save_func, remove_old_func, epoch_no, num_train_epochs) - if saving and args.save_state or args.save_last_n_epochs_state is not None: + if saving and args.save_state: save_state_on_epoch_end(args, accelerator, model_name, epoch_no) From de1dde1a06b5a591a376383d441a1710a743f1b1 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Tue, 17 Jan 2023 16:28:35 -0800 Subject: [PATCH 09/44] More helpful metadata - dataset/reg image dirs - random session ID - keep_tokens - training date - output name --- library/train_util.py | 4 ++++ train_network.py | 15 +++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 57ebf1b0..e3ff1a38 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -79,6 +79,8 @@ class BaseDataset(torch.utils.data.Dataset): self.debug_dataset = debug_dataset self.random_crop = random_crop self.token_padding_disabled = False + self.dataset_dirs = {} + self.reg_dataset_dirs = {} self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2 @@ -523,6 +525,7 @@ class DreamBoothDataset(BaseDataset): for img_path, caption in zip(img_paths, captions): info = ImageInfo(img_path, n_repeats, caption, False, img_path) self.register_image(info) + self.dataset_dirs[dir] = {"n_repeats": n_repeats, 
"img_count": len(img_paths)} print(f"{num_train_images} train images with repeating.") self.num_train_images = num_train_images @@ -539,6 +542,7 @@ class DreamBoothDataset(BaseDataset): for img_path, caption in zip(img_paths, captions): info = ImageInfo(img_path, n_repeats, caption, True, img_path) reg_infos.append(info) + self.reg_dataset_dirs[dir] = {"n_repeats": n_repeats, "img_count": len(img_paths)} print(f"{num_reg_images} reg images.") if num_train_images < num_reg_images: diff --git a/train_network.py b/train_network.py index c0a881ad..098145f2 100644 --- a/train_network.py +++ b/train_network.py @@ -3,6 +3,9 @@ import argparse import gc import math import os +import random +import time +import json from tqdm import tqdm import torch @@ -19,6 +22,8 @@ def collate_fn(examples): def train(args): + session_id = random.randint(0, 2**32) + training_started_at = time.time() train_util.verify_training_args(args) train_util.prepare_dataset_args(args, True) @@ -203,10 +208,13 @@ def train(args): print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}") metadata = { + "ss_session_id": session_id, # random integer indicating which group of epochs the model came from + "ss_training_started_at": training_started_at, # unix timestamp + "ss_output_name": args.output_name, "ss_learning_rate": args.learning_rate, "ss_text_encoder_lr": args.text_encoder_lr, "ss_unet_lr": args.unet_lr, - "ss_num_train_images": train_dataset.num_train_images, # includes repeating TODO more detailed data + "ss_num_train_images": train_dataset.num_train_images, # includes repeating "ss_num_reg_images": train_dataset.num_reg_images, "ss_num_batches_per_epoch": len(train_dataloader), "ss_num_epochs": num_train_epochs, @@ -232,7 +240,10 @@ def train(args): "ss_enable_bucket": bool(train_dataset.enable_bucket), # TODO move to BaseDataset from DB/FT "ss_min_bucket_reso": args.min_bucket_reso, # TODO get from dataset "ss_max_bucket_reso": args.max_bucket_reso, - "ss_seed": args.seed + 
"ss_seed": args.seed, + "ss_keep_tokens": args.keep_tokens, + "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs), + "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs), } # uncomment if another network is added From 303c3410e26916d84a39e6ad04a295b610a6629b Mon Sep 17 00:00:00 2001 From: michaelgzhang <49577754+mgz-dev@users.noreply.github.com> Date: Wed, 18 Jan 2023 13:10:13 -0600 Subject: [PATCH 10/44] expand details in tensorboard logs - Update tensorboard logging to track both unet and textencoder learning rates - Update tensorboard logging to track both current and moving average epoch loss - Clean up tensorboard log variable names for dashboard formatting --- library/train_util.py | 12 ++++++++++++ train_network.py | 11 ++++++----- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 57ebf1b0..eb11d6fb 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1388,5 +1388,17 @@ def save_state_on_train_end(args: argparse.Namespace, accelerator): model_name = DEFAULT_LAST_OUTPUT_NAME if args.output_name is None else args.output_name accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME.format(model_name))) +def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_scheduler): + logs = {"loss/current": current_loss, "loss/average": avr_loss} + + if args.network_train_unet_only: + logs["lr/unet"] = lr_scheduler.get_last_lr()[0] + elif args.network_train_text_encoder_only: + logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] + else: + logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] + logs["lr/unet"] = lr_scheduler.get_last_lr()[-1] + + return logs # endregion diff --git a/train_network.py b/train_network.py index c0a881ad..cac63295 100644 --- a/train_network.py +++ b/train_network.py @@ -330,20 +330,21 @@ def train(args): global_step += 1 current_loss = loss.detach().item() - if args.logging_dir is not None: - logs = {"loss": 
current_loss, "lr": lr_scheduler.get_last_lr()[0]} - accelerator.log(logs, step=global_step) - loss_total += current_loss avr_loss = loss_total / (step+1) logs = {"loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) + if args.logging_dir is not None: + logs = train_util.generate_step_logs(args, current_loss, avr_loss, lr_scheduler) + + accelerator.log(logs, step=global_step) + if global_step >= args.max_train_steps: break if args.logging_dir is not None: - logs = {"epoch_loss": loss_total / len(train_dataloader)} + logs = {"loss/epoch": loss_total / len(train_dataloader)} accelerator.log(logs, step=epoch+1) accelerator.wait_for_everyone() From da48f74e7bce80c4a708a0328e10f00dc9fdbe0a Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Wed, 18 Jan 2023 23:00:16 -0800 Subject: [PATCH 11/44] Add new version model/VAE hash to training metadata --- library/train_util.py | 15 ++++++++++++++- train_network.py | 2 ++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/library/train_util.py b/library/train_util.py index e3ff1a38..59bd2a03 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -11,6 +11,7 @@ import glob import math import os import random +import hashlib from tqdm import tqdm import torch @@ -753,9 +754,9 @@ def default(val, d): def model_hash(filename): + """Old model hash used by stable-diffusion-webui""" try: with open(filename, "rb") as file: - import hashlib m = hashlib.sha256() file.seek(0x100000) @@ -765,6 +766,18 @@ def model_hash(filename): return 'NOFILE' +def calculate_sha256(filename): + """New model hash used by stable-diffusion-webui""" + hash_sha256 = hashlib.sha256() + blksize = 1024 * 1024 + + with open(filename, "rb") as f: + for chunk in iter(lambda: f.read(blksize), b""): + hash_sha256.update(chunk) + + return hash_sha256.hexdigest() + + # flash attention forwards and backwards # https://arxiv.org/abs/2205.14135 diff --git 
a/train_network.py b/train_network.py index 098145f2..c759e66b 100644 --- a/train_network.py +++ b/train_network.py @@ -254,6 +254,7 @@ def train(args): sd_model_name = args.pretrained_model_name_or_path if os.path.exists(sd_model_name): metadata["ss_sd_model_hash"] = train_util.model_hash(sd_model_name) + metadata["ss_new_sd_model_hash"] = train_util.calculate_sha256(sd_model_name) sd_model_name = os.path.basename(sd_model_name) metadata["ss_sd_model_name"] = sd_model_name @@ -261,6 +262,7 @@ def train(args): vae_name = args.vae if os.path.exists(vae_name): metadata["ss_vae_hash"] = train_util.model_hash(vae_name) + metadata["ss_new_vae_hash"] = train_util.calculate_sha256(vae_name) vae_name = os.path.basename(vae_name) metadata["ss_vae_name"] = vae_name From e6a8c9d269b4952a6944dfe0e78a1f89bd036971 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Thu, 19 Jan 2023 20:39:33 +0900 Subject: [PATCH 12/44] Fix some LoRA not trained if gradient checkpointing --- train_network.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/train_network.py b/train_network.py index c0a881ad..03fd01e7 100644 --- a/train_network.py +++ b/train_network.py @@ -166,6 +166,9 @@ def train(args): if args.gradient_checkpointing: # according to TI example in Diffusers, train is required unet.train() text_encoder.train() + + # set top parameter requires_grad = True for gradient checkpointing works + text_encoder.text_model.embeddings.requires_grad_(True) else: unet.eval() text_encoder.eval() From 758323532b843e88d3e0fa782986d477a3b7e956 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Thu, 19 Jan 2023 20:59:45 +0900 Subject: [PATCH 13/44] add save_last_n_epochs_state to train_network --- library/train_util.py | 10 +++------- train_network.py | 4 ++-- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 3a7c2c8a..aa65dc3c 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1299,7 +1299,6 @@ def get_epoch_ckpt_name(args: 
argparse.Namespace, use_safetensors, epoch): def save_on_epoch_end(args: argparse.Namespace, save_func, remove_old_func, epoch_no: int, num_train_epochs: int): saving = epoch_no % args.save_every_n_epochs == 0 and epoch_no < num_train_epochs - remove_epoch_no = None if saving: os.makedirs(args.output_dir, exist_ok=True) save_func() @@ -1356,12 +1355,9 @@ def save_state_on_epoch_end(args: argparse.Namespace, accelerator, model_name, e print("saving state.") accelerator.save_state(os.path.join(args.output_dir, EPOCH_STATE_NAME.format(model_name, epoch_no))) - remove_epoch_no = None - last_n_epoch = args.save_last_n_epochs_state if args.save_last_n_epochs_state else args.save_last_n_epochs - if last_n_epoch is not None: - remove_epoch_no = epoch_no - args.save_every_n_epochs * last_n_epoch - - if remove_epoch_no is not None: + last_n_epochs = args.save_last_n_epochs_state if args.save_last_n_epochs_state else args.save_last_n_epochs + if last_n_epochs is not None: + remove_epoch_no = epoch_no - args.save_every_n_epochs * last_n_epochs state_dir_old = os.path.join(args.output_dir, EPOCH_STATE_NAME.format(model_name, remove_epoch_no)) if os.path.exists(state_dir_old): print(f"removing old state: {state_dir_old}") diff --git a/train_network.py b/train_network.py index 03fd01e7..b2c7b579 100644 --- a/train_network.py +++ b/train_network.py @@ -367,9 +367,9 @@ def train(args): print(f"removing old checkpoint: {old_ckpt_file}") os.remove(old_ckpt_file) - saving, remove_epoch_no = train_util.save_on_epoch_end(args, save_func, remove_old_func, epoch + 1, num_train_epochs) + saving = train_util.save_on_epoch_end(args, save_func, remove_old_func, epoch + 1, num_train_epochs) if saving and args.save_state: - train_util.save_state_on_epoch_end(args, accelerator, model_name, epoch + 1, remove_epoch_no) + train_util.save_state_on_epoch_end(args, accelerator, model_name, epoch + 1) # end of epoch From 687044519b6c4f6166145b20cba2d7f2e1df9b8a Mon Sep 17 00:00:00 2001 From: Kohya S 
Date: Thu, 19 Jan 2023 21:43:34 +0900 Subject: [PATCH 14/44] Fix TE training stops at max steps if ecpochs set --- train_db.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/train_db.py b/train_db.py index bbef3da7..8ac503ea 100644 --- a/train_db.py +++ b/train_db.py @@ -92,10 +92,7 @@ def train(args): gc.collect() # 学習を準備する:モデルを適切な状態にする - if args.stop_text_encoder_training is None: - args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end - - train_text_encoder = args.stop_text_encoder_training >= 0 + train_text_encoder = args.stop_text_encoder_training is None or args.stop_text_encoder_training >= 0 unet.requires_grad_(True) # 念のため追加 text_encoder.requires_grad_(train_text_encoder) if not train_text_encoder: @@ -143,6 +140,9 @@ def train(args): args.max_train_steps = args.max_train_epochs * len(train_dataloader) print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") + if args.stop_text_encoder_training is None: + args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end + # lr schedulerを用意する lr_scheduler = diffusers.optimization.get_scheduler( args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, num_training_steps=args.max_train_steps) From 943eae12118f1cb2ddb6c96c038103e777c833b1 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Thu, 19 Jan 2023 22:04:16 +0900 Subject: [PATCH 15/44] Add LoRA weights checking script --- networks/check_lora_weights.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 networks/check_lora_weights.py diff --git a/networks/check_lora_weights.py b/networks/check_lora_weights.py new file mode 100644 index 00000000..1140e3b3 --- /dev/null +++ b/networks/check_lora_weights.py @@ -0,0 +1,31 @@ +import argparse +import os +import torch +from safetensors.torch import load_file + + +def main(file): + print(f"loading: {file}") + if os.path.splitext(file)[1] == 
'.safetensors': + sd = load_file(file) + else: + sd = torch.load(file, map_location='cpu') + + values = [] + + keys = list(sd.keys()) + for key in keys: + if 'lora_up' in key: + values.append((key, sd[key])) + print(f"number of LoRA-up modules: {len(values)}") + + for key, value in values: + print(f"{key},{torch.mean(torch.abs(value))}") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("file", type=str, help="model file to check / 重みを確認するモデルファイル") + args = parser.parse_args() + + main(args.file) From cae42728aba035a7ac305a40c75954530c3c3444 Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Thu, 19 Jan 2023 22:21:11 +0900 Subject: [PATCH 16/44] Update README.md --- README.md | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index dcb5fc3b..659e8c07 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,48 @@ This repository contains training, generation and utility scripts for Stable Dif ## Updates +- 19 Jan. 2023, 2023/1/19 + - Fix a part of LoRA modules are not trained when ``gradient_checkpointing`` is enabled. + - Add ``--save_last_n_epochs_state`` option. You can specify how many state folders to keep, apart from how many models to keep. Thanks to shirayu! + - Fix Text Encoder training stops at ``max_train_steps`` even if ``max_train_epochs`` is set in `train_db.py``. + - Added script to check LoRA weights. You can check weights by ``python networks\check_lora_weights.py ``. If some modules are not trained, the value is ``0.0`` like following. + - ``lora_te_text_model_encoder_layers_11_*`` is not trained with ``clip_skip=2``, so ``0.0`` is okay for these modules. 
+ - 一部のLoRAモジュールが ``gradient_checkpointing`` を有効にすると学習されない不具合を修正しました。ご不便をおかけしました。 + - ``--save_last_n_epochs_state`` オプションを追加しました。モデルの保存数とは別に、stateフォルダの保存数を指定できます。shirayu氏に感謝します。 + - ``train_db.py`` で、``max_train_epochs`` を指定していても、``max_train_steps`` のステップでText Encoderの学習が停止してしまう不具合を修正しました。 + - LoRAの重みをチェックするスクリプトを追加してあります。``python networks\check_lora_weights.py `` のように実行してください。学習していない重みがあると、値が 下のように ``0.0`` になります。 + - ``lora_te_text_model_encoder_layers_11_`` で始まる部分は ``clip_skip=2`` の場合は学習されないため、``0.0`` で正常です。 + +- example result of ``check_lora_weights.py``, Text Encoder and a part of U-Net are not trained: +``` +number of LoRA-up modules: 264 +lora_te_text_model_encoder_layers_0_mlp_fc1.lora_up.weight,0.0 +lora_te_text_model_encoder_layers_0_mlp_fc2.lora_up.weight,0.0 +lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_up.weight,0.0 +: +lora_unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj.lora_up.weight,0.0 +lora_unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2.lora_up.weight,0.0 +lora_unet_mid_block_attentions_0_proj_in.lora_up.weight,0.003503334941342473 +lora_unet_mid_block_attentions_0_proj_out.lora_up.weight,0.004308608360588551 +: +``` + +- all modules are trained: +``` +number of LoRA-up modules: 264 +lora_te_text_model_encoder_layers_0_mlp_fc1.lora_up.weight,0.0028684409335255623 +lora_te_text_model_encoder_layers_0_mlp_fc2.lora_up.weight,0.0029794853180646896 +lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_up.weight,0.002507600700482726 +lora_te_text_model_encoder_layers_0_self_attn_out_proj.lora_up.weight,0.002639499492943287 +: +``` + - 17 Jan. 2023, 2023/1/17 - - __Important Notice__ - It seems that only a part of LoRA modules are trained when ``gradient_checkpointing`` is enabled. The cause is under investigation, but for the time being, please train without ``gradient_checkpointing``. 
+ - __Important Notice__ + It seems that only a part of LoRA modules are trained when ``gradient_checkpointing`` is enabled. The cause is under investigation, but for the time being, please train without ``gradient_checkpointing``. __The issue is fixed now.__ - __重要なお知らせ__ - ``gradient_checkpointing`` を有効にすると LoRA モジュールの一部しか学習されないようです。原因は調査中ですが当面は ``gradient_checkpointing`` を指定せずに学習してください。 + ``gradient_checkpointing`` を有効にすると LoRA モジュールの一部しか学習されないようです。原因は調査中ですが当面は ``gradient_checkpointing`` を指定せずに学習してください。__問題は修正されました。__ + - 15 Jan. 2023, 2023/1/15 - Added ``--max_train_epochs`` and ``--max_data_loader_n_workers`` option for each training script. - If you specify the number of training epochs with ``--max_train_epochs``, the number of steps is calculated from the number of epochs automatically. From 9ff26af68bdf40e3c9d8355f6fc4f802e94f58cb Mon Sep 17 00:00:00 2001 From: Kohya S Date: Sat, 21 Jan 2023 12:36:31 +0900 Subject: [PATCH 17/44] Update to add grad_ckpting etc to metadata --- library/train_util.py | 29 ++++++++++++++++++++++------- train_network.py | 11 ++++++----- 2 files changed, 28 insertions(+), 12 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 94175b98..0fdbadc1 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -80,8 +80,11 @@ class BaseDataset(torch.utils.data.Dataset): self.debug_dataset = debug_dataset self.random_crop = random_crop self.token_padding_disabled = False - self.dataset_dirs = {} - self.reg_dataset_dirs = {} + self.dataset_dirs_info = {} + self.reg_dataset_dirs_info = {} + self.enable_bucket = False + self.min_bucket_reso = None + self.max_bucket_reso = None self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2 @@ -466,6 +469,8 @@ class DreamBoothDataset(BaseDataset): assert max(resolution) <= max_bucket_reso, f"max_bucket_reso must be equal or greater than resolution / 
max_bucket_resoは最大解像度より小さくできません。解像度を小さくするかmin_bucket_resoを大きくしてください" self.bucket_resos, self.bucket_aspect_ratios = model_util.make_bucket_resolutions( (self.width, self.height), min_bucket_reso, max_bucket_reso) + self.min_bucket_reso = min_bucket_reso + self.max_bucket_reso = max_bucket_reso else: self.bucket_resos = [(self.width, self.height)] self.bucket_aspect_ratios = [self.width / self.height] @@ -526,7 +531,7 @@ class DreamBoothDataset(BaseDataset): for img_path, caption in zip(img_paths, captions): info = ImageInfo(img_path, n_repeats, caption, False, img_path) self.register_image(info) - self.dataset_dirs[dir] = {"n_repeats": n_repeats, "img_count": len(img_paths)} + self.dataset_dirs_info[os.path.basename(dir)] = {"n_repeats": n_repeats, "img_count": len(img_paths)} print(f"{num_train_images} train images with repeating.") self.num_train_images = num_train_images @@ -543,7 +548,7 @@ class DreamBoothDataset(BaseDataset): for img_path, caption in zip(img_paths, captions): info = ImageInfo(img_path, n_repeats, caption, True, img_path) reg_infos.append(info) - self.reg_dataset_dirs[dir] = {"n_repeats": n_repeats, "img_count": len(img_paths)} + self.reg_dataset_dirs_info[os.path.basename(dir)] = {"n_repeats": n_repeats, "img_count": len(img_paths)} print(f"{num_reg_images} reg images.") if num_train_images < num_reg_images: @@ -616,6 +621,8 @@ class FineTuningDataset(BaseDataset): self.num_train_images = len(metadata) * dataset_repeats self.num_reg_images = 0 + self.dataset_dirs_info[os.path.basename(self.train_data_dir)] = {"n_repeats": dataset_repeats, "img_count": len(metadata)} + # check existence of all npz files if not self.color_aug: npz_any = False @@ -658,6 +665,8 @@ class FineTuningDataset(BaseDataset): assert max(resolution) <= max_bucket_reso, f"max_bucket_reso must be equal or greater than resolution / max_bucket_resoは最大解像度より小さくできません。解像度を小さくするかmin_bucket_resoを大きくしてください" self.bucket_resos, self.bucket_aspect_ratios = 
model_util.make_bucket_resolutions( (self.width, self.height), min_bucket_reso, max_bucket_reso) + self.min_bucket_reso = min_bucket_reso + self.max_bucket_reso = max_bucket_reso else: self.bucket_resos = [(self.width, self.height)] self.bucket_aspect_ratios = [self.width / self.height] @@ -670,6 +679,9 @@ class FineTuningDataset(BaseDataset): self.bucket_resos.sort() self.bucket_aspect_ratios = [w / h for w, h in self.bucket_resos] + self.min_bucket_reso = min([min(reso) for reso in resos]) + self.max_bucket_reso = max([max(reso) for reso in resos]) + def image_key_to_npz_file(self, image_key): base_name = os.path.splitext(image_key)[0] npz_file_norm = base_name + '.npz' @@ -1046,7 +1058,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--save_every_n_epochs", type=int, default=None, help="save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する") parser.add_argument("--save_last_n_epochs", type=int, default=None, help="save last N checkpoints / 最大Nエポック保存する") - parser.add_argument("--save_last_n_epochs_state", type=int, default=None, help="save last N checkpoints of state (overrides the value of --save_last_n_epochs)/ 最大Nエポックstateを保存する(--save_last_n_epochsの指定を上書きします)") + parser.add_argument("--save_last_n_epochs_state", type=int, default=None, + help="save last N checkpoints of state (overrides the value of --save_last_n_epochs)/ 最大Nエポックstateを保存する(--save_last_n_epochsの指定を上書きします)") parser.add_argument("--save_state", action="store_true", help="save training state additionally (including optimizer states etc.) 
/ optimizerなど学習状態も含めたstateを追加で保存する") parser.add_argument("--resume", type=str, default=None, help="saved state to resume training / 学習再開するモデルのstate") @@ -1065,8 +1078,10 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率") parser.add_argument("--max_train_steps", type=int, default=1600, help="training steps / 学習ステップ数") - parser.add_argument("--max_train_epochs", type=int, default=None, help="training epochs (overrides max_train_steps) / 学習エポック数(max_train_stepsを上書きします)") - parser.add_argument("--max_data_loader_n_workers", type=int, default=8, help="max num workers for DataLoader (lower is less main RAM usage, faster epoch start and slower data loading) / DataLoaderの最大プロセス数(小さい値ではメインメモリの使用量が減りエポック間の待ち時間が減りますが、データ読み込みは遅くなります)") + parser.add_argument("--max_train_epochs", type=int, default=None, + help="training epochs (overrides max_train_steps) / 学習エポック数(max_train_stepsを上書きします)") + parser.add_argument("--max_data_loader_n_workers", type=int, default=8, + help="max num workers for DataLoader (lower is less main RAM usage, faster epoch start and slower data loading) / DataLoaderの最大プロセス数(小さい値ではメインメモリの使用量が減りエポック間の待ち時間が減りますが、データ読み込みは遅くなります)") parser.add_argument("--seed", type=int, default=None, help="random seed for training / 学習時の乱数のseed") parser.add_argument("--gradient_checkpointing", action="store_true", help="enable gradient checkpointing / grandient checkpointingを有効にする") diff --git a/train_network.py b/train_network.py index 73370ee2..8b4e008b 100644 --- a/train_network.py +++ b/train_network.py @@ -223,6 +223,7 @@ def train(args): "ss_num_epochs": num_train_epochs, "ss_batch_size_per_device": args.train_batch_size, "ss_total_batch_size": total_batch_size, + "ss_gradient_checkpointing": args.gradient_checkpointing, "ss_gradient_accumulation_steps": args.gradient_accumulation_steps, "ss_max_train_steps": args.max_train_steps, 
"ss_lr_warmup_steps": args.lr_warmup_steps, @@ -240,13 +241,13 @@ def train(args): "ss_random_crop": bool(args.random_crop), "ss_shuffle_caption": bool(args.shuffle_caption), "ss_cache_latents": bool(args.cache_latents), - "ss_enable_bucket": bool(train_dataset.enable_bucket), # TODO move to BaseDataset from DB/FT - "ss_min_bucket_reso": args.min_bucket_reso, # TODO get from dataset - "ss_max_bucket_reso": args.max_bucket_reso, + "ss_enable_bucket": bool(train_dataset.enable_bucket), + "ss_min_bucket_reso": train_dataset.min_bucket_reso, + "ss_max_bucket_reso": train_dataset.max_bucket_reso, "ss_seed": args.seed, "ss_keep_tokens": args.keep_tokens, - "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs), - "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs), + "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info), + "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info), } # uncomment if another network is added From 22ee0ac467ffc914fe174b7006308d6cbf7a6f63 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Sat, 21 Jan 2023 12:51:17 +0900 Subject: [PATCH 18/44] Move TE/UN loss calc to train script --- library/train_util.py | 12 ------------ train_network.py | 17 +++++++++++++++-- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index ffa099ef..0fdbadc1 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1423,17 +1423,5 @@ def save_state_on_train_end(args: argparse.Namespace, accelerator): model_name = DEFAULT_LAST_OUTPUT_NAME if args.output_name is None else args.output_name accelerator.save_state(os.path.join(args.output_dir, LAST_STATE_NAME.format(model_name))) -def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_scheduler): - logs = {"loss/current": current_loss, "loss/average": avr_loss} - - if args.network_train_unet_only: - logs["lr/unet"] = lr_scheduler.get_last_lr()[0] - elif args.network_train_text_encoder_only: - 
logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] - else: - logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] - logs["lr/unet"] = lr_scheduler.get_last_lr()[-1] - - return logs # endregion diff --git a/train_network.py b/train_network.py index bd45d980..70db4450 100644 --- a/train_network.py +++ b/train_network.py @@ -21,6 +21,20 @@ def collate_fn(examples): return examples[0] +def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_scheduler): + logs = {"loss/current": current_loss, "loss/average": avr_loss} + + if args.network_train_unet_only: + logs["lr/unet"] = lr_scheduler.get_last_lr()[0] + elif args.network_train_text_encoder_only: + logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] + else: + logs["lr/textencoder"] = lr_scheduler.get_last_lr()[0] + logs["lr/unet"] = lr_scheduler.get_last_lr()[-1] # may be same to textencoder + + return logs + + def train(args): session_id = random.randint(0, 2**32) training_started_at = time.time() @@ -353,8 +367,7 @@ def train(args): progress_bar.set_postfix(**logs) if args.logging_dir is not None: - logs = train_util.generate_step_logs(args, current_loss, avr_loss, lr_scheduler) - + logs = generate_step_logs(args, current_loss, avr_loss, lr_scheduler) accelerator.log(logs, step=global_step) if global_step >= args.max_train_steps: From b4636d4185d21485e8fae162f3ad9b8db02d4b36 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Sat, 21 Jan 2023 20:37:34 +0900 Subject: [PATCH 19/44] Add scaling alpha for LoRA --- gen_img_diffusers.py | 24 +++++++++----------- networks/lora.py | 53 +++++++++++++++++++++++++++++++++++--------- train_network.py | 8 +++++-- 3 files changed, 59 insertions(+), 26 deletions(-) diff --git a/gen_img_diffusers.py b/gen_img_diffusers.py index 7b4ef2e5..e025c74e 100644 --- a/gen_img_diffusers.py +++ b/gen_img_diffusers.py @@ -1981,7 +1981,6 @@ def main(args): imported_module = importlib.import_module(network_module) network_mul = 1.0 if args.network_mul is None or 
len(args.network_mul) <= i else args.network_mul[i] - network_dim = None if args.network_dim is None or len(args.network_dim) <= i else args.network_dim[i] net_kwargs = {} if args.network_args and i < len(args.network_args): @@ -1992,22 +1991,21 @@ def main(args): key, value = net_arg.split("=") net_kwargs[key] = value - network = imported_module.create_network(network_mul, network_dim, vae, text_encoder, unet, **net_kwargs) - if network is None: - return - if args.network_weights and i < len(args.network_weights): network_weight = args.network_weights[i] print("load network weights from:", network_weight) - if os.path.splitext(network_weight)[1] == '.safetensors': - from safetensors.torch import safe_open - with safe_open(network_weight, framework="pt") as f: - metadata = f.metadata() - if metadata is not None: - print(f"metadata for: {network_weight}: {metadata}") + from safetensors.torch import safe_open + with safe_open(network_weight, framework="pt") as f: + metadata = f.metadata() + if metadata is not None: + print(f"metadata for: {network_weight}: {metadata}") - network.load_weights(network_weight) + network = imported_module.create_network_from_weights(network_mul, network_weight, vae, text_encoder, unet, **net_kwargs) + else: + raise ValueError("No weight. 
Weight is required.") + if network is None: + return network.apply_to(text_encoder, unet) @@ -2526,8 +2524,6 @@ if __name__ == '__main__': parser.add_argument("--network_weights", type=str, default=None, nargs='*', help='Hypernetwork weights to load / Hypernetworkの重み') parser.add_argument("--network_mul", type=float, default=None, nargs='*', help='Hypernetwork multiplier / Hypernetworkの効果の倍率') - parser.add_argument("--network_dim", type=int, default=None, nargs='*', - help='network dimensions (depends on each network) / モジュールの次元数(ネットワークにより定義は異なります)') parser.add_argument("--network_args", type=str, default=None, nargs='*', help='additional argmuments for network (key=value) / ネットワークへの追加の引数') parser.add_argument("--clip_skip", type=int, default=None, help='layer number from bottom to use in CLIP / CLIPの後ろからn層目の出力を使う') diff --git a/networks/lora.py b/networks/lora.py index 3f8244e0..9243f1e1 100644 --- a/networks/lora.py +++ b/networks/lora.py @@ -13,9 +13,11 @@ class LoRAModule(torch.nn.Module): replaces forward method of the original Linear, instead of replacing the original Linear module. """ - def __init__(self, lora_name, org_module: torch.nn.Module, multiplier=1.0, lora_dim=4): + def __init__(self, lora_name, org_module: torch.nn.Module, multiplier=1.0, lora_dim=4, alpha=1): + """ if alpha == 0 or None, alpha is rank (no scaling). 
""" super().__init__() self.lora_name = lora_name + self.lora_dim = lora_dim if org_module.__class__.__name__ == 'Conv2d': in_dim = org_module.in_channels @@ -28,6 +30,12 @@ class LoRAModule(torch.nn.Module): self.lora_down = torch.nn.Linear(in_dim, lora_dim, bias=False) self.lora_up = torch.nn.Linear(lora_dim, out_dim, bias=False) + if type(alpha) == torch.Tensor: + alpha = alpha.detach().numpy() + alpha = lora_dim if alpha is None or alpha == 0 else alpha + self.scale = alpha / self.lora_dim + self.register_buffer('alpha', torch.tensor(alpha)) # 定数として扱える + # same as microsoft's torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5)) torch.nn.init.zeros_(self.lora_up.weight) @@ -41,13 +49,37 @@ class LoRAModule(torch.nn.Module): del self.org_module def forward(self, x): - return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier + return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale -def create_network(multiplier, network_dim, vae, text_encoder, unet, **kwargs): +def create_network(multiplier, network_dim, network_alpha, vae, text_encoder, unet, **kwargs): if network_dim is None: network_dim = 4 # default - network = LoRANetwork(text_encoder, unet, multiplier=multiplier, lora_dim=network_dim) + network = LoRANetwork(text_encoder, unet, multiplier=multiplier, lora_dim=network_dim, alpha=network_alpha) + return network + + +def create_network_from_weights(multiplier, file, vae, text_encoder, unet, **kwargs): + if os.path.splitext(file)[1] == '.safetensors': + from safetensors.torch import load_file, safe_open + weights_sd = load_file(file) + else: + weights_sd = torch.load(file, map_location='cpu') + + # get dim (rank) + network_alpha = None + network_dim = None + for key, value in weights_sd.items(): + if network_alpha is None and 'alpha' in key: + network_alpha = value + if network_dim is None and 'lora_down' in key and len(value.size()) == 2: + network_dim = value.size()[0] + + if 
network_alpha is None: + network_alpha = network_dim + + network = LoRANetwork(text_encoder, unet, multiplier=multiplier, lora_dim=network_dim, alpha=network_alpha) + network.weights_sd = weights_sd return network @@ -57,10 +89,11 @@ class LoRANetwork(torch.nn.Module): LORA_PREFIX_UNET = 'lora_unet' LORA_PREFIX_TEXT_ENCODER = 'lora_te' - def __init__(self, text_encoder, unet, multiplier=1.0, lora_dim=4) -> None: + def __init__(self, text_encoder, unet, multiplier=1.0, lora_dim=4, alpha=1) -> None: super().__init__() self.multiplier = multiplier self.lora_dim = lora_dim + self.alpha = alpha # create module instances def create_modules(prefix, root_module: torch.nn.Module, target_replace_modules) -> list[LoRAModule]: @@ -71,7 +104,7 @@ class LoRANetwork(torch.nn.Module): if child_module.__class__.__name__ == "Linear" or (child_module.__class__.__name__ == "Conv2d" and child_module.kernel_size == (1, 1)): lora_name = prefix + '.' + name + '.' + child_name lora_name = lora_name.replace('.', '_') - lora = LoRAModule(lora_name, child_module, self.multiplier, self.lora_dim) + lora = LoRAModule(lora_name, child_module, self.multiplier, self.lora_dim, self.alpha) loras.append(lora) return loras @@ -149,21 +182,21 @@ class LoRANetwork(torch.nn.Module): return params self.requires_grad_(True) - params = [] + all_params = [] if self.text_encoder_loras: param_data = {'params': enumerate_params(self.text_encoder_loras)} if text_encoder_lr is not None: param_data['lr'] = text_encoder_lr - params.append(param_data) + all_params.append(param_data) if self.unet_loras: param_data = {'params': enumerate_params(self.unet_loras)} if unet_lr is not None: param_data['lr'] = unet_lr - params.append(param_data) + all_params.append(param_data) - return params + return all_params def prepare_grad_etc(self, text_encoder, unet): self.requires_grad_(True) diff --git a/train_network.py b/train_network.py index 70db4450..88014ddb 100644 --- a/train_network.py +++ b/train_network.py @@ -107,7 
+107,8 @@ def train(args): key, value = net_arg.split('=') net_kwargs[key] = value - network = network_module.create_network(1.0, args.network_dim, vae, text_encoder, unet, **net_kwargs) + # if a new network is added in future, add if ~ then blocks for each network (;'∀') + network = network_module.create_network(1.0, args.network_dim, args.network_alpha, vae, text_encoder, unet, **net_kwargs) if network is None: return @@ -243,7 +244,8 @@ def train(args): "ss_lr_warmup_steps": args.lr_warmup_steps, "ss_lr_scheduler": args.lr_scheduler, "ss_network_module": args.network_module, - "ss_network_dim": args.network_dim, # None means default because another network than LoRA may have another default dim + "ss_network_dim": args.network_dim, # None means default because another network than LoRA may have another default dim + "ss_network_alpha": args.network_alpha, # some networks may not use this value "ss_mixed_precision": args.mixed_precision, "ss_full_fp16": bool(args.full_fp16), "ss_v2": bool(args.v2), @@ -445,6 +447,8 @@ if __name__ == '__main__': parser.add_argument("--network_module", type=str, default=None, help='network module to train / 学習対象のネットワークのモジュール') parser.add_argument("--network_dim", type=int, default=None, help='network dimensions (depends on each network) / モジュールの次元数(ネットワークにより定義は異なります)') + parser.add_argument("--network_alpha", type=float, default=1, + help='alpha for LoRA weight scaling, 0 for no scaling (same as old version) / LoRaの重み調整のalpha値、0で調整なし(旧バージョンと同じ)') parser.add_argument("--network_args", type=str, default=None, nargs='*', help='additional argmuments for network (key=value) / ネットワークへの追加の引数') parser.add_argument("--network_train_unet_only", action="store_true", help="only training U-Net part / U-Net関連部分のみ学習する") From 5e817e4343cdf839096b430877609c6f52749a30 Mon Sep 17 00:00:00 2001 From: forestsource Date: Sun, 22 Jan 2023 02:57:12 +0900 Subject: [PATCH 20/44] Add save_n_epoch_ratio --- fine_tune.py | 2 ++ library/train_util.py | 2 ++ 
train_db.py | 2 ++ train_network.py | 2 ++ 4 files changed, 8 insertions(+) diff --git a/fine_tune.py b/fine_tune.py index 02f665bd..8e615203 100644 --- a/fine_tune.py +++ b/fine_tune.py @@ -200,6 +200,8 @@ def train(args): # epoch数を計算する num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0): + args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1 # 学習する total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps diff --git a/library/train_util.py b/library/train_util.py index aa65dc3c..5ff0280e 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1028,6 +1028,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: choices=[None, "float", "fp16", "bf16"], help="precision in saving / 保存時に精度を変更して保存する") parser.add_argument("--save_every_n_epochs", type=int, default=None, help="save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する") + parser.add_argument("--save_n_epoch_ratio", type=int, default=None, + help="save checkpoint N epoch ratio / 学習中のモデルを指定のエポック割合で保存する") parser.add_argument("--save_last_n_epochs", type=int, default=None, help="save last N checkpoints / 最大Nエポック保存する") parser.add_argument("--save_last_n_epochs_state", type=int, default=None, help="save last N checkpoints of state (overrides the value of --save_last_n_epochs)/ 最大Nエポックstateを保存する(--save_last_n_epochsの指定を上書きします)") parser.add_argument("--save_state", action="store_true", diff --git a/train_db.py b/train_db.py index 8ac503ea..fe6fd4e6 100644 --- a/train_db.py +++ b/train_db.py @@ -176,6 +176,8 @@ def train(args): # epoch数を計算する num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) num_train_epochs = math.ceil(args.max_train_steps / 
num_update_steps_per_epoch) + if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0): + args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1 # 学習する total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps diff --git a/train_network.py b/train_network.py index b2c7b579..d3282da9 100644 --- a/train_network.py +++ b/train_network.py @@ -192,6 +192,8 @@ def train(args): # epoch数を計算する num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0): + args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1 # 学習する total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps From 6e279730cf476230a79fa0c10568047f1d7753f3 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Sun, 22 Jan 2023 10:44:29 +0900 Subject: [PATCH 21/44] Fix weights checking script to use float32 --- networks/check_lora_weights.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/networks/check_lora_weights.py b/networks/check_lora_weights.py index 1140e3b3..4ee3f575 100644 --- a/networks/check_lora_weights.py +++ b/networks/check_lora_weights.py @@ -15,12 +15,13 @@ def main(file): keys = list(sd.keys()) for key in keys: - if 'lora_up' in key: + if 'lora_up' in key or 'lora_down' in key: values.append((key, sd[key])) - print(f"number of LoRA-up modules: {len(values)}") + print(f"number of LoRA modules: {len(values)}") for key, value in values: - print(f"{key},{torch.mean(torch.abs(value))}") + value = value.to(torch.float32) + print(f"{key},{torch.mean(torch.abs(value))},{torch.min(torch.abs(value))}") if __name__ == '__main__': From d3bc5a14136e173ee184dde196b9f5d6b90a6a8b Mon Sep 17 00:00:00 2001 From: Kohya S 
<52813779+kohya-ss@users.noreply.github.com> Date: Sun, 22 Jan 2023 10:55:57 +0900 Subject: [PATCH 22/44] Update README.md --- README.md | 58 +++++++------------------------------------------------ 1 file changed, 7 insertions(+), 51 deletions(-) diff --git a/README.md b/README.md index 659e8c07..7df4f26c 100644 --- a/README.md +++ b/README.md @@ -2,58 +2,14 @@ This repository contains training, generation and utility scripts for Stable Dif ## Updates -- 19 Jan. 2023, 2023/1/19 - - Fix a part of LoRA modules are not trained when ``gradient_checkpointing`` is enabled. - - Add ``--save_last_n_epochs_state`` option. You can specify how many state folders to keep, apart from how many models to keep. Thanks to shirayu! - - Fix Text Encoder training stops at ``max_train_steps`` even if ``max_train_epochs`` is set in `train_db.py``. - - Added script to check LoRA weights. You can check weights by ``python networks\check_lora_weights.py ``. If some modules are not trained, the value is ``0.0`` like following. - - ``lora_te_text_model_encoder_layers_11_*`` is not trained with ``clip_skip=2``, so ``0.0`` is okay for these modules. - - 一部のLoRAモジュールが ``gradient_checkpointing`` を有効にすると学習されない不具合を修正しました。ご不便をおかけしました。 - - ``--save_last_n_epochs_state`` オプションを追加しました。モデルの保存数とは別に、stateフォルダの保存数を指定できます。shirayu氏に感謝します。 - - ``train_db.py`` で、``max_train_epochs`` を指定していても、``max_train_steps`` のステップでText Encoderの学習が停止してしまう不具合を修正しました。 - - LoRAの重みをチェックするスクリプトを追加してあります。``python networks\check_lora_weights.py `` のように実行してください。学習していない重みがあると、値が 下のように ``0.0`` になります。 - - ``lora_te_text_model_encoder_layers_11_`` で始まる部分は ``clip_skip=2`` の場合は学習されないため、``0.0`` で正常です。 +- 22 Jan. 2023, 2023/1/22 + - Fix script to check LoRA weights ``check_lora_weights.py``. Some layer weights were shown as ``0.0`` even if the layer is trained, because of the overflow of ``torch.mean``. Sorry for the confusion. 
+  - Now the script shows the mean of the absolute values of the weights, and the minimum of the absolute values of the weights.
+  - LoRAの重みをチェックするスクリプト ``check_lora_weights.py`` を修正しました。一部のレイヤーで学習されているにもかかわらず重みが ``0.0`` と表示されていました。混乱を招き申し訳ありません。
+  - スクリプトを「重みの絶対値の平均」と「重みの絶対値の最小値」を表示するよう修正しました。

-- example result of ``check_lora_weights.py``, Text Encoder and a part of U-Net are not trained:
-```
-number of LoRA-up modules: 264
-lora_te_text_model_encoder_layers_0_mlp_fc1.lora_up.weight,0.0
-lora_te_text_model_encoder_layers_0_mlp_fc2.lora_up.weight,0.0
-lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_up.weight,0.0
-:
-lora_unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_0_proj.lora_up.weight,0.0
-lora_unet_down_blocks_2_attentions_1_transformer_blocks_0_ff_net_2.lora_up.weight,0.0
-lora_unet_mid_block_attentions_0_proj_in.lora_up.weight,0.003503334941342473
-lora_unet_mid_block_attentions_0_proj_out.lora_up.weight,0.004308608360588551
-:
-```
-
-- all modules are trained:
-```
-number of LoRA-up modules: 264
-lora_te_text_model_encoder_layers_0_mlp_fc1.lora_up.weight,0.0028684409335255623
-lora_te_text_model_encoder_layers_0_mlp_fc2.lora_up.weight,0.0029794853180646896
-lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_up.weight,0.002507600700482726
-lora_te_text_model_encoder_layers_0_self_attn_out_proj.lora_up.weight,0.002639499492943287
-:
-```
-
-- 17 Jan. 2023, 2023/1/17
-  - __Important Notice__
-    It seems that only a part of LoRA modules are trained when ``gradient_checkpointing`` is enabled. The cause is under investigation, but for the time being, please train without ``gradient_checkpointing``. __The issue is fixed now.__
-  - __重要なお知らせ__
-    ``gradient_checkpointing`` を有効にすると LoRA モジュールの一部しか学習されないようです。原因は調査中ですが当面は ``gradient_checkpointing`` を指定せずに学習してください。__問題は修正されました。__
-
-- 15 Jan. 2023, 2023/1/15
-  - Added ``--max_train_epochs`` and ``--max_data_loader_n_workers`` option for each training script.
- - If you specify the number of training epochs with ``--max_train_epochs``, the number of steps is calculated from the number of epochs automatically. - - You can set the number of workers for DataLoader with ``--max_data_loader_n_workers``, default is 8. The lower number may reduce the main memory usage and the time between epochs, but may cause slower dataloading (training). - - ``--max_train_epochs`` と ``--max_data_loader_n_workers`` のオプションが学習スクリプトに追加されました。 - - ``--max_train_epochs`` で学習したいエポック数を指定すると、必要なステップ数が自動的に計算され設定されます。 - - ``--max_data_loader_n_workers`` で DataLoader の worker 数が指定できます(デフォルトは8)。値を小さくするとメインメモリの使用量が減り、エポック間の待ち時間も短くなるようです。ただしデータ読み込み(学習時間)は長くなる可能性があります。 - -Please read [release version 0.3.0](https://github.com/kohya-ss/sd-scripts/releases/tag/v0.3.0) for recent updates. -最近の更新情報は [release version 0.3.0](https://github.com/kohya-ss/sd-scripts/releases/tag/v0.3.0) をご覧ください。 +Please read [Releases](https://github.com/kohya-ss/sd-scripts/releases) for recent updates. +最近の更新情報は [Release](https://github.com/kohya-ss/sd-scripts/releases) をご覧ください。 ## From 8746188ed764d1a57d713a4a8345ef103992d66c Mon Sep 17 00:00:00 2001 From: Kohya S Date: Sun, 22 Jan 2023 18:33:19 +0900 Subject: [PATCH 23/44] Add traning_comment metadata. 
--- gen_img_diffusers.py | 11 ++++++----- train_network.py | 7 +++++-- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/gen_img_diffusers.py b/gen_img_diffusers.py index e025c74e..19c63acf 100644 --- a/gen_img_diffusers.py +++ b/gen_img_diffusers.py @@ -1995,11 +1995,12 @@ def main(args): network_weight = args.network_weights[i] print("load network weights from:", network_weight) - from safetensors.torch import safe_open - with safe_open(network_weight, framework="pt") as f: - metadata = f.metadata() - if metadata is not None: - print(f"metadata for: {network_weight}: {metadata}") + if model_util.is_safetensors(network_weight): + from safetensors.torch import safe_open + with safe_open(network_weight, framework="pt") as f: + metadata = f.metadata() + if metadata is not None: + print(f"metadata for: {network_weight}: {metadata}") network = imported_module.create_network_from_weights(network_mul, network_weight, vae, text_encoder, unet, **net_kwargs) else: diff --git a/train_network.py b/train_network.py index 88014ddb..393d8f9d 100644 --- a/train_network.py +++ b/train_network.py @@ -245,7 +245,7 @@ def train(args): "ss_lr_scheduler": args.lr_scheduler, "ss_network_module": args.network_module, "ss_network_dim": args.network_dim, # None means default because another network than LoRA may have another default dim - "ss_network_alpha": args.network_alpha, # some networks may not use this value + "ss_network_alpha": args.network_alpha, # some networks may not use this value "ss_mixed_precision": args.mixed_precision, "ss_full_fp16": bool(args.full_fp16), "ss_v2": bool(args.v2), @@ -264,6 +264,7 @@ def train(args): "ss_keep_tokens": args.keep_tokens, "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info), "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info), + "ss_training_comment": args.training_comment # will not be updated after training } # uncomment if another network is added @@ -448,12 +449,14 @@ if __name__ == '__main__': 
parser.add_argument("--network_dim", type=int, default=None, help='network dimensions (depends on each network) / モジュールの次元数(ネットワークにより定義は異なります)') parser.add_argument("--network_alpha", type=float, default=1, - help='alpha for LoRA weight scaling, 0 for no scaling (same as old version) / LoRaの重み調整のalpha値、0で調整なし(旧バージョンと同じ)') + help='alpha for LoRA weight scaling, default 1, 0 for no scaling (same as old version) / LoRaの重み調整のalpha値、デフォルト1、0で調整なし(旧バージョンと同じ)') parser.add_argument("--network_args", type=str, default=None, nargs='*', help='additional argmuments for network (key=value) / ネットワークへの追加の引数') parser.add_argument("--network_train_unet_only", action="store_true", help="only training U-Net part / U-Net関連部分のみ学習する") parser.add_argument("--network_train_text_encoder_only", action="store_true", help="only training Text Encoder part / Text Encoder関連部分のみ学習する") + parser.add_argument("--training_comment", type=str, default=None, + help="arbitrary comment string stored in metadata / メタデータに記録する任意のコメント文字列") args = parser.parse_args() train(args) From ddfe94b33bc47b7fbdbcff88de02e46a91b06fd9 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Sun, 22 Jan 2023 21:33:35 +0900 Subject: [PATCH 24/44] Update for alpha value --- networks/extract_lora_from_models.py | 24 ++++++++++------- networks/merge_lora.py | 40 +++++++++++++++++++++------- 2 files changed, 45 insertions(+), 19 deletions(-) diff --git a/networks/extract_lora_from_models.py b/networks/extract_lora_from_models.py index 0a4c3a00..84d705cf 100644 --- a/networks/extract_lora_from_models.py +++ b/networks/extract_lora_from_models.py @@ -44,9 +44,9 @@ def svd(args): print(f"loading SD model : {args.model_tuned}") text_encoder_t, _, unet_t = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.model_tuned) - # create LoRA network to extract weights - lora_network_o = lora.create_network(1.0, args.dim, None, text_encoder_o, unet_o) - lora_network_t = lora.create_network(1.0, args.dim, None, text_encoder_t, 
unet_t) + # create LoRA network to extract weights: Use dim (rank) as alpha + lora_network_o = lora.create_network(1.0, args.dim, args.dim, None, text_encoder_o, unet_o) + lora_network_t = lora.create_network(1.0, args.dim, args.dim, None, text_encoder_t, unet_t) assert len(lora_network_o.text_encoder_loras) == len( lora_network_t.text_encoder_loras), f"model version is different (SD1.x vs SD2.x) / それぞれのモデルのバージョンが違います(SD1.xベースとSD2.xベース) " @@ -77,10 +77,10 @@ def svd(args): module_t = lora_t.org_module diff = module_t.weight - module_o.weight diff = diff.float() - + if args.device: diff = diff.to(args.device) - + diffs[lora_name] = diff # make LoRA with svd @@ -116,6 +116,9 @@ def svd(args): print(f"LoRA has {len(lora_sd)} weights.") for key in list(lora_sd.keys()): + if "alpha" in key: + continue + lora_name = key.split('.')[0] i = 0 if "lora_up" in key else 1 @@ -124,7 +127,7 @@ def svd(args): if len(lora_sd[key].size()) == 4: weights = weights.unsqueeze(2).unsqueeze(3) - assert weights.size() == lora_sd[key].size() + assert weights.size() == lora_sd[key].size(), f"size unmatch: {key}" lora_sd[key] = weights # load state dict to LoRA and save it @@ -135,7 +138,10 @@ def svd(args): if dir_name and not os.path.exists(dir_name): os.makedirs(dir_name, exist_ok=True) - lora_network_o.save_weights(args.save_to, save_dtype, {}) + # minimum metadata + metadata = {"ss_network_dim": str(args.dim), "ss_network_alpha": str(args.dim)} + + lora_network_o.save_weights(args.save_to, save_dtype, metadata) print(f"LoRA weights are saved to: {args.save_to}") @@ -151,8 +157,8 @@ if __name__ == '__main__': help="Stable Diffusion tuned model, LoRA is difference of `original to tuned`: ckpt or safetensors file / 派生モデル(生成されるLoRAは元→派生の差分になります)、ckptまたはsafetensors") parser.add_argument("--save_to", type=str, default=None, help="destination file name: ckpt or safetensors file / 保存先のファイル名、ckptまたはsafetensors") - parser.add_argument("--dim", type=int, default=4, help="dimension of LoRA (default 
4) / LoRAの次元数(デフォルト4)") - parser.add_argument("--device", type=str, default=None, help="device to use, 'cuda' for GPU / 計算を行うデバイス、'cuda'でGPUを使う") + parser.add_argument("--dim", type=int, default=4, help="dimension (rank) of LoRA (default 4) / LoRAの次元数(rank)(デフォルト4)") + parser.add_argument("--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う") args = parser.parse_args() svd(args) diff --git a/networks/merge_lora.py b/networks/merge_lora.py index d873a8ef..1d4cb3b5 100644 --- a/networks/merge_lora.py +++ b/networks/merge_lora.py @@ -61,6 +61,7 @@ def merge_to_sd_model(text_encoder, unet, models, ratios, merge_dtype): for key in lora_sd.keys(): if "lora_down" in key: up_key = key.replace("lora_down", "lora_up") + alpha_key = key[:key.index("lora_down")] + 'alpha' # find original module for this lora module_name = '.'.join(key.split('.')[:-2]) # remove trailing ".lora_down.weight" @@ -73,14 +74,18 @@ def merge_to_sd_model(text_encoder, unet, models, ratios, merge_dtype): down_weight = lora_sd[key] up_weight = lora_sd[up_key] + dim = down_weight.size()[0] + alpha = lora_sd.get(alpha_key, dim) + scale = alpha / dim + # W <- W + U * D weight = module.weight if len(weight.size()) == 2: # linear - weight = weight + ratio * (up_weight @ down_weight) + weight = weight + ratio * (up_weight @ down_weight) * scale else: # conv2d - weight = weight + ratio * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3) + weight = weight + ratio * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3) * scale module.weight = torch.nn.Parameter(weight) @@ -88,20 +93,35 @@ def merge_to_sd_model(text_encoder, unet, models, ratios, merge_dtype): def merge_lora_models(models, ratios, merge_dtype): merged_sd = {} + alpha = None + dim = None for model, ratio in zip(models, ratios): print(f"loading: {model}") lora_sd = load_state_dict(model, merge_dtype) print(f"merging...") 
for key in lora_sd.keys(): - if key in merged_sd: - assert merged_sd[key].size() == lora_sd[key].size( - ), f"weights shape mismatch merging v1 and v2, different dims? / 重みのサイズが合いません。v1とv2、または次元数の異なるモデルはマージできません" - merged_sd[key] = merged_sd[key] + lora_sd[key] * ratio + if 'alpha' in key: + if key in merged_sd: + assert merged_sd[key] == lora_sd[key], f"alpha mismatch / alphaが異なる場合、現時点ではマージできません" + else: + alpha = lora_sd[key].detach().numpy() + merged_sd[key] = lora_sd[key] else: - merged_sd[key] = lora_sd[key] * ratio + if key in merged_sd: + assert merged_sd[key].size() == lora_sd[key].size( + ), f"weights shape mismatch merging v1 and v2, different dims? / 重みのサイズが合いません。v1とv2、または次元数の異なるモデルはマージできません" + merged_sd[key] = merged_sd[key] + lora_sd[key] * ratio + else: + if "lora_down" in key: + dim = lora_sd[key].size()[0] + merged_sd[key] = lora_sd[key] * ratio - return merged_sd + print(f"dim (rank): {dim}, alpha: {alpha}") + if alpha is None: + alpha = dim + + return merged_sd, dim, alpha def merge(args): @@ -132,7 +152,7 @@ def merge(args): model_util.save_stable_diffusion_checkpoint(args.v2, args.save_to, text_encoder, unet, args.sd_model, 0, 0, save_dtype, vae) else: - state_dict = merge_lora_models(args.models, args.ratios, merge_dtype) + state_dict, _, _ = merge_lora_models(args.models, args.ratios, merge_dtype) print(f"saving model to: {args.save_to}") save_to_file(args.save_to, state_dict, state_dict, save_dtype) @@ -145,7 +165,7 @@ if __name__ == '__main__': parser.add_argument("--save_precision", type=str, default=None, choices=[None, "float", "fp16", "bf16"], help="precision in saving, same to merging if omitted / 保存時に精度を変更して保存する、省略時はマージ時の精度と同じ") parser.add_argument("--precision", type=str, default="float", - choices=["float", "fp16", "bf16"], help="precision in merging / マージの計算時の精度") + choices=["float", "fp16", "bf16"], help="precision in merging (float is recommended) / マージの計算時の精度(floatを推奨)") parser.add_argument("--sd_model", type=str, default=None, 
help="Stable Diffusion model to load: ckpt or safetensors file, merge LoRA models if omitted / 読み込むモデル、ckptまたはsafetensors。省略時はLoRAモデル同士をマージする") parser.add_argument("--save_to", type=str, default=None, From a7218574f23eee53d36fba32eb1f18b64c49d9e3 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Sun, 22 Jan 2023 21:33:48 +0900 Subject: [PATCH 25/44] Update help message --- train_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train_network.py b/train_network.py index 393d8f9d..d60ae9a0 100644 --- a/train_network.py +++ b/train_network.py @@ -449,7 +449,7 @@ if __name__ == '__main__': parser.add_argument("--network_dim", type=int, default=None, help='network dimensions (depends on each network) / モジュールの次元数(ネットワークにより定義は異なります)') parser.add_argument("--network_alpha", type=float, default=1, - help='alpha for LoRA weight scaling, default 1, 0 for no scaling (same as old version) / LoRaの重み調整のalpha値、デフォルト1、0で調整なし(旧バージョンと同じ)') + help='alpha for LoRA weight scaling, default 1 (same as network_dim for same behavior as old version) / LoRaの重み調整のalpha値、デフォルト1(旧バージョンと同じ動作をするにはnetwork_dimと同じ値を指定)') parser.add_argument("--network_args", type=str, default=None, nargs='*', help='additional argmuments for network (key=value) / ネットワークへの追加の引数') parser.add_argument("--network_train_unet_only", action="store_true", help="only training U-Net part / U-Net関連部分のみ学習する") From 4eb356f165a9310a97ba675ccdbccef385f3c1f7 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Sun, 22 Jan 2023 21:33:58 +0900 Subject: [PATCH 26/44] Upate readme --- README.md | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 7df4f26c..14970cc0 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,33 @@ This repository contains training, generation and utility scripts for Stable Dif ## Updates -- 22 Jan. 2023, 2023/1/22 - - Fix script to check LoRA weights ``check_lora_weights.py``. 
Some layer weights were shown as ``0.0`` even if the layer is trained, because of the overflow of ``torch.mean``. Sorry for the confusion. - - Noe the script shows the mean of the absolute values of the weights, and the minimum of the absolute values of the weights. - - LoRAの重みをチェックするスクリプト ``check_lora_weights.py`` を修正しました。一部のレイヤーで学習されているにもかかわらず重みが ``0.0`` と表示されていました。混乱を招き申し訳ありません。 - - スクリプトを「重みの絶対の平均」と「重みの絶対値の最小値」を表示するよう修正しました。 +__Stable Diffusion web UI now seems to support LoRA trained by ``sd-scripts``.__ Thank you for great work!!! + +Note: Currently the LoRA models trained by release 0.4.0 does not seem to be supported. If you use Web UI native LoRA support, please use release 0.3.2 for now. + +The LoRA models for SD 2.x is not supported too. + +- 22 Jan. 2023 + - Add ``--network_alpha`` option to specify ``alpha`` value to prevent underflows for stable training. Thanks to CCRcmcpe! + - Details of the issue are described in https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 . + - The default value is ``1``, scale ``1 / rank (or dimension)``. Set same value as ``network_dim`` for same behavior to old version. + - Add logging for the learning rate for U-Net and Text Encoder independently, and for running average epoch loss. Thanks to mgz-dev! + - Add more metadata such as dataset/reg image dirs, session ID, output name etc... See #77 for details. Thanks to space-nuko! + - __Now the metadata includes the folder name (the basename of the folder contains image files, not fullpath).__ If you do not want it, disable metadata storing with ``--no_metadata`` option. + - Add ``--training_comment`` option. You can specify an arbitrary string and refer to it by the extension. 
+ +Stable Diffusion web UI本体で当リポジトリで学習したLoRAモデルによる画像生成がサポートされたようです。 + +注:現時点ではversion 0.4.0で学習したモデルはサポートされないようです。Web UI本体の生成機能を使う場合には、version 0.3.2を引き続きご利用ください。またSD2.x用のLoRAモデルもサポートされないようです。 + +- 2023/1/22 + - アンダーフローを防ぎ安定して学習するための ``alpha`` 値を指定する、``--network_alpha`` オプションを追加しました。CCRcmcpe 氏に感謝します。 + - 問題の詳細はこちらをご覧ください: https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 + - デフォルト値は ``1`` で、重みを ``1 / rank (dimension・次元数)`` します。``network_dim`` と同じ値を指定すると旧バージョンと同じ動作になります。 + - U-Net と Text Encoder のそれぞれの学習率、エポックの平均lossをログに記録するようになりました。mgz-dev 氏に感謝します。 + - 画像ディレクトリ、セッションID、出力名などいくつかの項目がメタデータに追加されました(詳細は #77 を参照)。space-nuko氏に感謝します。 + - __メタデータにフォルダ名が含まれるようになりました(画像を含むフォルダの名前のみで、フルパスではありません)。__ もし望まない場合には ``--no_metadata`` オプションでメタデータの記録を止めてください。 + - ``--training_comment`` オプションを追加しました。任意の文字列を指定でき、Web UI拡張から参照できます。 Please read [Releases](https://github.com/kohya-ss/sd-scripts/releases) for recent updates. 最近の更新情報は [Release](https://github.com/kohya-ss/sd-scripts/releases) をご覧ください。 From a3171714cec70ec08daa6ec8c9a934564ef2dd72 Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Sun, 22 Jan 2023 21:57:59 +0900 Subject: [PATCH 27/44] Update README.md --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 14970cc0..1dc11c94 100644 --- a/README.md +++ b/README.md @@ -12,8 +12,9 @@ The LoRA models for SD 2.x is not supported too. - Add ``--network_alpha`` option to specify ``alpha`` value to prevent underflows for stable training. Thanks to CCRcmcpe! - Details of the issue are described in https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 . - The default value is ``1``, scale ``1 / rank (or dimension)``. Set same value as ``network_dim`` for same behavior to old version. + - LoRA with a large dimension (rank) seems to require a higher learning rate with ``alpha=1`` (e.g. 1e-3 for 128-dim, still investigating).  
- Add logging for the learning rate for U-Net and Text Encoder independently, and for running average epoch loss. Thanks to mgz-dev! - - Add more metadata such as dataset/reg image dirs, session ID, output name etc... See #77 for details. Thanks to space-nuko! + - Add more metadata such as dataset/reg image dirs, session ID, output name etc... See https://github.com/kohya-ss/sd-scripts/pull/77 for details. Thanks to space-nuko! - __Now the metadata includes the folder name (the basename of the folder contains image files, not fullpath).__ If you do not want it, disable metadata storing with ``--no_metadata`` option. - Add ``--training_comment`` option. You can specify an arbitrary string and refer to it by the extension. @@ -25,8 +26,9 @@ Stable Diffusion web UI本体で当リポジトリで学習したLoRAモデル - アンダーフローを防ぎ安定して学習するための ``alpha`` 値を指定する、``--network_alpha`` オプションを追加しました。CCRcmcpe 氏に感謝します。 - 問題の詳細はこちらをご覧ください: https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 - デフォルト値は ``1`` で、重みを ``1 / rank (dimension・次元数)`` します。``network_dim`` と同じ値を指定すると旧バージョンと同じ動作になります。 + - ``alpha=1``の場合、次元数(rank)の多いLoRAモジュールでは学習率を高めにしたほうが良いようです(128次元で1e-3など)。 - U-Net と Text Encoder のそれぞれの学習率、エポックの平均lossをログに記録するようになりました。mgz-dev 氏に感謝します。 - - 画像ディレクトリ、セッションID、出力名などいくつかの項目がメタデータに追加されました(詳細は #77 を参照)。space-nuko氏に感謝します。 + - 画像ディレクトリ、セッションID、出力名などいくつかの項目がメタデータに追加されました(詳細は https://github.com/kohya-ss/sd-scripts/pull/77 を参照)。space-nuko氏に感謝します。 - __メタデータにフォルダ名が含まれるようになりました(画像を含むフォルダの名前のみで、フルパスではありません)。__ もし望まない場合には ``--no_metadata`` オプションでメタデータの記録を止めてください。 - ``--training_comment`` オプションを追加しました。任意の文字列を指定でき、Web UI拡張から参照できます。 From 0ca064287e7fabc74e7a82a981d1ab4ffe6341e9 Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Sun, 22 Jan 2023 22:03:15 +0900 Subject: [PATCH 28/44] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1dc11c94..18e3a20d 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 
@@ Stable Diffusion web UI本体で当リポジトリで学習したLoRAモデル - 2023/1/22 - アンダーフローを防ぎ安定して学習するための ``alpha`` 値を指定する、``--network_alpha`` オプションを追加しました。CCRcmcpe 氏に感謝します。 - 問題の詳細はこちらをご覧ください: https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 - - デフォルト値は ``1`` で、重みを ``1 / rank (dimension・次元数)`` します。``network_dim`` と同じ値を指定すると旧バージョンと同じ動作になります。 + - デフォルト値は ``1`` で、LoRAの計算結果を ``1 / rank (dimension・次元数)`` 倍します(つまり小さくなります。これにより同じ効果を出すために必要なLoRAの重みの変化が大きくなるため、アンダーフローが避けられるようになります)。``network_dim`` と同じ値を指定すると旧バージョンと同じ動作になります。 - ``alpha=1``の場合、次元数(rank)の多いLoRAモジュールでは学習率を高めにしたほうが良いようです(128次元で1e-3など)。 - U-Net と Text Encoder のそれぞれの学習率、エポックの平均lossをログに記録するようになりました。mgz-dev 氏に感謝します。 - 画像ディレクトリ、セッションID、出力名などいくつかの項目がメタデータに追加されました(詳細は https://github.com/kohya-ss/sd-scripts/pull/77 を参照)。space-nuko氏に感謝します。 From 4ba166797830cb29d631da9c7ef1465340ebbeb6 Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Sun, 22 Jan 2023 22:19:07 +0900 Subject: [PATCH 29/44] Update README.md --- README.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 18e3a20d..0f6c7db6 100644 --- a/README.md +++ b/README.md @@ -4,15 +4,14 @@ This repository contains training, generation and utility scripts for Stable Dif __Stable Diffusion web UI now seems to support LoRA trained by ``sd-scripts``.__ Thank you for great work!!! -Note: Currently the LoRA models trained by release 0.4.0 does not seem to be supported. If you use Web UI native LoRA support, please use release 0.3.2 for now. +Note: Currently the LoRA models trained by release v0.4.0 does not seem to be supported. If you use Web UI native LoRA support, please use release 0.3.2 for now. The LoRA models for SD 2.x is not supported too in Web UI. -The LoRA models for SD 2.x is not supported too. - -- 22 Jan. 2023 +- Release v0.4.0: 22 Jan. 2023 - Add ``--network_alpha`` option to specify ``alpha`` value to prevent underflows for stable training. Thanks to CCRcmcpe! 
- Details of the issue are described in https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 . - The default value is ``1``, scale ``1 / rank (or dimension)``. Set same value as ``network_dim`` for same behavior to old version. - LoRA with a large dimension (rank) seems to require a higher learning rate with ``alpha=1`` (e.g. 1e-3 for 128-dim, still investigating).  + - For generating images in Web UI, __the latest version of the extension ``sd-webui-additional-networks`` (v0.3.0 or later) is required for the models trained with this release or later.__ - Add logging for the learning rate for U-Net and Text Encoder independently, and for running average epoch loss. Thanks to mgz-dev! - Add more metadata such as dataset/reg image dirs, session ID, output name etc... See https://github.com/kohya-ss/sd-scripts/pull/77 for details. Thanks to space-nuko! - __Now the metadata includes the folder name (the basename of the folder contains image files, not fullpath).__ If you do not want it, disable metadata storing with ``--no_metadata`` option. 
@@ -22,11 +21,12 @@ Stable Diffusion web UI本体で当リポジトリで学習したLoRAモデル 注:現時点ではversion 0.4.0で学習したモデルはサポートされないようです。Web UI本体の生成機能を使う場合には、version 0.3.2を引き続きご利用ください。またSD2.x用のLoRAモデルもサポートされないようです。 -- 2023/1/22 +- Release 0.4.0: 2023/1/22 - アンダーフローを防ぎ安定して学習するための ``alpha`` 値を指定する、``--network_alpha`` オプションを追加しました。CCRcmcpe 氏に感謝します。 - 問題の詳細はこちらをご覧ください: https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 - デフォルト値は ``1`` で、LoRAの計算結果を ``1 / rank (dimension・次元数)`` 倍します(つまり小さくなります。これにより同じ効果を出すために必要なLoRAの重みの変化が大きくなるため、アンダーフローが避けられるようになります)。``network_dim`` と同じ値を指定すると旧バージョンと同じ動作になります。 - ``alpha=1``の場合、次元数(rank)の多いLoRAモジュールでは学習率を高めにしたほうが良いようです(128次元で1e-3など)。 + - __このバージョンのスクリプトで学習したモデルをWeb UIで使うためには ``sd-webui-additional-networks`` 拡張の最新版(v0.3.0以降)が必要となります。__ - U-Net と Text Encoder のそれぞれの学習率、エポックの平均lossをログに記録するようになりました。mgz-dev 氏に感謝します。 - 画像ディレクトリ、セッションID、出力名などいくつかの項目がメタデータに追加されました(詳細は https://github.com/kohya-ss/sd-scripts/pull/77 を参照)。space-nuko氏に感謝します。 - __メタデータにフォルダ名が含まれるようになりました(画像を含むフォルダの名前のみで、フルパスではありません)。__ もし望まない場合には ``--no_metadata`` オプションでメタデータの記録を止めてください。 @@ -87,7 +87,7 @@ Open a regular Powershell terminal and type the following inside: git clone https://github.com/kohya-ss/sd-scripts.git cd sd-scripts -python -m venv --system-site-packages venv +python -m venv venv .\venv\Scripts\activate pip install torch==1.12.1+cu116 torchvision==0.13.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 @@ -99,9 +99,10 @@ cp .\bitsandbytes_windows\cextension.py .\venv\Lib\site-packages\bitsandbytes\ce cp .\bitsandbytes_windows\main.py .\venv\Lib\site-packages\bitsandbytes\cuda_setup\main.py accelerate config - ``` +update: ``python -m venv venv`` is seemed to be safer than ``python -m venv --system-site-packages venv`` (some user have packages in global python). 
+ Answers to accelerate config: ```txt From 25f8ac731f9dff855eebfd339ddecbd0f561eb3c Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Sun, 22 Jan 2023 22:22:53 +0900 Subject: [PATCH 30/44] Update README-ja.md --- README-ja.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README-ja.md b/README-ja.md index f1106960..d8db9d36 100644 --- a/README-ja.md +++ b/README-ja.md @@ -16,9 +16,10 @@ GUIやPowerShellスクリプトなど、より使いやすくする機能が[bma 当リポジトリ内およびnote.comに記事がありますのでそちらをご覧ください(将来的にはすべてこちらへ移すかもしれません)。 -* note.com [環境整備とDreamBooth学習スクリプトについて](https://note.com/kohya_ss/n/nba4eceaa4594) +* [DreamBoothの学習について](./train_db_README-ja.md) * [fine-tuningのガイド](./fine_tune_README_ja.md): BLIPによるキャプショニングと、DeepDanbooruまたはWD14 taggerによるタグ付けを含みます +* [LoRAの学習について](./train_network_README-ja.md) * note.com [画像生成スクリプト](https://note.com/kohya_ss/n/n2693183a798e) * note.com [モデル変換スクリプト](https://note.com/kohya_ss/n/n374f316fe4ad) @@ -44,12 +45,11 @@ PowerShellを使う場合、venvを使えるようにするためには以下の 通常の(管理者ではない)PowerShellを開き以下を順に実行します。 - ```powershell git clone https://github.com/kohya-ss/sd-scripts.git cd sd-scripts -python -m venv --system-site-packages venv +python -m venv venv .\venv\Scripts\activate pip install torch==1.12.1+cu116 torchvision==0.13.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 @@ -70,7 +70,7 @@ accelerate config git clone https://github.com/kohya-ss/sd-scripts.git cd sd-scripts -python -m venv --system-site-packages venv +python -m venv venv .\venv\Scripts\activate pip install torch==1.12.1+cu116 torchvision==0.13.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116 @@ -84,6 +84,8 @@ copy /y .\bitsandbytes_windows\main.py .\venv\Lib\site-packages\bitsandbytes\cud accelerate config ``` +(注:``python -m venv venv`` のほうが ``python -m venv --system-site-packages venv`` より安全そうなため書き換えました。globalなpythonにパッケージがインストールしてあると、後者だといろいろと問題が起きます。) + accelerate 
configの質問には以下のように答えてください。(bf16で学習する場合、最後の質問にはbf16と答えてください。) ※0.15.0から日本語環境では選択のためにカーソルキーを押すと落ちます(……)。数字キーの0、1、2……で選択できますので、そちらを使ってください。 From 56bc806d52f30f33922b4d92d751086e6d6260b4 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Sun, 22 Jan 2023 23:10:10 +0900 Subject: [PATCH 31/44] ``--network_dim`` is removed from ``gen_img_diffusers.py`` --- train_network_README-ja.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train_network_README-ja.md b/train_network_README-ja.md index 77ef4c17..8e329e9b 100644 --- a/train_network_README-ja.md +++ b/train_network_README-ja.md @@ -138,7 +138,7 @@ v1で学習したLoRAとv2で学習したLoRA、次元数の異なるLoRAはマ ## 当リポジトリ内の画像生成スクリプトで生成する -gen_img_diffusers.pyに、--network_module、--network_weights、--network_dim(省略可)の各オプションを追加してください。意味は学習時と同様です。 +gen_img_diffusers.pyに、--network_module、--network_weightsの各オプションを追加してください。意味は学習時と同様です。 --network_mulオプションで0~1.0の数値を指定すると、LoRAの適用率を変えられます。 From ebdb624d294e22d4ab3f7c83e5d46b95c8cbc220 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Mon, 23 Jan 2023 00:25:32 +0900 Subject: [PATCH 32/44] Fix a link --- train_db_README-ja.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train_db_README-ja.md b/train_db_README-ja.md index 53ee715f..e6644320 100644 --- a/train_db_README-ja.md +++ b/train_db_README-ja.md @@ -159,7 +159,7 @@ v2.xモデルでWebUIで画像生成する場合、モデルの仕様が記述 ![image](https://user-images.githubusercontent.com/52813779/210776915-061d79c3-6582-42c2-8884-8b91d2f07313.png) -各yamlファイルは[https://github.com/Stability-AI/stablediffusion/tree/main/configs/stable-diffusion](Stability AIのSD2.0のリポジトリ)にあります。 +各yamlファイルは[Stability AIのSD2.0のリポジトリ](https://github.com/Stability-AI/stablediffusion/tree/main/configs/stable-diffusion]にあります。 # その他の学習オプション From f7fbdc4b2aa52986cdab2e5482ba840457c6428f Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Mon, 23 Jan 2023 17:21:04 -0800 Subject: [PATCH 33/44] Precalculate .safetensors model hashes after 
training --- library/train_util.py | 45 +++++++++++++++++++++++++++++++++++++++++++ networks/lora.py | 10 ++++++++++ 2 files changed, 55 insertions(+) diff --git a/library/train_util.py b/library/train_util.py index 0fdbadc1..bbc68aae 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -12,6 +12,7 @@ import math import os import random import hashlib +from io import BytesIO from tqdm import tqdm import torch @@ -25,6 +26,7 @@ from PIL import Image import cv2 from einops import rearrange from torch import einsum +import safetensors.torch import library.model_util as model_util @@ -790,6 +792,49 @@ def calculate_sha256(filename): return hash_sha256.hexdigest() +def precalculate_safetensors_hashes(tensors, metadata): + """Precalculate the model hashes needed by sd-webui-additional-networks to + save time on indexing the model later.""" + + # Because writing user metadata to the file can change the result of + # sd_models.model_hash(), only retain the training metadata for purposes of + # calculating the hash, as they are meant to be immutable + metadata = {k: v for k, v in metadata.items() if k.startswith("ss_")} + + bytes = safetensors.torch.save(tensors, metadata) + b = BytesIO(bytes) + + model_hash = addnet_hash_safetensors(b) + legacy_hash = addnet_hash_legacy(b) + return model_hash, legacy_hash + + +def addnet_hash_legacy(b): + """Old model hash used by sd-webui-additional-networks for .safetensors format files""" + m = hashlib.sha256() + + b.seek(0x100000) + m.update(b.read(0x10000)) + return m.hexdigest()[0:8] + + +def addnet_hash_safetensors(b): + """New model hash used by sd-webui-additional-networks for .safetensors format files""" + hash_sha256 = hashlib.sha256() + blksize = 1024 * 1024 + + b.seek(0) + header = b.read(8) + n = int.from_bytes(header, "little") + + offset = n + 8 + b.seek(offset) + for chunk in iter(lambda: b.read(blksize), b""): + hash_sha256.update(chunk) + + return hash_sha256.hexdigest() + + # flash attention forwards and 
backwards # https://arxiv.org/abs/2205.14135 diff --git a/networks/lora.py b/networks/lora.py index 9243f1e1..bbc65164 100644 --- a/networks/lora.py +++ b/networks/lora.py @@ -7,6 +7,8 @@ import math import os import torch +from library import train_util + class LoRAModule(torch.nn.Module): """ @@ -221,6 +223,14 @@ class LoRANetwork(torch.nn.Module): if os.path.splitext(file)[1] == '.safetensors': from safetensors.torch import save_file + + # Precalculate model hashes to save time on indexing + if metadata is None: + metadata = {} + model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata) + metadata["sshs_model_hash"] = model_hash + metadata["sshs_legacy_hash"] = legacy_hash + save_file(state_dict, file, metadata) else: torch.save(state_dict, file) From 66051883fb93017d50b17d182b4d3d2e281741a9 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Mon, 23 Jan 2023 17:26:58 -0800 Subject: [PATCH 34/44] Add bucketing metadata --- library/train_util.py | 4 ++++ train_network.py | 1 + 2 files changed, 5 insertions(+) diff --git a/library/train_util.py b/library/train_util.py index 0fdbadc1..e63ee828 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -85,6 +85,7 @@ class BaseDataset(torch.utils.data.Dataset): self.enable_bucket = False self.min_bucket_reso = None self.max_bucket_reso = None + self.bucket_info = None self.tokenizer_max_length = self.tokenizer.model_max_length if max_token_length is None else max_token_length + 2 @@ -217,9 +218,12 @@ class BaseDataset(torch.utils.data.Dataset): self.buckets[bucket_index].append(image_info.image_key) if self.enable_bucket: + self.bucket_info = {"buckets": {}} print("number of images (including repeats) / 各bucketの画像枚数(繰り返し回数を含む)") for i, (reso, img_keys) in enumerate(zip(bucket_resos, self.buckets)): + self.bucket_info["buckets"][i] = {"resolution": reso, "count": len(img_keys)} print(f"bucket {i}: resolution {reso}, count: 
{len(img_keys)}") + self.bucket_info["img_ar_errors"] = img_ar_errors img_ar_errors = np.array(img_ar_errors) print(f"mean ar error (without repeats): {np.mean(np.abs(img_ar_errors))}") diff --git a/train_network.py b/train_network.py index d60ae9a0..5eada8f1 100644 --- a/train_network.py +++ b/train_network.py @@ -264,6 +264,7 @@ def train(args): "ss_keep_tokens": args.keep_tokens, "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info), "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info), + "ss_bucket_info": json.dumps(train_dataset.bucket_info), "ss_training_comment": args.training_comment # will not be updated after training } From 552cdbd6d8b921f1b052120b8435ca7657e68a9b Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Tue, 24 Jan 2023 18:39:05 +0900 Subject: [PATCH 35/44] Fix markdown --- train_db_README-ja.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/train_db_README-ja.md b/train_db_README-ja.md index e6644320..1f37cab1 100644 --- a/train_db_README-ja.md +++ b/train_db_README-ja.md @@ -159,7 +159,7 @@ v2.xモデルでWebUIで画像生成する場合、モデルの仕様が記述 ![image](https://user-images.githubusercontent.com/52813779/210776915-061d79c3-6582-42c2-8884-8b91d2f07313.png) -各yamlファイルは[Stability AIのSD2.0のリポジトリ](https://github.com/Stability-AI/stablediffusion/tree/main/configs/stable-diffusion]にあります。 +各yamlファイルは[Stability AIのSD2.0のリポジトリ](https://github.com/Stability-AI/stablediffusion/tree/main/configs/stable-diffusion)にあります。 # その他の学習オプション From bf3a13bb4e4c4d45f2bedd3fbb752f33b3ec907b Mon Sep 17 00:00:00 2001 From: Kohya S Date: Tue, 24 Jan 2023 18:57:21 +0900 Subject: [PATCH 36/44] Fix error for loading bf16 weights --- networks/lora.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/networks/lora.py b/networks/lora.py index 9243f1e1..b936bfb2 100644 --- a/networks/lora.py +++ b/networks/lora.py @@ -31,7 +31,7 @@ class LoRAModule(torch.nn.Module): self.lora_up = torch.nn.Linear(lora_dim, out_dim, bias=False) if 
type(alpha) == torch.Tensor: - alpha = alpha.detach().numpy() + alpha = alpha.detach().float().numpy() # without casting, bf16 causes error alpha = lora_dim if alpha is None or alpha == 0 else alpha self.scale = alpha / self.lora_dim self.register_buffer('alpha', torch.tensor(alpha)) # 定数として扱える From 9f644d8dc3cc435cf64fb3e2f7a169ac173410f2 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Tue, 24 Jan 2023 20:16:21 +0900 Subject: [PATCH 37/44] Change default save format to safetensors --- train_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train_network.py b/train_network.py index 07aed4eb..8a8acc7d 100644 --- a/train_network.py +++ b/train_network.py @@ -440,8 +440,8 @@ if __name__ == '__main__': train_util.add_training_arguments(parser, True) parser.add_argument("--no_metadata", action='store_true', help="do not save metadata in output model / メタデータを出力先モデルに保存しない") - parser.add_argument("--save_model_as", type=str, default="pt", choices=[None, "ckpt", "pt", "safetensors"], - help="format to save the model (default is .pt) / モデル保存時の形式(デフォルトはpt)") + parser.add_argument("--save_model_as", type=str, default="safetensors", choices=[None, "ckpt", "pt", "safetensors"], + help="format to save the model (default is .safetensors) / モデル保存時の形式(デフォルトはsafetensors)") parser.add_argument("--unet_lr", type=float, default=None, help="learning rate for U-Net / U-Netの学習率") parser.add_argument("--text_encoder_lr", type=float, default=None, help="learning rate for Text Encoder / Text Encoderの学習率") From 91a50ea63734b548ae593a474c5248aa8307f5c0 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Tue, 24 Jan 2023 20:17:15 +0900 Subject: [PATCH 38/44] Change img_ar_errors to mean because too many imgs --- library/train_util.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 55a9aacd..f967c5f8 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -225,9 +225,12 @@ class 
BaseDataset(torch.utils.data.Dataset): for i, (reso, img_keys) in enumerate(zip(bucket_resos, self.buckets)): self.bucket_info["buckets"][i] = {"resolution": reso, "count": len(img_keys)} print(f"bucket {i}: resolution {reso}, count: {len(img_keys)}") - self.bucket_info["img_ar_errors"] = img_ar_errors + img_ar_errors = np.array(img_ar_errors) - print(f"mean ar error (without repeats): {np.mean(np.abs(img_ar_errors))}") + mean_img_ar_error = np.mean(np.abs(img_ar_errors)) + self.bucket_info["mean_img_ar_error"] = mean_img_ar_error + print(f"mean ar error (without repeats): {mean_img_ar_error}") + # 参照用indexを作る self.buckets_indices: list(BucketBatchIndex) = [] @@ -834,7 +837,7 @@ def addnet_hash_safetensors(b): offset = n + 8 b.seek(offset) for chunk in iter(lambda: b.read(blksize), b""): - hash_sha256.update(chunk) + hash_sha256.update(chunk) return hash_sha256.hexdigest() @@ -1107,7 +1110,7 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth: parser.add_argument("--save_every_n_epochs", type=int, default=None, help="save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する") parser.add_argument("--save_n_epoch_ratio", type=int, default=None, - help="save checkpoint N epoch ratio / 学習中のモデルを指定のエポック割合で保存する") + help="save checkpoint N epoch ratio (for example 5 means save at least 5 files total) / 学習中のモデルを指定のエポック割合で保存する(たとえば5を指定すると最低5個のファイルが保存される)") parser.add_argument("--save_last_n_epochs", type=int, default=None, help="save last N checkpoints / 最大Nエポック保存する") parser.add_argument("--save_last_n_epochs_state", type=int, default=None, help="save last N checkpoints of state (overrides the value of --save_last_n_epochs)/ 最大Nエポックstateを保存する(--save_last_n_epochsの指定を上書きします)") From 46029b2707b202449a8b0572ee4af8e6f2d73243 Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Tue, 24 Jan 2023 20:57:33 +0900 Subject: [PATCH 39/44] Update README.md --- README.md | 40 ++++++++++++++++------------------------ 1 file 
changed, 16 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 0f6c7db6..63616894 100644 --- a/README.md +++ b/README.md @@ -2,35 +2,27 @@ This repository contains training, generation and utility scripts for Stable Dif ## Updates -__Stable Diffusion web UI now seems to support LoRA trained by ``sd-scripts``.__ Thank you for great work!!! +__Stable Diffusion web UI now seems to support LoRA trained by ``sd-scripts``.__ Thank you for great work!!! -Note: Currently the LoRA models trained by release v0.4.0 does not seem to be supported. If you use Web UI native LoRA support, please use release 0.3.2 for now. The LoRA models for SD 2.x is not supported too in Web UI. +Note: The LoRA models for SD 2.x is not supported too in Web UI. -- Release v0.4.0: 22 Jan. 2023 - - Add ``--network_alpha`` option to specify ``alpha`` value to prevent underflows for stable training. Thanks to CCRcmcpe! - - Details of the issue are described in https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 . - - The default value is ``1``, scale ``1 / rank (or dimension)``. Set same value as ``network_dim`` for same behavior to old version. - - LoRA with a large dimension (rank) seems to require a higher learning rate with ``alpha=1`` (e.g. 1e-3 for 128-dim, still investigating).  - - For generating images in Web UI, __the latest version of the extension ``sd-webui-additional-networks`` (v0.3.0 or later) is required for the models trained with this release or later.__ - - Add logging for the learning rate for U-Net and Text Encoder independently, and for running average epoch loss. Thanks to mgz-dev! - - Add more metadata such as dataset/reg image dirs, session ID, output name etc... See https://github.com/kohya-ss/sd-scripts/pull/77 for details. Thanks to space-nuko! - - __Now the metadata includes the folder name (the basename of the folder contains image files, not fullpath).__ If you do not want it, disable metadata storing with ``--no_metadata`` option. 
- - Add ``--training_comment`` option. You can specify an arbitrary string and refer to it by the extension. +- 24 Jan. 2023, 2023/1/24 + - Change the default save format to ``.safetensors`` for ``train_network.py``. + - Add ``--save_n_epoch_ratio`` option to specify how often to save. Thanks to forestsource! + - For example, if 5 is specified, 5 (or 6) files will be saved in training. + - Add feature to pre-calculate hash to reduce loading time in the extension. Thanks to space-nuko! + - Add bucketing metadata. Thanks to space-nuko! + - Fix an error with bf16 model in ``gen_img_diffusers.py``. + - ``train_network.py`` のモデル保存形式のデフォルトを ``.safetensors`` に変更しました。 + - モデルを保存する頻度を指定する ``--save_n_epoch_ratio`` オプションが追加されました。forestsource氏に感謝します。 + - たとえば 5 を指定すると、学習終了までに合計で5個(または6個)のファイルが保存されます。 + - 拡張でモデル読み込み時間を短縮するためのハッシュ事前計算の機能を追加しました。space-nuko氏に感謝します。 + - メタデータにbucket情報が追加されました。space-nuko氏に感謝します。 + - ``gen_img_diffusers.py`` でbf16形式のモデルを読み込んだときのエラーを修正しました。 Stable Diffusion web UI本体で当リポジトリで学習したLoRAモデルによる画像生成がサポートされたようです。 -注:現時点ではversion 0.4.0で学習したモデルはサポートされないようです。Web UI本体の生成機能を使う場合には、version 0.3.2を引き続きご利用ください。またSD2.x用のLoRAモデルもサポートされないようです。 - -- Release 0.4.0: 2023/1/22 - - アンダーフローを防ぎ安定して学習するための ``alpha`` 値を指定する、``--network_alpha`` オプションを追加しました。CCRcmcpe 氏に感謝します。 - - 問題の詳細はこちらをご覧ください: https://github.com/kohya-ss/sd-webui-additional-networks/issues/49 - - デフォルト値は ``1`` で、LoRAの計算結果を ``1 / rank (dimension・次元数)`` 倍します(つまり小さくなります。これにより同じ効果を出すために必要なLoRAの重みの変化が大きくなるため、アンダーフローが避けられるようになります)。``network_dim`` と同じ値を指定すると旧バージョンと同じ動作になります。 - - ``alpha=1``の場合、次元数(rank)の多いLoRAモジュールでは学習率を高めにしたほうが良いようです(128次元で1e-3など)。 - - __このバージョンのスクリプトで学習したモデルをWeb UIで使うためには ``sd-webui-additional-networks`` 拡張の最新版(v0.3.0以降)が必要となります。__ - - U-Net と Text Encoder のそれぞれの学習率、エポックの平均lossをログに記録するようになりました。mgz-dev 氏に感謝します。 - - 画像ディレクトリ、セッションID、出力名などいくつかの項目がメタデータに追加されました(詳細は https://github.com/kohya-ss/sd-scripts/pull/77 を参照)。space-nuko氏に感謝します。 - - __メタデータにフォルダ名が含まれるようになりました(画像を含むフォルダの名前のみで、フルパスではありません)。__ 
もし望まない場合には ``--no_metadata`` オプションでメタデータの記録を止めてください。 - - ``--training_comment`` オプションを追加しました。任意の文字列を指定でき、Web UI拡張から参照できます。 +注:SD2.x用のLoRAモデルはサポートされないようです。 Please read [Releases](https://github.com/kohya-ss/sd-scripts/releases) for recent updates. 最近の更新情報は [Release](https://github.com/kohya-ss/sd-scripts/releases) をご覧ください。 From c425afb08b4c878a9e7893185ef2f06e0b65475e Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Wed, 25 Jan 2023 14:00:42 +0900 Subject: [PATCH 40/44] Update README.md --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 63616894..db03e215 100644 --- a/README.md +++ b/README.md @@ -112,6 +112,11 @@ note: Some user reports ``ValueError: fp16 mixed precision requires a GPU`` is o (Single GPU with id `0` will be used.) +### about PyTorch and xformers + +Other versions of PyTorch and xformers seem to have problems with training. +If there is no other reason, please install the specified version. 
+ ## Upgrade When a new release comes out you can upgrade your repo with the following command: From a68501bede09de4e13c09bc3f202d7c5d7668491 Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Wed, 25 Jan 2023 14:02:27 +0900 Subject: [PATCH 41/44] Update README-ja.md --- README-ja.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README-ja.md b/README-ja.md index d8db9d36..fa0f892e 100644 --- a/README-ja.md +++ b/README-ja.md @@ -1,7 +1,7 @@ ## リポジトリについて Stable Diffusionの学習、画像生成、その他のスクリプトを入れたリポジトリです。 -[README in English](./README.md) +[README in English](./README.md) ←更新情報はこちらにあります GUIやPowerShellスクリプトなど、より使いやすくする機能が[bmaltais氏のリポジトリ](https://github.com/bmaltais/kohya_ss)で提供されています(英語です)のであわせてご覧ください。bmaltais氏に感謝します。 @@ -103,6 +103,10 @@ accelerate configの質問には以下のように答えてください。(bf1 ※場合によって ``ValueError: fp16 mixed precision requires a GPU`` というエラーが出ることがあるようです。この場合、6番目の質問( ``What GPU(s) (by id) should be used for training on this machine as a comma-separated list? 
[all]:``)に「0」と答えてください。(id `0`のGPUが使われます。) +### PyTorchとxformersのバージョンについて + +他のバージョンでは学習がうまくいかない場合があるようです。特に他の理由がなければ指定のバージョンをお使いください。 + ## アップグレード 新しいリリースがあった場合、以下のコマンドで更新できます。 From e0d7f1d99d4c248e4c50ae20296f32e8d3d445df Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Thu, 26 Jan 2023 08:32:05 +0900 Subject: [PATCH 42/44] Update train_db_README-ja.md --- train_db_README-ja.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/train_db_README-ja.md b/train_db_README-ja.md index 1f37cab1..85ae35aa 100644 --- a/train_db_README-ja.md +++ b/train_db_README-ja.md @@ -72,7 +72,7 @@ identifierとclassを使い、たとえば「shs dog」などでモデルを学 ※LoRA等の追加ネットワークを学習する場合のコマンドは ``train_db.py`` ではなく ``train_network.py`` となります。また追加でnetwork_\*オプションが必要となりますので、LoRAのガイドを参照してください。 ``` -accelerate launch --num_cpu_threads_per_process 8 train_db.py +accelerate launch --num_cpu_threads_per_process 1 train_db.py --pretrained_model_name_or_path=<.ckptまたは.safetensordまたはDiffusers版モデルのディレクトリ> --train_data_dir=<学習用データのディレクトリ> --reg_data_dir=<正則化画像のディレクトリ> @@ -89,7 +89,7 @@ accelerate launch --num_cpu_threads_per_process 8 train_db.py --gradient_checkpointing ``` -num_cpu_threads_per_processにはCPUコア数を指定するとよいようです。 +num_cpu_threads_per_processには通常は1を指定するとよいようです。 pretrained_model_name_or_pathに追加学習を行う元となるモデルを指定します。Stable Diffusionのcheckpointファイル(.ckptまたは.safetensors)、Diffusersのローカルディスクにあるモデルディレクトリ、DiffusersのモデルID("stabilityai/stable-diffusion-2"など)が指定できます。学習後のモデルの保存形式はデフォルトでは元のモデルと同じになります(save_model_asオプションで変更できます)。 From b5ba4635120d13ee0915e287db2b72006eac1894 Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Thu, 26 Jan 2023 08:32:51 +0900 Subject: [PATCH 43/44] Update fine_tune_README_ja.md --- fine_tune_README_ja.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fine_tune_README_ja.md b/fine_tune_README_ja.md index f7634905..9dcd34af 100644 --- a/fine_tune_README_ja.md +++ 
b/fine_tune_README_ja.md @@ -324,7 +324,7 @@ __※引数を都度書き換えて、別のメタデータファイルに書き ## 学習の実行 たとえば以下のように実行します。以下は省メモリ化のための設定です。 ``` -accelerate launch --num_cpu_threads_per_process 8 fine_tune.py +accelerate launch --num_cpu_threads_per_process 1 fine_tune.py --pretrained_model_name_or_path=model.ckpt --in_json meta_lat.json --train_data_dir=train_data @@ -336,7 +336,7 @@ accelerate launch --num_cpu_threads_per_process 8 fine_tune.py --save_every_n_epochs=4 ``` -accelerateのnum_cpu_threads_per_processにはCPUのコア数を指定するとよいようです。 +accelerateのnum_cpu_threads_per_processには通常は1を指定するとよいようです。 pretrained_model_name_or_pathに学習対象のモデルを指定します(Stable DiffusionのcheckpointかDiffusersのモデル)。Stable Diffusionのcheckpointは.ckptと.safetensorsに対応しています(拡張子で自動判定)。 From 591e3c1813a243acafb33f3ae9f72cf1a085e51e Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Thu, 26 Jan 2023 08:37:14 +0900 Subject: [PATCH 44/44] Update train_network_README-ja.md --- train_network_README-ja.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/train_network_README-ja.md b/train_network_README-ja.md index 8e329e9b..e67d8cd0 100644 --- a/train_network_README-ja.md +++ b/train_network_README-ja.md @@ -24,7 +24,7 @@ DreamBoothの手法(identifier(sksなど)とclass、オプションで正 [DreamBoothのガイド](./train_db_README-ja.md) を参照してデータを用意してください。 -学習するとき、train_db.pyの代わりにtrain_network.pyを指定してください。 +学習するとき、train_db.pyの代わりにtrain_network.pyを指定してください。そして「LoRAの学習のためのオプション」にあるようにLoRA関連のオプション(``network_dim``や``network_alpha``など)を追加してください。 ほぼすべてのオプション(Stable Diffusionのモデル保存関係を除く)が使えますが、stop_text_encoder_trainingはサポートしていません。 @@ -32,7 +32,7 @@ DreamBoothの手法(identifier(sksなど)とclass、オプションで正 [fine-tuningのガイド](./fine_tune_README_ja.md) を参照し、各手順を実行してください。 -学習するとき、fine_tune.pyの代わりにtrain_network.pyを指定してください。ほぼすべてのオプション(モデル保存関係を除く)がそのまま使えます。 
+学習するとき、fine_tune.pyの代わりにtrain_network.pyを指定してください。ほぼすべてのオプション(モデル保存関係を除く)がそのまま使えます。そして「LoRAの学習のためのオプション」にあるようにLoRA関連のオプション(``network_dim``や``network_alpha``など)を追加してください。 なお「latentsの事前取得」は行わなくても動作します。VAEから学習時(またはキャッシュ時)にlatentを取得するため学習速度は遅くなりますが、代わりにcolor_augが使えるようになります。 @@ -45,7 +45,7 @@ train_network.pyでは--network_moduleオプションに、学習対象のモジ 以下はコマンドラインの例です(DreamBooth手法)。 ``` -accelerate launch --num_cpu_threads_per_process 12 train_network.py +accelerate launch --num_cpu_threads_per_process 1 train_network.py --pretrained_model_name_or_path=..\models\model.ckpt --train_data_dir=..\data\db\char1 --output_dir=..\lora_train1 --reg_data_dir=..\data\db\reg1 --prior_loss_weight=1.0 @@ -60,7 +60,9 @@ accelerate launch --num_cpu_threads_per_process 12 train_network.py その他、以下のオプションが指定できます。 * --network_dim - * LoRAの次元数を指定します(``--networkdim=4``など)。省略時は4になります。数が多いほど表現力は増しますが、学習に必要なメモリ、時間は増えます。また闇雲に増やしても良くないようです。 + * LoRAのRANKを指定します(``--networkdim=4``など)。省略時は4になります。数が多いほど表現力は増しますが、学習に必要なメモリ、時間は増えます。また闇雲に増やしても良くないようです。 +* --network_alpha + * アンダーフローを防ぎ安定して学習するための ``alpha`` 値を指定します。デフォルトは1です。``network_dim``と同じ値を指定すると以前のバージョンと同じ動作になります。 * --network_weights * 学習前に学習済みのLoRAの重みを読み込み、そこから追加で学習します。 * --network_train_unet_only @@ -126,7 +128,7 @@ python networks\merge_lora.py --ratiosにそれぞれのモデルの比率(どのくらい重みを元モデルに反映するか)を0~1.0の数値で指定します。二つのモデルを一対一でマージす場合は、「0.5 0.5」になります。「1.0 1.0」では合計の重みが大きくなりすぎて、恐らく結果はあまり望ましくないものになると思われます。 -v1で学習したLoRAとv2で学習したLoRA、次元数の異なるLoRAはマージできません。U-NetだけのLoRAとU-Net+Text EncoderのLoRAはマージできるはずですが、結果は未知数です。 +v1で学習したLoRAとv2で学習したLoRA、rank(次元数)や``alpha``の異なるLoRAはマージできません。U-NetだけのLoRAとU-Net+Text EncoderのLoRAはマージできるはずですが、結果は未知数です。 ### その他のオプション