diff --git a/README.md b/README.md index 5389b867..daf72c6e 100644 --- a/README.md +++ b/README.md @@ -127,35 +127,21 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser ## Change History - -- 19 Mar. 2023, 2023/3/19: - - Add a function to load training config with `.toml` to each training script. Thanks to Linaqruf for this great contribution! - - Specify `.toml` file with `--config_file`. `.toml` file has `key=value` entries. Keys are same as command line options. See [#241](https://github.com/kohya-ss/sd-scripts/pull/241) for details. - - All sub-sections are combined to a single dictionary (the section names are ignored.) - - Omitted arguments are the default values for command line arguments. - - Command line args override the arguments in `.toml`. - - With `--output_config` option, you can output current command line options to the `.toml` specified with`--config_file`. Please use as a template. - - Add `--lr_scheduler_type` and `--lr_scheduler_args` arguments for custom LR scheduler to each training script. Thanks to Isotr0py! [#271](https://github.com/kohya-ss/sd-scripts/pull/271) - - Same as the optimizer. - - Add sample image generation with weight and no length limit. Thanks to mio2333! [#288](https://github.com/kohya-ss/sd-scripts/pull/288) - - `( )`, `(xxxx:1.2)` and `[ ]` can be used. - - Fix exception on training model in diffusers format with `train_network.py` Thanks to orenwang! 
[#290](https://github.com/kohya-ss/sd-scripts/pull/290) - - - 各学習スクリプトでコマンドライン引数の代わりに`.toml` ファイルで引数を指定できるようになりました。Linaqruf氏の多大な貢献に感謝します。 - - `--config_file` で `.toml` ファイルを指定してください。ファイルは `key=value` 形式の行で指定し、key はコマンドラインオプションと同じです。詳細は [#241](https://github.com/kohya-ss/sd-scripts/pull/241) をご覧ください。 - - ファイル内のサブセクションはすべて無視されます。 - - 省略した引数はコマンドライン引数のデフォルト値になります。 - - コマンドライン引数で `.toml` の設定を上書きできます。 - - `--output_config` オプションを指定すると、現在のコマンドライン引数を`--config_file` オプションで指定した `.toml` ファイルに出力します。ひな形としてご利用ください。 - - 任意のスケジューラを使うための `--lr_scheduler_type` と `--lr_scheduler_args` オプションを各学習スクリプトに追加しました。Isotr0py氏に感謝します。 [#271](https://github.com/kohya-ss/sd-scripts/pull/271) - - 任意のオプティマイザ指定と同じ形式です。 - - 学習中のサンプル画像出力でプロンプトの重みづけができるようになりました。また長さ制限も緩和されています。mio2333氏に感謝します。 [#288](https://github.com/kohya-ss/sd-scripts/pull/288) - - `( )`、 `(xxxx:1.2)` や `[ ]` が使えます。 - - `train_network.py` でローカルのDiffusersモデルを指定した時のエラーを修正しました。orenwang氏に感謝します。 [#290](https://github.com/kohya-ss/sd-scripts/pull/290) - -- 11 Mar. 2023, 2023/3/11: - - Fix `svd_merge_lora.py` causes an error about the device. - - `svd_merge_lora.py` でデバイス関連のエラーが発生する不具合を修正しました。 +- 21 Mar. 2023, 2023/3/21: + - Add `--vae_batch_size` for faster latents caching to each training script. This batches VAE calls. + - Please start with `2` or `4` depending on the size of VRAM. + - Fix the number of training steps with `--gradient_accumulation_steps` and `--max_train_epochs`. Thanks to tsukimiya! + - Extract parser setup to external scripts. Thanks to robertsmieja! + - Fix an issue without `.npz` and with `--full_path` in training. + - Support extensions with upper cases for images for non-Windows environments. + - Fix `resize_lora.py` to work with LoRA with dynamic rank (including `conv_dim != network_dim`). Thanks to toshiaki! 
+ - latentsのキャッシュを高速化する`--vae_batch_size` オプションを各学習スクリプトに追加しました。VAE呼び出しをバッチ化します。 + - VRAMサイズに応じて、`2` か `4` 程度から試してください。 + - `--gradient_accumulation_steps` と `--max_train_epochs` を指定した時、当該のepochで学習が止まらない不具合を修正しました。tsukimiya氏に感謝します。 + - 外部のスクリプト用に引数parserの構築が関数化されました。robertsmieja氏に感謝します。 + - 学習時、`--full_path` 指定時に `.npz` が存在しない場合の不具合を解消しました。 + - Windows以外の環境向けに、画像ファイルの大文字の拡張子をサポートしました。 + - `resize_lora.py` を dynamic rank (rankが各LoRAモジュールで異なる場合、`conv_dim` が `network_dim` と異なる場合も含む)の時に正しく動作しない不具合を修正しました。toshiaki氏に感謝します。 - Sample image generation: diff --git a/fine_tune.py b/fine_tune.py index d927bd73..1acf478f 100644 --- a/fine_tune.py +++ b/fine_tune.py @@ -138,7 +138,7 @@ def train(args): vae.requires_grad_(False) vae.eval() with torch.no_grad(): - train_dataset_group.cache_latents(vae) + train_dataset_group.cache_latents(vae, args.vae_batch_size) vae.to("cpu") if torch.cuda.is_available(): torch.cuda.empty_cache() @@ -194,7 +194,7 @@ def train(args): # 学習ステップ数を計算する if args.max_train_epochs is not None: - args.max_train_steps = args.max_train_epochs * len(train_dataloader) + args.max_train_steps = args.max_train_epochs * math.ceil(len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps) print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する @@ -240,7 +240,7 @@ def train(args): print(f" num epochs / epoch数: {num_train_epochs}") print(f" batch size per device / バッチサイズ: {args.train_batch_size}") print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}") - print(f" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}") + print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}") print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}") progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps") @@ -387,7 +387,7 @@ def train(args): print("model saved.") -if __name__ == "__main__": +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() train_util.add_sd_models_arguments(parser) @@ -400,6 +400,12 @@ if __name__ == "__main__": parser.add_argument("--diffusers_xformers", action="store_true", help="use xformers by diffusers / Diffusersでxformersを使用する") parser.add_argument("--train_text_encoder", action="store_true", help="train text encoder / text encoderも学習する") + return parser + + +if __name__ == "__main__": + parser = setup_parser() + args = parser.parse_args() args = train_util.read_config_from_file(args, parser) diff --git a/finetune/clean_captions_and_tags.py b/finetune/clean_captions_and_tags.py index 11a59b1f..68839ecc 100644 --- a/finetune/clean_captions_and_tags.py +++ b/finetune/clean_captions_and_tags.py @@ -163,13 +163,19 @@ def main(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() # parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ") parser.add_argument("in_json", type=str, help="metadata file to input / 読み込むメタデータファイル") 
parser.add_argument("out_json", type=str, help="metadata file to output / メタデータファイル書き出し先") parser.add_argument("--debug", action="store_true", help="debug mode") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args, unknown = parser.parse_known_args() if len(unknown) == 1: print("WARNING: train_data_dir argument is removed. This script will not work with three arguments in future. Please specify two arguments: in_json and out_json.") diff --git a/finetune/make_captions.py b/finetune/make_captions.py index a2a35b39..e690349a 100644 --- a/finetune/make_captions.py +++ b/finetune/make_captions.py @@ -133,7 +133,7 @@ def main(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ") parser.add_argument("--caption_weights", type=str, default="https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_large_caption.pth", @@ -153,6 +153,12 @@ if __name__ == '__main__': parser.add_argument('--seed', default=42, type=int, help='seed for reproducibility / 再現性を確保するための乱数seed') parser.add_argument("--debug", action="store_true", help="debug mode") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() # スペルミスしていたオプションを復元する diff --git a/finetune/make_captions_by_git.py b/finetune/make_captions_by_git.py index ebc91920..06af5598 100644 --- a/finetune/make_captions_by_git.py +++ b/finetune/make_captions_by_git.py @@ -127,7 +127,7 @@ def main(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ") parser.add_argument("--caption_extension", type=str, default=".caption", help="extension of caption file / 出力されるキャプションファイルの拡張子") @@ -141,5 +141,11 
@@ if __name__ == '__main__': help="remove like `with the words xxx` from caption / `with the words xxx`のような部分をキャプションから削除する") parser.add_argument("--debug", action="store_true", help="debug mode") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() main(args) diff --git a/finetune/merge_captions_to_metadata.py b/finetune/merge_captions_to_metadata.py index 491e4591..241f6f90 100644 --- a/finetune/merge_captions_to_metadata.py +++ b/finetune/merge_captions_to_metadata.py @@ -46,7 +46,7 @@ def main(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ") parser.add_argument("out_json", type=str, help="metadata file to output / メタデータファイル書き出し先") @@ -61,6 +61,12 @@ if __name__ == '__main__': help="recursively look for training tags in all child folders of train_data_dir / train_data_dirのすべての子フォルダにある学習タグを再帰的に探す") parser.add_argument("--debug", action="store_true", help="debug mode") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() # スペルミスしていたオプションを復元する diff --git a/finetune/merge_dd_tags_to_metadata.py b/finetune/merge_dd_tags_to_metadata.py index 8823a9c8..db1bff6d 100644 --- a/finetune/merge_dd_tags_to_metadata.py +++ b/finetune/merge_dd_tags_to_metadata.py @@ -47,7 +47,7 @@ def main(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ") parser.add_argument("out_json", type=str, help="metadata file to output / メタデータファイル書き出し先") @@ -61,5 +61,11 @@ if __name__ == '__main__': help="extension of caption (tag) file / 読み込むキャプション(タグ)ファイルの拡張子") parser.add_argument("--debug", action="store_true", help="debug mode, print tags") + 
return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() main(args) diff --git a/finetune/prepare_buckets_latents.py b/finetune/prepare_buckets_latents.py index ab01d9d5..8d9a38ab 100644 --- a/finetune/prepare_buckets_latents.py +++ b/finetune/prepare_buckets_latents.py @@ -229,7 +229,7 @@ def main(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ") parser.add_argument("in_json", type=str, help="metadata file to input / 読み込むメタデータファイル") @@ -257,5 +257,11 @@ if __name__ == '__main__': parser.add_argument("--skip_existing", action="store_true", help="skip images if npz already exists (both normal and flipped exists if flip_aug is enabled) / npzが既に存在する画像をスキップする(flip_aug有効時は通常、反転の両方が存在する画像をスキップ)") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() main(args) diff --git a/finetune/tag_images_by_wd14_tagger.py b/finetune/tag_images_by_wd14_tagger.py index 609b8c50..2286115e 100644 --- a/finetune/tag_images_by_wd14_tagger.py +++ b/finetune/tag_images_by_wd14_tagger.py @@ -173,7 +173,7 @@ def main(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ") parser.add_argument("--repo_id", type=str, default=DEFAULT_WD14_TAGGER_REPO, @@ -191,6 +191,12 @@ if __name__ == '__main__': parser.add_argument("--caption_extension", type=str, default=".txt", help="extension of caption file / 出力されるキャプションファイルの拡張子") parser.add_argument("--debug", action="store_true", help="debug mode") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() # スペルミスしていたオプションを復元する diff --git a/gen_img_diffusers.py 
b/gen_img_diffusers.py index 8a185170..38bc86e9 100644 --- a/gen_img_diffusers.py +++ b/gen_img_diffusers.py @@ -2690,7 +2690,7 @@ def main(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--v2", action='store_true', help='load Stable Diffusion v2.0 model / Stable Diffusion 2.0のモデルを読み込む') @@ -2786,5 +2786,11 @@ if __name__ == '__main__': parser.add_argument("--control_net_ratios", type=float, default=None, nargs='*', help='ControlNet guidance ratio for steps / ControlNetでガイドするステップ比率') + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() main(args) diff --git a/library/train_util.py b/library/train_util.py index e68444a0..ffe81d69 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -73,8 +73,7 @@ DEFAULT_LAST_OUTPUT_NAME = "last" # region dataset -IMAGE_EXTENSIONS = [".png", ".jpg", ".jpeg", ".webp", ".bmp"] -# , ".PNG", ".JPG", ".JPEG", ".WEBP", ".BMP"] # Linux? 
+IMAGE_EXTENSIONS = [".png", ".jpg", ".jpeg", ".webp", ".bmp", ".PNG", ".JPG", ".JPEG", ".WEBP", ".BMP"] class ImageInfo: @@ -675,10 +674,19 @@ class BaseDataset(torch.utils.data.Dataset): def is_latent_cacheable(self): return all([not subset.color_aug and not subset.random_crop for subset in self.subsets]) - def cache_latents(self, vae): - # TODO ここを高速化したい + def cache_latents(self, vae, vae_batch_size=1): + # ちょっと速くした print("caching latents.") - for info in tqdm(self.image_data.values()): + + image_infos = list(self.image_data.values()) + + # sort by resolution + image_infos.sort(key=lambda info: info.bucket_reso[0] * info.bucket_reso[1]) + + # split by resolution + batches = [] + batch = [] + for info in image_infos: subset = self.image_to_subset[info.image_key] if info.latents_npz is not None: @@ -689,18 +697,42 @@ class BaseDataset(torch.utils.data.Dataset): info.latents_flipped = torch.FloatTensor(info.latents_flipped) continue - image = self.load_image(info.absolute_path) - image = self.trim_and_resize_if_required(subset, image, info.bucket_reso, info.resized_size) + # if last member of batch has different resolution, flush the batch + if len(batch) > 0 and batch[-1].bucket_reso != info.bucket_reso: + batches.append(batch) + batch = [] - img_tensor = self.image_transforms(image) - img_tensor = img_tensor.unsqueeze(0).to(device=vae.device, dtype=vae.dtype) - info.latents = vae.encode(img_tensor).latent_dist.sample().squeeze(0).to("cpu") + batch.append(info) + + # if number of data in batch is enough, flush the batch + if len(batch) >= vae_batch_size: + batches.append(batch) + batch = [] + + if len(batch) > 0: + batches.append(batch) + + # iterate batches + for batch in tqdm(batches, smoothing=1, total=len(batches)): + images = [] + for info in batch: + image = self.load_image(info.absolute_path) + image = self.trim_and_resize_if_required(subset, image, info.bucket_reso, info.resized_size) + image = self.image_transforms(image) + images.append(image) + + 
img_tensors = torch.stack(images, dim=0) + img_tensors = img_tensors.to(device=vae.device, dtype=vae.dtype) + + latents = vae.encode(img_tensors).latent_dist.sample().to("cpu") + for info, latent in zip(batch, latents): + info.latents = latent if subset.flip_aug: - image = image[:, ::-1].copy() # cannot convert to Tensor without copy - img_tensor = self.image_transforms(image) - img_tensor = img_tensor.unsqueeze(0).to(device=vae.device, dtype=vae.dtype) - info.latents_flipped = vae.encode(img_tensor).latent_dist.sample().squeeze(0).to("cpu") + img_tensors = torch.flip(img_tensors, dims=[3]) + latents = vae.encode(img_tensors).latent_dist.sample().to("cpu") + for info, latent in zip(batch, latents): + info.latents_flipped = latent def get_image_size(self, image_path): image = Image.open(image_path) @@ -1197,6 +1229,10 @@ class FineTuningDataset(BaseDataset): npz_file_flip = None return npz_file_norm, npz_file_flip + # if not full path, check image_dir. if image_dir is None, return None + if subset.image_dir is None: + return None, None + # image_key is relative path npz_file_norm = os.path.join(subset.image_dir, image_key + ".npz") npz_file_flip = os.path.join(subset.image_dir, image_key + "_flip.npz") @@ -1237,10 +1273,10 @@ class DatasetGroup(torch.utils.data.ConcatDataset): # for dataset in self.datasets: # dataset.make_buckets() - def cache_latents(self, vae): + def cache_latents(self, vae, vae_batch_size=1): for i, dataset in enumerate(self.datasets): print(f"[Dataset {i}]") - dataset.cache_latents(vae) + dataset.cache_latents(vae, vae_batch_size) def is_latent_cacheable(self) -> bool: return all([dataset.is_latent_cacheable() for dataset in self.datasets]) @@ -1986,6 +2022,7 @@ def add_dataset_arguments( action="store_true", help="cache latents to reduce memory (augmentations must be disabled) / メモリ削減のためにlatentをcacheする(augmentationは使用不可)", ) + parser.add_argument("--vae_batch_size", type=int, default=1, help="batch size for caching latents / 
latentのcache時のバッチサイズ") parser.add_argument( "--enable_bucket", action="store_true", help="enable buckets for multi aspect ratio training / 複数解像度学習のためのbucketを有効にする" ) diff --git a/networks/check_lora_weights.py b/networks/check_lora_weights.py index 6bd9ccd9..bb8dcd6b 100644 --- a/networks/check_lora_weights.py +++ b/networks/check_lora_weights.py @@ -24,9 +24,16 @@ def main(file): print(f"{key},{str(tuple(value.size())).replace(', ', '-')},{torch.mean(torch.abs(value))},{torch.min(torch.abs(value))}") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("file", type=str, help="model file to check / 重みを確認するモデルファイル") + + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() main(args.file) diff --git a/networks/extract_lora_from_models.py b/networks/extract_lora_from_models.py index b5d18d9b..9aa28485 100644 --- a/networks/extract_lora_from_models.py +++ b/networks/extract_lora_from_models.py @@ -162,7 +162,7 @@ def svd(args): print(f"LoRA weights are saved to: {args.save_to}") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--v2", action='store_true', help='load Stable Diffusion v2.x model / Stable Diffusion 2.xのモデルを読み込む') @@ -179,5 +179,11 @@ if __name__ == '__main__': help="dimension (rank) of LoRA for Conv2d-3x3 (default None, disabled) / LoRAのConv2d-3x3の次元数(rank)(デフォルトNone、適用なし)") parser.add_argument("--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() svd(args) diff --git a/networks/lora_interrogator.py b/networks/lora_interrogator.py index 2c06d876..2891798b 100644 --- a/networks/lora_interrogator.py +++ b/networks/lora_interrogator.py @@ -105,7 +105,7 @@ def interrogate(args): print(f"[{i:3d}]: {token:5d} 
{string:<20s}: {diff:.5f}") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--v2", action='store_true', help='load Stable Diffusion v2.x model / Stable Diffusion 2.xのモデルを読み込む') @@ -118,5 +118,11 @@ if __name__ == '__main__': parser.add_argument("--clip_skip", type=int, default=None, help="use output of nth layer from back of text encoder (n>=1) / text encoderの後ろからn番目の層の出力を用いる(nは1以上)") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() interrogate(args) diff --git a/networks/merge_lora.py b/networks/merge_lora.py index 09dee4de..8d97392f 100644 --- a/networks/merge_lora.py +++ b/networks/merge_lora.py @@ -197,7 +197,7 @@ def merge(args): save_to_file(args.save_to, state_dict, state_dict, save_dtype) -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--v2", action='store_true', help='load Stable Diffusion v2.x model / Stable Diffusion 2.xのモデルを読み込む') @@ -214,5 +214,11 @@ if __name__ == '__main__': parser.add_argument("--ratios", type=float, nargs='*', help="ratios for each model / それぞれのLoRAモデルの比率") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() merge(args) diff --git a/networks/merge_lora_old.py b/networks/merge_lora_old.py index 1d4cb3b5..c4b6efce 100644 --- a/networks/merge_lora_old.py +++ b/networks/merge_lora_old.py @@ -158,7 +158,7 @@ def merge(args): save_to_file(args.save_to, state_dict, state_dict, save_dtype) -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--v2", action='store_true', help='load Stable Diffusion v2.x model / Stable Diffusion 2.xのモデルを読み込む') @@ -175,5 +175,11 @@ if __name__ == '__main__': parser.add_argument("--ratios", type=float, nargs='*', help="ratios for each model / それぞれのLoRAモデルの比率") + 
return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() merge(args) diff --git a/networks/resize_lora.py b/networks/resize_lora.py index 09a19c19..2bd86599 100644 --- a/networks/resize_lora.py +++ b/networks/resize_lora.py @@ -208,18 +208,28 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn with torch.no_grad(): for key, value in tqdm(lora_sd.items()): + weight_name = None if 'lora_down' in key: block_down_name = key.split(".")[0] + weight_name = key.split(".")[-1] lora_down_weight = value - if 'lora_up' in key: - block_up_name = key.split(".")[0] - lora_up_weight = value + else: + continue + + # find corresponding lora_up and alpha + block_up_name = block_down_name + lora_up_weight = lora_sd.get(block_up_name + '.lora_up.' + weight_name, None) + lora_alpha = lora_sd.get(block_down_name + '.alpha', None) weights_loaded = (lora_down_weight is not None and lora_up_weight is not None) - if (block_down_name == block_up_name) and weights_loaded: + if weights_loaded: conv2d = (len(lora_down_weight.size()) == 4) + if lora_alpha is None: + scale = 1.0 + else: + scale = lora_alpha/lora_down_weight.size()[0] if conv2d: full_weight_matrix = merge_conv(lora_down_weight, lora_up_weight, device) @@ -311,7 +321,7 @@ def resize(args): save_to_file(args.save_to, state_dict, state_dict, save_dtype, metadata) -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--save_precision", type=str, default=None, @@ -329,7 +339,12 @@ if __name__ == '__main__': help="Specify dynamic resizing method, --new_rank is used as a hard limit for max rank") parser.add_argument("--dynamic_param", type=float, default=None, help="Specify target for dynamic reduction") - + + return parser + + +if __name__ == '__main__': + parser = setup_parser() args = parser.parse_args() resize(args) diff --git a/networks/svd_merge_lora.py b/networks/svd_merge_lora.py 
index 73228769..9d17efba 100644 --- a/networks/svd_merge_lora.py +++ b/networks/svd_merge_lora.py @@ -164,7 +164,7 @@ def merge(args): save_to_file(args.save_to, state_dict, save_dtype) -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--save_precision", type=str, default=None, choices=[None, "float", "fp16", "bf16"], help="precision in saving, same to merging if omitted / 保存時に精度を変更して保存する、省略時はマージ時の精度と同じ") @@ -182,5 +182,11 @@ if __name__ == '__main__': help="Specify rank of output LoRA for Conv2d 3x3, None for same as new_rank / 出力するConv2D 3x3 LoRAのrank (dim)、Noneでnew_rankと同じ") parser.add_argument("--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() merge(args) diff --git a/tools/canny.py b/tools/canny.py index 2f01bbf9..5e080689 100644 --- a/tools/canny.py +++ b/tools/canny.py @@ -13,12 +13,18 @@ def canny(args): print("done!") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--input", type=str, default=None, help="input path") parser.add_argument("--output", type=str, default=None, help="output path") parser.add_argument("--thres1", type=int, default=32, help="thres1") parser.add_argument("--thres2", type=int, default=224, help="thres2") + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() canny(args) diff --git a/tools/convert_diffusers20_original_sd.py b/tools/convert_diffusers20_original_sd.py index 6c142848..7c7cc1c5 100644 --- a/tools/convert_diffusers20_original_sd.py +++ b/tools/convert_diffusers20_original_sd.py @@ -61,7 +61,7 @@ def convert(args): print(f"model saved.") -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() 
parser.add_argument("--v1", action='store_true', help='load v1.x model (v1 or v2 is required to load checkpoint) / 1.xのモデルを読み込む') @@ -84,6 +84,11 @@ if __name__ == '__main__': help="model to load: checkpoint file or Diffusers model's directory / 読み込むモデル、checkpointかDiffusers形式モデルのディレクトリ") parser.add_argument("model_to_save", type=str, default=None, help="model to save: checkpoint (with extension) or Diffusers model's directory (without extension) / 変換後のモデル、拡張子がある場合はcheckpoint、ない場合はDiffusesモデルとして保存") + return parser + + +if __name__ == '__main__': + parser = setup_parser() args = parser.parse_args() convert(args) diff --git a/tools/detect_face_rotate.py b/tools/detect_face_rotate.py index 4d5e58d4..68dec6ca 100644 --- a/tools/detect_face_rotate.py +++ b/tools/detect_face_rotate.py @@ -214,7 +214,7 @@ def process(args): buf.tofile(f) -if __name__ == '__main__': +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() parser.add_argument("--src_dir", type=str, help="directory to load images / 画像を読み込むディレクトリ") parser.add_argument("--dst_dir", type=str, help="directory to save images / 画像を保存するディレクトリ") @@ -234,6 +234,13 @@ if __name__ == '__main__': parser.add_argument("--multiple_faces", action="store_true", help="output each faces / 複数の顔が見つかった場合、それぞれを切り出す") parser.add_argument("--debug", action="store_true", help="render rect for face / 処理後画像の顔位置に矩形を描画します") + + return parser + + +if __name__ == '__main__': + parser = setup_parser() + args = parser.parse_args() process(args) diff --git a/tools/resize_images_to_resolution.py b/tools/resize_images_to_resolution.py index c98cc889..2d3224c4 100644 --- a/tools/resize_images_to_resolution.py +++ b/tools/resize_images_to_resolution.py @@ -98,7 +98,7 @@ def resize_images(src_img_folder, dst_img_folder, max_resolution="512x512", divi shutil.copy(os.path.join(src_img_folder, asoc_file), os.path.join(dst_img_folder, new_asoc_file)) -def main(): +def setup_parser() -> argparse.ArgumentParser: parser = 
argparse.ArgumentParser( description='Resize images in a folder to a specified max resolution(s) / 指定されたフォルダ内の画像を指定した最大画像サイズ(面積)以下にアスペクト比を維持したままリサイズします') parser.add_argument('src_img_folder', type=str, help='Source folder containing the images / 元画像のフォルダ') @@ -113,6 +113,12 @@ def main(): parser.add_argument('--copy_associated_files', action='store_true', help='Copy files with same base name to images (captions etc) / 画像と同じファイル名(拡張子を除く)のファイルもコピーする') + return parser + + +def main(): + parser = setup_parser() + args = parser.parse_args() resize_images(args.src_img_folder, args.dst_img_folder, args.max_resolution, args.divisible_by, args.interpolation, args.save_as_png, args.copy_associated_files) diff --git a/train_db.py b/train_db.py index 81aeda19..527f8e9b 100644 --- a/train_db.py +++ b/train_db.py @@ -114,7 +114,7 @@ def train(args): vae.requires_grad_(False) vae.eval() with torch.no_grad(): - train_dataset_group.cache_latents(vae) + train_dataset_group.cache_latents(vae, args.vae_batch_size) vae.to("cpu") if torch.cuda.is_available(): torch.cuda.empty_cache() @@ -159,7 +159,7 @@ def train(args): # 学習ステップ数を計算する if args.max_train_epochs is not None: - args.max_train_steps = args.max_train_epochs * len(train_dataloader) + args.max_train_steps = args.max_train_epochs * math.ceil(len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps) print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") if args.stop_text_encoder_training is None: @@ -381,7 +381,7 @@ def train(args): print("model saved.") -if __name__ == "__main__": +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() train_util.add_sd_models_arguments(parser) @@ -403,6 +403,12 @@ if __name__ == "__main__": help="steps to stop text encoder training, -1 for no training / Text Encoderの学習を止めるステップ数、-1で最初から学習しない", ) + return parser + + +if __name__ == "__main__": + parser = setup_parser() + args = parser.parse_args() args = train_util.read_config_from_file(args, parser) diff --git a/train_network.py b/train_network.py index 5cb08f15..dce70618 100644 --- a/train_network.py +++ b/train_network.py @@ -139,7 +139,7 @@ def train(args): vae.requires_grad_(False) vae.eval() with torch.no_grad(): - train_dataset_group.cache_latents(vae) + train_dataset_group.cache_latents(vae, args.vae_batch_size) vae.to("cpu") if torch.cuda.is_available(): torch.cuda.empty_cache() @@ -196,7 +196,7 @@ def train(args): # 学習ステップ数を計算する if args.max_train_epochs is not None: - args.max_train_steps = args.max_train_epochs * math.ceil(len(train_dataloader) / accelerator.num_processes) + args.max_train_steps = args.max_train_epochs * math.ceil(len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps) if is_main_process: print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") @@ -654,7 +654,7 @@ def train(args): print("model saved.") -if __name__ == "__main__": +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() train_util.add_sd_models_arguments(parser) @@ -697,6 +697,12 @@ if __name__ == "__main__": "--training_comment", type=str, default=None, help="arbitrary comment string stored in metadata / メタデータに記録する任意のコメント文字列" ) + return parser + + +if __name__ == "__main__": + parser = setup_parser() + args = parser.parse_args() args = train_util.read_config_from_file(args, parser) diff --git a/train_textual_inversion.py b/train_textual_inversion.py index e4ab7b5c..85f0d57c 100644 --- a/train_textual_inversion.py +++ b/train_textual_inversion.py @@ -228,7 +228,7 @@ def train(args): vae.requires_grad_(False) vae.eval() with torch.no_grad(): - train_dataset_group.cache_latents(vae) + train_dataset_group.cache_latents(vae, args.vae_batch_size) vae.to("cpu") if torch.cuda.is_available(): torch.cuda.empty_cache() @@ -257,7 +257,7 @@ def train(args): # 学習ステップ数を計算する if args.max_train_epochs is not None: - args.max_train_steps = args.max_train_epochs * len(train_dataloader) + args.max_train_steps = args.max_train_epochs * math.ceil(len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps) print(f"override steps. 
steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する @@ -526,7 +526,7 @@ def load_weights(file): return emb -if __name__ == "__main__": +def setup_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser() train_util.add_sd_models_arguments(parser) @@ -565,6 +565,12 @@ if __name__ == "__main__": help="ignore caption and use default templates for stype / キャプションは使わずデフォルトのスタイル用テンプレートで学習する", ) + return parser + + +if __name__ == "__main__": + parser = setup_parser() + args = parser.parse_args() args = train_util.read_config_from_file(args, parser)