diff --git a/README-ja.md b/README-ja.md
index adf44d2f..064464c0 100644
--- a/README-ja.md
+++ b/README-ja.md
@@ -64,6 +64,12 @@ cp .\bitsandbytes_windows\main.py .\venv\Lib\site-packages\bitsandbytes\cuda_set
 accelerate config
 ```
+
+In the command prompt, it is as follows.
diff --git a/README.md b/README.md
index 62551f27..a1adcb27 100644
--- a/README.md
+++ b/README.md
@@ -124,6 +124,24 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser
 ## Change History
 
+- 19 Feb. 2023, 2023/2/19:
+  - Add ``--use_lion_optimizer`` to each training script to use the [Lion optimizer](https://github.com/lucidrains/lion-pytorch).
+    - Please install it beforehand with ``pip install lion-pytorch`` (it is not in ``requirements.txt`` currently).
+  - Add ``--lowram`` option to ``train_network.py``. Load models to VRAM instead of RAM (for machines which have more VRAM than RAM, such as Colab and Kaggle). Thanks to Isotr0py!
+    - The default behavior (without ``--lowram``) has reverted to the same as before 14 Feb.
+  - Fixed the git commit hash to be recorded correctly regardless of the working directory. Thanks to vladmandic!
+
+- 16 Feb. 2023, 2023/2/16:
+  - Noise offset is recorded in the metadata. Thanks to space-nuko!
+  - Show the moving average loss in ``train_network.py`` and ``train_db.py``, so that the displayed loss no longer jumps at the start of each epoch. Thanks to shirayu!
 - 14 Feb. 2023, 2023/2/14:
   - Add support for multi-GPU training in ``train_network.py``. Thanks to Isotr0py!
   - Add ``--verbose`` option to ``resize_lora.py``. For details, see [this PR](https://github.com/kohya-ss/sd-scripts/pull/179). Thanks to mgz-dev!
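Every training script gains the same ``elif`` branch for optimizer selection, as the hunks below show. A minimal standalone sketch of that logic (assuming ``lion-pytorch`` is installed via ``pip install lion-pytorch``; the helper name ``get_optimizer_class`` and the toy model are illustrative, not part of the patch):

```python
import torch

def get_optimizer_class(use_8bit_adam: bool, use_lion_optimizer: bool):
    # Mirrors the selection order below: 8-bit Adam first, then Lion,
    # falling back to plain AdamW.
    if use_8bit_adam:
        import bitsandbytes as bnb  # raises ImportError when not installed
        return bnb.optim.AdamW8bit
    if use_lion_optimizer:
        import lion_pytorch  # pip install lion-pytorch
        return lion_pytorch.Lion
    return torch.optim.AdamW

# Lion is instantiated like any other torch optimizer.
model = torch.nn.Linear(8, 8)
optimizer = get_optimizer_class(False, True)(model.parameters(), lr=1e-4)
```

Note that ``--use_8bit_adam`` takes precedence when both flags are given, since the new branch is an ``elif``.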
diff --git a/fine_tune.py b/fine_tune.py
index 3ba63063..13241bc6 100644
--- a/fine_tune.py
+++ b/fine_tune.py
@@ -158,6 +158,13 @@ def train(args):
       raise ImportError("No bitsandbytes / bitsandbytesがインストールされていないようです")
     print("use 8-bit Adam optimizer")
     optimizer_class = bnb.optim.AdamW8bit
+  elif args.use_lion_optimizer:
+    try:
+      import lion_pytorch
+    except ImportError:
+      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
+    print("use Lion optimizer")
+    optimizer_class = lion_pytorch.Lion
   else:
     optimizer_class = torch.optim.AdamW
diff --git a/library/train_util.py b/library/train_util.py
index 415f9b70..63868f98 100644
--- a/library/train_util.py
+++ b/library/train_util.py
@@ -1103,7 +1103,7 @@ def addnet_hash_safetensors(b):
 def get_git_revision_hash() -> str:
   try:
-    return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('ascii').strip()
+    return subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=os.path.dirname(__file__)).decode('ascii').strip()
   except:
     return "(unknown)"
@@ -1389,6 +1389,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
                       help="max token length of text encoder (default is 75; 150 or 225 can be specified) / text encoderのトークンの最大長(未指定で75、150または225が指定可)")
   parser.add_argument("--use_8bit_adam", action="store_true",
                       help="use 8bit Adam optimizer (requires bitsandbytes) / 8bit Adamオプティマイザを使う(bitsandbytesのインストールが必要)")
+  parser.add_argument("--use_lion_optimizer", action="store_true",
+                      help="use Lion optimizer (requires lion-pytorch) / Lionオプティマイザを使う(lion-pytorchのインストールが必要)")
   parser.add_argument("--mem_eff_attn", action="store_true",
                       help="use memory efficient attention for CrossAttention / CrossAttentionに省メモリ版attentionを使う")
   parser.add_argument("--xformers", action="store_true",
@@ -1423,6 +1425,8 @@ def add_training_arguments(parser: argparse.ArgumentParser, support_dreambooth:
                       help="Number of steps for the warmup in the lr scheduler (default is 0) / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)")
   parser.add_argument("--noise_offset", type=float, default=None,
                       help="enable noise offset with this value (if enabled, around 0.1 is recommended) / Noise offsetを有効にしてこの値を設定する(有効にする場合は0.1程度を推奨)")
+  parser.add_argument("--lowram", action="store_true",
+                      help="enable low RAM optimization, e.g. load models to VRAM instead of RAM (for machines which have bigger VRAM than RAM such as Colab and Kaggle) / メインメモリが少ない環境向け最適化を有効にする。たとえばVRAMにモデルを読み込むなど(ColabやKaggleなどRAMに比べてVRAMが多い環境向け)")
 
   if support_dreambooth:
     # DreamBooth training
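The ``train_util.py`` change above passes ``cwd`` so that ``git rev-parse HEAD`` runs inside the sd-scripts checkout containing the file, instead of whatever directory the user launched from; previously, invoking the scripts from another repository recorded that repository's hash (or ``(unknown)``). A standalone rendering of the fixed function; the bare ``except`` of the original is narrowed to ``Exception`` here for clarity:

```python
import os
import subprocess

def get_git_revision_hash() -> str:
    try:
        # cwd pins the lookup to this file's repository; without it, git
        # resolves HEAD against os.getcwd().
        return subprocess.check_output(
            ["git", "rev-parse", "HEAD"],
            cwd=os.path.dirname(__file__),
        ).decode("ascii").strip()
    except Exception:
        return "(unknown)"
```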
diff --git a/train_db.py b/train_db.py
index 4a50dc94..1903c4c4 100644
--- a/train_db.py
+++ b/train_db.py
@@ -124,6 +124,13 @@ def train(args):
       raise ImportError("No bitsandbytes / bitsandbytesがインストールされていないようです")
     print("use 8-bit Adam optimizer")
     optimizer_class = bnb.optim.AdamW8bit
+  elif args.use_lion_optimizer:
+    try:
+      import lion_pytorch
+    except ImportError:
+      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
+    print("use Lion optimizer")
+    optimizer_class = lion_pytorch.Lion
   else:
     optimizer_class = torch.optim.AdamW
@@ -206,6 +213,8 @@ def train(args):
   if accelerator.is_main_process:
     accelerator.init_trackers("dreambooth")
 
+  loss_list = []
+  loss_total = 0.0
   for epoch in range(num_train_epochs):
     print(f"epoch {epoch+1}/{num_train_epochs}")
     train_dataset.set_current_epoch(epoch + 1)
@@ -216,7 +225,6 @@ def train(args):
     if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
       text_encoder.train()
 
-    loss_total = 0
     for step, batch in enumerate(train_dataloader):
       # stop Text Encoder training at the specified number of steps
       if global_step == args.stop_text_encoder_training:
@@ -294,8 +302,13 @@ def train(args):
         logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]}
         accelerator.log(logs, step=global_step)
 
+      if epoch == 0:
+        loss_list.append(current_loss)
+      else:
+        loss_total -= loss_list[step]
+        loss_list[step] = current_loss
       loss_total += current_loss
-      avr_loss = loss_total / (step+1)
+      avr_loss = loss_total / len(loss_list)
       logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
       progress_bar.set_postfix(**logs)
@@ -303,7 +316,7 @@ def train(args):
         break
 
     if args.logging_dir is not None:
-      logs = {"epoch_loss": loss_total / len(train_dataloader)}
+      logs = {"loss/epoch": loss_total / len(loss_list)}
       accelerator.log(logs, step=epoch+1)
 
   accelerator.wait_for_everyone()
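The ``loss_list``/``loss_total`` bookkeeping above (and the identical change in ``train_network.py`` below) turns the displayed loss into a moving average over the last full epoch of steps: the window grows during epoch 0, and afterwards each per-step slot is overwritten in place. A standalone sketch of the same arithmetic; it assumes the number of steps per epoch stays constant, which holds here since the dataloader is rebuilt from the same dataset:

```python
loss_list = []
loss_total = 0.0

def moving_average(epoch: int, step: int, current_loss: float) -> float:
    """Average of the most recent len(loss_list) step losses."""
    global loss_total
    if epoch == 0:
        loss_list.append(current_loss)   # first epoch: grow the window
    else:
        loss_total -= loss_list[step]    # later epochs: retire last epoch's value
        loss_list[step] = current_loss
    loss_total += current_loss
    return loss_total / len(loss_list)
```

Because the denominator is ``len(loss_list)`` rather than ``step + 1``, the first steps of every epoch after the first still average over a whole epoch, which is what removes the jump at epoch boundaries; the ``loss/epoch`` log now uses the same window.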
diff --git a/train_network.py b/train_network.py
index 1b8046d2..b41a52a9 100644
--- a/train_network.py
+++ b/train_network.py
@@ -156,9 +156,12 @@ def train(args):
   # load the models
   text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype)
-  # unnecessary, but work on low-ram device
-  text_encoder.to("cuda")
-  unet.to("cuda")
+
+  # work on low-ram device
+  if args.lowram:
+    text_encoder.to("cuda")
+    unet.to("cuda")
+
   # incorporate xformers or memory efficient attention into the model
   train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
@@ -213,6 +216,13 @@ def train(args):
       raise ImportError("No bitsandbytes / bitsandbytesがインストールされていないようです")
     print("use 8-bit Adam optimizer")
     optimizer_class = bnb.optim.AdamW8bit
+  elif args.use_lion_optimizer:
+    try:
+      import lion_pytorch
+    except ImportError:
+      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
+    print("use Lion optimizer")
+    optimizer_class = lion_pytorch.Lion
   else:
     optimizer_class = torch.optim.AdamW
@@ -353,6 +363,7 @@ def train(args):
       "ss_max_bucket_reso": train_dataset.max_bucket_reso,
       "ss_seed": args.seed,
       "ss_keep_tokens": args.keep_tokens,
+      "ss_noise_offset": args.noise_offset,
       "ss_dataset_dirs": json.dumps(train_dataset.dataset_dirs_info),
       "ss_reg_dataset_dirs": json.dumps(train_dataset.reg_dataset_dirs_info),
       "ss_tag_frequency": json.dumps(train_dataset.tag_frequency),
@@ -392,6 +403,8 @@ def train(args):
   if accelerator.is_main_process:
     accelerator.init_trackers("network_train")
 
+  loss_list = []
+  loss_total = 0.0
   for epoch in range(num_train_epochs):
     print(f"epoch {epoch+1}/{num_train_epochs}")
     train_dataset.set_current_epoch(epoch + 1)
@@ -400,7 +413,6 @@ def train(args):
     network.on_epoch_start(text_encoder, unet)
 
-    loss_total = 0
     for step, batch in enumerate(train_dataloader):
       with accelerator.accumulate(network):
         with torch.no_grad():
@@ -464,8 +476,13 @@ def train(args):
         global_step += 1
 
       current_loss = loss.detach().item()
+      if epoch == 0:
+        loss_list.append(current_loss)
+      else:
+        loss_total -= loss_list[step]
+        loss_list[step] = current_loss
       loss_total += current_loss
-      avr_loss = loss_total / (step+1)
+      avr_loss = loss_total / len(loss_list)
       logs = {"loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
       progress_bar.set_postfix(**logs)
@@ -477,7 +494,7 @@ def train(args):
         break
 
     if args.logging_dir is not None:
-      logs = {"loss/epoch": loss_total / len(train_dataloader)}
+      logs = {"loss/epoch": loss_total / len(loss_list)}
       accelerator.log(logs, step=epoch+1)
 
   accelerator.wait_for_everyone()
diff --git a/train_textual_inversion.py b/train_textual_inversion.py
index 010bd04b..ffec0516 100644
--- a/train_textual_inversion.py
+++ b/train_textual_inversion.py
@@ -207,6 +207,13 @@ def train(args):
       raise ImportError("No bitsandbytes / bitsandbytesがインストールされていないようです")
     print("use 8-bit Adam optimizer")
     optimizer_class = bnb.optim.AdamW8bit
+  elif args.use_lion_optimizer:
+    try:
+      import lion_pytorch
+    except ImportError:
+      raise ImportError("No lion_pytorch / lion_pytorch がインストールされていないようです")
+    print("use Lion optimizer")
+    optimizer_class = lion_pytorch.Lion
   else:
     optimizer_class = torch.optim.AdamW
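With ``ss_noise_offset`` now written into the LoRA metadata by ``train_network.py``, the value can be checked after training. A small sketch of reading it back, assuming the network was saved in safetensors format (the file name is hypothetical; metadata values are stored as strings):

```python
from safetensors import safe_open

# safetensors keeps training metadata as a flat str->str dict in the header.
with safe_open("my_lora.safetensors", framework="pt", device="cpu") as f:
    metadata = f.metadata() or {}

# A missing key (or the string "None") means training ran without a noise offset.
print(metadata.get("ss_noise_offset"))
```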