Mirror of https://github.com/kohya-ss/sd-scripts.git, synced 2026-04-08 22:35:09 +00:00
Support for more Dadaptation (#455)
* Update train_util.py to add DAdaptAdan and DAdaptSGD
* Update train_util.py for DAdaptAdam
* Update train_network.py for DAdapt
* Update train_README-ja.md for DAdapt
* Update train_util.py for DAdapt
* Update train_network.py for DAdaptAdaGrad
* Update train_db.py for DAdapt
* Update fine_tune.py for DAdapt
* Update train_textual_inversion.py for DAdapt
* Update train_textual_inversion_XTI.py for DAdapt
@@ -381,7 +381,7 @@ def train(args):
 current_loss = loss.detach().item()  # this is the mean, so batch size should not matter
 if args.logging_dir is not None:
     logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-    if args.optimizer_type.lower() == "DAdaptation".lower():  # tracking d*lr value
+    if args.optimizer_type.lower() == "DAdaptation".lower() or args.optimizer_type.lower() == "DAdaptAdam".lower() or args.optimizer_type.lower() == "DAdaptAdaGrad".lower() or args.optimizer_type.lower() == "DAdaptAdan".lower() or args.optimizer_type.lower() == "DAdaptSGD".lower():  # tracking d*lr value
         logs["lr/d*lr"] = (
             lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
         )
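The same five-way string comparison is repeated in every training script touched by this commit (see the hunks below). As a hypothetical simplification that is not part of this commit, the guard could be a single membership test over lowercase optimizer names:

    # Hypothetical refactor (not in this commit): collapse the chained .lower() comparisons
    # into one membership test over the D-Adaptation optimizer names.
    DADAPT_OPTIMIZER_TYPES = {"dadaptation", "dadaptadam", "dadaptadagrad", "dadaptadan", "dadaptsgd"}

    def uses_dadaptation(optimizer_type: str) -> bool:
        return optimizer_type.lower() in DADAPT_OPTIMIZER_TYPES

    # The logging guard above would then read:
    # if uses_dadaptation(args.optimizer_type):  # tracking d*lr value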
@@ -1885,7 +1885,7 @@ def add_optimizer_arguments(parser: argparse.ArgumentParser):
         "--optimizer_type",
         type=str,
         default="",
-        help="Optimizer to use / オプティマイザの種類: AdamW (default), AdamW8bit, Lion, Lion8bit,SGDNesterov, SGDNesterov8bit, DAdaptation, AdaFactor",
+        help="Optimizer to use / オプティマイザの種類: AdamW (default), AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation(DAdaptAdam), DAdaptAdaGrad, DAdaptAdan, DAdaptSGD, AdaFactor",
     )

     # backward compatibility
@@ -2467,7 +2467,7 @@ def resume_from_local_or_hf_if_specified(accelerator, args):


 def get_optimizer(args, trainable_params):
-    # "Optimizer to use: AdamW, AdamW8bit, Lion, Lion8bit, SGDNesterov, SGDNesterov8bit, DAdaptation, Adafactor"
+    # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, DAdaptation, DAdaptation(DAdaptAdam), DAdaptAdaGrad, DAdaptAdan, DAdaptSGD, Adafactor"

     optimizer_type = args.optimizer_type
     if args.use_8bit_adam:
@@ -2570,7 +2570,7 @@ def get_optimizer(args, trainable_params):
         optimizer_class = torch.optim.SGD
         optimizer = optimizer_class(trainable_params, lr=lr, nesterov=True, **optimizer_kwargs)

-    elif optimizer_type == "DAdaptation".lower():
+    elif optimizer_type == "DAdaptation".lower() or optimizer_type == "DAdaptAdam".lower():
         try:
             import dadaptation
         except ImportError:
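Each of these branches (and the new ones added in the next hunk) ultimately just instantiates the corresponding class from the dadaptation package. A minimal usage sketch, assuming the dadaptation package is installed; the model and tensor shapes here are illustrative stand-ins, not repository code:

    # Minimal sketch (assumptions: `dadaptation` installed, `model` stands in for the real network).
    # D-Adaptation estimates the step size itself, so lr is normally left at 1.0.
    import torch
    import dadaptation

    model = torch.nn.Linear(4, 4)
    optimizer = dadaptation.DAdaptAdam(model.parameters(), lr=1.0)

    loss = model(torch.randn(2, 4)).sum()
    loss.backward()
    optimizer.step()  # the adapted step size is exposed as optimizer.param_groups[0]["d"]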
@@ -2599,6 +2599,93 @@ def get_optimizer(args, trainable_params):
         optimizer_class = dadaptation.DAdaptAdam
         optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)

+    elif optimizer_type == "DAdaptAdaGrad".lower():
+        try:
+            import dadaptation
+        except ImportError:
+            raise ImportError("No dadaptation / dadaptation がインストールされていないようです")
+        print(f"use D-Adaptation AdaGrad optimizer | {optimizer_kwargs}")
+
+        actual_lr = lr
+        lr_count = 1
+        if type(trainable_params) == list and type(trainable_params[0]) == dict:
+            lrs = set()
+            actual_lr = trainable_params[0].get("lr", actual_lr)
+            for group in trainable_params:
+                lrs.add(group.get("lr", actual_lr))
+            lr_count = len(lrs)
+
+        if actual_lr <= 0.1:
+            print(
+                f"learning rate is too low. If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: lr={actual_lr}"
+            )
+            print("recommend option: lr=1.0 / 推奨は1.0です")
+        if lr_count > 1:
+            print(
+                f"when multiple learning rates are specified with dadaptation (e.g. for Text Encoder and U-Net), only the first one will take effect / D-Adaptationで複数の学習率を指定した場合(Text EncoderとU-Netなど)、最初の学習率のみが有効になります: lr={actual_lr}"
+            )
+
+        optimizer_class = dadaptation.DAdaptAdaGrad
+        optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
+
+    elif optimizer_type == "DAdaptAdan".lower():
+        try:
+            import dadaptation
+        except ImportError:
+            raise ImportError("No dadaptation / dadaptation がインストールされていないようです")
+        print(f"use D-Adaptation Adan optimizer | {optimizer_kwargs}")
+
+        actual_lr = lr
+        lr_count = 1
+        if type(trainable_params) == list and type(trainable_params[0]) == dict:
+            lrs = set()
+            actual_lr = trainable_params[0].get("lr", actual_lr)
+            for group in trainable_params:
+                lrs.add(group.get("lr", actual_lr))
+            lr_count = len(lrs)
+
+        if actual_lr <= 0.1:
+            print(
+                f"learning rate is too low. If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: lr={actual_lr}"
+            )
+            print("recommend option: lr=1.0 / 推奨は1.0です")
+        if lr_count > 1:
+            print(
+                f"when multiple learning rates are specified with dadaptation (e.g. for Text Encoder and U-Net), only the first one will take effect / D-Adaptationで複数の学習率を指定した場合(Text EncoderとU-Netなど)、最初の学習率のみが有効になります: lr={actual_lr}"
+            )
+
+        optimizer_class = dadaptation.DAdaptAdan
+        optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
+
+    elif optimizer_type == "DAdaptSGD".lower():
+        try:
+            import dadaptation
+        except ImportError:
+            raise ImportError("No dadaptation / dadaptation がインストールされていないようです")
+        print(f"use D-Adaptation SGD optimizer | {optimizer_kwargs}")
+
+        actual_lr = lr
+        lr_count = 1
+        if type(trainable_params) == list and type(trainable_params[0]) == dict:
+            lrs = set()
+            actual_lr = trainable_params[0].get("lr", actual_lr)
+            for group in trainable_params:
+                lrs.add(group.get("lr", actual_lr))
+            lr_count = len(lrs)
+
+        if actual_lr <= 0.1:
+            print(
+                f"learning rate is too low. If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: lr={actual_lr}"
+            )
+            print("recommend option: lr=1.0 / 推奨は1.0です")
+        if lr_count > 1:
+            print(
+                f"when multiple learning rates are specified with dadaptation (e.g. for Text Encoder and U-Net), only the first one will take effect / D-Adaptationで複数の学習率を指定した場合(Text EncoderとU-Netなど)、最初の学習率のみが有効になります: lr={actual_lr}"
+            )
+
+        optimizer_class = dadaptation.DAdaptSGD
+        optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
+
     elif optimizer_type == "Adafactor".lower():
         # check the arguments and correct them as appropriate
         if "relative_step" not in optimizer_kwargs:
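The three new branches above repeat the same learning-rate sanity check (warn when lr is left at a small value, warn when multiple per-group learning rates are supplied). A possible helper that factors it out, shown only as a hypothetical refactor of the logic in this commit:

    # Hypothetical helper (not in this commit): the low-lr / multiple-lr warnings
    # duplicated by the DAdaptAdaGrad, DAdaptAdan and DAdaptSGD branches.
    def check_dadaptation_lr(trainable_params, lr):
        actual_lr = lr
        lr_count = 1
        if isinstance(trainable_params, list) and trainable_params and isinstance(trainable_params[0], dict):
            lrs = {group.get("lr", lr) for group in trainable_params}
            actual_lr = trainable_params[0].get("lr", lr)
            lr_count = len(lrs)

        if actual_lr <= 0.1:
            print(f"learning rate is too low. If using dadaptation, set learning rate around 1.0: lr={actual_lr}")
            print("recommend option: lr=1.0")
        if lr_count > 1:
            print(f"when multiple learning rates are specified with dadaptation, only the first one will take effect: lr={actual_lr}")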
@@ -566,7 +566,10 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b
 - Lion8bit : arguments are the same as above
 - SGDNesterov : [torch.optim.SGD](https://pytorch.org/docs/stable/generated/torch.optim.SGD.html), nesterov=True
 - SGDNesterov8bit : arguments are the same as above
-- DAdaptation : https://github.com/facebookresearch/dadaptation
+- DAdaptation(DAdaptAdam) : https://github.com/facebookresearch/dadaptation
+- DAdaptAdaGrad : arguments are the same as above
+- DAdaptAdan : arguments are the same as above
+- DAdaptSGD : arguments are the same as above
 - AdaFactor : [Transformers AdaFactor](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules)
 - Any optimizer

@@ -367,7 +367,7 @@ def train(args):
 current_loss = loss.detach().item()
 if args.logging_dir is not None:
     logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-    if args.optimizer_type.lower() == "DAdaptation".lower():  # tracking d*lr value
+    if args.optimizer_type.lower() == "DAdaptation".lower() or args.optimizer_type.lower() == "DAdaptAdam".lower() or args.optimizer_type.lower() == "DAdaptAdaGrad".lower() or args.optimizer_type.lower() == "DAdaptAdan".lower() or args.optimizer_type.lower() == "DAdaptSGD".lower():  # tracking d*lr value
         logs["lr/d*lr"] = (
             lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
         )
@@ -43,7 +43,7 @@ def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_sche
         logs["lr/textencoder"] = float(lrs[0])
         logs["lr/unet"] = float(lrs[-1])  # may be same to textencoder

-        if args.optimizer_type.lower() == "DAdaptation".lower():  # tracking d*lr value of unet.
+        if args.optimizer_type.lower() == "DAdaptation".lower() or args.optimizer_type.lower() == "DAdaptAdam".lower() or args.optimizer_type.lower() == "DAdaptAdaGrad".lower() or args.optimizer_type.lower() == "DAdaptAdan".lower() or args.optimizer_type.lower() == "DAdaptSGD".lower():  # tracking d*lr value of unet.
             logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]["d"] * lr_scheduler.optimizers[-1].param_groups[0]["lr"]
     else:
         idx = 0
@@ -53,7 +53,7 @@ def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_sche

         for i in range(idx, len(lrs)):
             logs[f"lr/group{i}"] = float(lrs[i])
-            if args.optimizer_type.lower() == "DAdaptation".lower():
+            if args.optimizer_type.lower() == "DAdaptation".lower() or args.optimizer_type.lower() == "DAdaptAdam".lower() or args.optimizer_type.lower() == "DAdaptAdaGrad".lower() or args.optimizer_type.lower() == "DAdaptAdan".lower() or args.optimizer_type.lower() == "DAdaptSGD".lower():
                 logs[f"lr/d*lr/group{i}"] = (
                     lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"]
                 )
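Both generate_step_logs hunks read the adapted step size back out of the optimizer for logging: for D-Adaptation optimizers the effective learning rate of a parameter group is the product d * lr. A standalone sketch of that read-back, with a hypothetical helper name not taken from the repository:

    # Illustrative helper (assumed name): effective learning rates of a D-Adaptation
    # optimizer, one value per parameter group, as logged in the hunks above.
    def dadapt_effective_lrs(optimizer):
        return [group["d"] * group["lr"] for group in optimizer.param_groups]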
@@ -465,7 +465,7 @@ def train(args):
 current_loss = loss.detach().item()
 if args.logging_dir is not None:
     logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-    if args.optimizer_type.lower() == "DAdaptation".lower():  # tracking d*lr value
+    if args.optimizer_type.lower() == "DAdaptation".lower() or args.optimizer_type.lower() == "DAdaptAdam".lower() or args.optimizer_type.lower() == "DAdaptAdaGrad".lower() or args.optimizer_type.lower() == "DAdaptAdan".lower() or args.optimizer_type.lower() == "DAdaptSGD".lower():  # tracking d*lr value
         logs["lr/d*lr"] = (
             lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
         )
@@ -504,7 +504,7 @@ def train(args):
 current_loss = loss.detach().item()
 if args.logging_dir is not None:
     logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-    if args.optimizer_type.lower() == "DAdaptation".lower():  # tracking d*lr value
+    if args.optimizer_type.lower() == "DAdaptation".lower() or args.optimizer_type.lower() == "DAdaptAdam".lower() or args.optimizer_type.lower() == "DAdaptAdaGrad".lower() or args.optimizer_type.lower() == "DAdaptAdan".lower() or args.optimizer_type.lower() == "DAdaptSGD".lower():  # tracking d*lr value
         logs["lr/d*lr"] = (
             lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
         )