Mirror of https://github.com/kohya-ss/sd-scripts.git
Allow unknown schedule-free optimizers to continue to module loader
@@ -4600,7 +4600,7 @@ def resume_from_local_or_hf_if_specified(accelerator, args):
 
 def get_optimizer(args, trainable_params):
     # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, PagedAdamW, PagedAdamW8bit, PagedAdamW32bit, Lion8bit, PagedLion8bit, AdEMAMix8bit, PagedAdEMAMix8bit, DAdaptation(DAdaptAdamPreprint), DAdaptAdaGrad, DAdaptAdam, DAdaptAdan, DAdaptAdanIP, DAdaptLion, DAdaptSGD, Adafactor"
 
     optimizer_type = args.optimizer_type
     if args.use_8bit_adam:
         assert (
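For orientation, get_optimizer resolves args.optimizer_type by case-insensitive name matching against the built-in families listed in the comment above, and only falls back to a generic loader when no name matches. A minimal, hypothetical sketch of that dispatch shape (pick_optimizer_class and the two names it handles are illustrative only; the real function covers many more optimizers and their keyword arguments):

import torch

def pick_optimizer_class(optimizer_type: str):
    # Hypothetical, heavily simplified dispatcher sketch.
    optimizer_type = optimizer_type.lower()
    if optimizer_type == "adamw":
        return torch.optim.AdamW
    if optimizer_type == "radamschedulefree":
        import schedulefree as sf  # optional dependency
        return sf.RAdamScheduleFree
    return None  # unknown name: let the generic module loader try to resolve it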
@@ -4874,6 +4874,7 @@ def get_optimizer(args, trainable_params):
         optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
 
     elif optimizer_type.endswith("schedulefree".lower()):
+        should_train_optimizer = True
         try:
             import schedulefree as sf
         except ImportError:
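The added should_train_optimizer flag reflects how schedule-free optimizers are meant to be used: they keep separate training and evaluation parameter states and must be put into train mode before stepping. A minimal standalone sketch, assuming the schedulefree package is installed (the model and loop here are placeholders):

import torch
import schedulefree as sf

model = torch.nn.Linear(4, 1)
optimizer = sf.AdamWScheduleFree(model.parameters(), lr=1e-3)

optimizer.train()  # switch optimizer state to training mode before any step()
for _ in range(10):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
optimizer.eval()  # switch to the averaged weights before validation or saving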
@@ -4885,10 +4886,10 @@ def get_optimizer(args, trainable_params):
             optimizer_class = sf.SGDScheduleFree
             logger.info(f"use SGDScheduleFree optimizer | {optimizer_kwargs}")
         else:
-            raise ValueError(f"Unknown optimizer type: {optimizer_type}")
-        optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
-        # make optimizer as train mode: we don't need to call train again, because eval will not be called in training loop
-        optimizer.train()
+            optimizer_class = None
+
+        if optimizer_class is not None:
+            optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
 
     if optimizer is None:
         # 任意のoptimizerを使う
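With this hunk, an unrecognized name ending in "schedulefree" no longer raises immediately; optimizer_class is left as None, no optimizer is constructed, and control falls through to the existing "if optimizer is None:" branch, which loads an arbitrary optimizer from a fully qualified module path. An illustrative sketch of that fallback idea (load_optimizer_by_path is a made-up helper, not the exact sd-scripts code):

import importlib

def load_optimizer_by_path(optimizer_type, trainable_params, lr, **optimizer_kwargs):
    # Resolve "some_module.SomeOptimizer" to a class and instantiate it.
    module_name, _, class_name = optimizer_type.rpartition(".")
    optimizer_module = importlib.import_module(module_name)
    optimizer_class = getattr(optimizer_module, class_name)
    return optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)

# e.g. load_optimizer_by_path("schedulefree.RAdamScheduleFree", params, lr=1e-3)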
@@ -4990,6 +4991,10 @@ def get_optimizer(args, trainable_params):
     optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
     optimizer_args = ",".join([f"{k}={v}" for k, v in optimizer_kwargs.items()])
 
+    if hasattr(optimizer, 'train') and callable(optimizer.train):
+        # make optimizer as train mode: we don't need to call train again, because eval will not be called in training loop
+        optimizer.train()
+
     return optimizer_name, optimizer_args, optimizer
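The final hunk replaces the unconditional optimizer.train() call that used to sit inside the schedule-free branch with a duck-typed guard at the end of get_optimizer, so optimizers resolved through the generic module loader are also switched into train mode when they support it, while plain torch.optim optimizers are left untouched. A small sketch of the guard in isolation (maybe_set_train_mode is a hypothetical name used for illustration):

import torch

def maybe_set_train_mode(optimizer):
    # Schedule-free style optimizers expose train()/eval(); torch.optim classes do not.
    if hasattr(optimizer, "train") and callable(optimizer.train):
        optimizer.train()
    return optimizer

params = [torch.nn.Parameter(torch.zeros(3))]
maybe_set_train_mode(torch.optim.AdamW(params, lr=1e-3))  # no-op for plain AdamW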