update README and clean up code for schedulefree optimizer

Kohya S
2024-12-01 22:00:44 +09:00
parent 14c9ba925f
commit 1dc873d9b4
2 changed files with 6 additions and 5 deletions

README.md

@@ -16,9 +16,11 @@ The command to install PyTorch is as follows:
 1 Dec, 2024:
-- Pseudo Huber loss is now available for FLUX.1 and SD3.5 training. See [#1808](https://github.com/kohya-ss/sd-scripts/pull/1808) for details. Thanks to recris!
+- Pseudo Huber loss is now available for FLUX.1 and SD3.5 training. See PR [#1808](https://github.com/kohya-ss/sd-scripts/pull/1808) for details. Thanks to recris!
 - Specify `--loss_type huber` or `--loss_type smooth_l1` to use it. `--huber_c` and `--huber_scale` are also available.
+- [Prodigy + ScheduleFree](https://github.com/LoganBooker/prodigy-plus-schedule-free) is supported. See PR [#1811](https://github.com/kohya-ss/sd-scripts/pull/1811) for details. Thanks to rockerBOO!
 
 Nov 14, 2024:
 - Improved the implementation of block swap and made it available for both FLUX.1 and SD3 LoRA training. See [FLUX.1 LoRA training](#flux1-lora-training) etc. for how to use the new options. Training is possible with about 8-10GB of VRAM.
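For reference, the pseudo-Huber loss named in the README hunk above behaves like L2 for small residuals and like L1 for large ones. A minimal sketch of the standard formulation (illustrative only; the exact element-wise scaling and the handling of `--huber_c`/`--huber_scale` in PR #1808 may differ):

```python
import torch

def pseudo_huber_loss(model_pred: torch.Tensor, target: torch.Tensor, huber_c: float = 0.1) -> torch.Tensor:
    # Standard pseudo-Huber: quadratic near zero, linear in the tails.
    # huber_c sets the transition point; smaller values damp outliers harder.
    # Illustrative only -- not necessarily the exact formulation in PR #1808.
    diff = model_pred - target
    return huber_c**2 * (torch.sqrt(1.0 + (diff / huber_c) ** 2) - 1.0)
```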

library/train_util.py

@@ -4609,7 +4609,7 @@ def resume_from_local_or_hf_if_specified(accelerator, args):
 def get_optimizer(args, trainable_params):
     # "Optimizer to use: AdamW, AdamW8bit, Lion, SGDNesterov, SGDNesterov8bit, PagedAdamW, PagedAdamW8bit, PagedAdamW32bit, Lion8bit, PagedLion8bit, AdEMAMix8bit, PagedAdEMAMix8bit, DAdaptation(DAdaptAdamPreprint), DAdaptAdaGrad, DAdaptAdam, DAdaptAdan, DAdaptAdanIP, DAdaptLion, DAdaptSGD, Adafactor"
     optimizer_type = args.optimizer_type
     if args.use_8bit_adam:
         assert (
@@ -4883,7 +4883,6 @@ def get_optimizer(args, trainable_params):
         optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
 
     elif optimizer_type.endswith("schedulefree".lower()):
-        should_train_optimizer = True
         try:
             import schedulefree as sf
         except ImportError:
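The `try`/`except` above guards the optional dependency. A minimal sketch of how a schedule-free optimizer is typically constructed once the import succeeds (`AdamWScheduleFree` is one of the classes the `schedulefree` package exports; the repo's actual kwargs handling is not shown here):

```python
import torch
import schedulefree as sf

model = torch.nn.Linear(4, 4)  # stand-in for the actual trainable params

# Schedule-free optimizers replace the LR scheduler entirely:
# no separate scheduler object is created for them.
optimizer = sf.AdamWScheduleFree(model.parameters(), lr=1e-3)
```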
@@ -5000,8 +4999,8 @@ def get_optimizer(args, trainable_params):
     optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
     optimizer_args = ",".join([f"{k}={v}" for k, v in optimizer_kwargs.items()])
 
-    if hasattr(optimizer, 'train') and callable(optimizer.train):
-        # make optimizer as train mode: we don't need to call train again, because eval will not be called in training loop
+    if hasattr(optimizer, "train") and callable(optimizer.train):
+        # put the optimizer in train mode before training; schedule-free optimizers are switched to eval mode for sampling and saving
         optimizer.train()
     return optimizer_name, optimizer_args, optimizer
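The duck-typed `hasattr` check in the last hunk matters because schedule-free optimizers must be in train mode during optimization steps and in eval mode whenever the weights are read for sampling or checkpointing, while plain optimizers such as AdamW have no such methods. A sketch of the calling pattern, with hypothetical helper names (not the repo's actual functions):

```python
def optimizer_train(optimizer):
    # No-op for plain optimizers; schedule-free optimizers
    # expose train()/eval() and need the toggle.
    if hasattr(optimizer, "train") and callable(optimizer.train):
        optimizer.train()

def optimizer_eval(optimizer):
    if hasattr(optimizer, "eval") and callable(optimizer.eval):
        optimizer.eval()

# Around sampling or saving in the training loop:
# optimizer_eval(optimizer)   # weights averaged for inference/saving
# ... generate samples / save checkpoint ...
# optimizer_train(optimizer)  # back to train mode before the next step
```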