fix flux fine tuning to work

kohya-ss
2024-08-17 15:54:32 +09:00
parent 400955d3ea
commit 25f77f6ef0
2 changed files with 6 additions and 4 deletions

README.md

@@ -9,6 +9,10 @@ __Please update PyTorch to 2.4.0. We have tested with `torch==2.4.0` and `torchv
 The command to install PyTorch is as follows:
 `pip3 install torch==2.4.0 torchvision==0.19.0 --index-url https://download.pytorch.org/whl/cu124`
+Aug 17, 2024:
+Added a script `flux_train.py` to train FLUX.1. The script is experimental and not optimized. It needs >28GB VRAM for training.
 Aug 16, 2024:
 Added a script `networks/flux_merge_lora.py` to merge LoRA into FLUX.1 checkpoint. See [Merge LoRA to FLUX.1 checkpoint](#merge-lora-to-flux1-checkpoint) for details.
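
The >28GB figure can be checked up front with plain PyTorch before launching `flux_train.py`. The following is a minimal sketch using only standard CUDA queries; it is not part of this commit or of the repository:

```python
import torch

# Rough pre-flight check for the >28GB VRAM requirement stated above.
if torch.cuda.is_available():
    total_gib = torch.cuda.get_device_properties(0).total_memory / (1024 ** 3)
    print(f"GPU 0 total VRAM: {total_gib:.1f} GiB")
    if total_gib < 28:
        print("Warning: flux_train.py is stated to need more than 28GB of VRAM.")
else:
    print("No CUDA device available.")
```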

flux_train.py

@@ -674,8 +674,6 @@ def train(args):
 # if is_main_process:
 flux = accelerator.unwrap_model(flux)
 clip_l = accelerator.unwrap_model(clip_l)
-clip_g = accelerator.unwrap_model(clip_g)
-if t5xxl is not None:
-    t5xxl = accelerator.unwrap_model(t5xxl)
+t5xxl = accelerator.unwrap_model(t5xxl)
 accelerator.end_training()
@@ -686,7 +684,7 @@ def train(args):
 del accelerator  # delete this here because memory is needed after this point
 if is_main_process:
-    flux_train_utils.save_flux_model_on_train_end(args, save_dtype, epoch, global_step, flux, ae)
+    flux_train_utils.save_flux_model_on_train_end(args, save_dtype, epoch, global_step, flux)
     logger.info("model saved.")
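
Taken together, the two hunks leave the end-of-training flow looking roughly like the sketch below. The `accelerator.unwrap_model` / `accelerator.end_training` calls and the `save_flux_model_on_train_end` call mirror the diff above; the wrapper function, its parameter list, and the `library.flux_train_utils` import path are illustrative assumptions rather than code from this commit:

```python
from accelerate import Accelerator
from library import flux_train_utils  # assumed path of the repo-local module used above


def finish_flux_training(accelerator: Accelerator, args, save_dtype, epoch, global_step,
                         flux, clip_l, t5xxl, is_main_process: bool) -> None:
    # Unwrap only the models FLUX.1 fine tuning actually uses; there is no clip_g.
    flux = accelerator.unwrap_model(flux)
    clip_l = accelerator.unwrap_model(clip_l)
    t5xxl = accelerator.unwrap_model(t5xxl)

    accelerator.end_training()

    # Drop the reference to the accelerator before saving, since saving needs the memory.
    del accelerator

    if is_main_process:
        # The fixed call passes only the FLUX model; the autoencoder is no longer an argument.
        flux_train_utils.save_flux_model_on_train_end(args, save_dtype, epoch, global_step, flux)
        print("model saved.")
```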