mirror of https://github.com/kohya-ss/sd-scripts.git
fix flux fine tuning to work
@@ -9,6 +9,10 @@ __Please update PyTorch to 2.4.0. We have tested with `torch==2.4.0` and `torchv
The command to install PyTorch is as follows:
`pip3 install torch==2.4.0 torchvision==0.19.0 --index-url https://download.pytorch.org/whl/cu124`
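
If it helps to confirm the environment, here is a quick check that the pinned builds are active (the exact `+cu124` suffix depends on the wheel, so treat the expected strings as assumptions):

```python
# Sanity check for the PyTorch install above: print the installed versions
# and whether CUDA is visible. Expected values are assumptions based on the
# pinned wheels (torch 2.4.0 / torchvision 0.19.0, cu124 build).
import torch
import torchvision

print(torch.__version__)          # e.g. "2.4.0+cu124"
print(torchvision.__version__)    # e.g. "0.19.0+cu124"
print(torch.cuda.is_available())  # should be True on a CUDA machine
```
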
Aug 17, 2024:
Added a script `flux_train.py` to train FLUX.1. The script is experimental and not an optimized version. It needs >28GB VRAM for training.
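
Given the >28GB VRAM figure above, a quick way to check whether the GPU is large enough before launching (device index 0 is an assumption):

```python
# Rough VRAM check against the >28GB figure quoted above.
import torch

if torch.cuda.is_available():
    total_gib = torch.cuda.get_device_properties(0).total_memory / (1024 ** 3)
    print(f"GPU 0: {total_gib:.1f} GiB total VRAM")
    if total_gib < 28:
        print("Warning: likely not enough VRAM for full FLUX.1 fine tuning")
else:
    print("No CUDA device visible")
```
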
Aug 16, 2024:
Added a script `networks/flux_merge_lora.py` to merge LoRA into FLUX.1 checkpoint. See [Merge LoRA to FLUX.1 checkpoint](#merge-lora-to-flux1-checkpoint) for details.
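
As a rough sketch of what merging a LoRA into a checkpoint means for a single linear weight (the alpha/rank scaling is the common sd-scripts convention and is an assumption here; the real `networks/flux_merge_lora.py` also handles key mapping and FLUX-specific modules):

```python
import torch

# Merge one LoRA down/up pair into a base linear weight:
#   W_merged = W + ratio * (alpha / rank) * (up @ down)
# Sketch only; not the actual flux_merge_lora.py logic.
def merge_linear(base_w: torch.Tensor, down_w: torch.Tensor, up_w: torch.Tensor,
                 alpha: float, ratio: float = 1.0) -> torch.Tensor:
    rank = down_w.shape[0]                 # down: (rank, in), up: (out, rank)
    delta = up_w.float() @ down_w.float()  # -> (out, in), same shape as base_w
    return (base_w.float() + ratio * (alpha / rank) * delta).to(base_w.dtype)
```
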
@@ -674,9 +674,7 @@ def train(args):
    # if is_main_process:
    flux = accelerator.unwrap_model(flux)
    clip_l = accelerator.unwrap_model(clip_l)
    clip_g = accelerator.unwrap_model(clip_g)
    if t5xxl is not None:
        t5xxl = accelerator.unwrap_model(t5xxl)
        t5xxl = accelerator.unwrap_model(t5xxl)

    accelerator.end_training()
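
The hunk above unwraps the models from Accelerate's wrappers before training ends. A minimal sketch of that pattern (illustration only, not the actual flux_train.py code):

```python
import torch.nn as nn
from accelerate import Accelerator

# Accelerator.prepare() may wrap a module (e.g. in DistributedDataParallel),
# so the bare nn.Module is recovered with unwrap_model() before its
# state_dict is read for saving. nn.Linear stands in for the FLUX model.
accelerator = Accelerator()
model = accelerator.prepare(nn.Linear(8, 8))

# ... training loop would run here ...

model = accelerator.unwrap_model(model)  # back to the plain nn.Module
state_dict = model.state_dict()          # now safe to hand to a save routine
accelerator.end_training()               # finish trackers / process group
```
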
@@ -686,7 +684,7 @@ def train(args):
    del accelerator  # delete this because memory is needed later

    if is_main_process:
        flux_train_utils.save_flux_model_on_train_end(args, save_dtype, epoch, global_step, flux, ae)
        flux_train_utils.save_flux_model_on_train_end(args, save_dtype, epoch, global_step, flux)
        logger.info("model saved.")
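
The second call above appears to drop the `ae` argument, so only the FLUX model itself is handed to the save routine, which runs only on the main process after the accelerator has been deleted to free memory. A sketch of that save step, assuming a safetensors file with weights cast to `save_dtype` (illustration only; the real `flux_train_utils.save_flux_model_on_train_end` handles file naming, metadata, and FLUX-specific details):

```python
import torch
from safetensors.torch import save_file

# Main-process-only "save on train end" pattern, as a sketch: move weights to
# CPU, cast to save_dtype, and write a single safetensors file.
def save_model_on_train_end(path: str, model: torch.nn.Module, save_dtype: torch.dtype) -> None:
    state_dict = {k: v.detach().to("cpu", dtype=save_dtype) for k, v in model.state_dict().items()}
    save_file(state_dict, path)

# usage sketch (is_main_process is read from the accelerator before it is deleted):
# if is_main_process:
#     save_model_on_train_end("flux_finetuned.safetensors", flux, save_dtype)
#     logger.info("model saved.")
```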