support T5XXL LoRA, reduce peak memory usage #1560

This commit is contained in:
Kohya S
2024-09-04 23:15:27 +09:00
parent b7cff0a754
commit 56cb2fc885
2 changed files with 59 additions and 16 deletions

View File

@@ -392,7 +392,7 @@ def create_network_from_weights(multiplier, file, ae, text_encoders, flux, weigh
modules_dim[lora_name] = dim
# logger.info(lora_name, value.size(), dim)
-        if train_t5xxl is None:
+        if train_t5xxl is None or train_t5xxl is False:
train_t5xxl = "lora_te3" in lora_name
if train_t5xxl is None: