fix: support LoRA w/o TE for create_network_from_weights

Author: Kohya S
Date: 2025-07-13 20:46:24 +09:00
Parent: 1a9bf2ab56
Commit: 88dc3213a9

@@ -562,23 +562,26 @@ class LoRANetwork(torch.nn.Module):
         # Set dim/alpha to modules dim/alpha
         if modules_dim is not None and modules_alpha is not None:
             # modules specified
             # network from weights
             if lora_name in modules_dim:
                 dim = modules_dim[lora_name]
                 alpha = modules_alpha[lora_name]
-        # Set dims to type_dims
-        if is_lumina and type_dims is not None:
-            identifier = [
-                ("attention",),  # attention layers
-                ("mlp",),  # MLP layers
-                ("modulation",),  # modulation layers
-                ("refiner",),  # refiner blocks
-            ]
-            for i, d in enumerate(type_dims):
-                if d is not None and all([id in lora_name for id in identifier[i]]):
-                    dim = d  # may be 0 for skip
-                    break
+            else:
+                dim = 0  # skip if not found
+        else:
+            # Set dims to type_dims
+            if is_lumina and type_dims is not None:
+                identifier = [
+                    ("attention",),  # attention layers
+                    ("mlp",),  # MLP layers
+                    ("modulation",),  # modulation layers
+                    ("refiner",),  # refiner blocks
+                ]
+                for i, d in enumerate(type_dims):
+                    if d is not None and all([id in lora_name for id in identifier[i]]):
+                        dim = d  # may be 0 for skip
+                        break
         # Drop blocks if we are only training some blocks
         if (
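
The behavioral change: when the network is created from existing weight files (modules_dim / modules_alpha given), a module whose name is missing from those weights now gets dim = 0 and is skipped, so LoRA files saved without Text Encoder (TE) modules load cleanly; the Lumina type_dims overrides now apply only when no weights are given. Below is a minimal standalone sketch of that resolution logic, not the repository's actual code: the helper name resolve_dim_alpha, the network_dim / network_alpha defaults, and the example module names are hypothetical, while modules_dim, modules_alpha, type_dims, is_lumina, and the identifier tuples come from the diff above.

from typing import Optional, Sequence, Tuple

# layer-type identifiers mirroring the diff: attention / mlp / modulation / refiner
IDENTIFIERS = [("attention",), ("mlp",), ("modulation",), ("refiner",)]


def resolve_dim_alpha(
    lora_name: str,
    modules_dim: Optional[dict],
    modules_alpha: Optional[dict],
    is_lumina: bool,
    type_dims: Optional[Sequence[Optional[int]]],
    network_dim: int = 4,        # hypothetical default, not from the diff
    network_alpha: float = 1.0,  # hypothetical default, not from the diff
) -> Tuple[int, float]:
    dim, alpha = network_dim, network_alpha
    if modules_dim is not None and modules_alpha is not None:
        # network from weights: only modules present in the file keep a dim
        if lora_name in modules_dim:
            dim = modules_dim[lora_name]
            alpha = modules_alpha[lora_name]
        else:
            dim = 0  # skip if not found (e.g. TE modules absent from the file)
    elif is_lumina and type_dims is not None:
        # fresh network: per-layer-type dims may override the default
        for i, d in enumerate(type_dims):
            if d is not None and all(ident in lora_name for ident in IDENTIFIERS[i]):
                dim = d  # may be 0 for skip
                break
    return dim, alpha


# Hypothetical module names for illustration: a file saved without TE modules
weights_dim = {"lora_unet_attention_0": 16}
weights_alpha = {"lora_unet_attention_0": 8.0}
print(resolve_dim_alpha("lora_unet_attention_0", weights_dim, weights_alpha, True, None))   # (16, 8.0)
print(resolve_dim_alpha("lora_te1_text_model_0", weights_dim, weights_alpha, True, None))  # (0, 1.0) -> skipped

A dim of 0 is the existing skip convention in this code path (see "may be 0 for skip" in the diff), so modules absent from the loaded file are dropped rather than instantiated with the network default.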