mirror of https://github.com/kohya-ss/sd-scripts.git
fix: support LoRA w/o TE for create_network_from_weights
@@ -562,23 +562,26 @@ class LoRANetwork(torch.nn.Module):
# Set dim/alpha to modules dim/alpha
if modules_dim is not None and modules_alpha is not None:
    # modules are specified
    # network from weights
    if lora_name in modules_dim:
        dim = modules_dim[lora_name]
        alpha = modules_alpha[lora_name]
    else:
        dim = 0  # skip if not found

    # Set dims to type_dims
    if is_lumina and type_dims is not None:
        identifier = [
            ("attention",),  # attention layers
            ("mlp",),  # MLP layers
            ("modulation",),  # modulation layers
            ("refiner",),  # refiner blocks
        ]
        for i, d in enumerate(type_dims):
            if d is not None and all([id in lora_name for id in identifier[i]]):
                dim = d  # may be 0 for skip
                break
else:
    # Set dims to type_dims
    if is_lumina and type_dims is not None:
        identifier = [
            ("attention",),  # attention layers
            ("mlp",),  # MLP layers
            ("modulation",),  # modulation layers
            ("refiner",),  # refiner blocks
        ]
        for i, d in enumerate(type_dims):
            if d is not None and all([id in lora_name for id in identifier[i]]):
                dim = d  # may be 0 for skip
                break

    # Drop blocks if we are only training some blocks
    if (
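For context, a minimal sketch of how modules_dim / modules_alpha are typically derived from a loaded weights file before reaching the branch above, and why the new "else: dim = 0" path matters when the weights contain no Text Encoder LoRA. The key layout ("<lora_name>.lora_down.weight", "<lora_name>.alpha") and the weights_sd name are assumptions for illustration, not the repository's exact code.

    # Hypothetical sketch: building modules_dim / modules_alpha from a LoRA
    # state dict (weights_sd). Assumed key layout: "<lora_name>.lora_down.weight"
    # and "<lora_name>.alpha" -- verify against the actual checkpoint.
    modules_dim = {}
    modules_alpha = {}
    for key, value in weights_sd.items():
        if "." not in key:
            continue
        lora_name = key.split(".")[0]
        if "alpha" in key:
            modules_alpha[lora_name] = value
        elif "lora_down" in key:
            modules_dim[lora_name] = value.shape[0]  # rank of this module

    # If the weights were trained without the Text Encoder, no Text Encoder
    # lora_name appears in modules_dim, so the "else: dim = 0  # skip if not
    # found" branch in the diff skips that module instead of raising a KeyError.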
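A brief, hedged illustration of the per-layer-type override (type_dims) used in the Lumina branch of the diff; the module names and dim values here are invented for the example.

    # Index order follows the identifier list: attention, mlp, modulation, refiner.
    # None = leave dim unchanged, 0 = skip the module entirely.
    identifier = [("attention",), ("mlp",), ("modulation",), ("refiner",)]
    type_dims = [16, 8, 0, None]

    for lora_name in ["blocks_0_attention_qkv", "blocks_0_mlp_fc1", "blocks_0_modulation"]:
        dim = 4  # assumed default rank before the override
        for i, d in enumerate(type_dims):
            if d is not None and all([id in lora_name for id in identifier[i]]):
                dim = d
                break
        print(lora_name, dim)  # -> 16, 8, 0 respectively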