commit 3ce0c6e71f (parent 63ec59fc0b)
Author: Symbiomatrix
Committed by: woctordho
Date: 2025-06-22 01:00:59 +03:00


@@ -240,7 +240,7 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna
     for key, value in tqdm(lora_sd.items()):
         weight_name = None
         if LORAFMT[0] in key:
-            block_down_name = key.rsplit(f".LORAFMT[0]", 1)[0]
+            block_down_name = key.rsplit(f".{LORAFMT[0]}", 1)[0]
             weight_name = key.rsplit(".", 1)[-1]
             lora_down_weight = value
         else:
@@ -248,7 +248,7 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna

         # find corresponding lora_up and alpha
         block_up_name = block_down_name
-        lora_up_weight = lora_sd.get(block_up_name + f".LORAFMT[1]." + weight_name, None)
+        lora_up_weight = lora_sd.get(block_up_name + f".{LORAFMT[1]}." + weight_name, None)
         lora_alpha = lora_sd.get(block_down_name + ".alpha", None)

         weights_loaded = lora_down_weight is not None and lora_up_weight is not None
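
Both hunks above fix the same class of bug: an f-string without braces does not interpolate, it keeps the literal text. A minimal sketch of the before/after behavior, assuming LORAFMT is a two-element sequence such as ("lora_down", "lora_up") (its real definition sits elsewhere in the file, not in these hunks):

    # Hypothetical stand-in for the module-level constant; the actual
    # value is defined elsewhere in the file.
    LORAFMT = ("lora_down", "lora_up")

    key = "unet.block_0.lora_down.weight"

    # Before the fix: no braces, so the f-string is the literal text
    # ".LORAFMT[0]"; rsplit never finds that separator and returns the
    # whole key unchanged.
    broken = key.rsplit(f".LORAFMT[0]", 1)[0]
    print(broken)  # unet.block_0.lora_down.weight

    # After the fix: braces interpolate the constant, splitting on
    # ".lora_down" and leaving only the block prefix.
    fixed = key.rsplit(f".{LORAFMT[0]}", 1)[0]
    print(fixed)   # unet.block_0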
@@ -286,8 +286,8 @@ def resize_lora_model(lora_sd, new_rank, new_conv_rank, save_dtype, device, dyna
             verbose_str += "\n"

         new_alpha = param_dict["new_alpha"]
-        o_lora_sd[block_down_name + f".LORAFMT[0].weight"] = param_dict[LORAFMT[0]].to(save_dtype).contiguous()
-        o_lora_sd[block_up_name + f".LORAFMT[1].weight"] = param_dict[LORAFMT[1]].to(save_dtype).contiguous()
+        o_lora_sd[block_down_name + f".{LORAFMT[0]}.weight"] = param_dict[LORAFMT[0]].to(save_dtype).contiguous()
+        o_lora_sd[block_up_name + f".{LORAFMT[1]}.weight"] = param_dict[LORAFMT[1]].to(save_dtype).contiguous()
         o_lora_sd[block_up_name + ".alpha"] = torch.tensor(param_dict["new_alpha"]).to(save_dtype)

         block_down_name = None
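
The practical consequence of the missing braces in the lookup hunk is a key that can never exist in the state dict, so lora_up_weight was always None, weights_loaded stayed False, and the pair was never resized. A small round-trip sketch under the same hypothetical LORAFMT and a toy state dict:

    LORAFMT = ("lora_down", "lora_up")  # hypothetical value, as above

    lora_sd = {
        "unet.block_0.lora_down.weight": "down-tensor",
        "unet.block_0.lora_up.weight": "up-tensor",
        "unet.block_0.alpha": 4.0,
    }

    block_down_name = block_up_name = "unet.block_0"
    weight_name = "weight"

    # Broken form: the literal key "unet.block_0.LORAFMT[1].weight" is
    # absent from the dict, so the lookup always yields None.
    assert lora_sd.get(block_up_name + f".LORAFMT[1]." + weight_name) is None

    # Fixed form: the key becomes "unet.block_0.lora_up.weight" and resolves.
    assert lora_sd.get(block_up_name + f".{LORAFMT[1]}." + weight_name) == "up-tensor"

    # The output keys written by the last hunk follow the same pattern:
    out_key = block_down_name + f".{LORAFMT[0]}.weight"
    print(out_key)  # unet.block_0.lora_down.weight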