diff --git a/networks/resize_lora.py b/networks/resize_lora.py
index e10d35bc..7beeb25e 100644
--- a/networks/resize_lora.py
+++ b/networks/resize_lora.py
@@ -5,148 +5,169 @@
 import argparse
 import os
 import torch
-from safetensors.torch import load_file, save_file
+from safetensors.torch import load_file, save_file, safe_open
 from tqdm import tqdm
 
+from library import train_util, model_util
+
 
 def load_state_dict(file_name, dtype):
-  if os.path.splitext(file_name)[1] == '.safetensors':
+  if model_util.is_safetensors(file_name):
     sd = load_file(file_name)
+    with safe_open(file_name, framework="pt") as f:
+      metadata = f.metadata()
   else:
     sd = torch.load(file_name, map_location='cpu')
+    metadata = None
+
   for key in list(sd.keys()):
     if type(sd[key]) == torch.Tensor:
       sd[key] = sd[key].to(dtype)
-  return sd
+
+  return sd, metadata
 
 
-def save_to_file(file_name, model, state_dict, dtype):
+def save_to_file(file_name, model, state_dict, dtype, metadata):
   if dtype is not None:
     for key in list(state_dict.keys()):
       if type(state_dict[key]) == torch.Tensor:
         state_dict[key] = state_dict[key].to(dtype)
 
-  if os.path.splitext(file_name)[1] == '.safetensors':
-    save_file(model, file_name)
+  if model_util.is_safetensors(file_name):
+    save_file(model, file_name, metadata)
   else:
     torch.save(model, file_name)
 
-
-def resize_lora_model(model, new_rank, merge_dtype, save_dtype):
-    print("Loading Model...")
-    lora_sd = load_state_dict(model, merge_dtype)
+def resize_lora_model(lora_sd, new_rank, save_dtype, device):
+  network_alpha = None
+  network_dim = None
 
-    network_alpha = None
-    network_dim = None
+  CLAMP_QUANTILE = 0.99
 
-    CLAMP_QUANTILE = 0.99
+  # Extract loaded lora dim and alpha
+  for key, value in lora_sd.items():
+    if network_alpha is None and 'alpha' in key:
+      network_alpha = value
+    if network_dim is None and 'lora_down' in key and len(value.size()) == 2:
+      network_dim = value.size()[0]
+    if network_alpha is not None and network_dim is not None:
+      break
+  if network_alpha is None:
+    network_alpha = network_dim
 
-    # Extract loaded lora dim and alpha
-    for key, value in lora_sd.items():
-        if network_alpha is None and 'alpha' in key:
-            network_alpha = value
-        if network_dim is None and 'lora_down' in key and len(value.size()) == 2:
-            network_dim = value.size()[0]
-        if network_alpha is not None and network_dim is not None:
-            break
-    if network_alpha is None:
-        network_alpha = network_dim
+  scale = network_alpha/network_dim
+  new_alpha = float(scale*new_rank) # calculate new alpha from scale
 
-    scale = network_alpha/network_dim
-    new_alpha = float(scale*new_rank) # calculate new alpha from scale
+  print(f"old dimension: {network_dim}, old alpha: {network_alpha}, new alpha: {new_alpha}")
 
-    print(f"dimension: {network_dim}, alpha: {network_alpha}, new alpha: {new_alpha}")
+  lora_down_weight = None
+  lora_up_weight = None
 
-    lora_down_weight = None
-    lora_up_weight = None
+  o_lora_sd = lora_sd.copy()
+  block_down_name = None
+  block_up_name = None
 
-    o_lora_sd = lora_sd.copy()
-    block_down_name = None
-    block_up_name = None
+  print("resizing lora...")
+  with torch.no_grad():
+    for key, value in tqdm(lora_sd.items()):
+      if 'lora_down' in key:
+        block_down_name = key.split(".")[0]
+        lora_down_weight = value
+      if 'lora_up' in key:
+        block_up_name = key.split(".")[0]
+        lora_up_weight = value
 
-    print("resizing lora...")
-    with torch.no_grad():
-        for key, value in tqdm(lora_sd.items()):
-            if 'lora_down' in key:
-                block_down_name = key.split(".")[0]
-                lora_down_weight = value
-            if 'lora_up' in key:
-                block_up_name = key.split(".")[0]
-                lora_up_weight = value
+      weights_loaded = (lora_down_weight is not None and lora_up_weight is not None)
 
-            weights_loaded = (lora_down_weight is not None and lora_up_weight is not None)
+      if (block_down_name == block_up_name) and weights_loaded:
 
-            if (block_down_name == block_up_name) and weights_loaded:
+        conv2d = (len(lora_down_weight.size()) == 4)
 
-                conv2d = (len(lora_down_weight.size()) == 4)
-
-                if conv2d:
-                    lora_down_weight = lora_down_weight.squeeze()
-                    lora_up_weight = lora_up_weight.squeeze()
+        if conv2d:
+          lora_down_weight = lora_down_weight.squeeze()
+          lora_up_weight = lora_up_weight.squeeze()
 
-                if args.device:
-                    org_device = lora_up_weight.device
-                    lora_up_weight = lora_up_weight.to(args.device)
-                    lora_down_weight = lora_down_weight.to(args.device)
+        if device:
+          org_device = lora_up_weight.device
+          lora_up_weight = lora_up_weight.to(device)
+          lora_down_weight = lora_down_weight.to(device)
 
-                full_weight_matrix = torch.matmul(lora_up_weight, lora_down_weight)
+        full_weight_matrix = torch.matmul(lora_up_weight, lora_down_weight)
 
-                U, S, Vh = torch.linalg.svd(full_weight_matrix)
+        U, S, Vh = torch.linalg.svd(full_weight_matrix)
 
-                U = U[:, :new_rank]
-                S = S[:new_rank]
-                U = U @ torch.diag(S)
+        U = U[:, :new_rank]
+        S = S[:new_rank]
+        U = U @ torch.diag(S)
 
-                Vh = Vh[:new_rank, :]
+        Vh = Vh[:new_rank, :]
 
-                dist = torch.cat([U.flatten(), Vh.flatten()])
-                hi_val = torch.quantile(dist, CLAMP_QUANTILE)
-                low_val = -hi_val
+        dist = torch.cat([U.flatten(), Vh.flatten()])
+        hi_val = torch.quantile(dist, CLAMP_QUANTILE)
+        low_val = -hi_val
 
-                U = U.clamp(low_val, hi_val)
-                Vh = Vh.clamp(low_val, hi_val)
-
-                if conv2d:
-                    U = U.unsqueeze(2).unsqueeze(3)
-                    Vh = Vh.unsqueeze(2).unsqueeze(3)
-
-                if args.device:
-                    U = U.to(org_device)
-                    Vh = Vh.to(org_device)
+        U = U.clamp(low_val, hi_val)
+        Vh = Vh.clamp(low_val, hi_val)
 
-                o_lora_sd[block_down_name + "." + "lora_down.weight"] = Vh.to(save_dtype).contiguous()
-                o_lora_sd[block_up_name + "." + "lora_up.weight"] = U.to(save_dtype).contiguous()
-                o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(new_alpha).to(save_dtype)
+        if conv2d:
+          U = U.unsqueeze(2).unsqueeze(3)
+          Vh = Vh.unsqueeze(2).unsqueeze(3)
 
-                block_down_name = None
-                block_up_name = None
-                lora_down_weight = None
-                lora_up_weight = None
-                weights_loaded = False
+        if device:
+          U = U.to(org_device)
+          Vh = Vh.to(org_device)
+
+        o_lora_sd[block_down_name + "." + "lora_down.weight"] = Vh.to(save_dtype).contiguous()
+        o_lora_sd[block_up_name + "." + "lora_up.weight"] = U.to(save_dtype).contiguous()
+        o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(new_alpha).to(save_dtype)
+
+        block_down_name = None
+        block_up_name = None
+        lora_down_weight = None
+        lora_up_weight = None
+        weights_loaded = False
+
+  print("resizing complete")
+  return o_lora_sd, network_dim, new_alpha
 
-    print("resizing complete")
-    return o_lora_sd
 
 
 def resize(args):
-    def str_to_dtype(p):
-        if p == 'float':
-            return torch.float
-        if p == 'fp16':
-            return torch.float16
-        if p == 'bf16':
-            return torch.bfloat16
-        return None
+  def str_to_dtype(p):
+    if p == 'float':
+      return torch.float
+    if p == 'fp16':
+      return torch.float16
+    if p == 'bf16':
+      return torch.bfloat16
+    return None
 
-    merge_dtype = str_to_dtype('float') # matmul method above only seems to work in float32
-    save_dtype = str_to_dtype(args.save_precision)
-    if save_dtype is None:
-        save_dtype = merge_dtype
+  merge_dtype = str_to_dtype('float') # matmul method above only seems to work in float32
+  save_dtype = str_to_dtype(args.save_precision)
+  if save_dtype is None:
+    save_dtype = merge_dtype
 
-    state_dict = resize_lora_model(args.model, args.new_rank, merge_dtype, save_dtype)
+  print("loading Model...")
+  lora_sd, metadata = load_state_dict(args.model, merge_dtype)
 
-    print(f"saving model to: {args.save_to}")
-    save_to_file(args.save_to, state_dict, state_dict, save_dtype)
+  print("resizing rank...")
+  state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device)
+
+  # update metadata
+  if metadata is None:
+    metadata = {}
+
+  comment = metadata.get("ss_training_comment", "")
+  metadata["ss_training_comment"] = f"dimension is resized from {old_dim} to {args.new_rank}; {comment}"
+  metadata["ss_network_dim"] = str(args.new_rank)
+  metadata["ss_network_alpha"] = str(new_alpha)
+
+  model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata)
+  metadata["sshs_model_hash"] = model_hash
+  metadata["sshs_legacy_hash"] = legacy_hash
+
+  print(f"saving model to: {args.save_to}")
+  save_to_file(args.save_to, state_dict, state_dict, save_dtype, metadata)
 
 
 if __name__ == '__main__':