format by black

Kohya S
2023-09-24 11:26:28 +09:00
parent 55886a0116
commit 8052bcd5cd

@@ -13,13 +13,14 @@ MIN_SV = 1e-6
 # Model save and load functions
 def load_state_dict(file_name, dtype):
     if model_util.is_safetensors(file_name):
         sd = load_file(file_name)
         with safe_open(file_name, framework="pt") as f:
             metadata = f.metadata()
     else:
-        sd = torch.load(file_name, map_location='cpu')
+        sd = torch.load(file_name, map_location="cpu")
         metadata = None
     for key in list(sd.keys()):
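
For context, the safetensors branch above reads both the tensors and the optional metadata header of the file. A minimal self-contained sketch of that pattern (the file name is hypothetical):

# Minimal sketch of the safetensors load pattern used above.
# "my_lora.safetensors" is a hypothetical path.
from safetensors import safe_open
from safetensors.torch import load_file

sd = load_file("my_lora.safetensors")  # dict of tensor name -> torch.Tensor
with safe_open("my_lora.safetensors", framework="pt") as f:
    metadata = f.metadata()  # dict of str -> str, or None if none was stored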
@@ -43,6 +44,7 @@ def save_to_file(file_name, model, state_dict, dtype, metadata):
 # Indexing functions
 def index_sv_cumulative(S, target):
     original_sum = float(torch.sum(S))
     cumulative_sums = torch.cumsum(S, dim=0) / original_sum
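
index_sv_cumulative picks the smallest rank at which the normalized cumulative sum of singular values reaches the target. A sketch of that criterion (the clamping at the end is an assumption, since the rest of the function lies outside this hunk):

import torch

def index_sv_cumulative_sketch(S: torch.Tensor, target: float) -> int:
    # S: singular values in descending order, as returned by torch.linalg.svd
    original_sum = float(torch.sum(S))
    cumulative_sums = torch.cumsum(S, dim=0) / original_sum
    index = int(torch.searchsorted(cumulative_sums, target)) + 1
    return max(1, min(index, len(S) - 1))  # assumed clamp to a valid rank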
@@ -138,6 +140,7 @@ def merge_linear(lora_down, lora_up, device):
 # Calculate new rank
 def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1):
     param_dict = {}
@@ -159,7 +162,6 @@ def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1):
         new_rank = rank
         new_alpha = float(scale * new_rank)
     if S[0] <= MIN_SV:  # Zero matrix, set dim to 1
         new_rank = 1
         new_alpha = float(scale * new_rank)
@@ -167,7 +169,6 @@ def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1):
         new_rank = rank
         new_alpha = float(scale * new_rank)
     # Calculate resize info
     s_sum = torch.sum(torch.abs(S))
     s_rank = torch.sum(torch.abs(S[:new_rank]))
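
The resize info computed here feeds the verbose report below: s_rank / s_sum is the retained share of singular-value mass, and the Frobenius retention follows from the squared singular values. A sketch of both metrics (the Frobenius formula is an assumption inferred from the "fro retained" figure reported later):

import torch

def retention_sketch(S: torch.Tensor, new_rank: int):
    s_sum = torch.sum(torch.abs(S))
    s_rank = torch.sum(torch.abs(S[:new_rank]))
    sum_retained = float(s_rank / s_sum)  # share of singular-value mass kept
    # ||W_r||_F / ||W||_F, since the Frobenius norm is sqrt(sum of squared SVs)
    fro_retained = float(torch.sqrt(torch.sum(S[:new_rank] ** 2) / torch.sum(S**2)))
    return sum_retained, fro_retained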
@@ -194,9 +195,9 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn
     # Extract loaded lora dim and alpha
     for key, value in lora_sd.items():
-        if network_alpha is None and 'alpha' in key:
+        if network_alpha is None and "alpha" in key:
             network_alpha = value
-        if network_dim is None and 'lora_down' in key and len(value.size()) == 2:
+        if network_dim is None and "lora_down" in key and len(value.size()) == 2:
             network_dim = value.size()[0]
         if network_alpha is not None and network_dim is not None:
             break
@@ -218,7 +219,7 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn
     with torch.no_grad():
         for key, value in tqdm(lora_sd.items()):
             weight_name = None
-            if 'lora_down' in key:
+            if "lora_down" in key:
                 block_down_name = key.split(".")[0]
                 weight_name = key.split(".")[-1]
                 lora_down_weight = value
@@ -227,14 +228,13 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn
             # find corresponding lora_up and alpha
             block_up_name = block_down_name
-            lora_up_weight = lora_sd.get(block_up_name + '.lora_up.' + weight_name, None)
-            lora_alpha = lora_sd.get(block_down_name + '.alpha', None)
+            lora_up_weight = lora_sd.get(block_up_name + ".lora_up." + weight_name, None)
+            lora_alpha = lora_sd.get(block_down_name + ".alpha", None)
-            weights_loaded = (lora_down_weight is not None and lora_up_weight is not None)
+            weights_loaded = lora_down_weight is not None and lora_up_weight is not None
             if weights_loaded:
-                conv2d = (len(lora_down_weight.size()) == 4)
+                conv2d = len(lora_down_weight.size()) == 4
                 if lora_alpha is None:
                     scale = 1.0
                 else:
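
The pairing logic above splits each "lora_down" key to locate its sibling "lora_up" and "alpha" entries in the same state dict. A sketch with a hypothetical key (the alpha-to-scale conversion is an assumption based on the usual LoRA convention, scale = alpha / rank; only the scale = 1.0 default is visible in this hunk):

# Hypothetical key, following the naming scheme the diff relies on.
key = "lora_unet_down_blocks_0.lora_down.weight"
block_down_name = key.split(".")[0]  # "lora_unet_down_blocks_0"
weight_name = key.split(".")[-1]     # "weight"
up_key = block_down_name + ".lora_up." + weight_name
alpha_key = block_down_name + ".alpha"
# Assumed scale when an alpha tensor is present:
# scale = float(lora_alpha) / lora_down_weight.size()[0]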
@@ -248,24 +248,26 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn
                     param_dict = extract_linear(full_weight_matrix, new_rank, dynamic_method, dynamic_param, device, scale)
                 if verbose:
-                    max_ratio = param_dict['max_ratio']
-                    sum_retained = param_dict['sum_retained']
-                    fro_retained = param_dict['fro_retained']
+                    max_ratio = param_dict["max_ratio"]
+                    sum_retained = param_dict["sum_retained"]
+                    fro_retained = param_dict["fro_retained"]
                     if not np.isnan(fro_retained):
                         fro_list.append(float(fro_retained))
                     verbose_str += f"{block_down_name:75} | "
-                    verbose_str+=f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}"
+                    verbose_str += (
+                        f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}"
+                    )
                 if verbose and dynamic_method:
                     verbose_str += f", dynamic | dim: {param_dict['new_rank']}, alpha: {param_dict['new_alpha']}\n"
                 else:
                     verbose_str += f"\n"
-                new_alpha = param_dict['new_alpha']
+                new_alpha = param_dict["new_alpha"]
                 o_lora_sd[block_down_name + "." + "lora_down.weight"] = param_dict["lora_down"].to(save_dtype).contiguous()
                 o_lora_sd[block_up_name + "." + "lora_up.weight"] = param_dict["lora_up"].to(save_dtype).contiguous()
-                o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(param_dict['new_alpha']).to(save_dtype)
+                o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(param_dict["new_alpha"]).to(save_dtype)
                 block_down_name = None
                 block_up_name = None
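
merge_linear and extract_linear themselves sit outside these hunks; a minimal sketch of the SVD round-trip they presumably implement, merging a LoRA pair into a full delta weight and re-factorizing it at a lower rank (the function name and the sqrt(S) split are assumptions):

import torch

def svd_resize_sketch(lora_up: torch.Tensor, lora_down: torch.Tensor, new_rank: int):
    # lora_up: (out_dim, rank), lora_down: (rank, in_dim)
    W = lora_up @ lora_down                              # merge to the full delta weight
    U, S, Vh = torch.linalg.svd(W, full_matrices=False)
    U, S, Vh = U[:, :new_rank], S[:new_rank], Vh[:new_rank, :]
    new_up = U * S.sqrt()                                # split sqrt(S) across both factors
    new_down = S.sqrt().unsqueeze(1) * Vh
    return new_up.contiguous(), new_down.contiguous()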
@@ -283,23 +285,27 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn
 def resize(args):
-    if args.save_to is None or not (args.save_to.endswith('.ckpt') or args.save_to.endswith('.pt') or args.save_to.endswith('.pth') or args.save_to.endswith('.safetensors')):
+    if args.save_to is None or not (
+        args.save_to.endswith(".ckpt")
+        or args.save_to.endswith(".pt")
+        or args.save_to.endswith(".pth")
+        or args.save_to.endswith(".safetensors")
+    ):
         raise Exception("The --save_to argument must be specified and must be a .ckpt , .pt, .pth or .safetensors file.")
     def str_to_dtype(p):
-        if p == 'float':
+        if p == "float":
             return torch.float
-        if p == 'fp16':
+        if p == "fp16":
             return torch.float16
-        if p == 'bf16':
+        if p == "bf16":
             return torch.bfloat16
         return None
     if args.dynamic_method and not args.dynamic_param:
         raise Exception("If using dynamic_method, then dynamic_param is required")
-    merge_dtype = str_to_dtype('float') # matmul method above only seems to work in float32
+    merge_dtype = str_to_dtype("float")  # matmul method above only seems to work in float32
     save_dtype = str_to_dtype(args.save_precision)
     if save_dtype is None:
         save_dtype = merge_dtype
@@ -308,7 +314,9 @@ def resize(args):
     lora_sd, metadata = load_state_dict(args.model, merge_dtype)
     print("Resizing Lora...")
-    state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device, args.dynamic_method, args.dynamic_param, args.verbose)
+    state_dict, old_dim, new_alpha = resize_lora_model(
+        lora_sd, args.new_rank, save_dtype, args.device, args.dynamic_method, args.dynamic_param, args.verbose
+    )
     # update metadata
     if metadata is None:
@@ -321,9 +329,11 @@ def resize(args):
         metadata["ss_network_dim"] = str(args.new_rank)
         metadata["ss_network_alpha"] = str(new_alpha)
     else:
-        metadata["ss_training_comment"] = f"Dynamic resize with {args.dynamic_method}: {args.dynamic_param} from {old_dim}; {comment}"
-        metadata["ss_network_dim"] = 'Dynamic'
-        metadata["ss_network_alpha"] = 'Dynamic'
+        metadata[
+            "ss_training_comment"
+        ] = f"Dynamic resize with {args.dynamic_method}: {args.dynamic_param} from {old_dim}; {comment}"
+        metadata["ss_network_dim"] = "Dynamic"
+        metadata["ss_network_alpha"] = "Dynamic"
     model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata)
     metadata["sshs_model_hash"] = model_hash
@@ -336,26 +346,38 @@ def resize(args):
 def setup_parser() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser()
-    parser.add_argument("--save_precision", type=str, default=None,
-                        choices=[None, "float", "fp16", "bf16"], help="precision in saving, float if omitted / 保存時の精度、未指定時はfloat")
-    parser.add_argument("--new_rank", type=int, default=4,
-                        help="Specify rank of output LoRA / 出力するLoRAのrank (dim)")
-    parser.add_argument("--save_to", type=str, default=None,
-                        help="destination file name: ckpt or safetensors file / 保存先のファイル名、ckptまたはsafetensors")
-    parser.add_argument("--model", type=str, default=None,
-                        help="LoRA model to resize at to new rank: ckpt or safetensors file / 読み込むLoRAモデル、ckptまたはsafetensors")
+    parser.add_argument(
+        "--save_precision",
+        type=str,
+        default=None,
+        choices=[None, "float", "fp16", "bf16"],
+        help="precision in saving, float if omitted / 保存時の精度、未指定時はfloat",
+    )
+    parser.add_argument("--new_rank", type=int, default=4, help="Specify rank of output LoRA / 出力するLoRAのrank (dim)")
+    parser.add_argument(
+        "--save_to", type=str, default=None, help="destination file name: ckpt or safetensors file / 保存先のファイル名、ckptまたはsafetensors"
+    )
+    parser.add_argument(
+        "--model",
+        type=str,
+        default=None,
+        help="LoRA model to resize at to new rank: ckpt or safetensors file / 読み込むLoRAモデル、ckptまたはsafetensors",
+    )
     parser.add_argument("--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う")
-    parser.add_argument("--verbose", action="store_true",
-                        help="Display verbose resizing information / rank変更時の詳細情報を出力する")
-    parser.add_argument("--dynamic_method", type=str, default=None, choices=[None, "sv_ratio", "sv_fro", "sv_cumulative"],
-                        help="Specify dynamic resizing method, --new_rank is used as a hard limit for max rank")
-    parser.add_argument("--dynamic_param", type=float, default=None,
-                        help="Specify target for dynamic reduction")
+    parser.add_argument("--verbose", action="store_true", help="Display verbose resizing information / rank変更時の詳細情報を出力する")
+    parser.add_argument(
+        "--dynamic_method",
+        type=str,
+        default=None,
+        choices=[None, "sv_ratio", "sv_fro", "sv_cumulative"],
+        help="Specify dynamic resizing method, --new_rank is used as a hard limit for max rank",
+    )
+    parser.add_argument("--dynamic_param", type=float, default=None, help="Specify target for dynamic reduction")
     return parser
-if __name__ == '__main__':
+if __name__ == "__main__":
     parser = setup_parser()
     args = parser.parse_args()
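
Putting the flags together, two hypothetical invocations (script path and file names assumed): a fixed resize to rank 8, and a dynamic resize that keeps 90% of the Frobenius norm with --new_rank as the cap:

python networks/resize_lora.py --model in.safetensors --save_to out.safetensors \
    --new_rank 8 --save_precision fp16

python networks/resize_lora.py --model in.safetensors --save_to out.safetensors \
    --new_rank 32 --dynamic_method sv_fro --dynamic_param 0.9 --verbose --device cuda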