From efe4c983410dfb02185cf3cef4851191e4380f1e Mon Sep 17 00:00:00 2001 From: mgz-dev <49577754+mgz-dev@users.noreply.github.com> Date: Tue, 28 Feb 2023 14:55:15 -0600 Subject: [PATCH 01/13] Enable ability to resize lora dim based off ratios --- networks/resize_lora.py | 44 +++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/networks/resize_lora.py b/networks/resize_lora.py index 271de8ef..c4d8a4d8 100644 --- a/networks/resize_lora.py +++ b/networks/resize_lora.py @@ -38,10 +38,11 @@ def save_to_file(file_name, model, state_dict, dtype, metadata): torch.save(model, file_name) -def resize_lora_model(lora_sd, new_rank, save_dtype, device, verbose): +def resize_lora_model(lora_sd, new_rank, save_dtype, device, sv_ratio, verbose): network_alpha = None network_dim = None verbose_str = "\n" + ratio_flag = False CLAMP_QUANTILE = 0.99 @@ -57,9 +58,12 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, verbose): network_alpha = network_dim scale = network_alpha/network_dim - new_alpha = float(scale*new_rank) # calculate new alpha from scale - - print(f"old dimension: {network_dim}, old alpha: {network_alpha}, new alpha: {new_alpha}") + if not sv_ratio: + new_alpha = float(scale*new_rank) # calculate new alpha from scale + print(f"old dimension: {network_dim}, old alpha: {network_alpha}, new dim: {new_rank}, new alpha: {new_alpha}") + else: + print(f"Dynamically determining new alphas and dims based off sv ratio: {sv_ratio}") + ratio_flag = True lora_down_weight = None lora_up_weight = None @@ -97,11 +101,24 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, verbose): U, S, Vh = torch.linalg.svd(full_weight_matrix) + if ratio_flag: + # Calculate new dim and alpha for dynamic sizing + max_sv = S[0] + min_sv = max_sv/sv_ratio + new_rank = torch.sum(S > min_sv).item() + new_rank = max(new_rank, 1) + new_alpha = float(scale*new_rank) + if verbose: s_sum = torch.sum(torch.abs(S)) s_rank = torch.sum(torch.abs(S[:new_rank])) - verbose_str+=f"{block_down_name:76} | " - verbose_str+=f"sum(S) retained: {(s_rank)/s_sum:.1%}, max(S) ratio: {S[0]/S[new_rank]:0.1f}\n" + verbose_str+=f"{block_down_name:75} | " + verbose_str+=f"sum(S) retained: {(s_rank)/s_sum:.1%}, max(S) ratio: {S[0]/S[new_rank]:0.1f}" + + if verbose and ratio_flag: + verbose_str+=f", dynamic| dim: {new_rank}, alpha: {new_alpha}\n" + else: + verbose_str+=f"\n" U = U[:, :new_rank] S = S[:new_rank] @@ -160,16 +177,21 @@ def resize(args): lora_sd, metadata = load_state_dict(args.model, merge_dtype) print("resizing rank...") - state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device, args.verbose) + state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device, args.sv_ratio, args.verbose) # update metadata if metadata is None: metadata = {} comment = metadata.get("ss_training_comment", "") - metadata["ss_training_comment"] = f"dimension is resized from {old_dim} to {args.new_rank}; {comment}" - metadata["ss_network_dim"] = str(args.new_rank) - metadata["ss_network_alpha"] = str(new_alpha) + if not args.sv_ratio: + metadata["ss_training_comment"] = f"dimension is resized from {old_dim} to {args.new_rank}; {comment}" + metadata["ss_network_dim"] = str(args.new_rank) + metadata["ss_network_alpha"] = str(new_alpha) + else: + metadata["ss_training_comment"] = f"Dynamic resize from {old_dim} with ratio {args.sv_ratio}; {comment}" + metadata["ss_network_dim"] = 'Dynamic' + metadata["ss_network_alpha"] = 
'Dynamic' model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata) metadata["sshs_model_hash"] = model_hash @@ -193,6 +215,8 @@ if __name__ == '__main__': parser.add_argument("--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う") parser.add_argument("--verbose", action="store_true", help="Display verbose resizing information / rank変更時の詳細情報を出力する") + parser.add_argument("--sv_ratio", type=float, default=None, + help="Specify svd ratio for dim calcs. Will override --new_rank") args = parser.parse_args() resize(args) From 52ca6c515c14d2309ab470b59a179d7ca2f5e149 Mon Sep 17 00:00:00 2001 From: mgz-dev <49577754+mgz-dev@users.noreply.github.com> Date: Wed, 1 Mar 2023 13:35:24 -0600 Subject: [PATCH 02/13] add options to resize based off frobenius norm or cumulative sum --- networks/resize_lora.py | 89 ++++++++++++++++++++++++++++++++--------- 1 file changed, 70 insertions(+), 19 deletions(-) diff --git a/networks/resize_lora.py b/networks/resize_lora.py index c4d8a4d8..de405613 100644 --- a/networks/resize_lora.py +++ b/networks/resize_lora.py @@ -3,11 +3,11 @@ # Thanks to cloneofsimo and kohya import argparse -import os import torch from safetensors.torch import load_file, save_file, safe_open from tqdm import tqdm from library import train_util, model_util +import numpy as np def load_state_dict(file_name, dtype): @@ -38,11 +38,32 @@ def save_to_file(file_name, model, state_dict, dtype, metadata): torch.save(model, file_name) -def resize_lora_model(lora_sd, new_rank, save_dtype, device, sv_ratio, verbose): +def index_sv_cumulative(S, target): + original_sum = float(torch.sum(S)) + cumulative_sums = torch.cumsum(S, dim=0)/original_sum + index = int(torch.searchsorted(cumulative_sums, target)) + 1 + if index >= len(S): + index = len(S) - 1 + + return index + + +def index_sv_fro(S, target): + S_squared = S.pow(2) + s_fro_sq = float(torch.sum(S_squared)) + sum_S_squared = torch.cumsum(S_squared, dim=0)/s_fro_sq + index = int(torch.searchsorted(sum_S_squared, target**2)) + 1 + if index >= len(S): + index = len(S) - 1 + + return index + + +def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dynamic_param, verbose): network_alpha = None network_dim = None verbose_str = "\n" - ratio_flag = False + fro_list = [] CLAMP_QUANTILE = 0.99 @@ -58,12 +79,12 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, sv_ratio, verbose): network_alpha = network_dim scale = network_alpha/network_dim - if not sv_ratio: + + if dynamic_method: + print(f"Dynamically determining new alphas and dims based off {dynamic_method}: {dynamic_param}") + else: new_alpha = float(scale*new_rank) # calculate new alpha from scale print(f"old dimension: {network_dim}, old alpha: {network_alpha}, new dim: {new_rank}, new alpha: {new_alpha}") - else: - print(f"Dynamically determining new alphas and dims based off sv ratio: {sv_ratio}") - ratio_flag = True lora_down_weight = None lora_up_weight = None @@ -101,22 +122,43 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, sv_ratio, verbose): U, S, Vh = torch.linalg.svd(full_weight_matrix) - if ratio_flag: - # Calculate new dim and alpha for dynamic sizing + if dynamic_method=="sv_ratio": + # Calculate new dim and alpha based off ratio max_sv = S[0] - min_sv = max_sv/sv_ratio + min_sv = max_sv/dynamic_param new_rank = torch.sum(S > min_sv).item() new_rank = max(new_rank, 1) new_alpha = float(scale*new_rank) + elif dynamic_method=="sv_cumulative": + # Calculate new dim and 
alpha based off cumulative sum + new_rank = index_sv_cumulative(S, dynamic_param) + new_rank = max(new_rank, 1) + new_alpha = float(scale*new_rank) + + elif dynamic_method=="sv_fro": + # Calculate new dim and alpha based off sqrt sum of squares + new_rank = index_sv_fro(S, dynamic_param) + new_rank = max(new_rank, 1) + new_alpha = float(scale*new_rank) + if verbose: s_sum = torch.sum(torch.abs(S)) s_rank = torch.sum(torch.abs(S[:new_rank])) - verbose_str+=f"{block_down_name:75} | " - verbose_str+=f"sum(S) retained: {(s_rank)/s_sum:.1%}, max(S) ratio: {S[0]/S[new_rank]:0.1f}" + + S_squared = S.pow(2) + s_fro = torch.sqrt(torch.sum(S_squared)) + s_red_fro = torch.sqrt(torch.sum(S_squared[:new_rank])) + fro_percent = float(s_red_fro/s_fro) + if not np.isnan(fro_percent): + fro_list.append(float(fro_percent)) - if verbose and ratio_flag: - verbose_str+=f", dynamic| dim: {new_rank}, alpha: {new_alpha}\n" + verbose_str+=f"{block_down_name:75} | " + verbose_str+=f"sum(S) retained: {(s_rank)/s_sum:.1%}, fro retained: {fro_percent:.1%}, max(S) ratio: {S[0]/S[new_rank]:0.1f}" + + + if verbose and dynamic_method: + verbose_str+=f", dynamic | dim: {new_rank}, alpha: {new_alpha}\n" else: verbose_str+=f"\n" @@ -153,6 +195,8 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, sv_ratio, verbose): if verbose: print(verbose_str) + + print(f"Average Frobenius norm retention: {np.mean(fro_list):.2%} | std: {np.std(fro_list):0.3f}") print("resizing complete") return o_lora_sd, network_dim, new_alpha @@ -168,6 +212,9 @@ def resize(args): return torch.bfloat16 return None + if args.dynamic_method and not args.dynamic_param: + raise Exception("If using dynamic_method, then dynamic_param is required") + merge_dtype = str_to_dtype('float') # matmul method above only seems to work in float32 save_dtype = str_to_dtype(args.save_precision) if save_dtype is None: @@ -177,19 +224,20 @@ def resize(args): lora_sd, metadata = load_state_dict(args.model, merge_dtype) print("resizing rank...") - state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device, args.sv_ratio, args.verbose) + state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device, args.dynamic_method, args.dynamic_param, args.verbose) # update metadata if metadata is None: metadata = {} comment = metadata.get("ss_training_comment", "") - if not args.sv_ratio: + + if not args.dynamic_method: metadata["ss_training_comment"] = f"dimension is resized from {old_dim} to {args.new_rank}; {comment}" metadata["ss_network_dim"] = str(args.new_rank) metadata["ss_network_alpha"] = str(new_alpha) else: - metadata["ss_training_comment"] = f"Dynamic resize from {old_dim} with ratio {args.sv_ratio}; {comment}" + metadata["ss_training_comment"] = f"Dynamic resize with {args.dynamic_method}: {args.dynamic_param} from {old_dim}; {comment}" metadata["ss_network_dim"] = 'Dynamic' metadata["ss_network_alpha"] = 'Dynamic' @@ -215,8 +263,11 @@ if __name__ == '__main__': parser.add_argument("--device", type=str, default=None, help="device to use, cuda for GPU / 計算を行うデバイス、cuda でGPUを使う") parser.add_argument("--verbose", action="store_true", help="Display verbose resizing information / rank変更時の詳細情報を出力する") - parser.add_argument("--sv_ratio", type=float, default=None, - help="Specify svd ratio for dim calcs. 
Will override --new_rank") + parser.add_argument("--dynamic_method", type=str, default=None, choices=[None, "sv_ratio", "sv_fro", "sv_cumulative"], + help="Specify dynamic resizing method, will override --new_rank") + parser.add_argument("--dynamic_param", type=float, default=None, + help="Specify target for dynamic reduction") + args = parser.parse_args() resize(args) From 80be6fa130cf199ed05e9a75762e20f4b280ce7e Mon Sep 17 00:00:00 2001 From: mgz-dev <49577754+mgz-dev@users.noreply.github.com> Date: Fri, 3 Mar 2023 23:32:46 -0600 Subject: [PATCH 03/13] refactor and bug fix for too large sv_ratio - code refactor to be able to re-use same function for dynamic extract lora - remove clamp - fix issue where if sv_ratio is too high index goes out of bounds --- networks/resize_lora.py | 100 ++++++++++++++++++++++++---------------- 1 file changed, 59 insertions(+), 41 deletions(-) diff --git a/networks/resize_lora.py b/networks/resize_lora.py index de405613..eb745333 100644 --- a/networks/resize_lora.py +++ b/networks/resize_lora.py @@ -59,14 +59,55 @@ def index_sv_fro(S, target): return index +def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1): + param_dict = {} + + if dynamic_method=="sv_ratio": + # Calculate new dim and alpha based off ratio + max_sv = S[0] + min_sv = max_sv/dynamic_param + new_rank = max(torch.sum(S > min_sv).item(),1) + new_alpha = float(scale*new_rank) + + elif dynamic_method=="sv_cumulative": + # Calculate new dim and alpha based off cumulative sum + new_rank = index_sv_cumulative(S, dynamic_param) + new_rank = max(new_rank, 1) + new_alpha = float(scale*new_rank) + + elif dynamic_method=="sv_fro": + # Calculate new dim and alpha based off sqrt sum of squares + new_rank = index_sv_fro(S, dynamic_param) + new_rank = min(max(new_rank, 1), len(S)-1) + new_alpha = float(scale*new_rank) + else: + new_rank = rank + new_alpha = float(scale*new_rank) + + # Calculate resize info + s_sum = torch.sum(torch.abs(S)) + s_rank = torch.sum(torch.abs(S[:new_rank])) + + S_squared = S.pow(2) + s_fro = torch.sqrt(torch.sum(S_squared)) + s_red_fro = torch.sqrt(torch.sum(S_squared[:new_rank])) + fro_percent = float(s_red_fro/s_fro) + + param_dict["new_rank"] = new_rank + param_dict["new_alpha"] = new_alpha + param_dict["sum_retained"] = (s_rank)/s_sum + param_dict["fro_retained"] = fro_percent + param_dict["max_ratio"] = S[0]/S[new_rank] + + return param_dict + + def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dynamic_param, verbose): network_alpha = None network_dim = None verbose_str = "\n" fro_list = [] - CLAMP_QUANTILE = 0.99 - # Extract loaded lora dim and alpha for key, value in lora_sd.items(): if network_alpha is None and 'alpha' in key: @@ -82,9 +123,6 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn if dynamic_method: print(f"Dynamically determining new alphas and dims based off {dynamic_method}: {dynamic_param}") - else: - new_alpha = float(scale*new_rank) # calculate new alpha from scale - print(f"old dimension: {network_dim}, old alpha: {network_alpha}, new dim: {new_rank}, new alpha: {new_alpha}") lora_down_weight = None lora_up_weight = None @@ -93,7 +131,6 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn block_down_name = None block_up_name = None - print("resizing lora...") with torch.no_grad(): for key, value in tqdm(lora_sd.items()): if 'lora_down' in key: @@ -122,39 +159,21 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn U, S, 
Vh = torch.linalg.svd(full_weight_matrix) - if dynamic_method=="sv_ratio": - # Calculate new dim and alpha based off ratio - max_sv = S[0] - min_sv = max_sv/dynamic_param - new_rank = torch.sum(S > min_sv).item() - new_rank = max(new_rank, 1) - new_alpha = float(scale*new_rank) - elif dynamic_method=="sv_cumulative": - # Calculate new dim and alpha based off cumulative sum - new_rank = index_sv_cumulative(S, dynamic_param) - new_rank = max(new_rank, 1) - new_alpha = float(scale*new_rank) + param_dict = rank_resize(S, new_rank, dynamic_method, dynamic_param, scale) + + new_rank = param_dict['new_rank'] + new_alpha = param_dict['new_alpha'] - elif dynamic_method=="sv_fro": - # Calculate new dim and alpha based off sqrt sum of squares - new_rank = index_sv_fro(S, dynamic_param) - new_rank = max(new_rank, 1) - new_alpha = float(scale*new_rank) - if verbose: - s_sum = torch.sum(torch.abs(S)) - s_rank = torch.sum(torch.abs(S[:new_rank])) - - S_squared = S.pow(2) - s_fro = torch.sqrt(torch.sum(S_squared)) - s_red_fro = torch.sqrt(torch.sum(S_squared[:new_rank])) - fro_percent = float(s_red_fro/s_fro) - if not np.isnan(fro_percent): - fro_list.append(float(fro_percent)) + max_ratio = param_dict['max_ratio'] + sum_retained = param_dict['sum_retained'] + fro_retained = param_dict['fro_retained'] + if not np.isnan(fro_retained): + fro_list.append(float(fro_retained)) verbose_str+=f"{block_down_name:75} | " - verbose_str+=f"sum(S) retained: {(s_rank)/s_sum:.1%}, fro retained: {fro_percent:.1%}, max(S) ratio: {S[0]/S[new_rank]:0.1f}" + verbose_str+=f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}" if verbose and dynamic_method: @@ -168,12 +187,11 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn Vh = Vh[:new_rank, :] - dist = torch.cat([U.flatten(), Vh.flatten()]) - hi_val = torch.quantile(dist, CLAMP_QUANTILE) - low_val = -hi_val - - U = U.clamp(low_val, hi_val) - Vh = Vh.clamp(low_val, hi_val) + # dist = torch.cat([U.flatten(), Vh.flatten()]) + # hi_val = torch.quantile(dist, CLAMP_QUANTILE) + # low_val = -hi_val + # U = U.clamp(low_val, hi_val) + # Vh = Vh.clamp(low_val, hi_val) if conv2d: U = U.unsqueeze(2).unsqueeze(3) @@ -223,7 +241,7 @@ def resize(args): print("loading Model...") lora_sd, metadata = load_state_dict(args.model, merge_dtype) - print("resizing rank...") + print("Resizing Lora...") state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device, args.dynamic_method, args.dynamic_param, args.verbose) # update metadata From 214ed092f2208caa5636bb631e7f37ab97c67a3f Mon Sep 17 00:00:00 2001 From: mgz-dev <49577754+mgz-dev@users.noreply.github.com> Date: Sat, 4 Mar 2023 02:01:10 -0600 Subject: [PATCH 04/13] add support to extract lora with resnet and 2d blocks Modified resize script so support different types of LoRA networks (refer to Kohaku-Blueleaf module implementation for structure). 
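
For illustration, a minimal sketch of the per-layer flow this enables, as a standalone example with made-up shapes rather than the exact helpers in the diff: the low-rank pair is merged back into a full weight, the weight is flattened to 2-D for SVD, and the leading singular vectors are reshaped back into conv-style lora_down/lora_up factors.

    import torch

    out_dim, in_dim, k = 320, 4, 3        # hypothetical Conv2d layer
    old_rank, new_rank = 32, 8            # resize from dim 32 down to 8
    lora_down = torch.randn(old_rank, in_dim, k, k)
    lora_up = torch.randn(out_dim, old_rank, 1, 1)

    # merge: collapse the two factors back into a full conv weight
    merged = lora_up.reshape(out_dim, -1) @ lora_down.reshape(old_rank, -1)
    weight = merged.reshape(out_dim, in_dim, k, k)

    # extract: SVD on the flattened weight, keep only the leading components
    U, S, Vh = torch.linalg.svd(weight.reshape(out_dim, -1))
    new_down = Vh[:new_rank, :].reshape(new_rank, in_dim, k, k)
    new_up = (U[:, :new_rank] @ torch.diag(S[:new_rank])).reshape(out_dim, new_rank, 1, 1)

The linear path is the same without the conv reshapes, and when a dynamic method is selected the truncation rank comes from rank_resize instead of a fixed new_rank.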
--- networks/resize_lora.py | 109 +++++++++++++++++++++++++++------------- 1 file changed, 74 insertions(+), 35 deletions(-) diff --git a/networks/resize_lora.py b/networks/resize_lora.py index eb745333..77d79d9f 100644 --- a/networks/resize_lora.py +++ b/networks/resize_lora.py @@ -59,6 +59,72 @@ def index_sv_fro(S, target): return index +# Modified from Kohaku-blueleaf's extract/merge functions +def extract_conv(weight, lora_rank, dynamic_method, dynamic_param, device, scale=1): + out_size, in_size, kernel_size, _ = weight.size() + U, S, Vh = torch.linalg.svd(weight.reshape(out_size, -1).to(device)) + + param_dict = rank_resize(S, lora_rank, dynamic_method, dynamic_param, scale) + + lora_rank = param_dict["new_rank"] + + U = U[:, :lora_rank] + S = S[:lora_rank] + U = U @ torch.diag(S) + Vh = Vh[:lora_rank, :] + + param_dict["lora_down"] = Vh.reshape(lora_rank, in_size, kernel_size, kernel_size).cpu() + param_dict["lora_up"] = U.reshape(out_size, lora_rank, 1, 1).cpu() + del U, S, Vh, weight + return param_dict + + +def extract_linear(weight, lora_rank, dynamic_method, dynamic_param, device, scale=1): + out_size, in_size = weight.size() + + U, S, Vh = torch.linalg.svd(weight.to(device)) + + param_dict = rank_resize(S, lora_rank, dynamic_method, dynamic_param, scale) + lora_rank = param_dict["new_rank"] + + U = U[:, :lora_rank] + S = S[:lora_rank] + U = U @ torch.diag(S) + Vh = Vh[:lora_rank, :] + + param_dict["lora_down"] = Vh.reshape(lora_rank, in_size).cpu() + param_dict["lora_up"] = U.reshape(out_size, lora_rank).cpu() + del U, S, Vh, weight + return param_dict + + +def merge_conv(lora_down, lora_up, device): + in_rank, in_size, kernel_size, k_ = lora_down.shape + out_size, out_rank, _, _ = lora_up.shape + assert in_rank == out_rank and kernel_size == k_, f"rank {in_rank} {out_rank} or kernel {kernel_size} {k_} mismatch" + + lora_down = lora_down.to(device) + lora_up = lora_up.to(device) + + merged = lora_up.reshape(out_size, -1) @ lora_down.reshape(in_rank, -1) + weight = merged.reshape(out_size, in_size, kernel_size, kernel_size) + del lora_up, lora_down + return weight + + +def merge_linear(lora_down, lora_up, device): + in_rank, in_size = lora_down.shape + out_size, out_rank = lora_up.shape + assert in_rank == out_rank, f"rank {in_rank} {out_rank} mismatch" + + lora_down = lora_down.to(device) + lora_up = lora_up.to(device) + + weight = lora_up @ lora_down + del lora_up, lora_down + return weight + + def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1): param_dict = {} @@ -147,20 +213,11 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn conv2d = (len(lora_down_weight.size()) == 4) if conv2d: - lora_down_weight = lora_down_weight.squeeze() - lora_up_weight = lora_up_weight.squeeze() - - if device: - org_device = lora_up_weight.device - lora_up_weight = lora_up_weight.to(args.device) - lora_down_weight = lora_down_weight.to(args.device) - - full_weight_matrix = torch.matmul(lora_up_weight, lora_down_weight) - - U, S, Vh = torch.linalg.svd(full_weight_matrix) - - - param_dict = rank_resize(S, new_rank, dynamic_method, dynamic_param, scale) + full_weight_matrix = merge_conv(lora_down_weight, lora_up_weight, device) + param_dict = extract_conv(full_weight_matrix, new_rank, dynamic_method, dynamic_param, device, scale) + else: + full_weight_matrix = merge_linear(lora_down_weight, lora_up_weight, device) + param_dict = extract_linear(full_weight_matrix, new_rank, dynamic_method, dynamic_param, device, scale) new_rank = param_dict['new_rank'] 
new_alpha = param_dict['new_alpha'] @@ -181,28 +238,9 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn else: verbose_str+=f"\n" - U = U[:, :new_rank] - S = S[:new_rank] - U = U @ torch.diag(S) - Vh = Vh[:new_rank, :] - - # dist = torch.cat([U.flatten(), Vh.flatten()]) - # hi_val = torch.quantile(dist, CLAMP_QUANTILE) - # low_val = -hi_val - # U = U.clamp(low_val, hi_val) - # Vh = Vh.clamp(low_val, hi_val) - - if conv2d: - U = U.unsqueeze(2).unsqueeze(3) - Vh = Vh.unsqueeze(2).unsqueeze(3) - - if device: - U = U.to(org_device) - Vh = Vh.to(org_device) - - o_lora_sd[block_down_name + "." + "lora_down.weight"] = Vh.to(save_dtype).contiguous() - o_lora_sd[block_up_name + "." + "lora_up.weight"] = U.to(save_dtype).contiguous() + o_lora_sd[block_down_name + "." + "lora_down.weight"] = param_dict["lora_down"].to(save_dtype).contiguous() + o_lora_sd[block_up_name + "." + "lora_up.weight"] = param_dict["lora_up"].to(save_dtype).contiguous() o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(new_alpha).to(save_dtype) block_down_name = None @@ -210,6 +248,7 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn lora_down_weight = None lora_up_weight = None weights_loaded = False + del param_dict if verbose: print(verbose_str) From 4a4450d6b6c310ba055c58fdcf06fe4527aeeb2c Mon Sep 17 00:00:00 2001 From: mgz-dev <49577754+mgz-dev@users.noreply.github.com> Date: Sat, 4 Mar 2023 03:10:04 -0600 Subject: [PATCH 05/13] make new_rank limit max rank, fix zero matrices -new_rank arg changed to limit the max rank of any layer. -added logic to make sure zero-ed layers do not create large lora dim --- networks/resize_lora.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/networks/resize_lora.py b/networks/resize_lora.py index 77d79d9f..1a8110c4 100644 --- a/networks/resize_lora.py +++ b/networks/resize_lora.py @@ -9,6 +9,7 @@ from tqdm import tqdm from library import train_util, model_util import numpy as np +MIN_SV = 1e-6 def load_state_dict(file_name, dtype): if model_util.is_safetensors(file_name): @@ -65,7 +66,6 @@ def extract_conv(weight, lora_rank, dynamic_method, dynamic_param, device, scale U, S, Vh = torch.linalg.svd(weight.reshape(out_size, -1).to(device)) param_dict = rank_resize(S, lora_rank, dynamic_method, dynamic_param, scale) - lora_rank = param_dict["new_rank"] U = U[:, :lora_rank] @@ -150,6 +150,15 @@ def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1): new_rank = rank new_alpha = float(scale*new_rank) + + if S[0] <= MIN_SV: # Zero matrix, set dim to 1 + new_rank = 1 + new_alpha = float(scale*new_rank) + elif new_rank > rank: # cap max rank at rank + new_rank = rank + new_alpha = float(scale*new_rank) + + # Calculate resize info s_sum = torch.sum(torch.abs(S)) s_rank = torch.sum(torch.abs(S[:new_rank])) @@ -188,7 +197,7 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn scale = network_alpha/network_dim if dynamic_method: - print(f"Dynamically determining new alphas and dims based off {dynamic_method}: {dynamic_param}") + print(f"Dynamically determining new alphas and dims based off {dynamic_method}: {dynamic_param}, max rank is {new_rank}") lora_down_weight = None lora_up_weight = None @@ -219,9 +228,6 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn full_weight_matrix = merge_linear(lora_down_weight, lora_up_weight, device) param_dict = extract_linear(full_weight_matrix, new_rank, dynamic_method, 
dynamic_param, device, scale) - new_rank = param_dict['new_rank'] - new_alpha = param_dict['new_alpha'] - if verbose: max_ratio = param_dict['max_ratio'] sum_retained = param_dict['sum_retained'] @@ -232,16 +238,15 @@ def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dyn verbose_str+=f"{block_down_name:75} | " verbose_str+=f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}" - if verbose and dynamic_method: - verbose_str+=f", dynamic | dim: {new_rank}, alpha: {new_alpha}\n" + verbose_str+=f", dynamic | dim: {param_dict['new_rank']}, alpha: {param_dict['new_alpha']}\n" else: verbose_str+=f"\n" - + new_alpha = param_dict['new_alpha'] o_lora_sd[block_down_name + "." + "lora_down.weight"] = param_dict["lora_down"].to(save_dtype).contiguous() o_lora_sd[block_up_name + "." + "lora_up.weight"] = param_dict["lora_up"].to(save_dtype).contiguous() - o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(new_alpha).to(save_dtype) + o_lora_sd[block_up_name + "." "alpha"] = torch.tensor(param_dict['new_alpha']).to(save_dtype) block_down_name = None block_up_name = None @@ -321,7 +326,7 @@ if __name__ == '__main__': parser.add_argument("--verbose", action="store_true", help="Display verbose resizing information / rank変更時の詳細情報を出力する") parser.add_argument("--dynamic_method", type=str, default=None, choices=[None, "sv_ratio", "sv_fro", "sv_cumulative"], - help="Specify dynamic resizing method, will override --new_rank") + help="Specify dynamic resizing method, --new_rank is used as a hard limit for max rank") parser.add_argument("--dynamic_param", type=float, default=None, help="Specify target for dynamic reduction") From 87846c043f344cf153bb1184047c943c153389f0 Mon Sep 17 00:00:00 2001 From: ddPn08 Date: Fri, 3 Mar 2023 00:21:18 +0900 Subject: [PATCH 06/13] fix for multi gpu training --- library/train_util.py | 8 ++++++++ train_network.py | 39 ++++++++++++++++++++++----------------- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index e15ce133..b7d5ff7c 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -2294,6 +2294,8 @@ def sample_images(accelerator, args: argparse.Namespace, epoch, steps, device, v with torch.no_grad(): with accelerator.autocast(): for i, prompt in enumerate(prompts): + if not accelerator.is_main_process: + continue prompt = prompt.strip() if len(prompt) == 0 or prompt[0] == '#': continue @@ -2351,6 +2353,12 @@ def sample_images(accelerator, args: argparse.Namespace, epoch, steps, device, v if negative_prompt is not None: negative_prompt = negative_prompt.replace(prompt_replacement[0], prompt_replacement[1]) + print(f"prompt: {prompt}") + print(f"negative_prompt: {negative_prompt}") + print(f"height: {height}") + print(f"width: {width}") + print(f"sample_steps: {sample_steps}") + print(f"scale: {scale}") image = pipeline(prompt, height, width, sample_steps, scale, negative_prompt).images[0] ts_str = time.strftime('%Y%m%d%H%M%S', time.localtime()) diff --git a/train_network.py b/train_network.py index ef5a0831..5361c992 100644 --- a/train_network.py +++ b/train_network.py @@ -106,6 +106,7 @@ def train(args): # acceleratorを準備する print("prepare accelerator") accelerator, unwrap_model = train_util.prepare_accelerator(args) + is_main_process = accelerator.is_main_process # mixed precisionに対応した型を用意しておき適宜castする weight_dtype, save_dtype = train_util.prepare_dtype(args) @@ -175,12 +176,13 @@ def train(args): # 学習ステップ数を計算する if 
args.max_train_epochs is not None: - args.max_train_steps = args.max_train_epochs * len(train_dataloader) - print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") + args.max_train_steps = args.max_train_epochs * math.ceil(len(train_dataloader) / accelerator.num_processes) + if is_main_process: + print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}") # lr schedulerを用意する lr_scheduler = train_util.get_scheduler_fix(args.lr_scheduler, optimizer, num_warmup_steps=args.lr_warmup_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * accelerator.num_processes * args.gradient_accumulation_steps, num_cycles=args.lr_scheduler_num_cycles, power=args.lr_scheduler_power) # 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする @@ -251,15 +253,17 @@ def train(args): # 学習する # TODO: find a way to handle total batch size when there are multiple datasets total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - print("running training / 学習開始") - print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}") - print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}") - print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}") - print(f" num epochs / epoch数: {num_train_epochs}") - print(f" batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}") - # print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}") - print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}") - print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}") + + if is_main_process: + print("running training / 学習開始") + print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}") + print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}") + print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}") + print(f" num epochs / epoch数: {num_train_epochs}") + print(f" batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}") + # print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}") + print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}") + print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}") # TODO refactor metadata creation and move to util metadata = { @@ -461,7 +465,8 @@ def train(args): loss_list = [] loss_total = 0.0 for epoch in range(num_train_epochs): - print(f"epoch {epoch+1}/{num_train_epochs}") + if is_main_process: + print(f"epoch {epoch+1}/{num_train_epochs}") train_dataset_group.set_current_epoch(epoch + 1) metadata["ss_epoch"] = str(epoch+1) @@ -573,9 +578,10 @@ def train(args): print(f"removing old checkpoint: {old_ckpt_file}") os.remove(old_ckpt_file) - saving = train_util.save_on_epoch_end(args, save_func, remove_old_func, epoch + 1, num_train_epochs) - if saving and args.save_state: - train_util.save_state_on_epoch_end(args, accelerator, model_name, epoch + 1) + if is_main_process: + saving = train_util.save_on_epoch_end(args, save_func, remove_old_func, epoch + 1, num_train_epochs) + if saving and args.save_state: + train_util.save_state_on_epoch_end(args, accelerator, model_name, 
epoch + 1) train_util.sample_images(accelerator, args, epoch + 1, global_step, accelerator.device, vae, tokenizer, text_encoder, unet) @@ -584,7 +590,6 @@ def train(args): metadata["ss_epoch"] = str(num_train_epochs) metadata["ss_training_finished_at"] = str(time.time()) - is_main_process = accelerator.is_main_process if is_main_process: network = unwrap_model(network) From 82aac2646945c8b483b9272d712eca16fe60b7aa Mon Sep 17 00:00:00 2001 From: rvhfxb <116002789+rvhfxb@users.noreply.github.com> Date: Wed, 8 Mar 2023 22:42:41 +0900 Subject: [PATCH 07/13] Update train_util.py --- library/train_util.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 75176e13..f1060cbb 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -906,10 +906,14 @@ class FineTuningDataset(BaseDataset): if os.path.exists(image_key): abs_path = image_key else: - # わりといい加減だがいい方法が思いつかん - abs_path = glob_images(subset.image_dir, image_key) - assert len(abs_path) >= 1, f"no image / 画像がありません: {image_key}" - abs_path = abs_path[0] + npz_path = os.path.join(glob.escape(train_data_dir), image_key + ".npz") + if os.path.exists(npz_path): + abs_path = npz_path + else: + # わりといい加減だがいい方法が思いつかん + abs_path = glob_images(subset.image_dir, image_key) + assert len(abs_path) >= 1, f"no image / 画像がありません: {image_key}" + abs_path = abs_path[0] caption = img_md.get('caption') tags = img_md.get('tags') From 68cd874bb68f30e792243659fbeb8c98733cd365 Mon Sep 17 00:00:00 2001 From: mio <74481573+mio2333@users.noreply.github.com> Date: Fri, 10 Mar 2023 18:29:34 +0800 Subject: [PATCH 08/13] Append sys path for import_module This will be better if we run the scripts we do not run the training script from the current directory. This is reasonable as some other projects will use this as a subfolder, such as https://github.com/ddPn08/kohya-sd-scripts-webui. I can not run the script without adding this. 
--- train_network.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/train_network.py b/train_network.py index cf64c894..8be8305c 100644 --- a/train_network.py +++ b/train_network.py @@ -134,6 +134,8 @@ def train(args): gc.collect() # prepare network + import sys + sys.path.append(os.path.dirname(__file__)) print("import network module:", args.network_module) network_module = importlib.import_module(args.network_module) From 4ad8e75291ce77974b6441c9710a459cc95ee802 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Fri, 10 Mar 2023 21:10:22 +0900 Subject: [PATCH 09/13] fix to work with dim>320 --- networks/resize_lora.py | 1 - networks/svd_merge_lora.py | 9 +++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/networks/resize_lora.py b/networks/resize_lora.py index 1a8110c4..dfacd666 100644 --- a/networks/resize_lora.py +++ b/networks/resize_lora.py @@ -1,6 +1,5 @@ # Convert LoRA to different rank approximation (should only be used to go to lower rank) # This code is based off the extract_lora_from_models.py file which is based on https://github.com/cloneofsimo/lora/blob/develop/lora_diffusion/cli_svd.py -# Thanks to cloneofsimo and kohya import argparse import torch diff --git a/networks/svd_merge_lora.py b/networks/svd_merge_lora.py index c8e39b80..3a03b0d5 100644 --- a/networks/svd_merge_lora.py +++ b/networks/svd_merge_lora.py @@ -23,16 +23,16 @@ def load_state_dict(file_name, dtype): return sd -def save_to_file(file_name, model, state_dict, dtype): +def save_to_file(file_name, state_dict, dtype): if dtype is not None: for key in list(state_dict.keys()): if type(state_dict[key]) == torch.Tensor: state_dict[key] = state_dict[key].to(dtype) if os.path.splitext(file_name)[1] == '.safetensors': - save_file(model, file_name) + save_file(state_dict, file_name) else: - torch.save(model, file_name) + torch.save(state_dict, file_name) def merge_lora_models(models, ratios, new_rank, new_conv_rank, device, merge_dtype): @@ -105,6 +105,7 @@ def merge_lora_models(models, ratios, new_rank, new_conv_rank, device, merge_dty mat = mat.squeeze() module_new_rank = new_conv_rank if conv2d_3x3 else new_rank + module_new_rank = min(module_new_rank, in_dim, out_dim) # LoRA rank cannot exceed the original dim U, S, Vh = torch.linalg.svd(mat) @@ -156,7 +157,7 @@ def merge(args): state_dict = merge_lora_models(args.models, args.ratios, args.new_rank, new_conv_rank, args.device, merge_dtype) print(f"saving model to: {args.save_to}") - save_to_file(args.save_to, state_dict, state_dict, save_dtype) + save_to_file(args.save_to, state_dict, save_dtype) if __name__ == '__main__': From 75d1883da630c033841ff7fc79a94ca7131dd3d3 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Fri, 10 Mar 2023 21:12:15 +0900 Subject: [PATCH 10/13] fix LoRA rank is limited to target dim --- networks/extract_lora_from_models.py | 33 +++++++------------ networks/lora.py | 49 +++++++++++++++++----------- 2 files changed, 42 insertions(+), 40 deletions(-) diff --git a/networks/extract_lora_from_models.py b/networks/extract_lora_from_models.py index 5d77b9e5..b5d18d9b 100644 --- a/networks/extract_lora_from_models.py +++ b/networks/extract_lora_from_models.py @@ -103,7 +103,8 @@ def svd(args): if args.device: mat = mat.to(args.device) - # print(mat.size(), mat.device, rank, in_dim, out_dim) + + # print(lora_name, mat.size(), mat.device, rank, in_dim, out_dim) rank = min(rank, in_dim, out_dim) # LoRA rank cannot exceed the original dim if conv2d: @@ -137,27 +138,17 @@ def svd(args): lora_weights[lora_name] = (U, Vh) # make state 
dict for LoRA - lora_network_o.apply_to(text_encoder_o, unet_o, text_encoder_different, True) # to make state dict - lora_sd = lora_network_o.state_dict() - print(f"LoRA has {len(lora_sd)} weights.") - - for key in list(lora_sd.keys()): - if "alpha" in key: - continue - - lora_name = key.split('.')[0] - i = 0 if "lora_up" in key else 1 - - weights = lora_weights[lora_name][i] - # print(key, i, weights.size(), lora_sd[key].size()) - # if len(lora_sd[key].size()) == 4: - # weights = weights.unsqueeze(2).unsqueeze(3) - - assert weights.size() == lora_sd[key].size(), f"size unmatch: {key}" - lora_sd[key] = weights + lora_sd = {} + for lora_name, (up_weight, down_weight) in lora_weights.items(): + lora_sd[lora_name + '.lora_up.weight'] = up_weight + lora_sd[lora_name + '.lora_down.weight'] = down_weight + lora_sd[lora_name + '.alpha'] = torch.tensor(down_weight.size()[0]) # load state dict to LoRA and save it - info = lora_network_o.load_state_dict(lora_sd) + lora_network_save = lora.create_network_from_weights(1.0, None, None, text_encoder_o, unet_o, weights_sd=lora_sd) + lora_network_save.apply_to(text_encoder_o, unet_o) # create internal module references for state_dict + + info = lora_network_save.load_state_dict(lora_sd) print(f"Loading extracted LoRA weights: {info}") dir_name = os.path.dirname(args.save_to) @@ -167,7 +158,7 @@ def svd(args): # minimum metadata metadata = {"ss_network_module": "networks.lora", "ss_network_dim": str(args.dim), "ss_network_alpha": str(args.dim)} - lora_network_o.save_weights(args.save_to, save_dtype, metadata) + lora_network_save.save_weights(args.save_to, save_dtype, metadata) print(f"LoRA weights are saved to: {args.save_to}") diff --git a/networks/lora.py b/networks/lora.py index c0181c02..6d3875dc 100644 --- a/networks/lora.py +++ b/networks/lora.py @@ -21,30 +21,34 @@ class LoRAModule(torch.nn.Module): """ if alpha == 0 or None, alpha is rank (no scaling). 
""" super().__init__() self.lora_name = lora_name - self.lora_dim = lora_dim if org_module.__class__.__name__ == 'Conv2d': in_dim = org_module.in_channels out_dim = org_module.out_channels + else: + in_dim = org_module.in_features + out_dim = org_module.out_features - self.lora_dim = min(self.lora_dim, in_dim, out_dim) - if self.lora_dim != lora_dim: - print(f"{lora_name} dim (rank) is changed to: {self.lora_dim}") + # if limit_rank: + # self.lora_dim = min(lora_dim, in_dim, out_dim) + # if self.lora_dim != lora_dim: + # print(f"{lora_name} dim (rank) is changed to: {self.lora_dim}") + # else: + self.lora_dim = lora_dim + if org_module.__class__.__name__ == 'Conv2d': kernel_size = org_module.kernel_size stride = org_module.stride padding = org_module.padding self.lora_down = torch.nn.Conv2d(in_dim, self.lora_dim, kernel_size, stride, padding, bias=False) self.lora_up = torch.nn.Conv2d(self.lora_dim, out_dim, (1, 1), (1, 1), bias=False) else: - in_dim = org_module.in_features - out_dim = org_module.out_features - self.lora_down = torch.nn.Linear(in_dim, lora_dim, bias=False) - self.lora_up = torch.nn.Linear(lora_dim, out_dim, bias=False) + self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False) + self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False) if type(alpha) == torch.Tensor: alpha = alpha.detach().float().numpy() # without casting, bf16 causes error - alpha = lora_dim if alpha is None or alpha == 0 else alpha + alpha = self.lora_dim if alpha is None or alpha == 0 else alpha self.scale = alpha / self.lora_dim self.register_buffer('alpha', torch.tensor(alpha)) # 定数として扱える @@ -149,12 +153,13 @@ def create_network(multiplier, network_dim, network_alpha, vae, text_encoder, un return network -def create_network_from_weights(multiplier, file, vae, text_encoder, unet, **kwargs): - if os.path.splitext(file)[1] == '.safetensors': - from safetensors.torch import load_file, safe_open - weights_sd = load_file(file) - else: - weights_sd = torch.load(file, map_location='cpu') +def create_network_from_weights(multiplier, file, vae, text_encoder, unet, weights_sd=None, **kwargs): + if weights_sd is None: + if os.path.splitext(file)[1] == '.safetensors': + from safetensors.torch import load_file, safe_open + weights_sd = load_file(file) + else: + weights_sd = torch.load(file, map_location='cpu') # get dim/alpha mapping modules_dim = {} @@ -174,7 +179,7 @@ def create_network_from_weights(multiplier, file, vae, text_encoder, unet, **kwa # support old LoRA without alpha for key in modules_dim.keys(): if key not in modules_alpha: - modules_alpha = modules_dim[key] + modules_alpha = modules_dim[key] network = LoRANetwork(text_encoder, unet, multiplier=multiplier, modules_dim=modules_dim, modules_alpha=modules_alpha) network.weights_sd = weights_sd @@ -183,7 +188,8 @@ def create_network_from_weights(multiplier, file, vae, text_encoder, unet, **kwa class LoRANetwork(torch.nn.Module): # is it possible to apply conv_in and conv_out? 
- UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention", "ResnetBlock2D", "Downsample2D", "Upsample2D"] + UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"] + UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"] TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"] LORA_PREFIX_UNET = 'lora_unet' LORA_PREFIX_TEXT_ENCODER = 'lora_te' @@ -245,7 +251,12 @@ class LoRANetwork(torch.nn.Module): text_encoder, LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE) print(f"create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.") - self.unet_loras = create_modules(LoRANetwork.LORA_PREFIX_UNET, unet, LoRANetwork.UNET_TARGET_REPLACE_MODULE) + # extend U-Net target modules if conv2d 3x3 is enabled, or load from weights + target_modules = LoRANetwork.UNET_TARGET_REPLACE_MODULE + if modules_dim is not None or self.conv_lora_dim is not None: + target_modules += LoRANetwork.UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 + + self.unet_loras = create_modules(LoRANetwork.LORA_PREFIX_UNET, unet, target_modules) print(f"create LoRA for U-Net: {len(self.unet_loras)} modules.") self.weights_sd = None @@ -371,7 +382,7 @@ class LoRANetwork(torch.nn.Module): else: torch.save(state_dict, file) - @staticmethod + @ staticmethod def set_regions(networks, image): image = image.astype(np.float32) / 255.0 for i, network in enumerate(networks[:3]): From 618592c52b9e82f7abc78105ecc23014a3505b19 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Fri, 10 Mar 2023 21:31:59 +0900 Subject: [PATCH 11/13] npz check to use subset, add dadap warn close #274 --- library/train_util.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/library/train_util.py b/library/train_util.py index 68bce108..718fe36d 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -912,7 +912,7 @@ class FineTuningDataset(BaseDataset): if os.path.exists(image_key): abs_path = image_key else: - npz_path = os.path.join(glob.escape(train_data_dir), image_key + ".npz") + npz_path = os.path.join(subset.image_dir, image_key + ".npz") if os.path.exists(npz_path): abs_path = npz_path else: @@ -1761,15 +1761,22 @@ def get_optimizer(args, trainable_params): raise ImportError("No dadaptation / dadaptation がインストールされていないようです") print(f"use D-Adaptation Adam optimizer | {optimizer_kwargs}") - min_lr = lr + actual_lr = lr + lr_count = 1 if type(trainable_params) == list and type(trainable_params[0]) == dict: + lrs = set() + actual_lr = trainable_params[0].get("lr", actual_lr) for group in trainable_params: - min_lr = min(min_lr, group.get("lr", lr)) + lrs.add(group.get("lr", actual_lr)) + lr_count = len(lrs) - if min_lr <= 0.1: + if actual_lr <= 0.1: print( - f'learning rate is too low. If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: {min_lr}') + f'learning rate is too low. If using dadaptation, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: lr={actual_lr}') print('recommend option: lr=1.0 / 推奨は1.0です') + if lr_count > 1: + print( + f"when multiple learning rates are specified with dadaptation (e.g. 
for Text Encoder and U-Net), only the first one will take effect / D-Adaptationで複数の学習率を指定した場合(Text EncoderとU-Netなど)、最初の学習率のみが有効になります: lr={actual_lr}") optimizer_class = dadaptation.DAdaptAdam optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) From c78c51c78f26f3bba646c4f86bf769fdfd236fd9 Mon Sep 17 00:00:00 2001 From: Kohya S Date: Fri, 10 Mar 2023 21:59:25 +0900 Subject: [PATCH 12/13] update documents --- README.md | 25 ++++++++++++++++++++++++- train_README-ja.md | 8 ++++++++ train_network_README-ja.md | 4 ++++ 3 files changed, 36 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index aaf371cb..437b1120 100644 --- a/README.md +++ b/README.md @@ -127,7 +127,30 @@ The majority of scripts is licensed under ASL 2.0 (including codes from Diffuser ## Change History -- 9 Mar. 2023, 2023/3/9: +- 10 Mar. 2023, 2023/3/10: release v0.5.1 + - Fix to LoRA modules in the model are same to the previous (before 0.5.0) if Conv2d-3x3 is disabled (no `conv_dim` arg, default). + - Conv2D with kernel size 1x1 in ResNet modules were accidentally included in v0.5.0. + - Trained models with v0.5.0 will work with Web UI's built-in LoRA and Additional Networks extension. + - Fix an issue that dim (rank) of LoRA module is limited to the in/out dimensions of the target Linear/Conv2d (in case of the dim > 320). + - `resize_lora.py` now have a feature to `dynamic resizing` which means each LoRA module can have different ranks (dims). Thanks to mgz-dev for this great work! + - The appropriate rank is selected based on the complexity of each module with an algorithm specified in the command line arguments. For details: https://github.com/kohya-ss/sd-scripts/pull/243 + - Multiple GPUs training is finally supported in `train_network.py`. Thanks to ddPn08 to solve this long running issue! + - Dataset with fine-tuning method (with metadata json) now works without images if `.npz` files exist. Thanks to rvhfxb! + - `train_network.py` can work if the current directory is not the directory where the script is in. Thanks to mio2333! + - Fix `extract_lora_from_models.py` and `svd_merge_lora.py` doesn't work with higher rank (>320). + + - LoRAのConv2d-3x3拡張を行わない場合(`conv_dim` を指定しない場合)、以前(v0.5.0)と同じ構成になるよう修正しました。 + - ResNetのカーネルサイズ1x1のConv2dが誤って対象になっていました。 + - ただv0.5.0で学習したモデルは Additional Networks 拡張、およびWeb UIのLoRA機能で問題なく使えると思われます。 + - LoRAモジュールの dim (rank) が、対象モジュールの次元数以下に制限される不具合を修正しました(320より大きい dim を指定した場合)。 + - `resize_lora.py` に `dynamic resizing` (リサイズ後の各LoRAモジュールが異なるrank (dim) を持てる機能)を追加しました。mgz-dev 氏の貢献に感謝します。 + - 適切なランクがコマンドライン引数で指定したアルゴリズムにより自動的に選択されます。詳細はこちらをご覧ください: https://github.com/kohya-ss/sd-scripts/pull/243 + - `train_network.py` でマルチGPU学習をサポートしました。長年の懸案を解決された ddPn08 氏に感謝します。 + - fine-tuning方式のデータセット(メタデータ.jsonファイルを使うデータセット)で `.npz` が存在するときには画像がなくても動作するようになりました。rvhfxb 氏に感謝します。 + - 他のディレクトリから `train_network.py` を呼び出しても動作するよう変更しました。 mio2333 氏に感謝します。 + - `extract_lora_from_models.py` および `svd_merge_lora.py` が320より大きいrankを指定すると動かない不具合を修正しました。 + +- 9 Mar. 2023, 2023/3/9: release v0.5.0 - There may be problems due to major changes. If you cannot revert back to the previous version when problems occur, please do not update for a while. - Minimum metadata (module name, dim, alpha and network_args) is recorded even with `--no_metadata`, issue https://github.com/kohya-ss/sd-scripts/issues/254 - `train_network.py` supports LoRA for Conv2d-3x3 (extended to conv2d with a kernel size not 1x1). 
diff --git a/train_README-ja.md b/train_README-ja.md index 479f9604..d5f1b5fc 100644 --- a/train_README-ja.md +++ b/train_README-ja.md @@ -502,6 +502,14 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b clip_skipと同様に、モデルの学習状態と異なる長さで学習するには、ある程度の教師データ枚数、長めの学習時間が必要になると思われます。 +- `--persistent_data_loader_workers` + + Windows環境で指定するとエポック間の待ち時間が大幅に短縮されます。 + +- `--max_data_loader_n_workers` + + データ読み込みのプロセス数を指定します。プロセス数が多いとデータ読み込みが速くなりGPUを効率的に利用できますが、メインメモリを消費します。デフォルトは「`8` または `CPU同時実行スレッド数-1` の小さいほう」なので、メインメモリに余裕がない場合や、GPU使用率が90%程度以上なら、それらの数値を見ながら `2` または `1` 程度まで下げてください。 + - `--logging_dir` / `--log_prefix` 学習ログの保存に関するオプションです。logging_dirオプションにログ保存先フォルダを指定してください。TensorBoard形式のログが保存されます。 diff --git a/train_network_README-ja.md b/train_network_README-ja.md index 4a79a6f7..79d1709f 100644 --- a/train_network_README-ja.md +++ b/train_network_README-ja.md @@ -64,6 +64,10 @@ accelerate launch --num_cpu_threads_per_process 1 train_network.py * LoRAのRANKを指定します(``--networkdim=4``など)。省略時は4になります。数が多いほど表現力は増しますが、学習に必要なメモリ、時間は増えます。また闇雲に増やしても良くないようです。 * `--network_alpha` * アンダーフローを防ぎ安定して学習するための ``alpha`` 値を指定します。デフォルトは1です。``network_dim``と同じ値を指定すると以前のバージョンと同じ動作になります。 +* `--persistent_data_loader_workers` + * Windows環境で指定するとエポック間の待ち時間が大幅に短縮されます。 +* `--max_data_loader_n_workers` + * データ読み込みのプロセス数を指定します。プロセス数が多いとデータ読み込みが速くなりGPUを効率的に利用できますが、メインメモリを消費します。デフォルトは「`8` または `CPU同時実行スレッド数-1` の小さいほう」なので、メインメモリに余裕がない場合や、GPU使用率が90%程度以上なら、それらの数値を見ながら `2` または `1` 程度まで下げてください。 * `--network_weights` * 学習前に学習済みのLoRAの重みを読み込み、そこから追加で学習します。 * `--network_train_unet_only` From b1774608074368b85f9a44659706eff5e9cd52bb Mon Sep 17 00:00:00 2001 From: Kohya S Date: Fri, 10 Mar 2023 22:02:17 +0900 Subject: [PATCH 13/13] restore comment --- networks/resize_lora.py | 1 + 1 file changed, 1 insertion(+) diff --git a/networks/resize_lora.py b/networks/resize_lora.py index dfacd666..09a19c19 100644 --- a/networks/resize_lora.py +++ b/networks/resize_lora.py @@ -1,5 +1,6 @@ # Convert LoRA to different rank approximation (should only be used to go to lower rank) # This code is based off the extract_lora_from_models.py file which is based on https://github.com/cloneofsimo/lora/blob/develop/lora_diffusion/cli_svd.py +# Thanks to cloneofsimo import argparse import torch