Mirror of https://github.com/kohya-ss/sd-scripts.git, synced 2026-04-09 06:45:09 +00:00
Support : OFT merge to base model (#1580)
* Support : OFT merge to base model
* Fix typo
* Fix typo_2
* Delete unused parameter 'eye'
@@ -8,10 +8,12 @@ from tqdm import tqdm
 from library import sai_model_spec, sdxl_model_util, train_util
 import library.model_util as model_util
 import lora
+import oft
 from library.utils import setup_logging
 
 setup_logging()
 import logging
 
 logger = logging.getLogger(__name__)
+import concurrent.futures
 
 
 def load_state_dict(file_name, dtype):
     if os.path.splitext(file_name)[1] == ".safetensors":
@@ -39,15 +41,28 @@ def save_to_file(file_name, model, state_dict, dtype, metadata):
     else:
         torch.save(model, file_name)
 
 
+def detect_method_from_training_model(models, dtype):
+    for model in models:
+        lora_sd, _ = load_state_dict(model, dtype)
+        for key in tqdm(lora_sd.keys()):
+            if 'lora_up' in key or 'lora_down' in key:
+                return 'LoRA'
+            elif "oft_blocks" in key:
+                return 'OFT'
+
+
 def merge_to_sd_model(text_encoder1, text_encoder2, unet, models, ratios, merge_dtype):
     text_encoder1.to(merge_dtype)
     text_encoder1.to(merge_dtype)
     unet.to(merge_dtype)
 
+    # detect the method: OFT or LoRA_module
+    method = detect_method_from_training_model(models, merge_dtype)
+    logger.info(f"method:{method}")
+
     # create module map
     name_to_module = {}
     for i, root_module in enumerate([text_encoder1, text_encoder2, unet]):
+        if method == 'LoRA':
             if i <= 1:
                 if i == 0:
                     prefix = lora.LoRANetwork.LORA_PREFIX_TEXT_ENCODER1
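For reference, a minimal sketch of what detect_method_from_training_model keys on: a LoRA checkpoint stores paired lora_down/lora_up weights per module, while an OFT checkpoint stores oft_blocks tensors. The state dicts below are hand-made stand-ins with hypothetical key names, not real checkpoints.

    import torch

    # toy state dicts mimicking the two checkpoint formats (keys are made up)
    lora_like = {
        "lora_unet_attn_to_q.lora_down.weight": torch.zeros(4, 320),
        "lora_unet_attn_to_q.lora_up.weight": torch.zeros(320, 4),
    }
    oft_like = {
        "oft_unet_attn_to_q.oft_blocks": torch.zeros(4, 80, 80),
        "oft_unet_attn_to_q.alpha": torch.tensor(1e-3),
    }

    def detect(sd):
        # same key test as detect_method_from_training_model in the diff
        for key in sd.keys():
            if 'lora_up' in key or 'lora_down' in key:
                return 'LoRA'
            elif "oft_blocks" in key:
                return 'OFT'

    print(detect(lora_like))  # -> LoRA
    print(detect(oft_like))   # -> OFT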
@@ -59,20 +74,34 @@ def merge_to_sd_model(text_encoder1, text_encoder2, unet, models, ratios, merge_dtype):
                 target_replace_modules = (
                     lora.LoRANetwork.UNET_TARGET_REPLACE_MODULE + lora.LoRANetwork.UNET_TARGET_REPLACE_MODULE_CONV2D_3X3
                 )
+        elif method == 'OFT':
+            prefix = oft.OFTNetwork.OFT_PREFIX_UNET
+            target_replace_modules = (
+                oft.OFTNetwork.UNET_TARGET_REPLACE_MODULE_ALL_LINEAR + oft.OFTNetwork.UNET_TARGET_REPLACE_MODULE_CONV2D_3X3
+            )
 
         for name, module in root_module.named_modules():
             if module.__class__.__name__ in target_replace_modules:
                 for child_name, child_module in module.named_modules():
+                    if method == 'LoRA':
                         if child_module.__class__.__name__ == "Linear" or child_module.__class__.__name__ == "Conv2d":
                             lora_name = prefix + "." + name + "." + child_name
                             lora_name = lora_name.replace(".", "_")
                             name_to_module[lora_name] = child_module
+                    elif method == 'OFT':
+                        if child_module.__class__.__name__ == "Linear" or child_module.__class__.__name__ == "Conv2d":
+                            oft_name = prefix + "." + name + "." + child_name
+                            oft_name = oft_name.replace(".", "_")
+                            name_to_module[oft_name] = child_module
 
     for model, ratio in zip(models, ratios):
         logger.info(f"loading: {model}")
         lora_sd, _ = load_state_dict(model, merge_dtype)
 
         logger.info(f"merging...")
 
+        if method == 'LoRA':
             for key in tqdm(lora_sd.keys()):
                 if "lora_down" in key:
                     up_key = key.replace("lora_down", "lora_up")
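A note on the module map built above: a checkpoint key is derived from the module path by joining the prefix, the parent module name, and the child name with dots, then replacing every dot with an underscore, so lookups in name_to_module line up with the keys in lora_sd. A small illustration, assuming the usual "lora_unet" prefix and a hypothetical module path:

    prefix = "lora_unet"                 # assumed value of lora.LoRANetwork.LORA_PREFIX_UNET
    name = "down_blocks.0.attentions.0"  # hypothetical parent module path
    child_name = "proj_in"               # hypothetical child module

    lora_name = prefix + "." + name + "." + child_name
    lora_name = lora_name.replace(".", "_")
    print(lora_name)  # lora_unet_down_blocks_0_attentions_0_proj_in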
@@ -116,6 +145,73 @@ def merge_to_sd_model(text_encoder1, text_encoder2, unet, models, ratios, merge_dtype):
                     module.weight = torch.nn.Parameter(weight)
 
+        elif method == 'OFT':
+
+            multiplier = 1.0
+            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+            for key in tqdm(lora_sd.keys()):
+                if "oft_blocks" in key:
+                    oft_blocks = lora_sd[key]
+                    dim = oft_blocks.shape[0]
+                    break
+            for key in tqdm(lora_sd.keys()):
+                if "alpha" in key:
+                    oft_blocks = lora_sd[key]
+                    alpha = oft_blocks.item()
+                    break
+
+            def merge_to(key):
+                if "alpha" in key:
+                    return
+
+                # find original module for this OFT
+                module_name = ".".join(key.split(".")[:-1])
+                if module_name not in name_to_module:
+                    return
+                module = name_to_module[module_name]
+
+                # logger.info(f"apply {key} to {module}")
+
+                oft_blocks = lora_sd[key]
+
+                if isinstance(module, torch.nn.Linear):
+                    out_dim = module.out_features
+                elif isinstance(module, torch.nn.Conv2d):
+                    out_dim = module.out_channels
+
+                num_blocks = dim
+                block_size = out_dim // dim
+                constraint = (0 if alpha is None else alpha) * out_dim
+
+                block_Q = oft_blocks - oft_blocks.transpose(1, 2)
+                norm_Q = torch.norm(block_Q.flatten())
+                new_norm_Q = torch.clamp(norm_Q, max=constraint)
+                block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
+                I = torch.eye(block_size, device=oft_blocks.device).unsqueeze(0).repeat(num_blocks, 1, 1)
+                block_R = torch.matmul(I + block_Q, (I - block_Q).inverse())
+                block_R_weighted = multiplier * block_R + (1 - multiplier) * I
+                R = torch.block_diag(*block_R_weighted)
+
+                # get org weight
+                org_sd = module.state_dict()
+                org_weight = org_sd["weight"].to(device)
+
+                R = R.to(org_weight.device, dtype=org_weight.dtype)
+
+                if org_weight.dim() == 4:
+                    weight = torch.einsum("oihw, op -> pihw", org_weight, R)
+                else:
+                    weight = torch.einsum("oi, op -> pi", org_weight, R)
+
+                weight = weight.contiguous()  # Make Tensor contiguous; required due to ThreadPoolExecutor
+
+                module.weight = torch.nn.Parameter(weight)
+
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                list(tqdm(executor.map(merge_to, lora_sd.keys()), total=len(lora_sd.keys())))
+
 
 def merge_lora_models(models, ratios, merge_dtype, concat=False, shuffle=False):
     base_alphas = {}  # alpha for merged model
     base_dims = {}
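The OFT branch merges by rotation rather than addition: block_Q is made skew-symmetric, its norm is clamped by the alpha-derived constraint, and the Cayley transform R = (I + Q)(I - Q)^(-1) turns it into an orthogonal matrix; torch.block_diag assembles the per-block rotations and the einsum rotates the output channels of the original weight. A self-contained check of that property, with toy shapes instead of real UNet dimensions and the alpha constraint omitted:

    import torch

    num_blocks, block_size = 4, 8  # stand-ins for dim and out_dim // dim
    oft_blocks = torch.randn(num_blocks, block_size, block_size) * 0.01

    block_Q = oft_blocks - oft_blocks.transpose(1, 2)  # skew-symmetric per block
    I = torch.eye(block_size).unsqueeze(0).repeat(num_blocks, 1, 1)
    block_R = torch.matmul(I + block_Q, (I - block_Q).inverse())  # Cayley transform
    R = torch.block_diag(*block_R)  # (32, 32) block-diagonal rotation

    # Cayley of a skew-symmetric matrix is orthogonal: R @ R.T == I (up to float error)
    assert torch.allclose(R @ R.T, torch.eye(R.shape[0]), atol=1e-5)

    # rotating output channels preserves the weight norm
    org_weight = torch.randn(32, 16)  # toy Linear weight (out_features, in_features)
    merged = torch.einsum("oi, op -> pi", org_weight, R)
    print(torch.allclose(merged.norm(), org_weight.norm(), atol=1e-4))  # True

Because each merge_to call only reads shared tensors and writes its own module's weight, the per-key merges are independent, which is why the diff can fan them out over a ThreadPoolExecutor; the .contiguous() call keeps each thread from handing back a non-contiguous view.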