From 9a607bec43dcc0843b09f5e61d64e7341ce8e728 Mon Sep 17 00:00:00 2001 From: Kohya S <52813779+kohya-ss@users.noreply.github.com> Date: Thu, 12 Feb 2026 23:17:25 +0900 Subject: [PATCH] feat: improve ComfyUI conversion script with prefix constants and module name adjustments --- docs/anima_train_network.md | 34 ++++++++++++++++++++++++- networks/convert_anima_lora_to_comfy.py | 14 ++++++---- 2 files changed, 42 insertions(+), 6 deletions(-) diff --git a/docs/anima_train_network.md b/docs/anima_train_network.md index 934c832f..b358d202 100644 --- a/docs/anima_train_network.md +++ b/docs/anima_train_network.md @@ -346,6 +346,8 @@ To apply LoRA to the LLM Adapter blocks: --network_args "train_llm_adapter=True" ``` +In preliminary tests, lowering the learning rate for the LLM Adapter seems to improve stability. Adjust it using something like: `"network_reg_lrs=.*llm_adapter.*=5e-5"`. + ### 5.4. Other Network Args / その他のネットワーク引数 * `--network_args "verbose=True"` - Print all LoRA module names and their dimensions. @@ -396,6 +398,8 @@ To apply LoRA to the LLM Adapter blocks: LLM AdapterブロックにLoRAを適用するには:`--network_args "train_llm_adapter=True"` +簡易な検証ではLLM Adapterの学習率はある程度下げた方が安定するようです。`"network_reg_lrs=.*llm_adapter.*=5e-5"`などで調整してください。 + ### 5.4. その他のネットワーク引数 * `verbose=True` - 全LoRAモジュール名とdimを表示 @@ -573,7 +577,35 @@ Qwen3に個別の学習率を指定するには`--text_encoder_lr`を使用し -## 9. Others / その他 +## 9. Related Tools / 関連ツール + +### `networks/convert_anima_lora_to_comfy.py` + +A script to convert LoRA models to ComfyUI-compatible format. ComfyUI does not directly support sd-scripts format Qwen3 LoRA, so conversion is necessary (conversion may not be needed for DiT-only LoRA). You can convert from the sd-scripts format to ComfyUI format with: + +```bash +python networks/convert_anima_lora_to_comfy.py path/to/source.safetensors path/to/destination.safetensors +``` + +Using the `--reverse` option allows conversion in the opposite direction (ComfyUI format to sd-scripts format). 
However, reverse conversion is only possible for LoRAs converted by this script. LoRAs created with other training tools cannot be converted. + +
+日本語 + +**`networks/convert_anima_lora_to_comfy.py`** + +LoRAモデルをComfyUI互換形式に変換するスクリプト。ComfyUIがsd-scripts形式のQwen3 LoRAを直接サポートしていないため、変換が必要です(DiTのみのLoRAの場合は変換不要のようです)。sd-scripts形式からComfyUI形式への変換は以下のコマンドで行います: + +```bash +python networks/convert_anima_lora_to_comfy.py path/to/source.safetensors path/to/destination.safetensors +``` + +`--reverse`オプションを付けると、逆変換(ComfyUI形式からsd-scripts形式)も可能です。ただし、逆変換ができるのはこのスクリプトで変換したLoRAに限ります。他の学習ツールで作成したLoRAは変換できません。 + +
+ + +## 10. Others / その他 ### Metadata Saved in LoRA Models diff --git a/networks/convert_anima_lora_to_comfy.py b/networks/convert_anima_lora_to_comfy.py index 880a2f78..5ff2b9ee 100644 --- a/networks/convert_anima_lora_to_comfy.py +++ b/networks/convert_anima_lora_to_comfy.py @@ -11,6 +11,9 @@ import logging logger = logging.getLogger(__name__) +COMFYUI_DIT_PREFIX = "diffusion_model." +COMFYUI_QWEN3_PREFIX = "text_encoders.qwen3_06b.transformer.model." + def main(args): # load source safetensors @@ -48,6 +51,7 @@ def main(args): # Convert back illegal dots in module names # DiT + original_module_name = original_module_name.replace("llm.adapter", "llm_adapter") original_module_name = original_module_name.replace(".linear.", ".linear_") original_module_name = original_module_name.replace("t.embedding.norm", "t_embedding_norm") original_module_name = original_module_name.replace("x.embedder", "x_embedder") @@ -78,16 +82,16 @@ def main(args): original_module_name = original_module_name.replace("post.attention.layernorm", "post_attention_layernorm") # Prefix conversion - new_prefix = "diffusion_model." if is_dit_lora else "text_encoder.qwen3." + new_prefix = COMFYUI_DIT_PREFIX if is_dit_lora else COMFYUI_QWEN3_PREFIX new_k = f"{new_prefix}{original_module_name}.{weight_name}" else: - if k.startswith("diffusion_model."): + if k.startswith(COMFYUI_DIT_PREFIX): is_dit_lora = True - module_and_weight_name = k[len("diffusion_model.") :] - elif k.startswith("text_encoder.qwen3."): + module_and_weight_name = k[len(COMFYUI_DIT_PREFIX) :] + elif k.startswith(COMFYUI_QWEN3_PREFIX): is_dit_lora = False - module_and_weight_name = k[len("text_encoder.qwen3.") :] + module_and_weight_name = k[len(COMFYUI_QWEN3_PREFIX) :] else: logger.warning(f"Skipping unrecognized key {k}") continue