Mirror of https://github.com/kohya-ss/sd-scripts.git (synced 2026-04-18 01:30:02 +00:00)

Compare commits: 4 commits, 1c1da5697a...9457e53d55
| Author | SHA1 | Date |
|---|---|---|
| | 9457e53d55 | |
| | 51435f1718 | |
| | fa53f71ec0 | |
| | c0f0b5dae3 | |
@@ -50,6 +50,9 @@ Training image generation models such as Stable Diffusion, and generating images with those models
 
 ### Change History
 
+- **Version 0.10.3 (2026-04-02):**
+  - Stability when training with fp16 on Anima has been further improved. [PR #2302](https://github.com/kohya-ss/sd-scripts/pull/2302) We deeply appreciate those who reported the issue.
+
 - **Version 0.10.2 (2026-03-30):**
   - LECO training for SD/SDXL is now supported. Many thanks to umisetokikaze for [PR #2285](https://github.com/kohya-ss/sd-scripts/pull/2285) and [PR #2294](https://github.com/kohya-ss/sd-scripts/pull/2294).
   - Please refer to the [documentation](./docs/train_leco.md) for details.
@@ -47,6 +47,9 @@ If you find this project helpful, please consider supporting its development via
 
 ### Change History
 
+- **Version 0.10.3 (2026-04-02):**
+  - Stability when training with fp16 on Anima has been further improved. See [PR #2302](https://github.com/kohya-ss/sd-scripts/pull/2302) for details. We deeply appreciate those who reported the issue.
+
 - **Version 0.10.2 (2026-03-30):**
   - LECO training for SD/SDXL is now supported. Many thanks to umisetokikaze for [PR #2285](https://github.com/kohya-ss/sd-scripts/pull/2285) and [PR #2294](https://github.com/kohya-ss/sd-scripts/pull/2294).
   - Please refer to the [documentation](./docs/train_leco.md) for details.
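A note on the LECO entry: LECO trains a LoRA from prompts alone, with no image dataset. A frozen copy of the model predicts noise for a target-concept prompt and for a neutral prompt, and the LoRA-patched model is optimized so its prediction on the target prompt is steered away from the concept direction. Below is a minimal sketch of that ESD-style objective as generally described in the LECO literature; `frozen_unet`, `lora_unet`, and every argument are hypothetical stand-ins, not sd-scripts APIs (see [./docs/train_leco.md](./docs/train_leco.md) for the actual interface).

```python
import torch
import torch.nn.functional as F

# Hedged sketch of an ESD/LECO-style erasing objective. Every name here is a
# hypothetical stand-in; the real options live in ./docs/train_leco.md.
def leco_erase_loss(frozen_unet, lora_unet, z_t, t, target_emb, neutral_emb, scale=1.0):
    with torch.no_grad():
        eps_target = frozen_unet(z_t, t, target_emb)    # frozen model, concept prompt
        eps_neutral = frozen_unet(z_t, t, neutral_emb)  # frozen model, neutral prompt
        # Guide the concept prediction away from the concept direction.
        guidance = eps_neutral - scale * (eps_target - eps_neutral)
    eps_lora = lora_unet(z_t, t, target_emb)            # same input, LoRA enabled
    return F.mse_loss(eps_lora, guidance)
```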
@@ -738,9 +738,9 @@ class FinalLayer(nn.Module):
         x_B_T_H_W_D: torch.Tensor,
         emb_B_T_D: torch.Tensor,
         adaln_lora_B_T_3D: Optional[torch.Tensor] = None,
+        use_fp32: bool = False,
     ):
         # Compute AdaLN modulation parameters (in float32 when fp16 to avoid overflow in Linear layers)
-        use_fp32 = x_B_T_H_W_D.dtype == torch.float16
         with torch.autocast(device_type=x_B_T_H_W_D.device.type, dtype=torch.float32, enabled=use_fp32):
             if self.use_adaln_lora:
                 assert adaln_lora_B_T_3D is not None
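The hunk above moves the fp32 decision out of `FinalLayer` and into its caller, and runs the AdaLN Linear layers in float32 when the model is in fp16. To illustrate the overflow this guards against, here is a minimal, self-contained sketch (the values are arbitrary):

```python
import torch

# float16's largest finite value is 65504, so even a small reduction can
# overflow: each product below is a representable 22500, but their sum is not.
a = torch.full((4,), 150.0, dtype=torch.float16)
print((a * a).sum())                  # inf in float16 (4 * 22500 = 90000 > 65504)
print((a.float() * a.float()).sum())  # 90000.0 once the computation runs in float32
```

Running the modulation math in a float32 region keeps such intermediate sums representable; results are cast back to fp16 where the surrounding network expects it.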
@@ -863,11 +863,11 @@ class Block(nn.Module):
         emb_B_T_D: torch.Tensor,
         crossattn_emb: torch.Tensor,
         attn_params: attention.AttentionParams,
+        use_fp32: bool = False,
         rope_emb_L_1_1_D: Optional[torch.Tensor] = None,
         adaln_lora_B_T_3D: Optional[torch.Tensor] = None,
         extra_per_block_pos_emb: Optional[torch.Tensor] = None,
     ) -> torch.Tensor:
-        use_fp32 = x_B_T_H_W_D.dtype == torch.float16
         if use_fp32:
             # Cast to float32 for better numerical stability in residual connections. Each module will cast back to float16 by enclosing autocast context.
             x_B_T_H_W_D = x_B_T_H_W_D.float()
@@ -959,6 +959,7 @@ class Block(nn.Module):
         emb_B_T_D: torch.Tensor,
         crossattn_emb: torch.Tensor,
         attn_params: attention.AttentionParams,
+        use_fp32: bool = False,
         rope_emb_L_1_1_D: Optional[torch.Tensor] = None,
         adaln_lora_B_T_3D: Optional[torch.Tensor] = None,
         extra_per_block_pos_emb: Optional[torch.Tensor] = None,
@@ -972,6 +973,7 @@ class Block(nn.Module):
                 emb_B_T_D,
                 crossattn_emb,
                 attn_params,
+                use_fp32,
                 rope_emb_L_1_1_D,
                 adaln_lora_B_T_3D,
                 extra_per_block_pos_emb,
@@ -994,6 +996,7 @@ class Block(nn.Module):
                 emb_B_T_D,
                 crossattn_emb,
                 attn_params,
+                use_fp32,
                 rope_emb_L_1_1_D,
                 adaln_lora_B_T_3D,
                 extra_per_block_pos_emb,
@@ -1007,6 +1010,7 @@ class Block(nn.Module):
                 emb_B_T_D,
                 crossattn_emb,
                 attn_params,
+                use_fp32,
                 rope_emb_L_1_1_D,
                 adaln_lora_B_T_3D,
                 extra_per_block_pos_emb,
@@ -1018,6 +1022,7 @@ class Block(nn.Module):
                 emb_B_T_D,
                 crossattn_emb,
                 attn_params,
+                use_fp32,
                 rope_emb_L_1_1_D,
                 adaln_lora_B_T_3D,
                 extra_per_block_pos_emb,
@@ -1338,16 +1343,19 @@ class Anima(nn.Module):
 
         attn_params = attention.AttentionParams.create_attention_params(self.attn_mode, self.split_attn)
 
+        # Determine whether to use float32 for block computations based on input dtype (use float32 for better stability when input is float16)
+        use_fp32 = x_B_T_H_W_D.dtype == torch.float16
+
         for block_idx, block in enumerate(self.blocks):
             if self.blocks_to_swap:
                 self.offloader.wait_for_block(block_idx)
 
-            x_B_T_H_W_D = block(x_B_T_H_W_D, t_embedding_B_T_D, crossattn_emb, attn_params, **block_kwargs)
+            x_B_T_H_W_D = block(x_B_T_H_W_D, t_embedding_B_T_D, crossattn_emb, attn_params, use_fp32, **block_kwargs)
 
             if self.blocks_to_swap:
                 self.offloader.submit_move_blocks(self.blocks, block_idx)
 
-        x_B_T_H_W_O = self.final_layer(x_B_T_H_W_D, t_embedding_B_T_D, adaln_lora_B_T_3D=adaln_lora_B_T_3D)
+        x_B_T_H_W_O = self.final_layer(x_B_T_H_W_D, t_embedding_B_T_D, adaln_lora_B_T_3D=adaln_lora_B_T_3D, use_fp32=use_fp32)
         x_B_C_Tt_Hp_Wp = self.unpatchify(x_B_T_H_W_O)
         return x_B_C_Tt_Hp_Wp
 
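With this hunk, the fp32 decision is made once at the top of `Anima.forward` and threaded explicitly through every `Block` and into `final_layer`. A small sketch of why deriving the flag once matters; `ToyBlock` here is a hypothetical stand-in, not the Anima `Block`:

```python
import torch
import torch.nn as nn

# Blocks upcast activations to float32 for stable residual accumulation, so a
# module that re-checked x.dtype mid-stack would see float32 and silently
# disable its fp16-stability path. Deriving the flag once at the top avoids that.
class ToyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(8, 8)

    def forward(self, x, use_fp32: bool = False):
        if use_fp32:
            x = x.float()  # residual accumulates in float32
        h = self.proj(x.to(self.proj.weight.dtype)).to(x.dtype)
        return x + h

blocks = nn.ModuleList([ToyBlock() for _ in range(2)])
x = torch.randn(1, 8, dtype=torch.float16)
use_fp32 = x.dtype == torch.float16  # decided once, from the original input
for block in blocks:
    x = block(x, use_fp32)
print(x.dtype)  # torch.float32: the second block saw fp32 input but still ran its fp32 path
```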
|
|||||||
@@ -465,6 +465,10 @@ def create_network(
|
|||||||
conv_block_dims = None
|
conv_block_dims = None
|
||||||
conv_block_alphas = None
|
conv_block_alphas = None
|
||||||
|
|
||||||
|
drop_keys = kwargs.get("drop_keys", None)
|
||||||
|
if drop_keys is not None:
|
||||||
|
drop_keys = drop_keys.split(',')
|
||||||
|
|
||||||
# rank/module dropout
|
# rank/module dropout
|
||||||
rank_dropout = kwargs.get("rank_dropout", None)
|
rank_dropout = kwargs.get("rank_dropout", None)
|
||||||
if rank_dropout is not None:
|
if rank_dropout is not None:
|
||||||
@@ -489,6 +493,7 @@ def create_network(
         block_alphas=block_alphas,
         conv_block_dims=conv_block_dims,
         conv_block_alphas=conv_block_alphas,
+        drop_keys=drop_keys,
         varbose=True,
         is_sdxl=is_sdxl,
     )
@@ -893,6 +898,7 @@ class LoRANetwork(torch.nn.Module):
         modules_dim: Optional[Dict[str, int]] = None,
         modules_alpha: Optional[Dict[str, int]] = None,
         module_class: Type[object] = LoRAModule,
+        drop_keys: Optional[List[str]] = None,
         varbose: Optional[bool] = False,
         is_sdxl: Optional[bool] = False,
     ) -> None:
@@ -914,6 +920,7 @@ class LoRANetwork(torch.nn.Module):
         self.dropout = dropout
         self.rank_dropout = rank_dropout
         self.module_dropout = module_dropout
+        self.drop_keys = drop_keys
 
         self.loraplus_lr_ratio = None
         self.loraplus_unet_lr_ratio = None
@@ -941,6 +948,9 @@ class LoRANetwork(torch.nn.Module):
                 f"apply LoRA to Conv2d with kernel size (3,3). dim (rank): {self.conv_lora_dim}, alpha: {self.conv_alpha}"
             )
 
+        if self.drop_keys:
+            print(f"Drop keys: {self.drop_keys}")
+
         # create module instances
         def create_modules(
             is_unet: bool,
@@ -970,6 +980,12 @@ class LoRANetwork(torch.nn.Module):
                             lora_name = prefix + "." + name + "." + child_name
                             lora_name = lora_name.replace(".", "_")
 
+                            if self.drop_keys:
+                                for key in self.drop_keys:
+                                    if key in lora_name:
+                                        skipped.append(lora_name)
+                                        continue
+
                             dim = None
                             alpha = None
 
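Taken together, these hunks parse `drop_keys` as a comma-separated string in `create_network` and skip any module whose generated LoRA name contains one of the keys as a substring. A minimal sketch of that matching with hypothetical module names; presumably the value arrives through a `--network_args` entry such as `drop_keys=attn2_to_k,attn2_to_v`, though that invocation is an assumption, not documented in this diff:

```python
# Hypothetical names illustrating the substring filter added above.
drop_keys = "attn2_to_k,attn2_to_v".split(',')

lora_names = [
    "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn2_to_k",
    "lora_unet_down_blocks_0_attentions_0_transformer_blocks_0_attn1_to_q",
]

skipped = [n for n in lora_names if any(key in n for key in drop_keys)]
kept = [n for n in lora_names if not any(key in n for key in drop_keys)]
print(skipped)  # the attn2_to_k module is dropped; attn1_to_q keeps its LoRA
```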