mirror of https://github.com/kohya-ss/sd-scripts.git
synced 2026-04-10 15:00:23 +00:00

Compare commits: sd3 ... fdbeca26a1 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | fdbeca26a1 | |
| | fad68161c9 | |
```diff
@@ -50,9 +50,6 @@ Training image generation models such as Stable Diffusion, and image
 
 ### Change History
 
-- The following are the main changes planned for the next release. Changes may be revised without notice before the release.
-- Improved compatibility with Intel GPUs. [PR #2307](https://github.com/kohya-ss/sd-scripts/pull/2307) Thanks to WhitePr.
-
 - **Version 0.10.3 (2026-04-02):**
 - Further improved stability when training with fp16 on Anima. [PR #2302](https://github.com/kohya-ss/sd-scripts/pull/2302) We deeply appreciate those who reported the issue.
 
```
```diff
@@ -47,9 +47,6 @@ If you find this project helpful, please consider supporting its development via
 
 ### Change History
 
-- The following are the main changes planned for the next release. Please note that these changes may be subject to change without notice before the release.
-- Improved compatibility with Intel GPUs. Thanks to WhitePr for [PR #2307](https://github.com/kohya-ss/sd-scripts/pull/2307).
-
 - **Version 0.10.3 (2026-04-02):**
 - Stability when training with fp16 on Anima has been further improved. See [PR #2302](https://github.com/kohya-ss/sd-scripts/pull/2302) for details. We deeply appreciate those who reported the issue.
 
```
```diff
@@ -1,7 +1,6 @@
 import os
 import sys
 import torch
-from packaging import version
 try:
     import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
     has_ipex = True
@@ -9,7 +8,7 @@ except Exception:
     has_ipex = False
 from .hijacks import ipex_hijacks
 
-torch_version = version.parse(torch.__version__)
+torch_version = float(torch.__version__[:3])
 
 # pylint: disable=protected-access, missing-function-docstring, line-too-long
 
```
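The `torch_version` change above swaps `packaging.version.parse` for a slice-and-float parse. A minimal standalone sketch (plain Python plus `packaging`, no other assumptions) of where the two disagree:

```python
from packaging import version

# "2.11.0"[:3] is "2.1", so the slice-based parse folds torch 2.11 back
# into 2.1 and would re-enter every "< 2.3" compatibility branch;
# packaging's semantic parse keeps 2.11 > 2.3.
for ver in ["2.3.1", "2.7.0", "2.11.0"]:
    as_float = float(ver[:3])
    as_parsed = version.parse(ver)
    print(ver, as_float < 2.3, as_parsed < version.parse("2.3"))
# 2.3.1  -> False False
# 2.7.0  -> False False
# 2.11.0 -> True  False  (the two parses diverge here)
```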
```diff
@@ -57,6 +56,7 @@ def ipex_init(): # pylint: disable=too-many-statements
     torch.cuda.__path__ = torch.xpu.__path__
     torch.cuda.set_stream = torch.xpu.set_stream
     torch.cuda.torch = torch.xpu.torch
+    torch.cuda.Union = torch.xpu.Union
     torch.cuda.__annotations__ = torch.xpu.__annotations__
     torch.cuda.__package__ = torch.xpu.__package__
     torch.cuda.__builtins__ = torch.xpu.__builtins__
```
```diff
@@ -64,12 +64,14 @@ def ipex_init(): # pylint: disable=too-many-statements
     torch.cuda.StreamContext = torch.xpu.StreamContext
     torch.cuda._lazy_call = torch.xpu._lazy_call
     torch.cuda.random = torch.xpu.random
+    torch.cuda._device = torch.xpu._device
     torch.cuda.__name__ = torch.xpu.__name__
+    torch.cuda._device_t = torch.xpu._device_t
     torch.cuda.__spec__ = torch.xpu.__spec__
     torch.cuda.__file__ = torch.xpu.__file__
     # torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing
 
-    if torch_version < version.parse("2.3"):
+    if torch_version < 2.3:
         torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock
         torch.cuda._initialized = torch.xpu.lazy_init._initialized
         torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork
```
```diff
@@ -112,22 +114,17 @@ def ipex_init(): # pylint: disable=too-many-statements
     torch.cuda.threading = torch.xpu.threading
     torch.cuda.traceback = torch.xpu.traceback
 
-    if torch_version < version.parse("2.5"):
+    if torch_version < 2.5:
         torch.cuda.os = torch.xpu.os
         torch.cuda.Device = torch.xpu.Device
         torch.cuda.warnings = torch.xpu.warnings
         torch.cuda.classproperty = torch.xpu.classproperty
         torch.UntypedStorage.cuda = torch.UntypedStorage.xpu
 
-    if torch_version < version.parse("2.7"):
+    if torch_version < 2.7:
         torch.cuda.Tuple = torch.xpu.Tuple
         torch.cuda.List = torch.xpu.List
 
-    if torch_version < version.parse("2.11"):
-        torch.cuda._device_t = torch.xpu._device_t
-        torch.cuda._device = torch.xpu._device
-        torch.cuda.Union = torch.xpu.Union
-
 
     # Memory:
     if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read():
```
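The removed `< 2.11` block version-gated aliases for private helpers (`_device`, `_device_t`, `Union`) that this compare re-adds unconditionally further up. As a hedged alternative, not the project's code, such private names can be copied only when the running torch build actually exposes them:

```python
import torch

# Private attributes like torch.xpu._device_t appear and disappear
# between torch releases; hasattr-gating avoids an AttributeError
# without relying on hard-coded version thresholds.
# Assumes a torch build that ships the torch.xpu module.
def alias_if_present(name: str) -> None:
    if hasattr(torch.xpu, name):
        setattr(torch.cuda, name, getattr(torch.xpu, name))

for name in ("_device", "_device_t", "Union"):
    alias_if_present(name)
```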
```diff
@@ -163,7 +160,7 @@ def ipex_init(): # pylint: disable=too-many-statements
     torch.cuda.initial_seed = torch.xpu.initial_seed
 
     # C
-    if torch_version < version.parse("2.3"):
+    if torch_version < 2.3:
         torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentRawStream
         ipex._C._DeviceProperties.multi_processor_count = ipex._C._DeviceProperties.gpu_subslice_count
         ipex._C._DeviceProperties.major = 12
```
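Taken together, these hunks maintain a shim: `ipex_init()` re-points `torch.cuda` symbols at their `torch.xpu` counterparts so CUDA-oriented training code runs unchanged on Intel GPUs. A condensed sketch of the idea, not the full function, which aliases dozens of attributes:

```python
import torch

def cuda_to_xpu_shim() -> bool:
    """Re-point a few representative torch.cuda symbols at torch.xpu.

    Illustration only: the real ipex_init() also covers streams, RNG,
    memory statistics, and version-gated private helpers.
    """
    if not hasattr(torch, "xpu"):
        return False
    for name in ("is_available", "device_count", "current_device",
                 "empty_cache", "synchronize", "manual_seed"):
        if hasattr(torch.xpu, name):
            setattr(torch.cuda, name, getattr(torch.xpu, name))
    return True
```

After the shim runs, a call such as `torch.cuda.empty_cache()` in downstream code reaches the XPU backend without any edits to the caller.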
```diff
@@ -11,7 +11,7 @@ init_ipex()
 
 import diffusers
 from transformers import CLIPTextModel, CLIPTokenizer, CLIPTextConfig, logging
-from diffusers import AutoencoderKL, DDIMScheduler, StableDiffusionPipeline # , UNet2DConditionModel
+from diffusers import AutoencoderKL, DDIMScheduler, StableDiffusionPipeline, StableUnCLIPImg2ImgPipeline # , UNet2DConditionModel
 from safetensors.torch import load_file, save_file
 from library.original_unet import UNet2DConditionModel
 from library.utils import setup_logging
```
```diff
@@ -658,6 +658,77 @@ def convert_ldm_clip_checkpoint_v2(checkpoint, max_length):
     return new_sd
 
 
+def convert_ldm_clip_checkpoint_v2_fix(checkpoint, max_length):
+    # the key layouts differ to a maddening degree!
+    def convert_key(key):
+        if not key.startswith("cond_stage_model"):
+            return None
+
+        # common conversion
+        key = key.replace("cond_stage_model.model.transformer.", "text_model.encoder.")
+        key = key.replace("cond_stage_model.model.", "text_model.")
+
+        if "resblocks" in key:
+            # resblocks conversion
+            key = key.replace(".resblocks.", ".layers.")
+            if ".ln_" in key:
+                key = key.replace(".ln_", ".layer_norm")
+            elif ".mlp." in key:
+                key = key.replace(".c_fc.", ".fc1.")
+                key = key.replace(".c_proj.", ".fc2.")
+            elif ".attn.out_proj" in key:
+                key = key.replace(".attn.out_proj.", ".self_attn.out_proj.")
+            elif ".attn.in_proj" in key:
+                key = None  # fused qkv is a special case, handled later
+            else:
+                raise ValueError(f"unexpected key in SD: {key}")
+        elif ".positional_embedding" in key:
+            key = key.replace(".positional_embedding", ".embeddings.position_embedding.weight")
+        elif ".text_projection" in key:
+            key = None  # not used???
+        elif ".logit_scale" in key:
+            key = None  # not used???
+        elif ".token_embedding" in key:
+            key = key.replace(".token_embedding.weight", ".embeddings.token_embedding.weight")
+        elif ".ln_final" in key:
+            key = key.replace(".ln_final", ".final_layer_norm")
+        return key
+
+    keys = list(checkpoint.keys())
+    new_sd = {}
+    for key in keys:
+        # remove resblocks 23
+        if ".resblocks.23." in key:
+            continue
+        if 'embedder.model' in key:
+            continue
+        new_key = convert_key(key)
+        if new_key is None:
+            continue
+        new_sd[new_key] = checkpoint[key]
+
+    # convert attn
+    for key in keys:
+        if ".resblocks.23." in key:
+            continue
+        if 'embedder.model' in key:
+            continue
+        if ".resblocks" in key and ".attn.in_proj_" in key:
+            # split into three
+            values = torch.chunk(checkpoint[key], 3)
+
+            key_suffix = ".weight" if "weight" in key else ".bias"
+            key_pfx = key.replace("cond_stage_model.model.transformer.resblocks.", "text_model.encoder.layers.")
+            key_pfx = key_pfx.replace("_weight", "")
+            key_pfx = key_pfx.replace("_bias", "")
+            key_pfx = key_pfx.replace(".attn.in_proj", ".self_attn.")
+            new_sd[key_pfx + "q_proj" + key_suffix] = values[0]
+            new_sd[key_pfx + "k_proj" + key_suffix] = values[1]
+            new_sd[key_pfx + "v_proj" + key_suffix] = values[2]
+
+    return new_sd
+
+
 # endregion
 
 
```
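The `in_proj` branch above is the crux of the new converter: OpenCLIP stores the attention query/key/value projection as one fused tensor, while HF's `CLIPTextModel` expects separate `q_proj`/`k_proj`/`v_proj` entries. A standalone sketch of that split on a dummy tensor (the hidden size of 1024 matches the SD2 config used below; the layer index in the comment is a hypothetical example):

```python
import torch

hidden = 1024
# Fused qkv weight as stored under ...attn.in_proj_weight: (3*hidden, hidden)
fused = torch.randn(3 * hidden, hidden)

# torch.chunk splits row-wise into three (hidden, hidden) matrices,
# in q, k, v order.
q_w, k_w, v_w = torch.chunk(fused, 3)
assert q_w.shape == (hidden, hidden)

# After renaming, the keys land at e.g.
# text_model.encoder.layers.0.self_attn.q_proj.weight (and k/v alike).
```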
```diff
@@ -1017,33 +1088,58 @@ def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, device="cpu", dt
     vae = AutoencoderKL(**vae_config).to(device)
     info = vae.load_state_dict(converted_vae_checkpoint)
     logger.info(f"loading vae: {info}")
 
-    # convert text_model
     if v2:
-        converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v2(state_dict, 77)
-        cfg = CLIPTextConfig(
-            vocab_size=49408,
-            hidden_size=1024,
-            intermediate_size=4096,
-            num_hidden_layers=23,
-            num_attention_heads=16,
-            max_position_embeddings=77,
-            hidden_act="gelu",
-            layer_norm_eps=1e-05,
-            dropout=0.0,
-            attention_dropout=0.0,
-            initializer_range=0.02,
-            initializer_factor=1.0,
-            pad_token_id=1,
-            bos_token_id=0,
-            eos_token_id=2,
-            model_type="clip_text_model",
-            projection_dim=512,
-            torch_dtype="float32",
-            transformers_version="4.25.0.dev0",
-        )
-        text_model = CLIPTextModel._from_config(cfg)
-        info = text_model.load_state_dict(converted_text_encoder_checkpoint)
+        try:
+            converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v2_fix(state_dict, 77)
+            cfg = CLIPTextConfig(
+                attention_dropout=0.0,
+                bos_token_id=0,
+                dropout=0.0,
+                eos_token_id=2,
+                hidden_act="gelu",
+                hidden_size=1024,
+                initializer_factor=1.0,
+                initializer_range=0.02,
+                intermediate_size=4096,
+                layer_norm_eps=1e-05,
+                max_position_embeddings=77,
+                model_type="clip_text_model",
+                num_attention_heads=16,
+                num_hidden_layers=23,
+                pad_token_id=1,
+                projection_dim=512,
+                torch_dtype="float16",
+                transformers_version="4.28.0.dev0",
+                vocab_size=49408,
+            )
+            text_model = CLIPTextModel._from_config(cfg)
+            info = text_model.load_state_dict(converted_text_encoder_checkpoint)
+        except Exception:
+            converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v2(state_dict, 77)
+            cfg = CLIPTextConfig(
+                vocab_size=49408,
+                hidden_size=1024,
+                intermediate_size=4096,
+                num_hidden_layers=23,
+                num_attention_heads=16,
+                max_position_embeddings=77,
+                hidden_act="gelu",
+                layer_norm_eps=1e-05,
+                dropout=0.0,
+                attention_dropout=0.0,
+                initializer_range=0.02,
+                initializer_factor=1.0,
+                pad_token_id=1,
+                bos_token_id=0,
+                eos_token_id=2,
+                model_type="clip_text_model",
+                projection_dim=512,
+                torch_dtype="float32",
+                transformers_version="4.25.0.dev0",
+            )
+            text_model = CLIPTextModel._from_config(cfg)
+            info = text_model.load_state_dict(converted_text_encoder_checkpoint)
     else:
         converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v1(state_dict)
 
```
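The `try`/`except` above works because `load_state_dict` is strict by default: if the `_fix` converter produces keys that do not line up with the configured model, a `RuntimeError` fires and the original converter is retried. A simplified sketch of the pattern (placeholder names, not the library's API):

```python
def load_with_fallback(model, state_dict, converters):
    """Try each converter until one yields a state dict the model accepts.

    Sketch only: the code above additionally rebuilds the CLIPTextConfig
    per branch, which this helper does not.
    """
    last_err = None
    for convert in converters:
        try:
            # strict=True (the default) raises on missing/unexpected keys,
            # which is what triggers the fallback.
            return model.load_state_dict(convert(state_dict))
        except Exception as err:
            last_err = err
    raise last_err
```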
```diff
@@ -1077,6 +1173,25 @@ def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, device="cpu", dt
 
     return text_model, vae, unet
 
+
+# def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, device="cpu", dtype=torch.float32):
+#     pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(ckpt_path, torch_dtype=torch.float32).to(device)
+
+#     # Load the UNet model
+#     unet = pipe.unet.to(device)
+
+#     # Load the VAE model
+#     vae = pipe.vae.to(device)
+
+#     # Load the text model
+#     text_encoder = pipe.text_encoder.to(device)
+
+#     # Log information
+#     logger.info(f"Loaded UNet: {unet}")
+#     logger.info(f"Loaded VAE: {vae}")
+#     logger.info(f"Loaded Text Encoder: {text_encoder}")
+
+#     return text_encoder, vae, unet
+
 
 def get_model_version_str_for_sd1_sd2(v2, v_parameterization):
     # only for reference
```
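The commented-out loader above sketches an alternative: let a diffusers pipeline assemble the submodules instead of converting the checkpoint by hand. For reference, a hedged sketch of that approach with `StableDiffusionPipeline` (already imported in this file); `"model_dir"` is a placeholder:

```python
import torch
from diffusers import StableDiffusionPipeline

# Sketch only: from_pretrained expects a diffusers-format directory or
# a Hub model id, not a single .ckpt/.safetensors file, which is one
# reason the hand-rolled conversion above remains the default path.
pipe = StableDiffusionPipeline.from_pretrained("model_dir", torch_dtype=torch.float32)
text_encoder, vae, unet = pipe.text_encoder, pipe.vae, pipe.unet
```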