mirror of
https://github.com/kohya-ss/sd-scripts.git
synced 2026-04-18 01:30:02 +00:00
Compare commits
3 Commits
dev
...
4b42fef8ed
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4b42fef8ed | ||
|
|
dd9a330860 | ||
|
|
af5adb2b61 |
@@ -50,9 +50,6 @@ Stable Diffusion等の画像生成モデルの学習、モデルによる画像
|
|||||||
|
|
||||||
### 更新履歴
||||||
|
|
||||||
- 次のリリースに含まれる予定の主な変更点は以下の通りです。リリース前の変更点は予告なく変更される可能性があります。
|
|
||||||
- Intel GPUの互換性を向上しました。[PR #2307](https://github.com/kohya-ss/sd-scripts/pull/2307) WhitePr氏に感謝します。
|
|
||||||
|
|
||||||
- **Version 0.10.3 (2026-04-02):**
- Animaでfp16で学習する際の安定性をさらに改善しました。[PR #2302](https://github.com/kohya-ss/sd-scripts/pull/2302) 問題をご報告いただいた方々に深く感謝します。
|
||||||
|
|
||||||
|
|||||||
@@ -47,9 +47,6 @@ If you find this project helpful, please consider supporting its development via
|
|||||||
|
|
||||||
### Change History
||||||
|
|
||||||
- The following are the main changes planned for the next release. Please note that these changes may be subject to change without notice before the release.
|
|
||||||
- Improved compatibility with Intel GPUs. Thanks to WhitePr for [PR #2307](https://github.com/kohya-ss/sd-scripts/pull/2307).
|
|
||||||
|
|
||||||
- **Version 0.10.3 (2026-04-02):**
- Stability when training with fp16 on Anima has been further improved. See [PR #2302](https://github.com/kohya-ss/sd-scripts/pull/2302) for details. We deeply appreciate those who reported the issue.
|
||||||
|
|
||||||
|
|||||||
@@ -971,8 +971,8 @@ class Flux(nn.Module):
|
|||||||
|
|
||||||
def enable_block_swap(self, num_blocks: int, device: torch.device):
|
def enable_block_swap(self, num_blocks: int, device: torch.device):
|
||||||
self.blocks_to_swap = num_blocks
|
self.blocks_to_swap = num_blocks
|
||||||
double_blocks_to_swap = num_blocks // 2
|
double_blocks_to_swap = min(self.num_double_blocks - 2, num_blocks // 2)
|
||||||
single_blocks_to_swap = (num_blocks - double_blocks_to_swap) * 2
|
single_blocks_to_swap = (num_blocks - (num_blocks // 2)) * 2
|
||||||
|
|
||||||
assert double_blocks_to_swap <= self.num_double_blocks - 2 and single_blocks_to_swap <= self.num_single_blocks - 2, (
|
assert double_blocks_to_swap <= self.num_double_blocks - 2 and single_blocks_to_swap <= self.num_single_blocks - 2, (
|
||||||
f"Cannot swap more than {self.num_double_blocks - 2} double blocks and {self.num_single_blocks - 2} single blocks. "
|
f"Cannot swap more than {self.num_double_blocks - 2} double blocks and {self.num_single_blocks - 2} single blocks. "
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import torch
|
import torch
|
||||||
from packaging import version
|
|
||||||
try:
|
try:
|
||||||
import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
|
import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
|
||||||
has_ipex = True
|
has_ipex = True
|
||||||
@@ -9,7 +8,7 @@ except Exception:
|
|||||||
has_ipex = False
|
has_ipex = False
|
||||||
from .hijacks import ipex_hijacks
|
from .hijacks import ipex_hijacks
|
||||||
|
|
||||||
torch_version = version.parse(torch.__version__)
|
torch_version = float(torch.__version__[:3])
|
||||||
|
|
||||||
# pylint: disable=protected-access, missing-function-docstring, line-too-long
|
# pylint: disable=protected-access, missing-function-docstring, line-too-long
|
||||||
|
|
||||||
@@ -57,6 +56,7 @@ def ipex_init(): # pylint: disable=too-many-statements
|
|||||||
torch.cuda.__path__ = torch.xpu.__path__
|
torch.cuda.__path__ = torch.xpu.__path__
|
||||||
torch.cuda.set_stream = torch.xpu.set_stream
|
torch.cuda.set_stream = torch.xpu.set_stream
|
||||||
torch.cuda.torch = torch.xpu.torch
|
torch.cuda.torch = torch.xpu.torch
|
||||||
|
torch.cuda.Union = torch.xpu.Union
|
||||||
torch.cuda.__annotations__ = torch.xpu.__annotations__
|
torch.cuda.__annotations__ = torch.xpu.__annotations__
|
||||||
torch.cuda.__package__ = torch.xpu.__package__
|
torch.cuda.__package__ = torch.xpu.__package__
|
||||||
torch.cuda.__builtins__ = torch.xpu.__builtins__
|
torch.cuda.__builtins__ = torch.xpu.__builtins__
|
||||||
@@ -64,12 +64,14 @@ def ipex_init(): # pylint: disable=too-many-statements
|
|||||||
torch.cuda.StreamContext = torch.xpu.StreamContext
|
torch.cuda.StreamContext = torch.xpu.StreamContext
|
||||||
torch.cuda._lazy_call = torch.xpu._lazy_call
|
torch.cuda._lazy_call = torch.xpu._lazy_call
|
||||||
torch.cuda.random = torch.xpu.random
|
torch.cuda.random = torch.xpu.random
|
||||||
|
torch.cuda._device = torch.xpu._device
|
||||||
torch.cuda.__name__ = torch.xpu.__name__
|
torch.cuda.__name__ = torch.xpu.__name__
|
||||||
|
torch.cuda._device_t = torch.xpu._device_t
|
||||||
torch.cuda.__spec__ = torch.xpu.__spec__
|
torch.cuda.__spec__ = torch.xpu.__spec__
|
||||||
torch.cuda.__file__ = torch.xpu.__file__
|
torch.cuda.__file__ = torch.xpu.__file__
|
||||||
# torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing
|
# torch.cuda.is_current_stream_capturing = torch.xpu.is_current_stream_capturing
|
||||||
|
|
||||||
if torch_version < version.parse("2.3"):
|
if torch_version < 2.3:
|
||||||
torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock
|
torch.cuda._initialization_lock = torch.xpu.lazy_init._initialization_lock
|
||||||
torch.cuda._initialized = torch.xpu.lazy_init._initialized
|
torch.cuda._initialized = torch.xpu.lazy_init._initialized
|
||||||
torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork
|
torch.cuda._is_in_bad_fork = torch.xpu.lazy_init._is_in_bad_fork
|
||||||
@@ -112,22 +114,17 @@ def ipex_init(): # pylint: disable=too-many-statements
|
|||||||
torch.cuda.threading = torch.xpu.threading
|
torch.cuda.threading = torch.xpu.threading
|
||||||
torch.cuda.traceback = torch.xpu.traceback
|
torch.cuda.traceback = torch.xpu.traceback
|
||||||
|
|
||||||
if torch_version < version.parse("2.5"):
|
if torch_version < 2.5:
|
||||||
torch.cuda.os = torch.xpu.os
|
torch.cuda.os = torch.xpu.os
|
||||||
torch.cuda.Device = torch.xpu.Device
|
torch.cuda.Device = torch.xpu.Device
|
||||||
torch.cuda.warnings = torch.xpu.warnings
|
torch.cuda.warnings = torch.xpu.warnings
|
||||||
torch.cuda.classproperty = torch.xpu.classproperty
|
torch.cuda.classproperty = torch.xpu.classproperty
|
||||||
torch.UntypedStorage.cuda = torch.UntypedStorage.xpu
|
torch.UntypedStorage.cuda = torch.UntypedStorage.xpu
|
||||||
|
|
||||||
if torch_version < version.parse("2.7"):
|
if torch_version < 2.7:
|
||||||
torch.cuda.Tuple = torch.xpu.Tuple
|
torch.cuda.Tuple = torch.xpu.Tuple
|
||||||
torch.cuda.List = torch.xpu.List
|
torch.cuda.List = torch.xpu.List
|
||||||
|
|
||||||
if torch_version < version.parse("2.11"):
|
|
||||||
torch.cuda._device_t = torch.xpu._device_t
|
|
||||||
torch.cuda._device = torch.xpu._device
|
|
||||||
torch.cuda.Union = torch.xpu.Union
|
|
||||||
|
|
||||||
|
|
||||||
# Memory:
|
# Memory:
|
||||||
if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read():
|
if 'linux' in sys.platform and "WSL2" in os.popen("uname -a").read():
|
||||||
@@ -163,7 +160,7 @@ def ipex_init(): # pylint: disable=too-many-statements
|
|||||||
torch.cuda.initial_seed = torch.xpu.initial_seed
|
torch.cuda.initial_seed = torch.xpu.initial_seed
|
||||||
|
|
||||||
# C
|
# C
|
||||||
if torch_version < version.parse("2.3"):
|
if torch_version < 2.3:
|
||||||
torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentRawStream
|
torch._C._cuda_getCurrentRawStream = ipex._C._getCurrentRawStream
|
||||||
ipex._C._DeviceProperties.multi_processor_count = ipex._C._DeviceProperties.gpu_subslice_count
|
ipex._C._DeviceProperties.multi_processor_count = ipex._C._DeviceProperties.gpu_subslice_count
|
||||||
ipex._C._DeviceProperties.major = 12
|
ipex._C._DeviceProperties.major = 12
|
||||||
|
|||||||
Reference in New Issue
Block a user