Disable IPEX attention if the GPU supports 64 bit

Author: Disty0
Date: 2023-12-05 19:40:16 +03:00
parent bce9a081db
commit 3d70137d31
4 changed files with 24 additions and 19 deletions


@@ -165,6 +165,7 @@ def ipex_init(): # pylint: disable=too-many-statements
     torch.cuda.get_device_id_list_per_card = torch.xpu.get_device_id_list_per_card
     ipex_hijacks()
-    attention_init()
+    if not torch.xpu.has_fp64_dtype():
+        attention_init()
     try:
         from .diffusers import ipex_diffusers
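
Read as straight Python, this hunk boils down to the gate below; a minimal sketch where attention_init is a stub standing in for the repo's attention hijack installer:

import torch
import intel_extension_for_pytorch as ipex  # pylint: disable=import-error, unused-import

def attention_init():
    # Stand-in for the repo's real attention_init(), which installs the
    # sliced attention fallbacks intended for GPUs without 64-bit support.
    ...

# Per this commit, the attention hijack is installed only when the GPU
# lacks FP64; GPUs with native 64-bit support keep stock attention.
if not torch.xpu.has_fp64_dtype():
    attention_init()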


@@ -1,6 +1,6 @@
 import torch
 import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
-import diffusers #0.21.1 # pylint: disable=import-error
+import diffusers #0.24.0 # pylint: disable=import-error
 from diffusers.models.attention_processor import Attention
 # pylint: disable=protected-access, missing-function-docstring, line-too-long


@@ -5,6 +5,7 @@ import intel_extension_for_pytorch._C as core # pylint: disable=import-error, un
 # pylint: disable=protected-access, missing-function-docstring, line-too-long
+device_supports_fp64 = torch.xpu.has_fp64_dtype()
 OptState = ipex.cpu.autocast._grad_scaler.OptState
 _MultiDeviceReplicator = ipex.cpu.autocast._grad_scaler._MultiDeviceReplicator
 _refresh_per_optimizer_state = ipex.cpu.autocast._grad_scaler._refresh_per_optimizer_state
@@ -96,6 +97,9 @@ def unscale_(self, optimizer):
     # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
     assert self._scale is not None
-    inv_scale = self._scale.to("cpu").double().reciprocal().float().to(self._scale.device)
+    if device_supports_fp64:
+        inv_scale = self._scale.double().reciprocal().float()
+    else:
+        inv_scale = self._scale.to("cpu").double().reciprocal().float().to(self._scale.device)
     found_inf = torch.full(
         (1,), 0.0, dtype=torch.float32, device=self._scale.device
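
Isolated from the GradScaler plumbing, the new unscale_ logic is just a device-aware reciprocal; a minimal sketch, where compute_inv_scale is a hypothetical helper name and scale is the scaler's FP32 scale tensor:

import torch

device_supports_fp64 = torch.xpu.has_fp64_dtype()

def compute_inv_scale(scale: torch.Tensor) -> torch.Tensor:
    # FP32 division can be imprecise, so the reciprocal is taken in FP64.
    if device_supports_fp64:
        # The GPU handles double precision natively, so stay on device.
        return scale.double().reciprocal().float()
    # GPUs without FP64 (e.g. Alchemist) round-trip through the CPU,
    # which always supports double, then move the result back.
    return scale.to("cpu").double().reciprocal().float().to(scale.device)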


@@ -194,7 +194,7 @@ def ipex_hijacks():
         lambda orig_func, input, pad, mode='constant', value=None: orig_func(input.to(torch.float32), pad, mode=mode, value=value).to(dtype=torch.bfloat16),
         lambda orig_func, input, pad, mode='constant', value=None: mode == 'reflect' and input.dtype == torch.bfloat16)
-    #Diffusers Float64 (ARC GPUs doesn't support double or Float64):
+    # Diffusers Float64 (Alchemist GPUs doesn't support 64 bit):
     if not torch.xpu.has_fp64_dtype():
         CondFunc('torch.from_numpy',
             lambda orig_func, ndarray: orig_func(ndarray.astype('float32')),
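
For reference, the CondFunc hijack on torch.from_numpy (its condition lambda falls outside this hunk) is equivalent in spirit to the plain monkey-patch below; the dtype guard is an assumption, since only the substitute function is shown and it casts unconditionally:

import numpy as np
import torch

_orig_from_numpy = torch.from_numpy

def _from_numpy_no_fp64(ndarray):
    # Downcast float64 arrays before tensor creation so no float64
    # tensor can ever land on a GPU without 64-bit support.
    if ndarray.dtype == np.float64:  # assumed guard; the hunk's lambda casts unconditionally
        ndarray = ndarray.astype('float32')
    return _orig_from_numpy(ndarray)

# Mirrors the commit's gate: only patch when the device lacks FP64.
if not torch.xpu.has_fp64_dtype():
    torch.from_numpy = _from_numpy_no_fp64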