Mirror of https://github.com/kohya-ss/sd-scripts.git, synced 2026-04-08 22:35:09 +00:00
Add Paged/adam8bit/lion8bit optimizers for SDXL, bitsandbytes 0.39.1 cuda118 on Windows (#623)
* ADD libbitsandbytes.dll for 0.38.1
* Delete libbitsandbytes_cuda116.dll
* Delete cextension.py
* add main.py
* Update requirements.txt for bitsandbytes 0.38.1
* Update README.md for bitsandbytes-windows
* Update README-ja.md for bitsandbytes 0.38.1
* Update main.py for return cuda118
* Update train_util.py for lion8bit
* Update train_README-ja.md for lion8bit
* Update train_util.py for add DAdaptAdan and DAdaptSGD
* Update train_util.py for DAdaptAdam
* Update train_network.py for dadapt
* Update train_README-ja.md for DAdapt
* Update train_util.py for DAdapt
* Update train_network.py for DAdaptAdaGrad
* Update train_db.py for DAdapt
* Update fine_tune.py for DAdapt
* Update train_textual_inversion.py for DAdapt
* Update train_textual_inversion_XTI.py for DAdapt
* Revert "Merge branch 'qinglong' into main" This reverts commit b65c023083, reversing changes made to f6fda20caf.
* Revert "Update requirements.txt for bitsandbytes 0.38.1" This reverts commit 83abc60dfa.
* Revert "Delete cextension.py" This reverts commit 3ba4dfe046.
* Revert "Update README.md for bitsandbytes-windows" This reverts commit 4642c52086.
* Revert "Update README-ja.md for bitsandbytes 0.38.1" This reverts commit fa6d7485ac.
* Update train_util.py for DAdaptLion
* Update train_README-zh.md for DAdaptLion
* Update train_README-ja.md for DAdaptLion
* add DAdapt V3
* Alignment
* Update train_util.py for experimental
* Update train_util.py V3
* Update train_util.py
* Update requirements.txt
* Update train_README-zh.md
* Update train_README-ja.md
* Update train_util.py fix
* Update train_util.py
* support Prodigy
* add lower
* Update main.py
* support PagedAdamW8bit/PagedLion8bit
* Update requirements.txt
* update for PagedAdamW8bit and PagedLion8bit
* Revert
* revert main
* Update train_util.py
* update for bitsandbytes 0.39.1
* Update requirements.txt
* vram leak fix

---------

Co-authored-by: Pam <pamhome21@gmail.com>
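For readers coming from the training side, the net effect of this commit is that the paged 8-bit optimizers from bitsandbytes 0.39.x become usable on Windows. The sketch below is not code from this diff; it is a hypothetical usage example assuming a CUDA build of bitsandbytes 0.39.1 and a stand-in model:

# Hypothetical usage sketch (not part of this diff). The paged 8-bit optimizers
# keep optimizer state in memory that can spill toward host RAM under VRAM
# pressure instead of failing with an out-of-memory error.
import torch
import bitsandbytes as bnb

model = torch.nn.Linear(1024, 1024).cuda()  # stand-in for the real network
optimizer = bnb.optim.PagedAdamW8bit(model.parameters(), lr=1e-4)
# optimizer = bnb.optim.PagedLion8bit(model.parameters(), lr=1e-4)  # Lion variant

loss = model(torch.randn(8, 1024, device="cuda")).pow(2).mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()

In the sd-scripts trainers these appear to be reached via --optimizer_type=PagedAdamW8bit or --optimizer_type=PagedLion8bit, with case-insensitive matching per the "add lower" commit above.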
New binary file: bitsandbytes_windows/libbitsandbytes_cuda118.dll (binary content not shown)
bitsandbytes_windows/main.py:

@@ -4,7 +4,7 @@ extract factors the build is dependent on:
 [ ] TODO: Q - What if we have multiple GPUs of different makes?
 - CUDA version
 - Software:
-- CPU-only: only CPU quantization functions (no optimizer, no matrix multiple)
+- CPU-only: only CPU quantization functions (no optimizer, no matrix multipl)
 - CuBLAS-LT: full-build 8-bit optimizer
 - no CuBLAS-LT: no 8-bit matrix multiplication (`nomatmul`)
 
@@ -16,35 +16,367 @@ evaluation:
 - based on that set the default path
 """
 
-import ctypes
+import ctypes as ct
+import os
+import errno
+import torch
+import platform
+from warnings import warn
+from itertools import product
 
-from .paths import determine_cuda_runtime_lib_path
+from pathlib import Path
+from typing import Set, Union
+from .env_vars import get_potentially_lib_path_containing_env_vars
+
+# these are the most common libs names
+# libcudart.so is missing by default for a conda install with PyTorch 2.0 and instead
+# we have libcudart.so.11.0 which causes a lot of errors before
+# not sure if libcudart.so.12.0 exists in pytorch installs, but it does not hurt
+CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0']
+
+# this is a order list of backup paths to search CUDA in, if it cannot be found in the main environmental paths
+backup_paths = []
+
+IS_WINDOWS_PLATFORM: bool = (platform.system()=="Windows")
+PATH_COLLECTION_SEPARATOR: str = ":" if not IS_WINDOWS_PLATFORM else ";"
+CUDA_RUNTIME_LIBS: list = ["libcudart.so", 'libcudart.so.11.0', 'libcudart.so.12.0'] if not IS_WINDOWS_PLATFORM else ["cudart64_110.dll", "cudart64_120.dll", "cudart64_12.dll"]
+backup_paths.append('$CONDA_PREFIX/lib/libcudart.so.11.0' if not IS_WINDOWS_PLATFORM else '%CONDA_PREFIX%\\lib\\cudart64_110.dll')
+CUDA_SHARED_LIB_NAME: str = "libcuda.so" if not IS_WINDOWS_PLATFORM else f"{os.environ['SystemRoot']}\\System32\\nvcuda.dll"
+SHARED_LIB_EXTENSION: str = ".so" if not IS_WINDOWS_PLATFORM else ".dll"
+
+class CUDASetup:
+    _instance = None
+
+    def __init__(self):
+        raise RuntimeError("Call get_instance() instead")
+
+    def generate_instructions(self):
+        if getattr(self, 'error', False): return
+        print(self.error)
+        self.error = True
+        if self.cuda is None:
+            self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA library was not detected.')
+            self.add_log_entry('CUDA SETUP: Solution 1): Your paths are probably not up-to-date. You can update them via: sudo ldconfig.')
+            self.add_log_entry('CUDA SETUP: Solution 2): If you do not have sudo rights, you can do the following:')
+            self.add_log_entry('CUDA SETUP: Solution 2a): Find the cuda library via: find / -name libcuda.so 2>/dev/null')
+            self.add_log_entry('CUDA SETUP: Solution 2b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_2a')
+            self.add_log_entry('CUDA SETUP: Solution 2c): For a permanent solution add the export from 2b into your .bashrc file, located at ~/.bashrc')
+            return
+
+        if self.cudart_path is None:
+            self.add_log_entry('CUDA SETUP: Problem: The main issue seems to be that the main CUDA runtime library was not detected.')
+            self.add_log_entry('CUDA SETUP: Solution 1: To solve the issue the libcudart.so location needs to be added to the LD_LIBRARY_PATH variable')
+            self.add_log_entry('CUDA SETUP: Solution 1a): Find the cuda runtime library via: find / -name libcudart.so 2>/dev/null')
+            self.add_log_entry('CUDA SETUP: Solution 1b): Once the library is found add it to the LD_LIBRARY_PATH: export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:FOUND_PATH_FROM_1a')
+            self.add_log_entry('CUDA SETUP: Solution 1c): For a permanent solution add the export from 1b into your .bashrc file, located at ~/.bashrc')
+            self.add_log_entry('CUDA SETUP: Solution 2: If no library was found in step 1a) you need to install CUDA.')
+            self.add_log_entry('CUDA SETUP: Solution 2a): Download CUDA install script: wget https://github.com/TimDettmers/bitsandbytes/blob/main/cuda_install.sh')
+            self.add_log_entry('CUDA SETUP: Solution 2b): Install desired CUDA version to desired location. The syntax is bash cuda_install.sh CUDA_VERSION PATH_TO_INSTALL_INTO.')
+            self.add_log_entry('CUDA SETUP: Solution 2b): For example, "bash cuda_install.sh 113 ~/local/" will download CUDA 11.3 and install into the folder ~/local')
+            return
+
+        make_cmd = f'CUDA_VERSION={self.cuda_version_string}'
+        if len(self.cuda_version_string) < 3:
+            make_cmd += ' make cuda92'
+        elif self.cuda_version_string == '110':
+            make_cmd += ' make cuda110'
+        elif self.cuda_version_string[:2] == '11' and int(self.cuda_version_string[2]) > 0:
+            make_cmd += ' make cuda11x'
+        elif self.cuda_version_string == '100':
+            self.add_log_entry('CUDA SETUP: CUDA 10.0 not supported. Please use a different CUDA version.')
+            self.add_log_entry('CUDA SETUP: Before you try again running bitsandbytes, make sure old CUDA 10.0 versions are uninstalled and removed from $LD_LIBRARY_PATH variables.')
+            return
+
+        has_cublaslt = is_cublasLt_compatible(self.cc)
+        if not has_cublaslt:
+            make_cmd += '_nomatmul'
+
+        self.add_log_entry('CUDA SETUP: Something unexpected happened. Please compile from source:')
+        self.add_log_entry('git clone git@github.com:TimDettmers/bitsandbytes.git')
+        self.add_log_entry('cd bitsandbytes')
+        self.add_log_entry(make_cmd)
+        self.add_log_entry('python setup.py install')
+
+    def initialize(self):
+        if not getattr(self, 'initialized', False):
+            self.has_printed = False
+            self.lib = None
+            self.initialized = False
+            self.error = False
+
+    def run_cuda_setup(self):
+        self.initialized = True
+        self.cuda_setup_log = []
+
+        binary_name, cudart_path, cuda, cc, cuda_version_string = evaluate_cuda_setup()
+        self.cudart_path = cudart_path
+        self.cuda = cuda
+        self.cc = cc
+        self.cuda_version_string = cuda_version_string
+
+        package_dir = Path(__file__).parent.parent
+        binary_path = package_dir / binary_name
+
+        print('bin', binary_path)
+
+        try:
+            if not binary_path.exists():
+                self.add_log_entry(f"CUDA SETUP: Required library version not found: {binary_name}. Maybe you need to compile it from source?")
+                legacy_binary_name = "libbitsandbytes_cpu" + SHARED_LIB_EXTENSION
+                self.add_log_entry(f"CUDA SETUP: Defaulting to {legacy_binary_name}...")
+                binary_path = package_dir / legacy_binary_name
+                if not binary_path.exists() or torch.cuda.is_available():
+                    self.add_log_entry('')
+                    self.add_log_entry('='*48 + 'ERROR' + '='*37)
+                    self.add_log_entry('CUDA SETUP: CUDA detection failed! Possible reasons:')
+                    self.add_log_entry('1. CUDA driver not installed')
+                    self.add_log_entry('2. CUDA not installed')
+                    self.add_log_entry('3. You have multiple conflicting CUDA libraries')
+                    self.add_log_entry('4. Required library not pre-compiled for this bitsandbytes release!')
+                    self.add_log_entry('CUDA SETUP: If you compiled from source, try again with `make CUDA_VERSION=DETECTED_CUDA_VERSION` for example, `make CUDA_VERSION=113`.')
+                    self.add_log_entry('CUDA SETUP: The CUDA version for the compile might depend on your conda install. Inspect CUDA version via `conda list | grep cuda`.')
+                    self.add_log_entry('='*80)
+                    self.add_log_entry('')
+                    self.generate_instructions()
+                    raise Exception('CUDA SETUP: Setup Failed!')
+                self.lib = ct.cdll.LoadLibrary(str(binary_path))
+            else:
+                self.add_log_entry(f"CUDA SETUP: Loading binary {binary_path}...")
+                self.lib = ct.cdll.LoadLibrary(str(binary_path))
+        except Exception as ex:
+            self.add_log_entry(str(ex))
+
+    def add_log_entry(self, msg, is_warning=False):
+        self.cuda_setup_log.append((msg, is_warning))
+
+    def print_log_stack(self):
+        for msg, is_warning in self.cuda_setup_log:
+            if is_warning:
+                warn(msg)
+            else:
+                print(msg)
+
+    @classmethod
+    def get_instance(cls):
+        if cls._instance is None:
+            cls._instance = cls.__new__(cls)
+            cls._instance.initialize()
+        return cls._instance
+
+
+def is_cublasLt_compatible(cc):
+    has_cublaslt = False
+    if cc is not None:
+        cc_major, cc_minor = cc.split('.')
+        if int(cc_major) < 7 or (int(cc_major) == 7 and int(cc_minor) < 5):
+            CUDASetup.get_instance().add_log_entry("WARNING: Compute capability < 7.5 detected! Only slow 8-bit matmul is supported for your GPU!", is_warning=True)
+        else:
+            has_cublaslt = True
+    return has_cublaslt
+
+def extract_candidate_paths(paths_list_candidate: str) -> Set[Path]:
+    return {Path(ld_path) for ld_path in paths_list_candidate.split(PATH_COLLECTION_SEPARATOR) if ld_path}
+
+
+def remove_non_existent_dirs(candidate_paths: Set[Path]) -> Set[Path]:
+    existent_directories: Set[Path] = set()
+    for path in candidate_paths:
+        try:
+            if path.exists():
+                existent_directories.add(path)
+        except OSError as exc:
+            if exc.errno != errno.ENAMETOOLONG:
+                raise exc
+
+    non_existent_directories: Set[Path] = candidate_paths - existent_directories
+    if non_existent_directories:
+        CUDASetup.get_instance().add_log_entry("WARNING: The following directories listed in your path were found to "
+            f"be non-existent: {non_existent_directories}", is_warning=True)
+
+    return existent_directories
+
+
+def get_cuda_runtime_lib_paths(candidate_paths: Set[Path]) -> Set[Path]:
+    paths = set()
+    for libname in CUDA_RUNTIME_LIBS:
+        for path in candidate_paths:
+            if (path / libname).is_file():
+                paths.add(path / libname)
+    return paths
+
+
+def resolve_paths_list(paths_list_candidate: str) -> Set[Path]:
+    """
+    Searches a given environmental var for the CUDA runtime library,
+    i.e. `libcudart.so`.
+    """
+    return remove_non_existent_dirs(extract_candidate_paths(paths_list_candidate))
+
+
+def find_cuda_lib_in(paths_list_candidate: str) -> Set[Path]:
+    return get_cuda_runtime_lib_paths(
+        resolve_paths_list(paths_list_candidate)
+    )
+
+
+def warn_in_case_of_duplicates(results_paths: Set[Path]) -> None:
+    if len(results_paths) > 1:
+        warning_msg = (
+            f"Found duplicate {CUDA_RUNTIME_LIBS} files: {results_paths}.. "
+            "We'll flip a coin and try one of these, in order to fail forward.\n"
+            "Either way, this might cause trouble in the future:\n"
+            "If you get `CUDA error: invalid device function` errors, the above "
+            "might be the cause and the solution is to make sure only one "
+            f"{CUDA_RUNTIME_LIBS} in the paths that we search based on your env.")
+        CUDASetup.get_instance().add_log_entry(warning_msg, is_warning=True)
+
+
+def determine_cuda_runtime_lib_path() -> Union[Path, None]:
+    """
+    Searches for a cuda installations, in the following order of priority:
+        1. active conda env
+        2. LD_LIBRARY_PATH
+        3. any other env vars, while ignoring those that
+            - are known to be unrelated (see `bnb.cuda_setup.env_vars.to_be_ignored`)
+            - don't contain the path separator `/`
+
+    If multiple libraries are found in part 3, we optimistically try one,
+    while giving a warning message.
+    """
+    candidate_env_vars = get_potentially_lib_path_containing_env_vars()
+
+    if "CONDA_PREFIX" in candidate_env_vars:
+        conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "bin"
+
+        conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
+        warn_in_case_of_duplicates(conda_cuda_libs)
+
+        if conda_cuda_libs:
+            return next(iter(conda_cuda_libs))
+
+        conda_libs_path = Path(candidate_env_vars["CONDA_PREFIX"]) / "lib"
+
+        conda_cuda_libs = find_cuda_lib_in(str(conda_libs_path))
+        warn_in_case_of_duplicates(conda_cuda_libs)
+
+        if conda_cuda_libs:
+            return next(iter(conda_cuda_libs))
+
+        CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CONDA_PREFIX"]} did not contain '
+            f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
+
+    if "CUDA_PATH" in candidate_env_vars:
+        ld_cuda_libs_path = Path(candidate_env_vars["CUDA_PATH"]) / "bin"
+
+        lib_ld_cuda_libs = find_cuda_lib_in(str(ld_cuda_libs_path))
+        warn_in_case_of_duplicates(lib_ld_cuda_libs)
+
+        if lib_ld_cuda_libs:
+            return next(iter(lib_ld_cuda_libs))
+
+        ld_cuda_libs_path = Path(candidate_env_vars["CUDA_PATH"]) / "lib"
+
+        lib_ld_cuda_libs = find_cuda_lib_in(str(ld_cuda_libs_path))
+        warn_in_case_of_duplicates(lib_ld_cuda_libs)
+
+        if lib_ld_cuda_libs:
+            return next(iter(lib_ld_cuda_libs))
+
+        CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CUDA_PATH"]} did not contain '
+            f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
+
+    if "CUDA_HOME" in candidate_env_vars:
+        ld_cuda_libs_path = Path(candidate_env_vars["CUDA_HOME"]) / "bin"
+
+        lib_ld_cuda_libs = find_cuda_lib_in(str(ld_cuda_libs_path))
+        warn_in_case_of_duplicates(lib_ld_cuda_libs)
+
+        if lib_ld_cuda_libs:
+            return next(iter(lib_ld_cuda_libs))
+
+        ld_cuda_libs_path = Path(candidate_env_vars["CUDA_HOME"]) / "lib"
+
+        lib_ld_cuda_libs = find_cuda_lib_in(str(ld_cuda_libs_path))
+        warn_in_case_of_duplicates(lib_ld_cuda_libs)
+
+        if lib_ld_cuda_libs:
+            return next(iter(lib_ld_cuda_libs))
+
+        CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["CUDA_HOME"]} did not contain '
+            f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
+
+    if "LD_LIBRARY_PATH" in candidate_env_vars:
+        lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["LD_LIBRARY_PATH"])
+        warn_in_case_of_duplicates(lib_ld_cuda_libs)
+
+        if lib_ld_cuda_libs:
+            return next(iter(lib_ld_cuda_libs))
+
+        CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["LD_LIBRARY_PATH"]} did not contain '
+            f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
+
+    if "PATH" in candidate_env_vars:
+        lib_ld_cuda_libs = find_cuda_lib_in(candidate_env_vars["PATH"])
+        warn_in_case_of_duplicates(lib_ld_cuda_libs)
+
+        if lib_ld_cuda_libs:
+            return next(iter(lib_ld_cuda_libs))
+
+        CUDASetup.get_instance().add_log_entry(f'{candidate_env_vars["PATH"]} did not contain '
+            f'{CUDA_RUNTIME_LIBS} as expected! Searching further paths...', is_warning=True)
+
+    remaining_candidate_env_vars = {
+        env_var: value for env_var, value in candidate_env_vars.items()
+        if env_var not in {"CONDA_PREFIX", "CUDA_HOME", "CUDA_PATH", "LD_LIBRARY_PATH", "PATH"}
+    }
+
+    cuda_runtime_libs = set()
+    for env_var, value in remaining_candidate_env_vars.items():
+        cuda_runtime_libs.update(find_cuda_lib_in(value))
+
+    if len(cuda_runtime_libs) == 0:
+        CUDASetup.get_instance().add_log_entry('CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...')
+        cuda_runtime_libs.update(find_cuda_lib_in('/usr/local/cuda/lib64'))
+
+    warn_in_case_of_duplicates(cuda_runtime_libs)
+
+    return next(iter(cuda_runtime_libs)) if cuda_runtime_libs else None
+
+
 def check_cuda_result(cuda, result_val):
     # 3. Check for CUDA errors
     if result_val != 0:
-        error_str = ctypes.c_char_p()
-        cuda.cuGetErrorString(result_val, ctypes.byref(error_str))
-        print(f"CUDA exception! Error code: {error_str.value.decode()}")
+        error_str = ct.c_char_p()
+        cuda.cuGetErrorString(result_val, ct.byref(error_str))
+        if error_str.value is not None:
+            CUDASetup.get_instance().add_log_entry(f"CUDA exception! Error code: {error_str.value.decode()}")
+        else:
+            CUDASetup.get_instance().add_log_entry(f"Unknown CUDA exception! Please check your CUDA install. It might also be that your GPU is too old.")
 
-def get_cuda_version(cuda, cudart_path):
 # https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART____VERSION.html#group__CUDART____VERSION
+def get_cuda_version(cuda, cudart_path):
+    if cuda is None: return None
+
     try:
-        cudart = ctypes.CDLL(cudart_path)
+        cudart = ct.CDLL(str(cudart_path))
     except OSError:
-        # TODO: shouldn't we error or at least warn here?
-        print(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
+        CUDASetup.get_instance().add_log_entry(f'ERROR: libcudart.so could not be read from path: {cudart_path}!')
        return None
 
-    version = ctypes.c_int()
-    check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ctypes.byref(version)))
+    version = ct.c_int()
+    try:
+        check_cuda_result(cuda, cudart.cudaRuntimeGetVersion(ct.byref(version)))
+    except AttributeError as e:
+        CUDASetup.get_instance().add_log_entry(f'ERROR: {str(e)}')
+        CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: libcudart.so path is {cudart_path}')
+        CUDASetup.get_instance().add_log_entry(f'CUDA SETUP: Is seems that your cuda installation is not in your path. See https://github.com/TimDettmers/bitsandbytes/issues/85 for more information.')
     version = int(version.value)
     major = version//1000
     minor = (version-(major*1000))//10
 
     if major < 11:
-        print('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
+        CUDASetup.get_instance().add_log_entry('CUDA SETUP: CUDA version lower than 11 are currently not supported for LLM.int8(). You will be only to use 8-bit optimizers and quantization routines!!')
 
     return f'{major}{minor}'
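The decoding at the end of get_cuda_version is compact enough to miss: the runtime reports 1000*major + 10*minor as a single integer. A worked example, assuming cudaRuntimeGetVersion reports 11080 for a CUDA 11.8 runtime:

# Worked example of get_cuda_version's decoding (assumed input value 11080).
version = 11080
major = version // 1000                    # 11
minor = (version - (major * 1000)) // 10   # 8
assert f"{major}{minor}" == "118"          # -> libbitsandbytes_cuda118.dll on Windows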
@@ -52,10 +384,9 @@ def get_cuda_version(cuda, cudart_path):
 def get_cuda_lib_handle():
     # 1. find libcuda.so library (GPU driver) (/usr/lib)
     try:
-        cuda = ctypes.CDLL("libcuda.so")
+        cuda = ct.CDLL(CUDA_SHARED_LIB_NAME)
     except OSError:
-        # TODO: shouldn't we error or at least warn here?
-        print('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
+        CUDASetup.get_instance().add_log_entry('CUDA SETUP: WARNING! libcuda.so not found! Do you have a CUDA driver installed? If you are on a cluster, make sure you are on a CUDA machine!')
         return None
     check_cuda_result(cuda, cuda.cuInit(0))
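With CUDA_SHARED_LIB_NAME, the same call site now loads either the Linux driver library or the Windows one. A minimal sketch of that resolution, under the assumption that an NVIDIA driver is installed (on Windows the driver DLL lives in System32):

# Minimal sketch (assumes an NVIDIA driver is present); mirrors CUDA_SHARED_LIB_NAME above.
import os
import platform
import ctypes as ct

if platform.system() == "Windows":
    lib_name = f"{os.environ['SystemRoot']}\\System32\\nvcuda.dll"  # driver DLL
else:
    lib_name = "libcuda.so"  # driver shared object on Linux

cuda = ct.CDLL(lib_name)
print(cuda.cuInit(0))  # 0 == CUDA_SUCCESS; other codes go through check_cuda_result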
@@ -73,23 +404,20 @@ def get_compute_capabilities(cuda):
     # bits taken from https://gist.github.com/f0k/63a664160d016a491b2cbea15913d549
     """
 
-    nGpus = ctypes.c_int()
-    cc_major = ctypes.c_int()
-    cc_minor = ctypes.c_int()
-
-    device = ctypes.c_int()
-
-    check_cuda_result(cuda, cuda.cuDeviceGetCount(ctypes.byref(nGpus)))
+    nGpus = ct.c_int()
+    cc_major = ct.c_int()
+    cc_minor = ct.c_int()
+
+    device = ct.c_int()
+
+    check_cuda_result(cuda, cuda.cuDeviceGetCount(ct.byref(nGpus)))
     ccs = []
     for i in range(nGpus.value):
-        check_cuda_result(cuda, cuda.cuDeviceGet(ctypes.byref(device), i))
-        ref_major = ctypes.byref(cc_major)
-        ref_minor = ctypes.byref(cc_minor)
+        check_cuda_result(cuda, cuda.cuDeviceGet(ct.byref(device), i))
+        ref_major = ct.byref(cc_major)
+        ref_minor = ct.byref(cc_minor)
         # 2. call extern C function to determine CC
-        check_cuda_result(
-            cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device)
-        )
+        check_cuda_result(cuda, cuda.cuDeviceComputeCapability(ref_major, ref_minor, device))
         ccs.append(f"{cc_major.value}.{cc_minor.value}")
 
     return ccs
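The "<major>.<minor>" strings collected here feed is_cublasLt_compatible (added earlier in this file), which draws the cublasLt line at compute capability 7.5. Replaying that threshold over a few illustrative values:

# Illustrative replay of the is_cublasLt_compatible threshold (CC >= 7.5).
for cc in ["6.1", "7.0", "7.5", "8.0", "8.6"]:
    major, minor = (int(x) for x in cc.split("."))
    has_cublaslt = not (major < 7 or (major == 7 and minor < 5))
    print(cc, "full build" if has_cublaslt else "_nocublaslt (slow 8-bit matmul only)")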
@@ -102,48 +430,49 @@ def get_compute_capability(cuda):
     capabilities are downwards compatible. If no GPUs are detected, it returns
     None.
     """
-    ccs = get_compute_capabilities(cuda)
-    if ccs is not None:
-        # TODO: handle different compute capabilities; for now, take the max
-        return ccs[-1]
-    return None
+    if cuda is None: return None
+
+    # TODO: handle different compute capabilities; for now, take the max
+    ccs = get_compute_capabilities(cuda)
+    if ccs: return ccs[-1]
 
 
 def evaluate_cuda_setup():
-    print('')
-    print('='*35 + 'BUG REPORT' + '='*35)
-    print('Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues')
-    print('For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link')
-    print('='*80)
-    return "libbitsandbytes_cuda116.dll" # $$$
-
-    binary_name = "libbitsandbytes_cpu.so"
-    #if not torch.cuda.is_available():
-        #print('No GPU detected. Loading CPU library...')
-        #return binary_name
-
+    if 'BITSANDBYTES_NOWELCOME' not in os.environ or str(os.environ['BITSANDBYTES_NOWELCOME']) == '0':
+        print('')
+        print('='*35 + 'BUG REPORT' + '='*35)
+        print(('Welcome to bitsandbytes. For bug reports, please run\n\npython -m bitsandbytes\n\n'),
+            ('and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues'))
+        print('='*80)
+    return 'libbitsandbytes_cuda118.dll', None, None, None, None
+
+    if not torch.cuda.is_available(): return 'libbitsandbytes_cpu'+SHARED_LIB_EXTENSION, None, None, None, None
+
+    cuda_setup = CUDASetup.get_instance()
     cudart_path = determine_cuda_runtime_lib_path()
-    if cudart_path is None:
-        print(
-            "WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!"
-        )
-        return binary_name
-
-    print(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
     cuda = get_cuda_lib_handle()
     cc = get_compute_capability(cuda)
-    print(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
     cuda_version_string = get_cuda_version(cuda, cudart_path)
 
-    if cc == '':
-        print(
-            "WARNING: No GPU detected! Check your CUDA paths. Processing to load CPU-only library..."
-        )
-        return binary_name
+    failure = False
+    if cudart_path is None:
+        failure = True
+        cuda_setup.add_log_entry("WARNING: No libcudart.so found! Install CUDA or the cudatoolkit package (anaconda)!", is_warning=True)
+    else:
+        cuda_setup.add_log_entry(f"CUDA SETUP: CUDA runtime path found: {cudart_path}")
+
+    if cc == '' or cc is None:
+        failure = True
+        cuda_setup.add_log_entry("WARNING: No GPU detected! Check your CUDA paths. Proceeding to load CPU-only library...", is_warning=True)
+    else:
+        cuda_setup.add_log_entry(f"CUDA SETUP: Highest compute capability among GPUs detected: {cc}")
+
+    if cuda is None:
+        failure = True
+    else:
+        cuda_setup.add_log_entry(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
 
     # 7.5 is the minimum CC vor cublaslt
-    has_cublaslt = cc in ["7.5", "8.0", "8.6"]
+    has_cublaslt = is_cublasLt_compatible(cc)
 
     # TODO:
     # (1) CUDA missing cases (no CUDA installed by CUDA driver (nvidia-smi accessible)
@@ -151,16 +480,13 @@ def evaluate_cuda_setup():
 
     # we use ls -l instead of nvcc to determine the cuda version
     # since most installations will have the libcudart.so installed, but not the compiler
-    print(f'CUDA SETUP: Detected CUDA version {cuda_version_string}')
 
-    def get_binary_name():
-        "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt.so"
-        bin_base_name = "libbitsandbytes_cuda"
-        if has_cublaslt:
-            return f"{bin_base_name}{cuda_version_string}.so"
-        else:
-            return f"{bin_base_name}{cuda_version_string}_nocublaslt.so"
+    if failure:
+        binary_name = "libbitsandbytes_cpu" + SHARED_LIB_EXTENSION
+    elif has_cublaslt:
+        binary_name = f"libbitsandbytes_cuda{cuda_version_string}" + SHARED_LIB_EXTENSION
+    else:
+        "if not has_cublaslt (CC < 7.5), then we have to choose _nocublaslt"
+        binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt" + SHARED_LIB_EXTENSION
 
-    binary_name = get_binary_name()
-
-    return binary_name
+    return binary_name, cudart_path, cuda, cc, cuda_version_string
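Two things are worth noting here. First, on this Windows fork the unconditional return of 'libbitsandbytes_cuda118.dll' earlier in evaluate_cuda_setup short-circuits this selection, so the code below is effectively kept for parity with upstream. Second, the selection collapses to three names; the sketch below replays it under the assumption that cuda_version_string is "118" and the extension is the Windows ".dll":

# Sketch of the three possible binary names, assuming cuda_version_string = "118"
# and SHARED_LIB_EXTENSION = ".dll" (the Windows case).
cuda_version_string, SHARED_LIB_EXTENSION = "118", ".dll"
for failure, has_cublaslt in [(True, False), (False, True), (False, False)]:
    if failure:
        binary_name = "libbitsandbytes_cpu" + SHARED_LIB_EXTENSION
    elif has_cublaslt:
        binary_name = f"libbitsandbytes_cuda{cuda_version_string}" + SHARED_LIB_EXTENSION
    else:
        binary_name = f"libbitsandbytes_cuda{cuda_version_string}_nocublaslt" + SHARED_LIB_EXTENSION
    print(binary_name)
# libbitsandbytes_cpu.dll / libbitsandbytes_cuda118.dll / libbitsandbytes_cuda118_nocublaslt.dll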
library/train_util.py:

@@ -2165,6 +2165,8 @@ def cache_batch_latents(
             info.latents = latent
             if flip_aug:
                 info.latents_flipped = flipped_latent
 
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
 
 
 def cache_batch_text_encoder_outputs(
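These two lines are the "vram leak fix" from the commit message: after each batch of latents is cached, PyTorch's caching allocator is still holding the blocks freed by the VAE encode, so reserved VRAM ratchets upward across batches; empty_cache() hands those blocks back to the driver. A hedged illustration (the tensor x is a hypothetical stand-in for VAE outputs; actual numbers vary by machine):

# Illustration of what empty_cache() reclaims after caching a batch of latents.
import torch

if torch.cuda.is_available():
    x = torch.randn(64, 4, 128, 128, device="cuda")  # stand-in for VAE outputs
    del x                                            # freed, but still cached
    before = torch.cuda.memory_reserved()
    torch.cuda.empty_cache()                         # return cached blocks to the driver
    after = torch.cuda.memory_reserved()
    print(f"reserved VRAM: {before} -> {after} bytes")

The trade-off is extra synchronization, which is why the call sits once per batch in cache_batch_latents rather than once per image.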
requirements.txt:

@@ -6,7 +6,7 @@ albumentations==1.3.0
 opencv-python==4.7.0.68
 einops==0.6.0
 pytorch-lightning==1.9.0
-bitsandbytes==0.35.0
+bitsandbytes==0.39.1
 tensorboard==2.10.1
 safetensors==0.3.1
 # gradio==3.16.2
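After upgrading the pin from 0.35.0 to 0.39.1 (and, on Windows, copying the cuda118 DLL and the patched main.py into the installed package, as the README changes referenced in this PR's history describe), a quick sanity check can confirm the new optimizers are present; importlib.metadata is used here to avoid assuming bitsandbytes exposes a version attribute:

# Post-install sanity check (run inside the training venv).
from importlib.metadata import version
import bitsandbytes as bnb

print(version("bitsandbytes"))               # expect 0.39.1
print(hasattr(bnb.optim, "PagedAdamW8bit"))  # expected True on 0.39.x
print(hasattr(bnb.optim, "PagedLion8bit"))   # expected True on 0.39.1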