diff --git a/README.md b/README.md index 18e8e659..497969ab 100644 --- a/README.md +++ b/README.md @@ -9,11 +9,17 @@ __Please update PyTorch to 2.4.0. We have tested with `torch==2.4.0` and `torchv The command to install PyTorch is as follows: `pip3 install torch==2.4.0 torchvision==0.19.0 --index-url https://download.pytorch.org/whl/cu124` +If you are using DeepSpeed, please install DeepSpeed with `pip install deepspeed==0.16.7`. + - [FLUX.1 training](#flux1-training) - [SD3 training](#sd3-training) ### Recent Updates +May 1, 2025: +- The error when training FLUX.1 with mixed precision in flux_train.py with DeepSpeed enabled has been resolved. Thanks to sharlynxy for PR [#2060](https://github.com/kohya-ss/sd-scripts/pull/2060). Please refer to the PR for details. + - If you enable DeepSpeed, please install DeepSpeed with `pip install deepspeed==0.16.7`. + Apr 27, 2025: - FLUX.1 training now supports CFG scale in the sample generation during training. Please use `--g` option, to specify the CFG scale (note that `--l` is used as the embedded guidance scale.) PR [#2064](https://github.com/kohya-ss/sd-scripts/pull/2064). - See [here](#sample-image-generation-during-training) for details. @@ -875,6 +881,14 @@ Note: Some user reports ``ValueError: fp16 mixed precision requires a GPU`` is o (Single GPU with id `0` will be used.) 
+## DeepSpeed installation (experimental, Linux or WSL2 only) + +To install DeepSpeed, run the following command in your activated virtual environment: + +```bash +pip install deepspeed==0.16.7 +``` + ## Upgrade When a new release comes out you can upgrade your repo with the following command: diff --git a/library/deepspeed_utils.py b/library/deepspeed_utils.py index 99a7b2b3..a8a05c3a 100644 --- a/library/deepspeed_utils.py +++ b/library/deepspeed_utils.py @@ -5,6 +5,8 @@ from accelerate import DeepSpeedPlugin, Accelerator from .utils import setup_logging +from .device_utils import get_preferred_device + setup_logging() import logging @@ -94,6 +96,7 @@ def prepare_deepspeed_plugin(args: argparse.Namespace): deepspeed_plugin.deepspeed_config["train_batch_size"] = ( args.train_batch_size * args.gradient_accumulation_steps * int(os.environ["WORLD_SIZE"]) ) + deepspeed_plugin.set_mixed_precision(args.mixed_precision) if args.mixed_precision.lower() == "fp16": deepspeed_plugin.deepspeed_config["fp16"]["initial_scale_power"] = 0 # preventing overflow. 
@@ -122,18 +125,56 @@ def prepare_deepspeed_model(args: argparse.Namespace, **models): class DeepSpeedWrapper(torch.nn.Module): def __init__(self, **kw_models) -> None: super().__init__() + self.models = torch.nn.ModuleDict() + + wrap_model_forward_with_torch_autocast = args.mixed_precision != "no" for key, model in kw_models.items(): if isinstance(model, list): model = torch.nn.ModuleList(model) + + if wrap_model_forward_with_torch_autocast: + model = self.__wrap_model_with_torch_autocast(model) + assert isinstance( model, torch.nn.Module ), f"model must be an instance of torch.nn.Module, but got {key} is {type(model)}" + self.models.update(torch.nn.ModuleDict({key: model})) + def __wrap_model_with_torch_autocast(self, model): + if isinstance(model, torch.nn.ModuleList): + model = torch.nn.ModuleList([self.__wrap_model_forward_with_torch_autocast(m) for m in model]) + else: + model = self.__wrap_model_forward_with_torch_autocast(model) + return model + + def __wrap_model_forward_with_torch_autocast(self, model): + + assert hasattr(model, "forward"), "model must have a forward method." + + forward_fn = model.forward + + def forward(*args, **kwargs): + try: + device_type = model.device.type + except AttributeError: + logger.warning( + "[DeepSpeed] model.device is not available. Using get_preferred_device() " + "to determine the device_type for torch.autocast()."
+ ) + device_type = get_preferred_device().type + + with torch.autocast(device_type=device_type): + return forward_fn(*args, **kwargs) + + model.forward = forward + return model + def get_models(self): return self.models + ds_model = DeepSpeedWrapper(**models) return ds_model diff --git a/library/train_util.py b/library/train_util.py index 08bd836d..d7243e07 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -1060,8 +1060,11 @@ class BaseDataset(torch.utils.data.Dataset): self.bucket_info["buckets"][i] = {"resolution": reso, "count": len(bucket)} logger.info(f"bucket {i}: resolution {reso}, count: {len(bucket)}") - img_ar_errors = np.array(img_ar_errors) - mean_img_ar_error = np.mean(np.abs(img_ar_errors)) + if len(img_ar_errors) == 0: + mean_img_ar_error = 0 # avoid NaN + else: + img_ar_errors = np.array(img_ar_errors) + mean_img_ar_error = np.mean(np.abs(img_ar_errors)) self.bucket_info["mean_img_ar_error"] = mean_img_ar_error logger.info(f"mean ar error (without repeats): {mean_img_ar_error}") @@ -5516,6 +5519,11 @@ def load_target_model(args, weight_dtype, accelerator, unet_use_linear_projectio def patch_accelerator_for_fp16_training(accelerator): + + from accelerate import DistributedType + if accelerator.distributed_type == DistributedType.DEEPSPEED: + return + org_unscale_grads = accelerator.scaler._unscale_grads_ def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16): diff --git a/networks/lora_flux.py b/networks/lora_flux.py index 92b3979a..0b30f1b8 100644 --- a/networks/lora_flux.py +++ b/networks/lora_flux.py @@ -955,26 +955,26 @@ class LoRANetwork(torch.nn.Module): for lora in self.text_encoder_loras + self.unet_loras: lora.update_grad_norms() - def grad_norms(self) -> Tensor: + def grad_norms(self) -> Tensor | None: grad_norms = [] for lora in self.text_encoder_loras + self.unet_loras: if hasattr(lora, "grad_norms") and lora.grad_norms is not None: grad_norms.append(lora.grad_norms.mean(dim=0)) - return
torch.stack(grad_norms) if len(grad_norms) > 0 else torch.tensor([]) + return torch.stack(grad_norms) if len(grad_norms) > 0 else None - def weight_norms(self) -> Tensor: + def weight_norms(self) -> Tensor | None: weight_norms = [] for lora in self.text_encoder_loras + self.unet_loras: if hasattr(lora, "weight_norms") and lora.weight_norms is not None: weight_norms.append(lora.weight_norms.mean(dim=0)) - return torch.stack(weight_norms) if len(weight_norms) > 0 else torch.tensor([]) + return torch.stack(weight_norms) if len(weight_norms) > 0 else None - def combined_weight_norms(self) -> Tensor: + def combined_weight_norms(self) -> Tensor | None: combined_weight_norms = [] for lora in self.text_encoder_loras + self.unet_loras: if hasattr(lora, "combined_weight_norms") and lora.combined_weight_norms is not None: combined_weight_norms.append(lora.combined_weight_norms.mean(dim=0)) - return torch.stack(combined_weight_norms) if len(combined_weight_norms) > 0 else torch.tensor([]) + return torch.stack(combined_weight_norms) if len(combined_weight_norms) > 0 else None def load_weights(self, file): diff --git a/pytest.ini b/pytest.ini index 484d3aef..34b7e9c1 100644 --- a/pytest.ini +++ b/pytest.ini @@ -6,3 +6,4 @@ filterwarnings = ignore::DeprecationWarning ignore::UserWarning ignore::FutureWarning +pythonpath = . 
diff --git a/train_network.py b/train_network.py index 5b2d2751..2b130bad 100644 --- a/train_network.py +++ b/train_network.py @@ -1518,11 +1518,13 @@ class NetworkTrainer: max_mean_logs = {"Keys Scaled": keys_scaled, "Average key norm": mean_norm} else: if hasattr(network, "weight_norms"): - mean_norm = network.weight_norms().mean().item() - mean_grad_norm = network.grad_norms().mean().item() - mean_combined_norm = network.combined_weight_norms().mean().item() weight_norms = network.weight_norms() - maximum_norm = weight_norms.max().item() if weight_norms.numel() > 0 else None + mean_norm = weight_norms.mean().item() if weight_norms is not None else None + grad_norms = network.grad_norms() + mean_grad_norm = grad_norms.mean().item() if grad_norms is not None else None + combined_weight_norms = network.combined_weight_norms() + mean_combined_norm = combined_weight_norms.mean().item() if combined_weight_norms is not None else None + maximum_norm = weight_norms.max().item() if weight_norms is not None else None keys_scaled = None max_mean_logs = {} else: