Use self.get_noise_pred_and_target and drop fixed timesteps

This commit is contained in:
rockerBOO
2025-01-06 10:48:38 -05:00
parent 1c0ae306e5
commit bbf6bbd5ea
3 changed files with 40 additions and 86 deletions

View File

@@ -312,6 +312,7 @@ class Sd3NetworkTrainer(train_network.NetworkTrainer):
network,
weight_dtype,
train_unet,
+is_train=True
):
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
@@ -339,7 +340,7 @@ class Sd3NetworkTrainer(train_network.NetworkTrainer):
t5_attn_mask = None
# call model
-with accelerator.autocast():
+with torch.set_grad_enabled(is_train and train_unet), accelerator.autocast():
# TODO support attention mask
model_pred = unet(noisy_model_input, timesteps, context=context, y=lg_pooled)