fix: refactor huber-loss calculation in multiple training scripts

This commit is contained in:
Kohya S
2024-12-01 21:20:28 +09:00
parent 0fe6320f09
commit cc11989755
13 changed files with 52 additions and 70 deletions

View File

@@ -695,9 +695,7 @@ def train(args):
# Sample noise, sample a random timestep for each image, and add noise to the latents,
# with noise offset and/or multires noise if specified
-noise, noisy_latents, timesteps = train_util.get_noise_noisy_latents_and_timesteps(
-    args, noise_scheduler, latents
-)
+noise, noisy_latents, timesteps = train_util.get_noise_noisy_latents_and_timesteps(args, noise_scheduler, latents)
noisy_latents = noisy_latents.to(weight_dtype) # TODO check why noisy_latents is not weight_dtype
@@ -711,6 +709,7 @@ def train(args):
else:
target = noise
+huber_c = train_util.get_huber_threshold_if_needed(args, timesteps, noise_scheduler)
if (
args.min_snr_gamma
or args.scale_v_pred_loss_like_noise_pred
@@ -719,9 +718,7 @@ def train(args):
or args.masked_loss
):
# do not mean over batch dimension for snr weight or scale v-pred loss
-loss = train_util.conditional_loss(
-    args, noise_pred.float(), target.float(), timesteps, "none", noise_scheduler
-)
+loss = train_util.conditional_loss(noise_pred.float(), target.float(), args.loss_type, "none", huber_c)
if args.masked_loss or ("alpha_masks" in batch and batch["alpha_masks"] is not None):
loss = apply_masked_loss(loss, batch)
loss = loss.mean([1, 2, 3])
@@ -737,9 +734,7 @@ def train(args):
loss = loss.mean() # mean over batch dimension
else:
-loss = train_util.conditional_loss(
-    args, noise_pred.float(), target.float(), timesteps, "none", noise_scheduler
-)
+loss = train_util.conditional_loss(noise_pred.float(), target.float(), args.loss_type, "mean", huber_c)
accelerator.backward(loss)