Min-SNR Weighting Strategy: Fixed SNR calculation to match the author's implementation

This commit is contained in:
AI-Casanova
2023-03-21 20:38:27 -05:00
committed by AI-Casanova
parent 64c923230e
commit a3c7d711e4

View File

@@ -10,8 +10,8 @@ def apply_snr_weight(loss, latents, noisy_latents, gamma):
gamma_over_snr = torch.div(torch.ones_like(snr)*gamma,snr) gamma_over_snr = torch.div(torch.ones_like(snr)*gamma,snr)
snr_weight = torch.minimum(gamma_over_snr,torch.ones_like(gamma_over_snr)).float() #from paper snr_weight = torch.minimum(gamma_over_snr,torch.ones_like(gamma_over_snr)).float() #from paper
loss = loss * snr_weight loss = loss * snr_weight
print(snr_weight) #print(snr_weight)
return loss return loss
def add_custom_train_arguments(parser: argparse.ArgumentParser): def add_custom_train_arguments(parser: argparse.ArgumentParser):
parser.add_argument("--min_snr_gamma", type=float, default=0, help="gamma for reducing the weight of high loss timesteps. Lower numbers have stronger effect. 5 is recommended by paper.") parser.add_argument("--min_snr_gamma", type=float, default=0, help="gamma for reducing the weight of high loss timesteps. Lower numbers have stronger effect. 5 is recommended by paper.")