mirror of
https://github.com/kohya-ss/sd-scripts.git
synced 2026-04-08 22:35:09 +00:00
Fix gradients synchronization for multi-GPUs training (#989)
* delete DDP wrapper * fix train_db vae and train_network * fix train_db vae and train_network unwrap * network grad sync --------- Co-authored-by: Kohya S <52813779+kohya-ss@users.noreply.github.com>
This commit is contained in:
@@ -397,13 +397,10 @@ def train(args):
    # acceleratorがなんかよろしくやってくれるらしい
    if train_unet:
        unet = accelerator.prepare(unet)
        (unet,) = train_util.transform_models_if_DDP([unet])
    if train_text_encoder1:
        text_encoder1 = accelerator.prepare(text_encoder1)
        (text_encoder1,) = train_util.transform_models_if_DDP([text_encoder1])
    if train_text_encoder2:
        text_encoder2 = accelerator.prepare(text_encoder2)
        (text_encoder2,) = train_util.transform_models_if_DDP([text_encoder2])

    optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler)
Reference in New Issue
Block a user