mirror of https://github.com/kohya-ss/sd-scripts.git
fix typos
@@ -62,7 +62,7 @@ def train(args):
     return
 
   if cache_latents:
-    assert train_dataset_group.is_latent_cachable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
+    assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
 
   # acceleratorを準備する
   print("prepare accelerator")
@@ -547,7 +547,7 @@ class BaseDataset(torch.utils.data.Dataset):
       assert image.shape[0] == reso[1] and image.shape[1] == reso[0], f"internal error, illegal trimmed size: {image.shape}, {reso}"
     return image
 
-  def is_latent_cachable(self):
+  def is_latent_cacheable(self):
     return all([not subset.color_aug and not subset.random_crop for subset in self.subsets])
 
   def cache_latents(self, vae):
@@ -1062,8 +1062,8 @@ class DatasetGroup(torch.utils.data.ConcatDataset):
     for dataset in self.datasets:
       dataset.cache_latents(vae)
 
-  def is_latent_cachable(self) -> bool:
-    return all([dataset.is_latent_cachable() for dataset in self.datasets])
+  def is_latent_cacheable(self) -> bool:
+    return all([dataset.is_latent_cacheable() for dataset in self.datasets])
 
   def set_current_epoch(self, epoch):
     for dataset in self.datasets:
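Note that the rename has to land on both the definitions above (BaseDataset, DatasetGroup) and every call site at once, or the asserts in the train(args) hunks would raise AttributeError. A minimal standalone sketch of the check itself, using simplified stand-in classes rather than the library's real ones:

from dataclasses import dataclass, field
from typing import List

@dataclass
class Subset:
    # Augmentations incompatible with latent caching: a cached latent is
    # computed once per image, so per-step color jitter or random crops
    # applied to pixels would never reach the VAE again.
    color_aug: bool = False
    random_crop: bool = False

@dataclass
class Dataset:
    subsets: List[Subset] = field(default_factory=list)

    def is_latent_cacheable(self) -> bool:
        # Mirrors BaseDataset.is_latent_cacheable in the hunk above.
        return all(not s.color_aug and not s.random_crop for s in self.subsets)

@dataclass
class DatasetGroup:
    datasets: List[Dataset] = field(default_factory=list)

    def is_latent_cacheable(self) -> bool:
        # The group form delegates: every member dataset must be cacheable.
        return all(d.is_latent_cacheable() for d in self.datasets)

group = DatasetGroup([Dataset([Subset()]), Dataset([Subset(random_crop=True)])])
print(group.is_latent_cacheable())  # False: one subset still uses random_crop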
@@ -62,7 +62,7 @@ def train(args):
     return
 
   if cache_latents:
-    assert train_dataset_group.is_latent_cachable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
+    assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
 
   # acceleratorを準備する
   print("prepare accelerator")
@@ -99,7 +99,7 @@ def train(args):
     return
 
   if cache_latents:
-    assert train_dataset_group.is_latent_cachable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
+    assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
 
   # acceleratorを準備する
   print("prepare accelerator")
@@ -341,7 +341,7 @@ def train(args):
 
     metadata["ss_datasets"] = json.dumps(datasets_metadata)
   else:
-    # conserving backward compatiblity when using train_dataset_dir and reg_dataset_dir
+    # conserving backward compatibility when using train_dataset_dir and reg_dataset_dir
     assert len(train_dataset_group.datasets) == 1, f"There should be a single dataset but {len(train_dataset_group.datasets)} found. This seems to be a bug. / データセットは1個だけ存在するはずですが、実際には{len(train_dataset_group.datasets)}個でした。プログラムのバグかもしれません。"
 
     dataset = train_dataset_group.datasets[0]
@@ -197,7 +197,7 @@ def train(args):
     return
 
   if cache_latents:
-    assert train_dataset_group.is_latent_cachable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
+    assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
 
   # モデルに xformers とか memory efficient attention を組み込む
   train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
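All of the train(args) hunks share the same guard-then-cache shape. Building on the simplified classes in the sketch above (so this is also a sketch; the real scripts pass a VAE and call the group-level cache_latents):

def prepare_latent_cache(group: DatasetGroup, cache_latents: bool) -> None:
    if cache_latents:
        # The assert fixed throughout this commit: refuse to cache while
        # any subset still enables color_aug or random_crop.
        assert group.is_latent_cacheable(), (
            "when caching latents, either color_aug or random_crop cannot be used"
        )
        # The real scripts now call train_dataset_group.cache_latents(vae);
        # the stand-in classes here have no VAE, so there is nothing to encode.

prepare_latent_cache(DatasetGroup([Dataset([Subset()])]), cache_latents=True)   # passes
prepare_latent_cache(DatasetGroup([Dataset([Subset(color_aug=True)])]), False)  # guard skipped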