diff --git a/fine_tune.py b/fine_tune.py index 524b9b2a..ddc518f5 100644 --- a/fine_tune.py +++ b/fine_tune.py @@ -62,7 +62,7 @@ def train(args): return if cache_latents: - assert train_dataset_group.is_latent_cachable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" + assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" # acceleratorを準備する print("prepare accelerator") diff --git a/library/train_util.py b/library/train_util.py index a8cbd7d6..b191604c 100644 --- a/library/train_util.py +++ b/library/train_util.py @@ -547,7 +547,7 @@ class BaseDataset(torch.utils.data.Dataset): assert image.shape[0] == reso[1] and image.shape[1] == reso[0], f"internal error, illegal trimmed size: {image.shape}, {reso}" return image - def is_latent_cachable(self): + def is_latent_cacheable(self): return all([not subset.color_aug and not subset.random_crop for subset in self.subsets]) def cache_latents(self, vae): @@ -1062,8 +1062,8 @@ class DatasetGroup(torch.utils.data.ConcatDataset): for dataset in self.datasets: dataset.cache_latents(vae) - def is_latent_cachable(self) -> bool: - return all([dataset.is_latent_cachable() for dataset in self.datasets]) + def is_latent_cacheable(self) -> bool: + return all([dataset.is_latent_cacheable() for dataset in self.datasets]) def set_current_epoch(self, epoch): for dataset in self.datasets: diff --git a/train_db.py b/train_db.py index 2fa9f8bb..6ce1367e 100644 --- a/train_db.py +++ b/train_db.py @@ -62,7 +62,7 @@ def train(args): return if cache_latents: - assert train_dataset_group.is_latent_cachable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" + assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" # acceleratorを準備する print("prepare accelerator") diff --git a/train_network.py b/train_network.py index 24577c6f..34ce2160 100644 --- a/train_network.py +++ b/train_network.py @@ -99,7 +99,7 @@ def train(args): return if cache_latents: - assert train_dataset_group.is_latent_cachable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" + assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" # acceleratorを準備する print("prepare accelerator") @@ -341,7 +341,7 @@ def train(args): metadata["ss_datasets"] = json.dumps(datasets_metadata) else: - # conserving backward compatiblity when using train_dataset_dir and reg_dataset_dir + # conserving backward compatibility when using train_dataset_dir and reg_dataset_dir assert len(train_dataset_group.datasets) == 1, f"There should be a single dataset but {len(train_dataset_group.datasets)} found. This seems to be a bug. / データセットは1個だけ存在するはずですが、実際には{len(train_dataset_group.datasets)}個でした。プログラムのバグかもしれません。" dataset = train_dataset_group.datasets[0] diff --git a/train_textual_inversion.py b/train_textual_inversion.py index 0f23dd55..669be7e0 100644 --- a/train_textual_inversion.py +++ b/train_textual_inversion.py @@ -197,7 +197,7 @@ def train(args): return if cache_latents: - assert train_dataset_group.is_latent_cachable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" + assert train_dataset_group.is_latent_cacheable(), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません" # モデルに xformers とか memory efficient attention を組み込む train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)