ACCC1380 committed on
Commit ed620e7
1 Parent(s): 6354438

Upload lora-scripts/sd-scripts/finetune/prepare_buckets_latents.py with huggingface_hub
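The commit message says the file was pushed with the huggingface_hub client. Below is a minimal sketch of that kind of upload, assuming the HfApi.upload_file call; the repository id is a placeholder, since the target repo and token handling are not shown in this commit header.

from huggingface_hub import HfApi

api = HfApi()  # assumes a token is available (e.g. HF_TOKEN in the environment or a cached login)
api.upload_file(
    path_or_fileobj="lora-scripts/sd-scripts/finetune/prepare_buckets_latents.py",
    path_in_repo="lora-scripts/sd-scripts/finetune/prepare_buckets_latents.py",
    repo_id="ACCC1380/<repo-name>",  # placeholder: the target repository is not named in this header
)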

lora-scripts/sd-scripts/finetune/prepare_buckets_latents.py ADDED
@@ -0,0 +1,265 @@
+ import argparse
+ import os
+ import json
+
+ from pathlib import Path
+ from typing import List
+ from tqdm import tqdm
+ import numpy as np
+ from PIL import Image
+ import cv2
+
+ import torch
+ from library.device_utils import init_ipex, get_preferred_device
+ init_ipex()
+
+ from torchvision import transforms
+
+ import library.model_util as model_util
+ import library.train_util as train_util
+ from library.utils import setup_logging
+ setup_logging()
+ import logging
+ logger = logging.getLogger(__name__)
+
+ DEVICE = get_preferred_device()
+
+ IMAGE_TRANSFORMS = transforms.Compose(
+     [
+         transforms.ToTensor(),
+         transforms.Normalize([0.5], [0.5]),
+     ]
+ )
+
+
+ def collate_fn_remove_corrupted(batch):
+     """Collate function that allows removing corrupted examples from the
+     dataloader. It expects the dataloader to return 'None' when that occurs.
+     The 'None's in the batch are removed.
+     """
+     # Filter out all the Nones (corrupted examples)
+     batch = list(filter(lambda x: x is not None, batch))
+     return batch
+
+
+ def get_npz_filename(data_dir, image_key, is_full_path, recursive):
+     if is_full_path:
+         base_name = os.path.splitext(os.path.basename(image_key))[0]
+         relative_path = os.path.relpath(os.path.dirname(image_key), data_dir)
+     else:
+         base_name = image_key
+         relative_path = ""
+
+     if recursive and relative_path:
+         return os.path.join(data_dir, relative_path, base_name) + ".npz"
+     else:
+         return os.path.join(data_dir, base_name) + ".npz"
+
+
+ def main(args):
+     # assert args.bucket_reso_steps % 8 == 0, f"bucket_reso_steps must be divisible by 8 / bucket_reso_stepは8で割り切れる必要があります"
+     if args.bucket_reso_steps % 8 > 0:
+         logger.warning(f"resolution of buckets in training time is a multiple of 8 / 学習時の各bucketの解像度は8単位になります")
+     if args.bucket_reso_steps % 32 > 0:
+         logger.warning(
+             f"WARNING: bucket_reso_steps is not divisible by 32. It is not working with SDXL / bucket_reso_stepsが32で割り切れません。SDXLでは動作しません"
+         )
+
+     train_data_dir_path = Path(args.train_data_dir)
+     image_paths: List[str] = [str(p) for p in train_util.glob_images_pathlib(train_data_dir_path, args.recursive)]
+     logger.info(f"found {len(image_paths)} images.")
+
+     if os.path.exists(args.in_json):
+         logger.info(f"loading existing metadata: {args.in_json}")
+         with open(args.in_json, "rt", encoding="utf-8") as f:
+             metadata = json.load(f)
+     else:
+         logger.error(f"no metadata / メタデータファイルがありません: {args.in_json}")
+         return
+
+     weight_dtype = torch.float32
+     if args.mixed_precision == "fp16":
+         weight_dtype = torch.float16
+     elif args.mixed_precision == "bf16":
+         weight_dtype = torch.bfloat16
+
+     vae = model_util.load_vae(args.model_name_or_path, weight_dtype)
+     vae.eval()
+     vae.to(DEVICE, dtype=weight_dtype)
+
+     # compute the bucket sizes
+     max_reso = tuple([int(t) for t in args.max_resolution.split(",")])
+     assert len(max_reso) == 2, f"illegal resolution (not 'width,height') / 画像サイズに誤りがあります。'幅,高さ'で指定してください: {args.max_resolution}"
+
+     bucket_manager = train_util.BucketManager(
+         args.bucket_no_upscale, max_reso, args.min_bucket_reso, args.max_bucket_reso, args.bucket_reso_steps
+     )
+     if not args.bucket_no_upscale:
+         bucket_manager.make_buckets()
+     else:
+         logger.warning(
+             "min_bucket_reso and max_bucket_reso are ignored if bucket_no_upscale is set, because bucket reso is defined by image size automatically / bucket_no_upscaleが指定された場合は、bucketの解像度は画像サイズから自動計算されるため、min_bucket_resoとmax_bucket_resoは無視されます"
+         )
+
+     # assign each image to the appropriate bucket one by one while computing its latents
+     img_ar_errors = []
+
+     def process_batch(is_last):
+         for bucket in bucket_manager.buckets:
+             if (is_last and len(bucket) > 0) or len(bucket) >= args.batch_size:
+                 train_util.cache_batch_latents(vae, True, bucket, args.flip_aug, False)
+                 bucket.clear()
+
+     # option: use a DataLoader to speed up image loading
+     if args.max_data_loader_n_workers is not None:
+         dataset = train_util.ImageLoadingDataset(image_paths)
+         data = torch.utils.data.DataLoader(
+             dataset,
+             batch_size=1,
+             shuffle=False,
+             num_workers=args.max_data_loader_n_workers,
+             collate_fn=collate_fn_remove_corrupted,
+             drop_last=False,
+         )
+     else:
+         data = [[(None, ip)] for ip in image_paths]
+
+     bucket_counts = {}
+     for data_entry in tqdm(data, smoothing=0.0):
+         if data_entry[0] is None:
+             continue
+
+         img_tensor, image_path = data_entry[0]
+         if img_tensor is not None:
+             image = transforms.functional.to_pil_image(img_tensor)
+         else:
+             try:
+                 image = Image.open(image_path)
+                 if image.mode != "RGB":
+                     image = image.convert("RGB")
+             except Exception as e:
+                 logger.error(f"Could not load image path / 画像を読み込めません: {image_path}, error: {e}")
+                 continue
+
+         image_key = image_path if args.full_path else os.path.splitext(os.path.basename(image_path))[0]
+         if image_key not in metadata:
+             metadata[image_key] = {}
+
+         # this part could also be moved into the DataSet for more speed, but that gets complicated
+
+         reso, resized_size, ar_error = bucket_manager.select_bucket(image.width, image.height)
+         img_ar_errors.append(abs(ar_error))
+         bucket_counts[reso] = bucket_counts.get(reso, 0) + 1
+
+         # the resolution recorded in the metadata is in latent units, so round down to a multiple of 8
+         metadata[image_key]["train_resolution"] = (reso[0] - reso[0] % 8, reso[1] - reso[1] % 8)
+
+         if not args.bucket_no_upscale:
+             # when not upscaling, verify that the resized size matches the bucket size in either width or height
+             assert (
+                 resized_size[0] == reso[0] or resized_size[1] == reso[1]
+             ), f"internal error, resized size not match: {reso}, {resized_size}, {image.width}, {image.height}"
+             assert (
+                 resized_size[0] >= reso[0] and resized_size[1] >= reso[1]
+             ), f"internal error, resized size too small: {reso}, {resized_size}, {image.width}, {image.height}"
+
+         assert (
+             resized_size[0] >= reso[0] and resized_size[1] >= reso[1]
+         ), f"internal error resized size is small: {resized_size}, {reso}"
+
+         # if the npz file already exists, check its shape etc. and skip the image if they match
+         npz_file_name = get_npz_filename(args.train_data_dir, image_key, args.full_path, args.recursive)
+         if args.skip_existing:
+             if train_util.is_disk_cached_latents_is_expected(reso, npz_file_name, args.flip_aug):
+                 continue
+
+         # add to the batch
+         image_info = train_util.ImageInfo(image_key, 1, "", False, image_path)
+         image_info.latents_npz = npz_file_name
+         image_info.bucket_reso = reso
+         image_info.resized_size = resized_size
+         image_info.image = image
+         bucket_manager.add_image(reso, image_info)
+
+         # decide whether a batch is ready and run inference on it
+         process_batch(False)
+
+     # process the remainder
+     process_batch(True)
+
+     bucket_manager.sort()
+     for i, reso in enumerate(bucket_manager.resos):
+         count = bucket_counts.get(reso, 0)
+         if count > 0:
+             logger.info(f"bucket {i} {reso}: {count}")
+     img_ar_errors = np.array(img_ar_errors)
+     logger.info(f"mean ar error: {np.mean(img_ar_errors)}")
+
+     # write out the metadata and finish
+     logger.info(f"writing metadata: {args.out_json}")
+     with open(args.out_json, "wt", encoding="utf-8") as f:
+         json.dump(metadata, f, indent=2)
+     logger.info("done!")
+
+
+ def setup_parser() -> argparse.ArgumentParser:
+     parser = argparse.ArgumentParser()
+     parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ")
+     parser.add_argument("in_json", type=str, help="metadata file to input / 読み込むメタデータファイル")
+     parser.add_argument("out_json", type=str, help="metadata file to output / メタデータファイル書き出し先")
+     parser.add_argument("model_name_or_path", type=str, help="model name or path to encode latents / latentを取得するためのモデル")
+     parser.add_argument("--v2", action="store_true", help="not used (for backward compatibility) / 使用されません(互換性のため残してあります)")
+     parser.add_argument("--batch_size", type=int, default=1, help="batch size in inference / 推論時のバッチサイズ")
+     parser.add_argument(
+         "--max_data_loader_n_workers",
+         type=int,
+         default=None,
+         help="enable image reading by DataLoader with this number of workers (faster) / DataLoaderによる画像読み込みを有効にしてこのワーカー数を適用する(読み込みを高速化)",
+     )
+     parser.add_argument(
+         "--max_resolution",
+         type=str,
+         default="512,512",
+         help="max resolution in fine tuning (width,height) / fine tuning時の最大画像サイズ 「幅,高さ」(使用メモリ量に関係します)",
+     )
+     parser.add_argument("--min_bucket_reso", type=int, default=256, help="minimum resolution for buckets / bucketの最小解像度")
+     parser.add_argument("--max_bucket_reso", type=int, default=1024, help="maximum resolution for buckets / bucketの最大解像度")
+     parser.add_argument(
+         "--bucket_reso_steps",
+         type=int,
+         default=64,
+         help="steps of resolution for buckets, divisible by 8 is recommended / bucketの解像度の単位、8で割り切れる値を推奨します",
+     )
+     parser.add_argument(
+         "--bucket_no_upscale", action="store_true", help="make bucket for each image without upscaling / 画像を拡大せずbucketを作成します"
+     )
+     parser.add_argument(
+         "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help="use mixed precision / 混合精度を使う場合、その精度"
+     )
+     parser.add_argument(
+         "--full_path",
+         action="store_true",
+         help="use full path as image-key in metadata (supports multiple directories) / メタデータで画像キーをフルパスにする(複数の学習画像ディレクトリに対応)",
+     )
+     parser.add_argument(
+         "--flip_aug", action="store_true", help="flip augmentation, save latents for flipped images / 左右反転した画像もlatentを取得、保存する"
+     )
+     parser.add_argument(
+         "--skip_existing",
+         action="store_true",
+         help="skip images if npz already exists (both normal and flipped exists if flip_aug is enabled) / npzが既に存在する画像をスキップする(flip_aug有効時は通常、反転の両方が存在する画像をスキップ)",
+     )
+     parser.add_argument(
+         "--recursive",
+         action="store_true",
+         help="recursively look for training tags in all child folders of train_data_dir / train_data_dirのすべての子フォルダにある学習タグを再帰的に探す",
+     )
+
+     return parser
+
+
+ if __name__ == "__main__":
+     parser = setup_parser()
+
+     args = parser.parse_args()
+     main(args)
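For context, here is a hypothetical invocation of the script added above, using only the positional arguments and flags defined in its setup_parser(). All file names are placeholders, and the command assumes it is run from the sd-scripts directory so that the library.* imports resolve. The script reads in_json, encodes latents with the given model, writes one .npz per image next to the training images, and writes out_json with a "train_resolution" entry (rounded down to a multiple of 8) added for each image key.

import subprocess
import sys

# Placeholder paths: adjust the image directory, metadata files, and model checkpoint to your setup.
subprocess.run(
    [
        sys.executable,
        "finetune/prepare_buckets_latents.py",
        "train_images",          # train_data_dir (positional)
        "meta_cap.json",         # in_json: existing metadata to read (must exist)
        "meta_lat.json",         # out_json: metadata written with train_resolution per image
        "model.safetensors",     # model_name_or_path: model used to encode latents
        "--batch_size", "4",
        "--max_resolution", "512,512",
        "--mixed_precision", "fp16",
        "--full_path",
        "--skip_existing",
    ],
    check=True,
)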