ACCC1380 committed on
Commit
479e33f
1 Parent(s): 70272e3

Upload lora-scripts/sd-scripts/tools/cache_text_encoder_outputs.py with huggingface_hub

Browse files
lora-scripts/sd-scripts/tools/cache_text_encoder_outputs.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # text encoder出力のdiskへの事前キャッシュを行う / cache text encoder outputs to disk in advance
2
+
3
+ import argparse
4
+ import math
5
+ from multiprocessing import Value
6
+ import os
7
+
8
+ from accelerate.utils import set_seed
9
+ import torch
10
+ from tqdm import tqdm
11
+
12
+ from library import config_util
13
+ from library import train_util
14
+ from library import sdxl_train_util
15
+ from library.config_util import (
16
+ ConfigSanitizer,
17
+ BlueprintGenerator,
18
+ )
19
+ from library.utils import setup_logging
20
+ setup_logging()
21
+ import logging
22
+ logger = logging.getLogger(__name__)
23
+
24
def cache_to_disk(args: argparse.Namespace) -> None:
    """Pre-compute text encoder outputs for every image in the dataset and
    cache them to ``.npz`` files on disk, so that training can later skip
    running the text encoders.

    Args:
        args: Parsed command-line namespace. ``cache_text_encoder_outputs_to_disk``
            and ``sdxl`` must both be True (currently only SDXL is supported).

    Raises:
        AssertionError: if the required cache/SDXL flags are not set.
    """
    train_util.prepare_dataset_args(args, True)

    # check cache arg
    assert (
        args.cache_text_encoder_outputs_to_disk
    ), "cache_text_encoder_outputs_to_disk must be True / cache_text_encoder_outputs_to_diskはTrueである必要があります"

    # Prepared as generically as possible, but for now only SDXL works.
    assert (
        args.sdxl
    ), "cache_text_encoder_outputs_to_disk is only available for SDXL / cache_text_encoder_outputs_to_diskはSDXLのみ利用可能です"

    use_dreambooth_method = args.in_json is None

    if args.seed is not None:
        set_seed(args.seed)  # initialize the random number sequence

    # Prepare the tokenizer(s): needed to run the dataset.
    if args.sdxl:
        tokenizer1, tokenizer2 = sdxl_train_util.load_tokenizers(args)
        tokenizers = [tokenizer1, tokenizer2]
    else:
        tokenizer = train_util.load_tokenizer(args)
        tokenizers = [tokenizer]

    # Prepare the dataset.
    if args.dataset_class is None:
        blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, True, False, True))
        if args.dataset_config is not None:
            logger.info(f"Load dataset config from {args.dataset_config}")
            user_config = config_util.load_user_config(args.dataset_config)
            ignored = ["train_data_dir", "in_json"]
            if any(getattr(args, attr) is not None for attr in ignored):
                logger.warning(
                    "ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
                        ", ".join(ignored)
                    )
                )
        else:
            if use_dreambooth_method:
                logger.info("Using DreamBooth method.")
                user_config = {
                    "datasets": [
                        {
                            "subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(
                                args.train_data_dir, args.reg_data_dir
                            )
                        }
                    ]
                }
            else:
                logger.info("Training with captions.")
                user_config = {
                    "datasets": [
                        {
                            "subsets": [
                                {
                                    "image_dir": args.train_data_dir,
                                    "metadata_file": args.in_json,
                                }
                            ]
                        }
                    ]
                }

        blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizers)
        train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
    else:
        train_dataset_group = train_util.load_arbitrary_dataset(args, tokenizers)

    current_epoch = Value("i", 0)
    current_step = Value("i", 0)
    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

    # Prepare the accelerator.
    logger.info("prepare accelerator")
    accelerator = train_util.prepare_accelerator(args)

    # Prepare a dtype matching the mixed-precision setting; cast as needed.
    weight_dtype, _ = train_util.prepare_dtype(args)

    # Load the model(s).
    logger.info("load model")
    if args.sdxl:
        (_, text_encoder1, text_encoder2, _, _, _, _) = sdxl_train_util.load_target_model(args, accelerator, "sdxl", weight_dtype)
        text_encoders = [text_encoder1, text_encoder2]
    else:
        text_encoder1, _, _, _ = train_util.load_target_model(args, weight_dtype, accelerator)
        text_encoders = [text_encoder1]

    # Inference only: freeze and switch to eval mode on the accelerator device.
    for text_encoder in text_encoders:
        text_encoder.to(accelerator.device, dtype=weight_dtype)
        text_encoder.requires_grad_(False)
        text_encoder.eval()

    # Prepare the dataloader.
    train_dataset_group.set_caching_mode("text")

    # Number of DataLoader workers: note persistent_workers cannot be used with 0.
    n_workers = min(args.max_data_loader_n_workers, os.cpu_count())  # cpu_count or max_data_loader_n_workers

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset_group,
        batch_size=1,
        shuffle=True,
        collate_fn=collator,
        num_workers=n_workers,
        persistent_workers=args.persistent_data_loader_workers,
    )

    # Prepare the dataloader with accelerator: should enable multi-GPU use.
    train_dataloader = accelerator.prepare(train_dataloader)

    # Loop over the data and cache outputs batch by batch.
    for batch in tqdm(train_dataloader):
        absolute_paths = batch["absolute_paths"]
        input_ids1_list = batch["input_ids1_list"]
        input_ids2_list = batch["input_ids2_list"]

        image_infos = []
        for absolute_path, input_ids1, input_ids2 in zip(absolute_paths, input_ids1_list, input_ids2_list):
            image_info = train_util.ImageInfo(absolute_path, 1, "dummy", False, absolute_path)
            image_info.text_encoder_outputs_npz = os.path.splitext(absolute_path)[0] + train_util.TEXT_ENCODER_OUTPUTS_CACHE_SUFFIX
            # (removed a stray no-op `image_info` statement that did nothing)

            if args.skip_existing:
                if os.path.exists(image_info.text_encoder_outputs_npz):
                    logger.warning(f"Skipping {image_info.text_encoder_outputs_npz} because it already exists.")
                    continue

            image_info.input_ids1 = input_ids1
            image_info.input_ids2 = input_ids2
            image_infos.append(image_info)

        if len(image_infos) > 0:
            b_input_ids1 = torch.stack([image_info.input_ids1 for image_info in image_infos])
            b_input_ids2 = torch.stack([image_info.input_ids2 for image_info in image_infos])
            train_util.cache_batch_text_encoder_outputs(
                image_infos, tokenizers, text_encoders, args.max_token_length, True, b_input_ids1, b_input_ids2, weight_dtype
            )

    accelerator.wait_for_everyone()
    # Fixed message: this script caches text encoder outputs (not latents),
    # and len(train_dataset_group) counts images, not batches.
    accelerator.print(f"Finished caching text encoder outputs for {len(train_dataset_group)} images.")
169
+
170
+
171
def setup_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the text-encoder-output caching script.

    Combines the shared model/training/dataset/config argument groups with the
    two options specific to this tool (``--sdxl`` and ``--skip_existing``).
    """
    arg_parser = argparse.ArgumentParser()

    # Shared argument groups from the library (registration order kept,
    # since it determines --help ordering).
    train_util.add_sd_models_arguments(arg_parser)
    train_util.add_training_arguments(arg_parser, True)
    train_util.add_dataset_arguments(arg_parser, True, True, True)
    config_util.add_config_arguments(arg_parser)
    sdxl_train_util.add_sdxl_training_arguments(arg_parser)

    # Script-specific flags.
    arg_parser.add_argument("--sdxl", action="store_true", help="Use SDXL model / SDXLモデルを使用する")
    arg_parser.add_argument(
        "--skip_existing",
        action="store_true",
        help="skip images if npz already exists (both normal and flipped exists if flip_aug is enabled) / npzが既に存在する画像をスキップする(flip_aug有効時は通常、反転の両方が存在する画像をスキップ)",
    )

    return arg_parser
186
+
187
+
188
if __name__ == "__main__":
    # Parse CLI arguments, merge in any config-file overrides, then run caching.
    arg_parser = setup_parser()
    parsed_args = arg_parser.parse_args()
    parsed_args = train_util.read_config_from_file(parsed_args, arg_parser)
    cache_to_disk(parsed_args)