ACCC1380 committed on
Commit
beeccc7
1 Parent(s): f5bbeb9

Upload lora-scripts/sd-scripts/finetune/make_captions_by_git.py with huggingface_hub
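For context, this is roughly how a single file gets pushed with huggingface_hub. A minimal sketch, assuming you are already authenticated (e.g. via `huggingface-cli login`); the repo id below is a hypothetical placeholder, not taken from this commit:

from huggingface_hub import HfApi

api = HfApi()  # picks up the stored token by default
api.upload_file(
    path_or_fileobj="lora-scripts/sd-scripts/finetune/make_captions_by_git.py",
    path_in_repo="lora-scripts/sd-scripts/finetune/make_captions_by_git.py",
    repo_id="ACCC1380/example-repo",  # hypothetical repo id
    commit_message="Upload lora-scripts/sd-scripts/finetune/make_captions_by_git.py with huggingface_hub",
)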

lora-scripts/sd-scripts/finetune/make_captions_by_git.py ADDED
@@ -0,0 +1,183 @@
+ import argparse
+ import os
+ import re
+
+ from pathlib import Path
+ from PIL import Image
+ from tqdm import tqdm
+
+ import torch
+ from library.device_utils import init_ipex, get_preferred_device
+ init_ipex()
+
+ from transformers import AutoProcessor, AutoModelForCausalLM
+ from transformers.generation.utils import GenerationMixin
+
+ import library.train_util as train_util
+ from library.utils import setup_logging
+ setup_logging()
+ import logging
+ logger = logging.getLogger(__name__)
+
+ DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ PATTERN_REPLACE = [
+     re.compile(r'(has|with|and) the (words?|letters?|name) (" ?[^"]*"|\w+)( ?(is )?(on|in) (the |her |their |him )?\w+)?'),
+     re.compile(r'(with a sign )?that says ?(" ?[^"]*"|\w+)( ?on it)?'),
+     re.compile(r"(with a sign )?that says ?(' ?(i'm)?[^']*'|\w+)( ?on it)?"),
+     re.compile(r"with the number \d+ on (it|\w+ \w+)"),
+     re.compile(r'with the words "'),
+     re.compile(r"word \w+ on it"),
+     re.compile(r"that says the word \w+ on it"),
+     re.compile("that says'the word \"( on it)?"),
+ ]
+
+ # remove "with the word xxxx" phrases, which are riddled with false positives
+
+
+ def remove_words(captions, debug):
+     removed_caps = []
+     for caption in captions:
+         cap = caption
+         for pat in PATTERN_REPLACE:
+             cap = pat.sub("", cap)
+         if debug and cap != caption:
+             logger.info(caption)
+             logger.info(cap)
+         removed_caps.append(cap)
+     return removed_caps
+
+
+ def collate_fn_remove_corrupted(batch):
+     """Collate function that allows to remove corrupted examples in the
+     dataloader. It expects that the dataloader returns 'None' when that occurs.
+     The 'None's in the batch are removed.
+     """
+     # Filter out all the Nones (corrupted examples)
+     batch = list(filter(lambda x: x is not None, batch))
+     return batch
+
+
+ def main(args):
+     r"""
+     With transformers 4.30.2 this also works with batch size > 1, so the patch below is commented out.
+
+     # patch GIT so it works with batch sizes larger than 1: for transformers 4.26.0
+     org_prepare_input_ids_for_generation = GenerationMixin._prepare_input_ids_for_generation
+     curr_batch_size = [args.batch_size]  # wrapped in a list so it can be swapped when the final batch has fewer than batch_size items
+
+     # input_ids must have as many rows as the batch size; the batch size cannot be read from inside this function, so it is passed in from outside
+     # patching anything further up the call stack would be a huge pain
+     def _prepare_input_ids_for_generation_patch(self, bos_token_id, encoder_outputs):
+         input_ids = org_prepare_input_ids_for_generation(self, bos_token_id, encoder_outputs)
+         if input_ids.size()[0] != curr_batch_size[0]:
+             input_ids = input_ids.repeat(curr_batch_size[0], 1)
+         return input_ids
+
+     GenerationMixin._prepare_input_ids_for_generation = _prepare_input_ids_for_generation_patch
+     """
+
+     logger.info(f"load images from {args.train_data_dir}")
+     train_data_dir_path = Path(args.train_data_dir)
+     image_paths = train_util.glob_images_pathlib(train_data_dir_path, args.recursive)
+     logger.info(f"found {len(image_paths)} images.")
+
+     # ideally the model would be downloaded explicitly rather than relying on the cache
+     logger.info(f"loading GIT: {args.model_id}")
+     git_processor = AutoProcessor.from_pretrained(args.model_id)
+     git_model = AutoModelForCausalLM.from_pretrained(args.model_id).to(DEVICE)
+     logger.info("GIT loaded")
+
+     # generate captions
+     def run_batch(path_imgs):
+         imgs = [im for _, im in path_imgs]
+
+         # curr_batch_size[0] = len(path_imgs)
+         inputs = git_processor(images=imgs, return_tensors="pt").to(DEVICE)  # images are PIL images
+         generated_ids = git_model.generate(pixel_values=inputs.pixel_values, max_length=args.max_length)
+         captions = git_processor.batch_decode(generated_ids, skip_special_tokens=True)
+
+         if args.remove_words:
+             captions = remove_words(captions, args.debug)
+
+         for (image_path, _), caption in zip(path_imgs, captions):
+             with open(os.path.splitext(image_path)[0] + args.caption_extension, "wt", encoding="utf-8") as f:
+                 f.write(caption + "\n")
+                 if args.debug:
+                     logger.info(f"{image_path} {caption}")
+
+     # option to use a DataLoader to speed up image loading
+     if args.max_data_loader_n_workers is not None:
+         dataset = train_util.ImageLoadingDataset(image_paths)
+         data = torch.utils.data.DataLoader(
+             dataset,
+             batch_size=args.batch_size,
+             shuffle=False,
+             num_workers=args.max_data_loader_n_workers,
+             collate_fn=collate_fn_remove_corrupted,
+             drop_last=False,
+         )
+     else:
+         data = [[(None, ip)] for ip in image_paths]
+
+     b_imgs = []
+     for data_entry in tqdm(data, smoothing=0.0):
+         for data in data_entry:
+             if data is None:
+                 continue
+
+             image, image_path = data
+             if image is None:
+                 try:
+                     image = Image.open(image_path)
+                     if image.mode != "RGB":
+                         image = image.convert("RGB")
+                 except Exception as e:
+                     logger.error(f"Could not load image path / 画像を読み込めません: {image_path}, error: {e}")
+                     continue
+
+             b_imgs.append((image_path, image))
+             if len(b_imgs) >= args.batch_size:
+                 run_batch(b_imgs)
+                 b_imgs.clear()
+
+     if len(b_imgs) > 0:
+         run_batch(b_imgs)
+
+     logger.info("done!")
+
+
+ def setup_parser() -> argparse.ArgumentParser:
+     parser = argparse.ArgumentParser()
+     parser.add_argument("train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ")
+     parser.add_argument("--caption_extension", type=str, default=".caption", help="extension of caption file / 出力されるキャプションファイルの拡張子")
+     parser.add_argument(
+         "--model_id",
+         type=str,
+         default="microsoft/git-large-textcaps",
+         help="model id for GIT in Hugging Face / 使用するGITのHugging FaceのモデルID",
+     )
+     parser.add_argument("--batch_size", type=int, default=1, help="batch size in inference / 推論時のバッチサイズ")
+     parser.add_argument(
+         "--max_data_loader_n_workers",
+         type=int,
+         default=None,
+         help="enable image reading by DataLoader with this number of workers (faster) / DataLoaderによる画像読み込みを有効にしてこのワーカー数を適用する(読み込みを高速化)",
+     )
+     parser.add_argument("--max_length", type=int, default=50, help="max length of caption / captionの最大長")
+     parser.add_argument(
+         "--remove_words",
+         action="store_true",
+         help="remove phrases like `with the words xxx` from the caption / `with the words xxx`のような部分をキャプションから削除する",
+     )
+     parser.add_argument("--debug", action="store_true", help="debug mode")
+     parser.add_argument("--recursive", action="store_true", help="search for images in subfolders recursively / サブフォルダを再帰的に検索する")
+
+     return parser
+
+
+ if __name__ == "__main__":
+     parser = setup_parser()
+
+     args = parser.parse_args()
+     main(args)
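
For reference, a typical invocation runs the script from the sd-scripts root so that the `library` package is importable; the image directory below is a hypothetical placeholder:

python finetune/make_captions_by_git.py /path/to/train_images --batch_size 4 --max_data_loader_n_workers 2 --max_length 50 --remove_words

Each image then gets a caption file written next to it, with the same base name and the extension given by --caption_extension (default .caption).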