ACCC1380 committed
Commit ce364c7
1 Parent(s): 9e0276b

Upload lora-scripts/sd-scripts/finetune/tag_images_by_wd14_tagger.py with huggingface_hub

lora-scripts/sd-scripts/finetune/tag_images_by_wd14_tagger.py ADDED
@@ -0,0 +1,514 @@
+ import argparse
+ import csv
+ import os
+ from pathlib import Path
+
+ import cv2
+ import numpy as np
+ import torch
+ from huggingface_hub import hf_hub_download
+ from PIL import Image
+ from tqdm import tqdm
+
+ import library.train_util as train_util
+ from library.utils import setup_logging
+
+ setup_logging()
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+ # from wd14 tagger
+ IMAGE_SIZE = 448
+
+ # wd-v1-4-swinv2-tagger-v2 / wd-v1-4-vit-tagger / wd-v1-4-vit-tagger-v2 / wd-v1-4-convnext-tagger / wd-v1-4-convnext-tagger-v2
+ DEFAULT_WD14_TAGGER_REPO = "SmilingWolf/wd-v1-4-convnext-tagger-v2"
+ FILES = ["keras_metadata.pb", "saved_model.pb", "selected_tags.csv"]
+ FILES_ONNX = ["model.onnx"]
+ SUB_DIR = "variables"
+ SUB_DIR_FILES = ["variables.data-00000-of-00001", "variables.index"]
+ CSV_FILE = FILES[-1]
+
+
+ def preprocess_image(image):
+     image = np.array(image)
+     image = image[:, :, ::-1]  # RGB->BGR
+
+     # pad to square (white padding)
+     size = max(image.shape[0:2])
+     pad_x = size - image.shape[1]
+     pad_y = size - image.shape[0]
+     pad_l = pad_x // 2
+     pad_t = pad_y // 2
+     image = np.pad(image, ((pad_t, pad_y - pad_t), (pad_l, pad_x - pad_l), (0, 0)), mode="constant", constant_values=255)
+
+     interp = cv2.INTER_AREA if size > IMAGE_SIZE else cv2.INTER_LANCZOS4
+     image = cv2.resize(image, (IMAGE_SIZE, IMAGE_SIZE), interpolation=interp)
+
+     image = image.astype(np.float32)
+     return image
+
+
+ class ImageLoadingPrepDataset(torch.utils.data.Dataset):
+     def __init__(self, image_paths):
+         self.images = image_paths
+
+     def __len__(self):
+         return len(self.images)
+
+     def __getitem__(self, idx):
+         img_path = str(self.images[idx])
+
+         try:
+             image = Image.open(img_path).convert("RGB")
+             image = preprocess_image(image)
+             # tensor = torch.tensor(image)  # no need to convert this to a Tensor
+         except Exception as e:
+             logger.error(f"Could not load image path / 画像を読み込めません: {img_path}, error: {e}")
+             return None
+
+         return (image, img_path)
+
+
+ def collate_fn_remove_corrupted(batch):
+     """Collate function that allows to remove corrupted examples in the
+     dataloader. It expects that the dataloader returns 'None' when that occurs.
+     The 'None's in the batch are removed.
+     """
+     # Filter out all the Nones (corrupted examples)
+     batch = list(filter(lambda x: x is not None, batch))
+     return batch
+
+
+ def main(args):
+     # model location is model_dir + repo_id
+     # repo id may be like "user/repo" or "user/repo/branch", so we need to remove slash
+     model_location = os.path.join(args.model_dir, args.repo_id.replace("/", "_"))
+
+     # using hf_hub_download as-is reportedly causes symlink-related problems, so work around it
+     # by specifying cache_dir and force_filename; this emits a deprecation warning, which we will deal with when the option is removed
+     # https://github.com/toriato/stable-diffusion-webui-wd14-tagger/issues/22
+     if not os.path.exists(model_location) or args.force_download:
+         os.makedirs(args.model_dir, exist_ok=True)
+         logger.info(f"downloading wd14 tagger model from hf_hub. id: {args.repo_id}")
+         files = FILES
+         if args.onnx:
+             files = ["selected_tags.csv"]
+             files += FILES_ONNX
+         else:
+             for file in SUB_DIR_FILES:
+                 hf_hub_download(
+                     args.repo_id,
+                     file,
+                     subfolder=SUB_DIR,
+                     cache_dir=os.path.join(model_location, SUB_DIR),
+                     force_download=True,
+                     force_filename=file,
+                 )
+         for file in files:
+             hf_hub_download(args.repo_id, file, cache_dir=model_location, force_download=True, force_filename=file)
+     else:
+         logger.info("using existing wd14 tagger model")
+
+     # load the model
+     if args.onnx:
+         import torch
+         import onnx
+         import onnxruntime as ort
+
+         onnx_path = f"{model_location}/model.onnx"
+         logger.info("Running wd14 tagger with onnx")
+         logger.info(f"loading onnx model: {onnx_path}")
+
+         if not os.path.exists(onnx_path):
+             raise Exception(
+                 f"onnx model not found: {onnx_path}, please redownload the model with --force_download"
+                 + " / onnxモデルが見つかりませんでした。--force_downloadで再ダウンロードしてください"
+             )
+
+         model = onnx.load(onnx_path)
+         input_name = model.graph.input[0].name
+         try:
+             batch_size = model.graph.input[0].type.tensor_type.shape.dim[0].dim_value
+         except Exception:
+             batch_size = model.graph.input[0].type.tensor_type.shape.dim[0].dim_param
+
+         if args.batch_size != batch_size and not isinstance(batch_size, str) and batch_size > 0:
+             # some rebatched models may use 'N' as dynamic axes
+             logger.warning(
+                 f"Batch size {args.batch_size} doesn't match onnx model batch size {batch_size}, use model batch size {batch_size}"
+             )
+             args.batch_size = batch_size
+
+         del model
+
+         if "OpenVINOExecutionProvider" in ort.get_available_providers():
+             # requires provider options for gpu support
+             # fp16 causes nonsense outputs
+             ort_sess = ort.InferenceSession(
+                 onnx_path,
+                 providers=(["OpenVINOExecutionProvider"]),
+                 provider_options=[{"device_type": "GPU_FP32"}],
+             )
+         else:
+             ort_sess = ort.InferenceSession(
+                 onnx_path,
+                 providers=(
+                     ["CUDAExecutionProvider"] if "CUDAExecutionProvider" in ort.get_available_providers() else
+                     ["ROCMExecutionProvider"] if "ROCMExecutionProvider" in ort.get_available_providers() else
+                     ["CPUExecutionProvider"]
+                 ),
+             )
+     else:
+         from tensorflow.keras.models import load_model
+
+         model = load_model(f"{model_location}")
+
+     # label_names = pd.read_csv("2022_0000_0899_6549/selected_tags.csv")
+     # read the csv ourselves to avoid adding another dependency
+
+     with open(os.path.join(model_location, CSV_FILE), "r", encoding="utf-8") as f:
+         reader = csv.reader(f)
+         line = [row for row in reader]
+         header = line[0]  # tag_id,name,category,count
+         rows = line[1:]
+     assert header[0] == "tag_id" and header[1] == "name" and header[2] == "category", f"unexpected csv format: {header}"
+
+     rating_tags = [row[1] for row in rows[0:] if row[2] == "9"]
+     general_tags = [row[1] for row in rows[0:] if row[2] == "0"]
+     character_tags = [row[1] for row in rows[0:] if row[2] == "4"]
+
+     # preprocess tags in advance
+     if args.character_tag_expand:
+         for i, tag in enumerate(character_tags):
+             if tag.endswith(")"):
+                 # chara_name_(series) -> chara_name, series
+                 # chara_name_(costume)_(series) -> chara_name_(costume), series
+                 tags = tag.split("(")
+                 character_tag = "(".join(tags[:-1])
+                 if character_tag.endswith("_"):
+                     character_tag = character_tag[:-1]
+                 series_tag = tags[-1].replace(")", "")
+                 character_tags[i] = character_tag + args.caption_separator + series_tag
+
+     if args.remove_underscore:
+         rating_tags = [tag.replace("_", " ") if len(tag) > 3 else tag for tag in rating_tags]
+         general_tags = [tag.replace("_", " ") if len(tag) > 3 else tag for tag in general_tags]
+         character_tags = [tag.replace("_", " ") if len(tag) > 3 else tag for tag in character_tags]
+
+     if args.tag_replacement is not None:
+         # escape , and ; in tag_replacement: wd14 tag names may contain , and ;
+         escaped_tag_replacements = args.tag_replacement.replace("\\,", "@@@@").replace("\\;", "####")
+         tag_replacements = escaped_tag_replacements.split(";")
+         for tag_replacement in tag_replacements:
+             tags = tag_replacement.split(",")  # source, target
+             assert len(tags) == 2, f"tag replacement must be in the format of `source,target` / タグの置換は `置換元,置換先` の形式で指定してください: {args.tag_replacement}"
+
+             source, target = [tag.replace("@@@@", ",").replace("####", ";") for tag in tags]
+             logger.info(f"replacing tag: {source} -> {target}")
+
+             if source in general_tags:
+                 general_tags[general_tags.index(source)] = target
+             elif source in character_tags:
+                 character_tags[character_tags.index(source)] = target
+             elif source in rating_tags:
+                 rating_tags[rating_tags.index(source)] = target
+
+     # load images
+     train_data_dir_path = Path(args.train_data_dir)
+     image_paths = train_util.glob_images_pathlib(train_data_dir_path, args.recursive)
+     logger.info(f"found {len(image_paths)} images.")
+
+     tag_freq = {}
+
+     caption_separator = args.caption_separator
+     stripped_caption_separator = caption_separator.strip()
+     undesired_tags = args.undesired_tags.split(stripped_caption_separator)
+     undesired_tags = set([tag.strip() for tag in undesired_tags if tag.strip() != ""])
+
+     always_first_tags = None
+     if args.always_first_tags is not None:
+         always_first_tags = [tag for tag in args.always_first_tags.split(stripped_caption_separator) if tag.strip() != ""]
+
+     def run_batch(path_imgs):
+         imgs = np.array([im for _, im in path_imgs])
+
+         if args.onnx:
+             # if len(imgs) < args.batch_size:
+             #     imgs = np.concatenate([imgs, np.zeros((args.batch_size - len(imgs), IMAGE_SIZE, IMAGE_SIZE, 3))], axis=0)
+             probs = ort_sess.run(None, {input_name: imgs})[0]  # onnx output is already numpy
+             probs = probs[: len(path_imgs)]
+         else:
+             probs = model(imgs, training=False)
+             probs = probs.numpy()
+
+         for (image_path, _), prob in zip(path_imgs, probs):
+             combined_tags = []
+             rating_tag_text = ""
+             character_tag_text = ""
+             general_tag_text = ""
+
+             # The first 4 labels are ratings, the rest are tags: add every tag whose confidence is >= threshold
+             for i, p in enumerate(prob[4:]):
+                 if i < len(general_tags) and p >= args.general_threshold:
+                     tag_name = general_tags[i]
+
+                     if tag_name not in undesired_tags:
+                         tag_freq[tag_name] = tag_freq.get(tag_name, 0) + 1
+                         general_tag_text += caption_separator + tag_name
+                         combined_tags.append(tag_name)
+                 elif i >= len(general_tags) and p >= args.character_threshold:
+                     tag_name = character_tags[i - len(general_tags)]
+
+                     if tag_name not in undesired_tags:
+                         tag_freq[tag_name] = tag_freq.get(tag_name, 0) + 1
+                         character_tag_text += caption_separator + tag_name
+                         if args.character_tags_first:  # insert to the beginning
+                             combined_tags.insert(0, tag_name)
+                         else:
+                             combined_tags.append(tag_name)
+
+             # The first 4 labels are ratings: pick the one with argmax
+             if args.use_rating_tags or args.use_rating_tags_as_last_tag:
+                 ratings_probs = prob[:4]
+                 rating_index = ratings_probs.argmax()
+                 found_rating = rating_tags[rating_index]
+
+                 if found_rating not in undesired_tags:
+                     tag_freq[found_rating] = tag_freq.get(found_rating, 0) + 1
+                     rating_tag_text = found_rating
+                     if args.use_rating_tags:
+                         combined_tags.insert(0, found_rating)  # insert to the beginning
+                     else:
+                         combined_tags.append(found_rating)
+
+             # Always put the specified tags at the beginning
+             if always_first_tags is not None:
+                 for tag in always_first_tags:
+                     if tag in combined_tags:
+                         combined_tags.remove(tag)
+                         combined_tags.insert(0, tag)
+
+             # strip the leading separator
+             if len(general_tag_text) > 0:
+                 general_tag_text = general_tag_text[len(caption_separator) :]
+             if len(character_tag_text) > 0:
+                 character_tag_text = character_tag_text[len(caption_separator) :]
+
+             caption_file = os.path.splitext(image_path)[0] + args.caption_extension
+
+             tag_text = caption_separator.join(combined_tags)
+
+             if args.append_tags:
+                 # Check if file exists
+                 if os.path.exists(caption_file):
+                     with open(caption_file, "rt", encoding="utf-8") as f:
+                         # Read file and remove new lines
+                         existing_content = f.read().strip("\n")  # Remove newlines
+
+                     # Split the content into tags and store them in a list
+                     existing_tags = [tag.strip() for tag in existing_content.split(stripped_caption_separator) if tag.strip()]
+
+                     # Check and remove repeating tags in tag_text
+                     new_tags = [tag for tag in combined_tags if tag not in existing_tags]
+
+                     # Create new tag_text
+                     tag_text = caption_separator.join(existing_tags + new_tags)
+
+             with open(caption_file, "wt", encoding="utf-8") as f:
+                 f.write(tag_text + "\n")
+                 if args.debug:
+                     logger.info("")
+                     logger.info(f"{image_path}:")
+                     logger.info(f"\tRating tags: {rating_tag_text}")
+                     logger.info(f"\tCharacter tags: {character_tag_text}")
+                     logger.info(f"\tGeneral tags: {general_tag_text}")
+
+     # option to use a DataLoader to speed up image loading
+     if args.max_data_loader_n_workers is not None:
+         dataset = ImageLoadingPrepDataset(image_paths)
+         data = torch.utils.data.DataLoader(
+             dataset,
+             batch_size=args.batch_size,
+             shuffle=False,
+             num_workers=args.max_data_loader_n_workers,
+             collate_fn=collate_fn_remove_corrupted,
+             drop_last=False,
+         )
+     else:
+         data = [[(None, ip)] for ip in image_paths]
+
+     b_imgs = []
+     for data_entry in tqdm(data, smoothing=0.0):
+         for data in data_entry:
+             if data is None:
+                 continue
+
+             image, image_path = data
+             if image is None:
+                 try:
+                     image = Image.open(image_path)
+                     if image.mode != "RGB":
+                         image = image.convert("RGB")
+                     image = preprocess_image(image)
+                 except Exception as e:
+                     logger.error(f"Could not load image path / 画像を読み込めません: {image_path}, error: {e}")
+                     continue
+             b_imgs.append((image_path, image))
+
+             if len(b_imgs) >= args.batch_size:
+                 b_imgs = [(str(image_path), image) for image_path, image in b_imgs]  # Convert image_path to string
+                 run_batch(b_imgs)
+                 b_imgs.clear()
+
+     if len(b_imgs) > 0:
+         b_imgs = [(str(image_path), image) for image_path, image in b_imgs]  # Convert image_path to string
+         run_batch(b_imgs)
+
+     if args.frequency_tags:
+         sorted_tags = sorted(tag_freq.items(), key=lambda x: x[1], reverse=True)
+         print("Tag frequencies:")
+         for tag, freq in sorted_tags:
+             print(f"{tag}: {freq}")
+
+     logger.info("done!")
+
+
+ def setup_parser() -> argparse.ArgumentParser:
+     parser = argparse.ArgumentParser()
+     parser.add_argument(
+         "train_data_dir", type=str, help="directory for train images / 学習画像データのディレクトリ"
+     )
+     parser.add_argument(
+         "--repo_id",
+         type=str,
+         default=DEFAULT_WD14_TAGGER_REPO,
+         help="repo id for wd14 tagger on Hugging Face / Hugging Faceのwd14 taggerのリポジトリID",
+     )
+     parser.add_argument(
+         "--model_dir",
+         type=str,
+         default="wd14_tagger_model",
+         help="directory to store wd14 tagger model / wd14 taggerのモデルを格納するディレクトリ",
+     )
+     parser.add_argument(
+         "--force_download",
+         action="store_true",
+         help="force downloading wd14 tagger models / wd14 taggerのモデルを再ダウンロードします",
+     )
+     parser.add_argument(
+         "--batch_size", type=int, default=1, help="batch size in inference / 推論時のバッチサイズ"
+     )
+     parser.add_argument(
+         "--max_data_loader_n_workers",
+         type=int,
+         default=None,
+         help="enable image reading by DataLoader with this number of workers (faster) / DataLoaderによる画像読み込みを有効にしてこのワーカー数を適用する(読み込みを高速化)",
+     )
+     parser.add_argument(
+         "--caption_extention",
+         type=str,
+         default=None,
+         help="extension of caption file (for backward compatibility) / 出力されるキャプションファイルの拡張子(スペルミスしていたのを残してあります)",
+     )
+     parser.add_argument(
+         "--caption_extension", type=str, default=".txt", help="extension of caption file / 出力されるキャプションファイルの拡張子"
+     )
+     parser.add_argument(
+         "--thresh", type=float, default=0.35, help="threshold of confidence to add a tag / タグを追加するか判定する閾値"
+     )
+     parser.add_argument(
+         "--general_threshold",
+         type=float,
+         default=None,
+         help="threshold of confidence to add a tag for general category, same as --thresh if omitted / generalカテゴリのタグを追加するための確信度の閾値、省略時は --thresh と同じ",
+     )
+     parser.add_argument(
+         "--character_threshold",
+         type=float,
+         default=None,
+         help="threshold of confidence to add a tag for character category, same as --thresh if omitted / characterカテゴリのタグを追加するための確信度の閾値、省略時は --thresh と同じ",
+     )
+     parser.add_argument(
+         "--recursive", action="store_true", help="search for images in subfolders recursively / サブフォルダを再帰的に検索する"
+     )
+     parser.add_argument(
+         "--remove_underscore",
+         action="store_true",
+         help="replace underscores with spaces in the output tags / 出力されるタグのアンダースコアをスペースに置き換える",
+     )
+     parser.add_argument(
+         "--debug", action="store_true", help="debug mode"
+     )
+     parser.add_argument(
+         "--undesired_tags",
+         type=str,
+         default="",
+         help="comma-separated list of undesired tags to remove from the output / 出力から除外したいタグのカンマ区切りのリスト",
+     )
+     parser.add_argument(
+         "--frequency_tags", action="store_true", help="Show frequency of tags for images / タグの出現頻度を表示する"
+     )
+     parser.add_argument(
+         "--onnx", action="store_true", help="use onnx model for inference / onnxモデルを推論に使用する"
+     )
+     parser.add_argument(
+         "--append_tags", action="store_true", help="Append captions instead of overwriting / 上書きではなくキャプションを追記する"
+     )
+     parser.add_argument(
+         "--use_rating_tags", action="store_true", help="Adds rating tags as the first tag / レーティングタグを最初のタグとして追加する",
+     )
+     parser.add_argument(
+         "--use_rating_tags_as_last_tag", action="store_true", help="Adds rating tags as the last tag / レーティングタグを最後のタグとして追加する",
+     )
+     parser.add_argument(
+         "--character_tags_first", action="store_true", help="Always inserts character tags before the general tags / characterタグを常にgeneralタグの前に出力する",
+     )
+     parser.add_argument(
+         "--always_first_tags",
+         type=str,
+         default=None,
+         help="comma-separated list of tags to always put at the beginning, e.g. `1girl,1boy`"
+         + " / 必ず先頭に置くタグのカンマ区切りリスト、例 : `1girl,1boy`",
+     )
+     parser.add_argument(
+         "--caption_separator",
+         type=str,
+         default=", ",
+         help="Separator for captions, include space if needed / キャプションの区切り文字、必要ならスペースを含めてください",
+     )
+     parser.add_argument(
+         "--tag_replacement",
+         type=str,
+         default=None,
+         help="tag replacement in the format of `source1,target1;source2,target2; ...`. Escape `,` and `;` with `\\`. e.g. `tag1,tag2;tag3,tag4`"
+         + " / タグの置換を `置換元1,置換先1;置換元2,置換先2; ...`で指定する。`\\` で `,` と `;` をエスケープできる。例: `tag1,tag2;tag3,tag4`",
+     )
+     parser.add_argument(
+         "--character_tag_expand",
+         action="store_true",
+         help="expand tag tail parenthesis to another tag for character tags. `chara_name_(series)` becomes `chara_name, series`"
+         + " / キャラクタタグの末尾の括弧を別のタグに展開する。`chara_name_(series)` は `chara_name, series` になる",
+     )
+
+     return parser
+
+
+ if __name__ == "__main__":
+     parser = setup_parser()
+
+     args = parser.parse_args()
+
+     # restore the value from the misspelled option (kept for backward compatibility)
+     if args.caption_extention is not None:
+         args.caption_extension = args.caption_extention
+
+     if args.general_threshold is None:
+         args.general_threshold = args.thresh
+     if args.character_threshold is None:
+         args.character_threshold = args.thresh
+
+     main(args)
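
For reference, the script is normally run from the sd-scripts root (so that library.train_util resolves) with the image directory as the positional argument. Below is a minimal, hypothetical sketch of a programmatic invocation that reuses setup_parser() and main() from this file; the module name and the image directory path are placeholders, and --onnx assumes onnx and onnxruntime are installed.

# hypothetical usage sketch (not part of the committed file)
from tag_images_by_wd14_tagger import setup_parser, main  # module name assumed from the file name

parser = setup_parser()
args = parser.parse_args(
    [
        "--onnx",                       # requires onnx + onnxruntime
        "--batch_size", "4",
        "--caption_extension", ".txt",
        "--thresh", "0.35",
        "/path/to/train_images",        # placeholder for train_data_dir
    ]
)

# mirror the threshold defaults applied in the __main__ block above
if args.general_threshold is None:
    args.general_threshold = args.thresh
if args.character_threshold is None:
    args.character_threshold = args.thresh

main(args)  # downloads the tagger to --model_dir on first run, then writes one .txt caption file per image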