ACCC1380 committed
Commit 499d757
1 parent: 6a29d29

Upload lora-scripts/sd-scripts/train_controlnet.py with huggingface_hub

lora-scripts/sd-scripts/train_controlnet.py ADDED
import argparse
import json
import math
import os
import random
import time
from multiprocessing import Value
from types import SimpleNamespace
import toml

from tqdm import tqdm

import torch
from library import deepspeed_utils
from library.device_utils import init_ipex, clean_memory_on_device
init_ipex()

from torch.nn.parallel import DistributedDataParallel as DDP
from accelerate.utils import set_seed
from diffusers import DDPMScheduler, ControlNetModel
from safetensors.torch import load_file

import library.model_util as model_util
import library.train_util as train_util
import library.config_util as config_util
from library.config_util import (
    ConfigSanitizer,
    BlueprintGenerator,
)
import library.huggingface_util as huggingface_util
import library.custom_train_functions as custom_train_functions
from library.custom_train_functions import (
    apply_snr_weight,
    pyramid_noise_like,
    apply_noise_offset,
)
from library.utils import setup_logging, add_logging_arguments

setup_logging()
import logging

logger = logging.getLogger(__name__)


# TODO: unify this with the other training scripts
def generate_step_logs(args: argparse.Namespace, current_loss, avr_loss, lr_scheduler):
    logs = {
        "loss/current": current_loss,
        "loss/average": avr_loss,
        "lr": lr_scheduler.get_last_lr()[0],
    }

    if args.optimizer_type.lower().startswith("dadapt"):
        # DAdaptation optimizers expose the adaptive factor "d"; d * lr is the effective learning rate
        logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]["d"] * lr_scheduler.optimizers[-1].param_groups[0]["lr"]

    return logs


def train(args):
    # session_id = random.randint(0, 2**32)
    # training_started_at = time.time()
    train_util.verify_training_args(args)
    train_util.prepare_dataset_args(args, True)
    setup_logging(args, reset=True)

    cache_latents = args.cache_latents
    use_user_config = args.dataset_config is not None

    if args.seed is None:
        args.seed = random.randint(0, 2**32)
    set_seed(args.seed)

    tokenizer = train_util.load_tokenizer(args)

    # prepare the dataset
    blueprint_generator = BlueprintGenerator(ConfigSanitizer(False, False, True, True))
    if use_user_config:
        logger.info(f"Load dataset config from {args.dataset_config}")
        user_config = config_util.load_user_config(args.dataset_config)
        ignored = ["train_data_dir", "conditioning_data_dir"]
        if any(getattr(args, attr) is not None for attr in ignored):
            logger.warning(
                "ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
                    ", ".join(ignored)
                )
            )
    else:
        user_config = {
            "datasets": [
                {
                    "subsets": config_util.generate_controlnet_subsets_config_by_subdirs(
                        args.train_data_dir,
                        args.conditioning_data_dir,
                        args.caption_extension,
                    )
                }
            ]
        }

    blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
    train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)

    current_epoch = Value("i", 0)
    current_step = Value("i", 0)
    ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None
    collator = train_util.collator_class(current_epoch, current_step, ds_for_collator)

    if args.debug_dataset:
        train_util.debug_dataset(train_dataset_group)
        return
    if len(train_dataset_group) == 0:
        logger.error(
            "No data found. Please verify arguments (train_data_dir must be the parent of folders with images) / 画像がありません。引数指定を確認してください(train_data_dirには画像があるフォルダではなく、画像があるフォルダの親フォルダを指定する必要があります)"
        )
        return

    if cache_latents:
        assert (
            train_dataset_group.is_latent_cacheable()
        ), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
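        # (color_aug and random_crop change the pixels on every step, so a latent cached
        # once from the VAE would no longer match the augmented image)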

    # prepare the accelerator
    logger.info("prepare accelerator")
    accelerator = train_util.prepare_accelerator(args)
    is_main_process = accelerator.is_main_process

    # prepare dtypes matching the mixed-precision setting, to cast to as needed
    weight_dtype, save_dtype = train_util.prepare_dtype(args)

    # load the models
    text_encoder, vae, unet, _ = train_util.load_target_model(
        args, weight_dtype, accelerator, unet_use_linear_projection_in_v2=True
    )

    # prepare the config data that Diffusers' ControlNet expects
    if args.v2:
        unet.config = {
            "act_fn": "silu",
            "attention_head_dim": [5, 10, 20, 20],
            "block_out_channels": [320, 640, 1280, 1280],
            "center_input_sample": False,
            "cross_attention_dim": 1024,
            "down_block_types": ["CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"],
            "downsample_padding": 1,
            "dual_cross_attention": False,
            "flip_sin_to_cos": True,
            "freq_shift": 0,
            "in_channels": 4,
            "layers_per_block": 2,
            "mid_block_scale_factor": 1,
            "norm_eps": 1e-05,
            "norm_num_groups": 32,
            "num_class_embeds": None,
            "only_cross_attention": False,
            "out_channels": 4,
            "sample_size": 96,
            "up_block_types": ["UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"],
            "use_linear_projection": True,
            "upcast_attention": True,
            "class_embed_type": None,
            "resnet_time_scale_shift": "default",
            "projection_class_embeddings_input_dim": None,
        }
    else:
        unet.config = {
            "act_fn": "silu",
            "attention_head_dim": 8,
            "block_out_channels": [320, 640, 1280, 1280],
            "center_input_sample": False,
            "cross_attention_dim": 768,
            "down_block_types": ["CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"],
            "downsample_padding": 1,
            "flip_sin_to_cos": True,
            "freq_shift": 0,
            "in_channels": 4,
            "layers_per_block": 2,
            "mid_block_scale_factor": 1,
            "norm_eps": 1e-05,
            "norm_num_groups": 32,
            "out_channels": 4,
            "sample_size": 64,
            "up_block_types": ["UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"],
            "only_cross_attention": False,
            "use_linear_projection": False,
            "class_embed_type": None,
            "num_class_embeds": None,
            "upcast_attention": False,
            "resnet_time_scale_shift": "default",
            "projection_class_embeddings_input_dim": None,
        }
    unet.config = SimpleNamespace(**unet.config)
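
    # ControlNetModel.from_unet instantiates a ControlNet whose encoder mirrors the UNet's
    # architecture; it reads the Diffusers-style config assembled above, which is why the
    # sd-scripts UNet is given a faked Diffusers config before this call.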
    controlnet = ControlNetModel.from_unet(unet)

    if args.controlnet_model_name_or_path:
        filename = args.controlnet_model_name_or_path
        if os.path.isfile(filename):
            if os.path.splitext(filename)[1] == ".safetensors":
                state_dict = load_file(filename)
            else:
                state_dict = torch.load(filename)
            state_dict = model_util.convert_controlnet_state_dict_to_diffusers(state_dict)
            controlnet.load_state_dict(state_dict)
        elif os.path.isdir(filename):
            controlnet = ControlNetModel.from_pretrained(filename)

    # incorporate xformers or memory-efficient attention into the model
    train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers, args.sdpa)

    # prepare for training
    if cache_latents:
        vae.to(accelerator.device, dtype=weight_dtype)
        vae.requires_grad_(False)
        vae.eval()
        with torch.no_grad():
            train_dataset_group.cache_latents(
                vae,
                args.vae_batch_size,
                args.cache_latents_to_disk,
                accelerator.is_main_process,
            )
        vae.to("cpu")
        clean_memory_on_device(accelerator.device)

        accelerator.wait_for_everyone()

    if args.gradient_checkpointing:
        controlnet.enable_gradient_checkpointing()

    # prepare the classes needed for training
    accelerator.print("prepare optimizer, data loader etc.")

    trainable_params = controlnet.parameters()

    _, _, optimizer = train_util.get_optimizer(args, trainable_params)

    # prepare the dataloader
    # note: persistent_workers cannot be used when the number of DataLoader processes is 0
    n_workers = min(args.max_data_loader_n_workers, os.cpu_count())  # the smaller of cpu_count() and max_data_loader_n_workers

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset_group,
        batch_size=1,  # the dataset group yields pre-batched samples, so the DataLoader batch size stays 1
        shuffle=True,
        collate_fn=collator,
        num_workers=n_workers,
        persistent_workers=args.persistent_data_loader_workers,
    )

    # compute the number of training steps
    if args.max_train_epochs is not None:
        args.max_train_steps = args.max_train_epochs * math.ceil(
            len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
        )
        accelerator.print(
            f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}"
        )

    # also send the number of training steps to the dataset side
    train_dataset_group.set_max_train_steps(args.max_train_steps)

    # prepare the lr scheduler
    lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)

    # experimental feature: fp16 training including gradients; cast the whole model to fp16
    if args.full_fp16:
        assert (
            args.mixed_precision == "fp16"
        ), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
        accelerator.print("enable full fp16 training.")
        controlnet.to(weight_dtype)

    # accelerator handles distributed setup and precision wrapping here
    controlnet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
        controlnet, optimizer, train_dataloader, lr_scheduler
    )

    unet.requires_grad_(False)
    text_encoder.requires_grad_(False)
    unet.to(accelerator.device)
    text_encoder.to(accelerator.device)

    # unwrap DDP after prepare so attributes and state_dict are accessible directly
    controlnet = controlnet.module if isinstance(controlnet, DDP) else controlnet

    controlnet.train()

    if not cache_latents:
        vae.requires_grad_(False)
        vae.eval()
        vae.to(accelerator.device, dtype=weight_dtype)

    # experimental feature: fp16 training including gradients; patch PyTorch to enable grad scaling in fp16
    if args.full_fp16:
        train_util.patch_accelerator_for_fp16_training(accelerator)

    # resume training if specified
    train_util.resume_from_local_or_hf_if_specified(accelerator, args)

    # compute the number of epochs
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
    if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
        args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1

    # start training
    # TODO: find a way to handle total batch size when there are multiple datasets
    accelerator.print("running training / 学習開始")
    accelerator.print(f"  num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
    accelerator.print(f"  num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
    accelerator.print(f"  num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
    accelerator.print(f"  num epochs / epoch数: {num_train_epochs}")
    accelerator.print(
        f"  batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}"
    )
    # logger.info(f"  total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
    accelerator.print(f"  gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
    accelerator.print(f"  total optimization steps / 学習ステップ数: {args.max_train_steps}")

    progress_bar = tqdm(
        range(args.max_train_steps),
        smoothing=0,
        disable=not accelerator.is_local_main_process,
        desc="steps",
    )
    global_step = 0
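
    # the standard Stable Diffusion noise schedule:
    # scaled-linear betas from 0.00085 to 0.012 over 1000 timesteps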
    noise_scheduler = DDPMScheduler(
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        num_train_timesteps=1000,
        clip_sample=False,
    )
    if accelerator.is_main_process:
        init_kwargs = {}
        if args.wandb_run_name:
            init_kwargs["wandb"] = {"name": args.wandb_run_name}
        if args.log_tracker_config is not None:
            init_kwargs = toml.load(args.log_tracker_config)
        accelerator.init_trackers(
            "controlnet_train" if args.log_tracker_name is None else args.log_tracker_name, init_kwargs=init_kwargs
        )

    loss_recorder = train_util.LossRecorder()
    del train_dataset_group

    # functions for saving/removing checkpoints
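    # save_model converts the Diffusers-format ControlNet state dict back to the original
    # Stable Diffusion ControlNet layout before writing it to disk.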
    def save_model(ckpt_name, model, force_sync_upload=False):
        os.makedirs(args.output_dir, exist_ok=True)
        ckpt_file = os.path.join(args.output_dir, ckpt_name)

        accelerator.print(f"\nsaving checkpoint: {ckpt_file}")

        state_dict = model_util.convert_controlnet_state_dict_to_sd(model.state_dict())

        if save_dtype is not None:
            for key in list(state_dict.keys()):
                v = state_dict[key]
                v = v.detach().clone().to("cpu").to(save_dtype)
                state_dict[key] = v

        if os.path.splitext(ckpt_file)[1] == ".safetensors":
            from safetensors.torch import save_file

            save_file(state_dict, ckpt_file)
        else:
            torch.save(state_dict, ckpt_file)

        if args.huggingface_repo_id is not None:
            huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)

    def remove_model(old_ckpt_name):
        old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
        if os.path.exists(old_ckpt_file):
            accelerator.print(f"removing old checkpoint: {old_ckpt_file}")
            os.remove(old_ckpt_file)

    # For --sample_at_first
    train_util.sample_images(
        accelerator, args, 0, global_step, accelerator.device, vae, tokenizer, text_encoder, unet, controlnet=controlnet
    )

    # training loop
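    # each optimization step: get latents (cached or VAE-encoded), add scheduled noise,
    # run the ControlNet on the noisy latents plus the conditioning image, feed its
    # residuals into the frozen UNet, and regress the predicted noise (or velocity)
    # against the target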
    for epoch in range(num_train_epochs):
        if is_main_process:
            accelerator.print(f"\nepoch {epoch+1}/{num_train_epochs}")
        current_epoch.value = epoch + 1

        for step, batch in enumerate(train_dataloader):
            current_step.value = global_step
            with accelerator.accumulate(controlnet):
                with torch.no_grad():
                    if "latents" in batch and batch["latents"] is not None:
                        latents = batch["latents"].to(accelerator.device).to(dtype=weight_dtype)
                    else:
                        # encode the images into latents
                        latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
                        latents = latents * 0.18215
                    b_size = latents.shape[0]

                    input_ids = batch["input_ids"].to(accelerator.device)
                    encoder_hidden_states = train_util.get_hidden_states(args, input_ids, tokenizer, text_encoder, weight_dtype)

                # Sample noise that we'll add to the latents
                noise = torch.randn_like(latents, device=latents.device)
                if args.noise_offset:
                    noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
                elif args.multires_noise_iterations:
                    noise = pyramid_noise_like(
                        noise,
                        latents.device,
                        args.multires_noise_iterations,
                        args.multires_noise_discount,
                    )

                # Sample a random timestep for each image
                timesteps, huber_c = train_util.get_timesteps_and_huber_c(
                    args, 0, noise_scheduler.config.num_train_timesteps, noise_scheduler, b_size, latents.device
                )

                # Add noise to the latents according to the noise magnitude at each timestep
                # (this is the forward diffusion process)
                noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)

                controlnet_image = batch["conditioning_images"].to(dtype=weight_dtype)

                with accelerator.autocast():
                    down_block_res_samples, mid_block_res_sample = controlnet(
                        noisy_latents,
                        timesteps,
                        encoder_hidden_states=encoder_hidden_states,
                        controlnet_cond=controlnet_image,
                        return_dict=False,
                    )

                    # Predict the noise residual
                    noise_pred = unet(
                        noisy_latents,
                        timesteps,
                        encoder_hidden_states,
                        down_block_additional_residuals=[sample.to(dtype=weight_dtype) for sample in down_block_res_samples],
                        mid_block_additional_residual=mid_block_res_sample.to(dtype=weight_dtype),
                    ).sample

                if args.v_parameterization:
                    # v-parameterization training
                    target = noise_scheduler.get_velocity(latents, noise, timesteps)
                else:
                    target = noise

                loss = train_util.conditional_loss(
                    noise_pred.float(), target.float(), reduction="none", loss_type=args.loss_type, huber_c=huber_c
                )
                loss = loss.mean([1, 2, 3])

                loss_weights = batch["loss_weights"]  # per-sample weights
                loss = loss * loss_weights

                if args.min_snr_gamma:
                    loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma, args.v_parameterization)

                loss = loss.mean()  # already a mean, so no need to divide by batch_size

                accelerator.backward(loss)
                if accelerator.sync_gradients and args.max_grad_norm != 0.0:
                    params_to_clip = controlnet.parameters()
                    accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)

                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=True)

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                global_step += 1

                train_util.sample_images(
                    accelerator,
                    args,
                    None,
                    global_step,
                    accelerator.device,
                    vae,
                    tokenizer,
                    text_encoder,
                    unet,
                    controlnet=controlnet,
                )

                # save the model at the specified step interval
                if args.save_every_n_steps is not None and global_step % args.save_every_n_steps == 0:
                    accelerator.wait_for_everyone()
                    if accelerator.is_main_process:
                        ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, global_step)
                        save_model(
                            ckpt_name,
                            accelerator.unwrap_model(controlnet),
                        )

                        if args.save_state:
                            train_util.save_and_remove_state_stepwise(args, accelerator, global_step)

                        remove_step_no = train_util.get_remove_step_no(args, global_step)
                        if remove_step_no is not None:
                            remove_ckpt_name = train_util.get_step_ckpt_name(args, "." + args.save_model_as, remove_step_no)
                            remove_model(remove_ckpt_name)

            current_loss = loss.detach().item()
            loss_recorder.add(epoch=epoch, step=step, loss=current_loss)
            avr_loss: float = loss_recorder.moving_average
            logs = {"avr_loss": avr_loss}  # , "lr": lr_scheduler.get_last_lr()[0]}
            progress_bar.set_postfix(**logs)

            if args.logging_dir is not None:
                logs = generate_step_logs(args, current_loss, avr_loss, lr_scheduler)
                accelerator.log(logs, step=global_step)

            if global_step >= args.max_train_steps:
                break

        if args.logging_dir is not None:
            logs = {"loss/epoch": loss_recorder.moving_average}
            accelerator.log(logs, step=epoch + 1)

        accelerator.wait_for_everyone()

        # save the model at the specified epoch interval
        if args.save_every_n_epochs is not None:
            saving = (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs
            if is_main_process and saving:
                ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, epoch + 1)
                save_model(ckpt_name, accelerator.unwrap_model(controlnet))

                remove_epoch_no = train_util.get_remove_epoch_no(args, epoch + 1)
                if remove_epoch_no is not None:
                    remove_ckpt_name = train_util.get_epoch_ckpt_name(args, "." + args.save_model_as, remove_epoch_no)
                    remove_model(remove_ckpt_name)

                if args.save_state:
                    train_util.save_and_remove_state_on_epoch_end(args, accelerator, epoch + 1)

        train_util.sample_images(
            accelerator,
            args,
            epoch + 1,
            global_step,
            accelerator.device,
            vae,
            tokenizer,
            text_encoder,
            unet,
            controlnet=controlnet,
        )

        # end of epoch
    if is_main_process:
        controlnet = accelerator.unwrap_model(controlnet)

    accelerator.end_training()

    if is_main_process and (args.save_state or args.save_state_on_train_end):
        train_util.save_state_on_train_end(args, accelerator)

    # del accelerator  # not deleted here: it is still used below (for printing)

    if is_main_process:
        ckpt_name = train_util.get_last_ckpt_name(args, "." + args.save_model_as)
        save_model(ckpt_name, controlnet, force_sync_upload=True)

        logger.info("model saved.")


def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()

    add_logging_arguments(parser)
    train_util.add_sd_models_arguments(parser)
    train_util.add_dataset_arguments(parser, False, True, True)
    train_util.add_training_arguments(parser, False)
    deepspeed_utils.add_deepspeed_arguments(parser)
    train_util.add_optimizer_arguments(parser)
    config_util.add_config_arguments(parser)
    custom_train_functions.add_custom_train_arguments(parser)

    parser.add_argument(
        "--save_model_as",
        type=str,
        default="safetensors",
        choices=[None, "ckpt", "pt", "safetensors"],
        help="format to save the model (default is .safetensors) / モデル保存時の形式(デフォルトはsafetensors)",
    )
    parser.add_argument(
        "--controlnet_model_name_or_path",
        type=str,
        default=None,
        help="controlnet model name or path / controlnetのモデル名またはパス",
    )
    parser.add_argument(
        "--conditioning_data_dir",
        type=str,
        default=None,
        help="conditioning data directory / 条件付けデータのディレクトリ",
    )

    return parser


if __name__ == "__main__":
    parser = setup_parser()

    args = parser.parse_args()
    train_util.verify_command_line_training_args(args)
    args = train_util.read_config_from_file(args, parser)

    train(args)
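
# Example invocation (a minimal sketch, not part of the commit itself): the flags below come
# from the shared sd-scripts argument definitions; the model name and paths are hypothetical
# placeholders.
#
#   accelerate launch train_controlnet.py \
#     --pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5 \
#     --train_data_dir=./data/train \
#     --conditioning_data_dir=./data/cond \
#     --output_dir=./output \
#     --save_model_as=safetensors \
#     --max_train_steps=10000 \
#     --mixed_precision=fp16 \
#     --xformers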