ACCC1380 committed on
Commit
c51edd7
1 Parent(s): 07b7d37

Upload lora-scripts/sd-scripts/tools/original_control_net.py with huggingface_hub

lora-scripts/sd-scripts/tools/original_control_net.py ADDED
@@ -0,0 +1,353 @@
+ from typing import List, NamedTuple, Any
+ import numpy as np
+ import cv2
+ import torch
+ from safetensors.torch import load_file
+
+ from library.original_unet import UNet2DConditionModel, SampleOutput
+
+ import library.model_util as model_util
+ from library.utils import setup_logging
+ setup_logging()
+ import logging
+ logger = logging.getLogger(__name__)
+
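+ # Bundle describing one active ControlNet: the control U-Net, the ControlNet module
+ # (zero convs / hint block), an optional hint preprocessor, the weight applied to its
+ # outputs, and the ratio of sampling steps during which it stays active.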
+ class ControlNetInfo(NamedTuple):
+     unet: Any
+     net: Any
+     prep: Any
+     weight: float
+     ratio: float
+
+
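+ # Minimal container for the parts of the original ControlNet that are not plain U-Net
+ # weights: the zero convolutions after each down-block output, the middle block output
+ # projection, and the hint-encoding convolution stack.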
+ class ControlNet(torch.nn.Module):
+     def __init__(self) -> None:
+         super().__init__()
+
+         # make control model
+         self.control_model = torch.nn.Module()
+
+         dims = [320, 320, 320, 320, 640, 640, 640, 1280, 1280, 1280, 1280, 1280]
+         zero_convs = torch.nn.ModuleList()
+         for i, dim in enumerate(dims):
+             sub_list = torch.nn.ModuleList([torch.nn.Conv2d(dim, dim, 1)])
+             zero_convs.append(sub_list)
+         self.control_model.add_module("zero_convs", zero_convs)
+
+         middle_block_out = torch.nn.Conv2d(1280, 1280, 1)
+         self.control_model.add_module("middle_block_out", torch.nn.ModuleList([middle_block_out]))
+
+         dims = [16, 16, 32, 32, 96, 96, 256, 320]
+         strides = [1, 1, 2, 1, 2, 1, 2, 1]
+         prev_dim = 3
+         input_hint_block = torch.nn.Sequential()
+         for i, (dim, stride) in enumerate(zip(dims, strides)):
+             input_hint_block.append(torch.nn.Conv2d(prev_dim, dim, 3, stride, 1))
+             if i < len(dims) - 1:
+                 input_hint_block.append(torch.nn.SiLU())
+             prev_dim = dim
+         self.control_model.add_module("input_hint_block", input_hint_block)
+
+
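+ # Load a ControlNet checkpoint stored in the original Stable Diffusion key layout
+ # (optionally a "difference"/Transfer Control checkpoint). Returns the control U-Net as a
+ # Diffusers UNet2DConditionModel plus a ControlNet module holding the remaining weights.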
+ def load_control_net(v2, unet, model):
+     device = unet.device
+
+     # Convert keys from the control SD checkpoint, take only the parts that correspond to
+     # the U-Net, and load them into the Diffusers U-Net.
+     # load the state dict
+     logger.info(f"ControlNet: loading control SD model : {model}")
+
+     if model_util.is_safetensors(model):
+         ctrl_sd_sd = load_file(model)
+     else:
+         ctrl_sd_sd = torch.load(model, map_location="cpu")
+         ctrl_sd_sd = ctrl_sd_sd.pop("state_dict", ctrl_sd_sd)
+
+     # Make the weights loadable into the U-Net. The ControlNet ships an SD-format state dict, so load that.
+     is_difference = "difference" in ctrl_sd_sd
+     logger.info(f"ControlNet: loading difference: {is_difference}")
+
+     # Some keys do not exist in the ControlNet checkpoint, so first build the full set of
+     # SD-format keys from the current U-Net. They also serve as the base weights for Transfer Control.
+     ctrl_unet_sd_sd = model_util.convert_unet_state_dict_to_sd(v2, unet.state_dict())
+
+     # Copy so the original U-Net is not affected, and add the prefix, which is missing here.
+     for key in list(ctrl_unet_sd_sd.keys()):
+         ctrl_unet_sd_sd["model.diffusion_model." + key] = ctrl_unet_sd_sd.pop(key).clone()
+
+     zero_conv_sd = {}
+     for key in list(ctrl_sd_sd.keys()):
+         if key.startswith("control_"):
+             unet_key = "model.diffusion_" + key[len("control_") :]
+             if unet_key not in ctrl_unet_sd_sd:  # zero conv
+                 zero_conv_sd[key] = ctrl_sd_sd[key]
+                 continue
+             if is_difference:  # Transfer Control
+                 ctrl_unet_sd_sd[unet_key] += ctrl_sd_sd[key].to(device, dtype=unet.dtype)
+             else:
+                 ctrl_unet_sd_sd[unet_key] = ctrl_sd_sd[key].to(device, dtype=unet.dtype)
+
+     unet_config = model_util.create_unet_diffusers_config(v2)
+     ctrl_unet_du_sd = model_util.convert_ldm_unet_checkpoint(v2, ctrl_unet_sd_sd, unet_config)  # Diffusers-format ControlNet state dict
+
+     # create the ControlNet's U-Net
+     ctrl_unet = UNet2DConditionModel(**unet_config)
+     info = ctrl_unet.load_state_dict(ctrl_unet_du_sd)
+     logger.info(f"ControlNet: loading Control U-Net: {info}")
+
+     # create the parts of the ControlNet other than the U-Net
+     # TODO support middle only
+     ctrl_net = ControlNet()
+     info = ctrl_net.load_state_dict(zero_conv_sd)
+     logger.info(f"ControlNet: loading ControlNet: {info}")
+
+     ctrl_unet.to(unet.device, dtype=unet.dtype)
+     ctrl_net.to(unet.device, dtype=unet.dtype)
+     return ctrl_unet, ctrl_net
+
+
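+ # Map a preprocessor spec string to a callable. Currently only "canny" (optionally
+ # "canny_<th1>_<th2>", defaulting to thresholds 63 and 191) is supported; "none"/None
+ # disables preprocessing.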
+ def load_preprocess(prep_type: str):
+     if prep_type is None or prep_type.lower() == "none":
+         return None
+
+     if prep_type.startswith("canny"):
+         args = prep_type.split("_")
+         th1 = int(args[1]) if len(args) >= 2 else 63
+         th2 = int(args[2]) if len(args) >= 3 else 191
+
+         def canny(img):
+             img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+             return cv2.Canny(img, th1, th2)
+
+         return canny
+
+     logger.info(f"Unsupported prep type: {prep_type}")
+     return None
+
+
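+ # Convert a hint image (HWC, RGB, 0-255) into a 1xCxHxW float tensor in [0, 1].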
+ def preprocess_ctrl_net_hint_image(image):
+     image = np.array(image).astype(np.float32) / 255.0
+     # the ControlNet sample code uses cv2, but the image here is loaded via Gradio, so it is actually RGB
+     # image = image[:, :, ::-1].copy()  # rgb to bgr
+     image = image[None].transpose(0, 3, 1, 2)  # nchw
+     image = torch.from_numpy(image)
+     return image  # 0 to 1
+
+
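+ # Run each ControlNet's input_hint_block over its (optionally preprocessed) hint images
+ # and return one guided-hint tensor per ControlNet, batched to b_size.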
+ def get_guided_hints(control_nets: List[ControlNetInfo], num_latent_input, b_size, hints):
+     guided_hints = []
+     for i, cnet_info in enumerate(control_nets):
+         # hints must be ordered as: cnet1 hint for image 1, cnet2 hint for image 1, cnet3 hint for image 1, cnet1 hint for image 2, cnet2 hint for image 2, ...
+         b_hints = []
+         if len(hints) == 1:  # use the same image as the hint for every sample
+             hint = hints[0]
+             if cnet_info.prep is not None:
+                 hint = cnet_info.prep(hint)
+             hint = preprocess_ctrl_net_hint_image(hint)
+             b_hints = [hint for _ in range(b_size)]
+         else:
+             for bi in range(b_size):
+                 hint = hints[(bi * len(control_nets) + i) % len(hints)]
+                 if cnet_info.prep is not None:
+                     hint = cnet_info.prep(hint)
+                 hint = preprocess_ctrl_net_hint_image(hint)
+                 b_hints.append(hint)
+         b_hints = torch.cat(b_hints, dim=0)
+         b_hints = b_hints.to(cnet_info.unet.device, dtype=cnet_info.unet.dtype)
+
+         guided_hint = cnet_info.net.control_model.input_hint_block(b_hints)
+         guided_hints.append(guided_hint)
+     return guided_hints
+
+
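+ # One denoising call with ControlNet. When multiple ControlNets are given, they are applied
+ # alternately per step (selected by step index) rather than merged; a ControlNet whose ratio
+ # has already been exceeded falls back to the plain U-Net call.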
+ def call_unet_and_control_net(
+     step,
+     num_latent_input,
+     original_unet,
+     control_nets: List[ControlNetInfo],
+     guided_hints,
+     current_ratio,
+     sample,
+     timestep,
+     encoder_hidden_states,
+     encoder_hidden_states_for_control_net,
+ ):
+     # ControlNet
+     # with multiple ControlNets, apply them alternately instead of merging their outputs
+     cnet_cnt = len(control_nets)
+     cnet_idx = step % cnet_cnt
+     cnet_info = control_nets[cnet_idx]
+
+     # logger.info(current_ratio, cnet_info.prep, cnet_info.weight, cnet_info.ratio)
+     if cnet_info.ratio < current_ratio:
+         return original_unet(sample, timestep, encoder_hidden_states)
+
+     guided_hint = guided_hints[cnet_idx]
+
+     # gradual latent support: match the size of guided_hint to the size of sample
+     if guided_hint.shape[-2:] != sample.shape[-2:]:
+         # print(f"guided_hint.shape={guided_hint.shape}, sample.shape={sample.shape}")
+         org_dtype = guided_hint.dtype
+         if org_dtype == torch.bfloat16:
+             guided_hint = guided_hint.to(torch.float32)
+         guided_hint = torch.nn.functional.interpolate(guided_hint, size=sample.shape[-2:], mode="bicubic")
+         if org_dtype == torch.bfloat16:
+             guided_hint = guided_hint.to(org_dtype)
+
+     guided_hint = guided_hint.repeat((num_latent_input, 1, 1, 1))
+     outs = unet_forward(
+         True, cnet_info.net, cnet_info.unet, guided_hint, None, sample, timestep, encoder_hidden_states_for_control_net
+     )
+     outs = [o * cnet_info.weight for o in outs]
+
+     # U-Net
+     return unet_forward(False, cnet_info.net, original_unet, None, outs, sample, timestep, encoder_hidden_states)
+
+
+ """
207
+ # これはmergeのバージョン
208
+ # ControlNet
209
+ cnet_outs_list = []
210
+ for i, cnet_info in enumerate(control_nets):
211
+ # logger.info(current_ratio, cnet_info.prep, cnet_info.weight, cnet_info.ratio)
212
+ if cnet_info.ratio < current_ratio:
213
+ continue
214
+ guided_hint = guided_hints[i]
215
+ outs = unet_forward(True, cnet_info.net, cnet_info.unet, guided_hint, None, sample, timestep, encoder_hidden_states)
216
+ for i in range(len(outs)):
217
+ outs[i] *= cnet_info.weight
218
+
219
+ cnet_outs_list.append(outs)
220
+
221
+ count = len(cnet_outs_list)
222
+ if count == 0:
223
+ return original_unet(sample, timestep, encoder_hidden_states)
224
+
225
+ # sum of controlnets
226
+ for i in range(1, count):
227
+ cnet_outs_list[0] += cnet_outs_list[i]
228
+
229
+ # U-Net
230
+ return unet_forward(False, cnet_info.net, original_unet, None, cnet_outs_list[0], sample, timestep, encoder_hidden_states)
231
+ """
232
+
233
+
234
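+ # Shared forward pass, mostly copied from UNet2DConditionModel.forward. With
+ # is_control_net=True it runs the control U-Net and returns the zero-conv outputs; with
+ # is_control_net=False it runs the original U-Net and consumes ctrl_outs as residuals.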
+ def unet_forward(
+     is_control_net,
+     control_net: ControlNet,
+     unet: UNet2DConditionModel,
+     guided_hint,
+     ctrl_outs,
+     sample,
+     timestep,
+     encoder_hidden_states,
+ ):
+     # copy from UNet2DConditionModel
+     default_overall_up_factor = 2**unet.num_upsamplers
+
+     forward_upsample_size = False
+     upsample_size = None
+
+     if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
+         logger.info("Forward upsample size to force interpolation output size.")
+         forward_upsample_size = True
+
+     # 1. time
+     timesteps = timestep
+     if not torch.is_tensor(timesteps):
+         # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
+         # This would be a good case for the `match` statement (Python 3.10+)
+         is_mps = sample.device.type == "mps"
+         if isinstance(timestep, float):
+             dtype = torch.float32 if is_mps else torch.float64
+         else:
+             dtype = torch.int32 if is_mps else torch.int64
+         timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
+     elif len(timesteps.shape) == 0:
+         timesteps = timesteps[None].to(sample.device)
+
+     # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+     timesteps = timesteps.expand(sample.shape[0])
+
+     t_emb = unet.time_proj(timesteps)
+
+     # timesteps does not contain any weights and will always return f32 tensors
+     # but time_embedding might actually be running in fp16. so we need to cast here.
+     # there might be better ways to encapsulate this.
+     t_emb = t_emb.to(dtype=unet.dtype)
+     emb = unet.time_embedding(t_emb)
+
+     outs = []  # output of ControlNet
+     zc_idx = 0
+
+     # 2. pre-process
+     sample = unet.conv_in(sample)
+     if is_control_net:
+         sample += guided_hint
+         outs.append(control_net.control_model.zero_convs[zc_idx][0](sample))  # , emb, encoder_hidden_states))
+         zc_idx += 1
+
+     # 3. down
+     down_block_res_samples = (sample,)
+     for downsample_block in unet.down_blocks:
+         if downsample_block.has_cross_attention:
+             sample, res_samples = downsample_block(
+                 hidden_states=sample,
+                 temb=emb,
+                 encoder_hidden_states=encoder_hidden_states,
+             )
+         else:
+             sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+         if is_control_net:
+             for rs in res_samples:
+                 outs.append(control_net.control_model.zero_convs[zc_idx][0](rs))  # , emb, encoder_hidden_states))
+                 zc_idx += 1
+
+         down_block_res_samples += res_samples
+
+     # 4. mid
+     sample = unet.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states)
+     if is_control_net:
+         outs.append(control_net.control_model.middle_block_out[0](sample))
+         return outs
+
+     if not is_control_net:
+         sample += ctrl_outs.pop()
+
+     # 5. up
+     for i, upsample_block in enumerate(unet.up_blocks):
+         is_final_block = i == len(unet.up_blocks) - 1
+
+         res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
+         down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
+
+         if not is_control_net and len(ctrl_outs) > 0:
+             res_samples = list(res_samples)
+             apply_ctrl_outs = ctrl_outs[-len(res_samples) :]
+             ctrl_outs = ctrl_outs[: -len(res_samples)]
+             for j in range(len(res_samples)):
+                 res_samples[j] = res_samples[j] + apply_ctrl_outs[j]
+             res_samples = tuple(res_samples)
+
+         # if we have not reached the final block and need to forward the
+         # upsample size, we do it here
+         if not is_final_block and forward_upsample_size:
+             upsample_size = down_block_res_samples[-1].shape[2:]
+
+         if upsample_block.has_cross_attention:
+             sample = upsample_block(
+                 hidden_states=sample,
+                 temb=emb,
+                 res_hidden_states_tuple=res_samples,
+                 encoder_hidden_states=encoder_hidden_states,
+                 upsample_size=upsample_size,
+             )
+         else:
+             sample = upsample_block(
+                 hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
+             )
+     # 6. post-process
+     sample = unet.conv_norm_out(sample)
+     sample = unet.conv_act(sample)
+     sample = unet.conv_out(sample)
+
+     return SampleOutput(sample=sample)
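+
+
+ # Rough usage sketch (illustrative only; variable names such as `unet`, `hint_images`,
+ # `timesteps`, and the way `current_ratio` is computed come from the calling inference
+ # script, not from this module):
+ #
+ #   ctrl_unet, ctrl_net = load_control_net(v2, unet, "control_sd15_canny.safetensors")
+ #   prep = load_preprocess("canny_63_191")
+ #   control_nets = [ControlNetInfo(ctrl_unet, ctrl_net, prep, weight=1.0, ratio=1.0)]
+ #
+ #   guided_hints = get_guided_hints(control_nets, num_latent_input, b_size, hint_images)
+ #   for step, t in enumerate(timesteps):
+ #       current_ratio = step / len(timesteps)
+ #       noise_pred = call_unet_and_control_net(
+ #           step, num_latent_input, unet, control_nets, guided_hints, current_ratio,
+ #           latent_model_input, t, text_embeddings, text_embeddings_for_control_net,
+ #       ).sample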