ACCC1380 committed
Commit 4026052
1 Parent(s): 960cabc

Upload lora-scripts/sd-scripts/networks/lora_diffusers.py with huggingface_hub

lora-scripts/sd-scripts/networks/lora_diffusers.py ADDED
# LoRA module for Diffusers. This file works independently.

import bisect
import math
import random
from typing import Any, Dict, List, Mapping, Optional, Union
from diffusers import UNet2DConditionModel
import numpy as np
from tqdm import tqdm
from transformers import CLIPTextModel

import torch
from library.device_utils import init_ipex, get_preferred_device
init_ipex()

from library.utils import setup_logging
setup_logging()
import logging
logger = logging.getLogger(__name__)

def make_unet_conversion_map() -> Dict[str, str]:
    unet_conversion_map_layer = []

    for i in range(3):  # num_blocks is 3 in sdxl
        # loop over downblocks/upblocks
        for j in range(2):
            # loop over resnets/attentions for downblocks
            hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
            unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

            if i < 3:
                # no attention layers in down_blocks.3
                hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
                unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

        for j in range(3):
            # loop over resnets/attentions for upblocks
            hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
            sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
            unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

            # if i > 0: commented out for sdxl
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

        if i < 3:
            # no downsample in down_blocks.3
            hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
            unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

            # no upsample in up_blocks.3
            hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
            sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}."  # changed for sdxl
            unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

    hf_mid_atn_prefix = "mid_block.attentions.0."
    sd_mid_atn_prefix = "middle_block.1."
    unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

    for j in range(2):
        hf_mid_res_prefix = f"mid_block.resnets.{j}."
        sd_mid_res_prefix = f"middle_block.{2*j}."
        unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

    unet_conversion_map_resnet = [
        # (stable-diffusion, HF Diffusers)
        ("in_layers.0.", "norm1."),
        ("in_layers.2.", "conv1."),
        ("out_layers.0.", "norm2."),
        ("out_layers.3.", "conv2."),
        ("emb_layers.1.", "time_emb_proj."),
        ("skip_connection.", "conv_shortcut."),
    ]

    unet_conversion_map = []
    for sd, hf in unet_conversion_map_layer:
        if "resnets" in hf:
            for sd_res, hf_res in unet_conversion_map_resnet:
                unet_conversion_map.append((sd + sd_res, hf + hf_res))
        else:
            unet_conversion_map.append((sd, hf))

    for j in range(2):
        hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
        sd_time_embed_prefix = f"time_embed.{j*2}."
        unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))

    for j in range(2):
        hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
        sd_label_embed_prefix = f"label_emb.0.{j*2}."
        unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))

    unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
    unet_conversion_map.append(("out.0.", "conv_norm_out."))
    unet_conversion_map.append(("out.2.", "conv_out."))

    sd_hf_conversion_map = {sd.replace(".", "_")[:-1]: hf.replace(".", "_")[:-1] for sd, hf in unet_conversion_map}
    return sd_hf_conversion_map


UNET_CONVERSION_MAP = make_unet_conversion_map()
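
# Spot check of the mapping produced above (illustrative doctest-style examples;
# not part of the original file):
#   >>> UNET_CONVERSION_MAP["input_blocks_0_0"]
#   'conv_in'
#   >>> UNET_CONVERSION_MAP["input_blocks_1_0_in_layers_0"]
#   'down_blocks_0_resnets_0_norm1'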


class LoRAModule(torch.nn.Module):
    """
    Replaces the forward method of the original Linear, instead of replacing the original Linear module.
    """

    def __init__(
        self,
        lora_name,
        org_module: torch.nn.Module,
        multiplier=1.0,
        lora_dim=4,
        alpha=1,
    ):
        """if alpha == 0 or None, alpha is rank (no scaling)."""
        super().__init__()
        self.lora_name = lora_name

        if org_module.__class__.__name__ == "Conv2d" or org_module.__class__.__name__ == "LoRACompatibleConv":
            in_dim = org_module.in_channels
            out_dim = org_module.out_channels
        else:
            in_dim = org_module.in_features
            out_dim = org_module.out_features

        self.lora_dim = lora_dim

        if org_module.__class__.__name__ == "Conv2d" or org_module.__class__.__name__ == "LoRACompatibleConv":
            kernel_size = org_module.kernel_size
            stride = org_module.stride
            padding = org_module.padding
            self.lora_down = torch.nn.Conv2d(in_dim, self.lora_dim, kernel_size, stride, padding, bias=False)
            self.lora_up = torch.nn.Conv2d(self.lora_dim, out_dim, (1, 1), (1, 1), bias=False)
        else:
            self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False)
            self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False)

        if type(alpha) == torch.Tensor:
            alpha = alpha.detach().float().numpy()  # without casting, bf16 causes error
        alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
        self.scale = alpha / self.lora_dim
        self.register_buffer("alpha", torch.tensor(alpha))  # not included in gradient calculation

        # same as microsoft's
        torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
        torch.nn.init.zeros_(self.lora_up.weight)

        self.multiplier = multiplier
        self.org_module = [org_module]
        self.enabled = True
        self.network: LoRANetwork = None
        self.org_forward = None

    # override org_module's forward method
    def apply_to(self, multiplier=None):
        if multiplier is not None:
            self.multiplier = multiplier
        if self.org_forward is None:
            self.org_forward = self.org_module[0].forward
            self.org_module[0].forward = self.forward

    # restore org_module's forward method
    def unapply_to(self):
        if self.org_forward is not None:
            self.org_module[0].forward = self.org_forward

    # forward with lora
    # `scale` is passed by LoRACompatibleConv, but we ignore it because we have `multiplier`
    def forward(self, x, scale=1.0):
        if not self.enabled:
            return self.org_forward(x)
        return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
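
    # The patched forward computes y = org_forward(x) + multiplier * (alpha / lora_dim) * up(down(x));
    # since lora_up is zero-initialized, the module is an identity until weights are loaded.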

    def set_network(self, network):
        self.network = network

    # merge lora weight to org weight
    def merge_to(self, multiplier=1.0):
        # get lora weight
        lora_weight = self.get_weight(multiplier)

        # get org weight
        org_sd = self.org_module[0].state_dict()
        org_weight = org_sd["weight"]
        weight = org_weight + lora_weight.to(org_weight.device, dtype=org_weight.dtype)

        # set weight to org_module
        org_sd["weight"] = weight
        self.org_module[0].load_state_dict(org_sd)

    # restore org weight from lora weight
    def restore_from(self, multiplier=1.0):
        # get lora weight
        lora_weight = self.get_weight(multiplier)

        # get org weight
        org_sd = self.org_module[0].state_dict()
        org_weight = org_sd["weight"]
        weight = org_weight - lora_weight.to(org_weight.device, dtype=org_weight.dtype)

        # set weight to org_module
        org_sd["weight"] = weight
        self.org_module[0].load_state_dict(org_sd)

    # return lora weight
    def get_weight(self, multiplier=None):
        if multiplier is None:
            multiplier = self.multiplier

        # get up/down weight from module
        up_weight = self.lora_up.weight.to(torch.float)
        down_weight = self.lora_down.weight.to(torch.float)

        # pre-calculated weight; use the resolved `multiplier` argument so that
        # the value passed to merge_to/restore_from is honored
        if len(down_weight.size()) == 2:
            # linear
            weight = multiplier * (up_weight @ down_weight) * self.scale
        elif down_weight.size()[2:4] == (1, 1):
            # conv2d 1x1
            weight = (
                multiplier
                * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3)
                * self.scale
            )
        else:
            # conv2d 3x3
            conved = torch.nn.functional.conv2d(down_weight.permute(1, 0, 2, 3), up_weight).permute(1, 0, 2, 3)
            weight = multiplier * conved * self.scale

        return weight
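
# Illustrative sketch (not part of the original file): wrapping a single Linear
# layer with a standalone LoRAModule.
#   >>> lin = torch.nn.Linear(16, 16)
#   >>> lora = LoRAModule("lora_demo", lin, multiplier=1.0, lora_dim=4, alpha=4)
#   >>> lora.apply_to()              # monkey-patches lin.forward
#   >>> y = lin(torch.randn(2, 16))  # includes the (initially zero) LoRA delta
#   >>> lora.unapply_to()            # restores the original forward
#   >>> lora.merge_to()              # alternatively, bake the delta into lin's weight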


# Create network from weights for inference, weights are not loaded here
def create_network_from_weights(
    text_encoder: Union[CLIPTextModel, List[CLIPTextModel]], unet: UNet2DConditionModel, weights_sd: Dict, multiplier: float = 1.0
):
    # get dim/alpha mapping
    modules_dim = {}
    modules_alpha = {}
    for key, value in weights_sd.items():
        if "." not in key:
            continue

        lora_name = key.split(".")[0]
        if "alpha" in key:
            modules_alpha[lora_name] = value
        elif "lora_down" in key:
            dim = value.size()[0]
            modules_dim[lora_name] = dim
            # logger.info(f"{lora_name} {value.size()} {dim}")

    # support old LoRA without alpha
    for key in modules_dim.keys():
        if key not in modules_alpha:
            modules_alpha[key] = modules_dim[key]

    return LoRANetwork(text_encoder, unet, multiplier=multiplier, modules_dim=modules_dim, modules_alpha=modules_alpha)
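
# State-dict key layout assumed by the loop above (illustrative): per LoRA module,
#   "<lora_name>.lora_down.weight"  -- shape [rank, in_features] for Linear layers
#   "<lora_name>.lora_up.weight"    -- shape [out_features, rank]
#   "<lora_name>.alpha"             -- scalar tensor
# so the rank (dim) is read off the first dimension of the lora_down weight.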


def merge_lora_weights(pipe, weights_sd: Dict, multiplier: float = 1.0):
    text_encoders = [pipe.text_encoder, pipe.text_encoder_2] if hasattr(pipe, "text_encoder_2") else [pipe.text_encoder]
    unet = pipe.unet

    lora_network = create_network_from_weights(text_encoders, unet, weights_sd, multiplier=multiplier)
    lora_network.load_state_dict(weights_sd)
    lora_network.merge_to(multiplier=multiplier)
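
# Typical call sequence (a sketch; `pipe` is any loaded Diffusers pipeline and the
# weights path is a placeholder):
#   >>> from safetensors.torch import load_file
#   >>> weights_sd = load_file("/path/to/lora.safetensors")
#   >>> merge_lora_weights(pipe, weights_sd, multiplier=0.8)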


# Simple version without block weight and training support
class LoRANetwork(torch.nn.Module):
    UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel"]
    UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
    TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
    LORA_PREFIX_UNET = "lora_unet"
    LORA_PREFIX_TEXT_ENCODER = "lora_te"

    # SDXL: must start with LORA_PREFIX_TEXT_ENCODER
    LORA_PREFIX_TEXT_ENCODER1 = "lora_te1"
    LORA_PREFIX_TEXT_ENCODER2 = "lora_te2"

    def __init__(
        self,
        text_encoder: Union[List[CLIPTextModel], CLIPTextModel],
        unet: UNet2DConditionModel,
        multiplier: float = 1.0,
        modules_dim: Optional[Dict[str, int]] = None,
        modules_alpha: Optional[Dict[str, int]] = None,
        verbose: Optional[bool] = False,
    ) -> None:
        super().__init__()
        self.multiplier = multiplier

        logger.info("create LoRA network from weights")

        # convert SDXL Stability AI's U-Net modules to Diffusers
        converted = self.convert_unet_modules(modules_dim, modules_alpha)
        if converted:
            logger.info(f"converted {converted} Stability AI's U-Net LoRA modules to Diffusers (SDXL)")

        # create module instances
        def create_modules(
            is_unet: bool,
            text_encoder_idx: Optional[int],  # None, 1, 2
            root_module: torch.nn.Module,
            target_replace_modules: List[torch.nn.Module],
        ) -> List[LoRAModule]:
            prefix = (
                self.LORA_PREFIX_UNET
                if is_unet
                else (
                    self.LORA_PREFIX_TEXT_ENCODER
                    if text_encoder_idx is None
                    else (self.LORA_PREFIX_TEXT_ENCODER1 if text_encoder_idx == 1 else self.LORA_PREFIX_TEXT_ENCODER2)
                )
            )
            loras = []
            skipped = []
            for name, module in root_module.named_modules():
                if module.__class__.__name__ in target_replace_modules:
                    for child_name, child_module in module.named_modules():
                        is_linear = (
                            child_module.__class__.__name__ == "Linear" or child_module.__class__.__name__ == "LoRACompatibleLinear"
                        )
                        is_conv2d = (
                            child_module.__class__.__name__ == "Conv2d" or child_module.__class__.__name__ == "LoRACompatibleConv"
                        )

                        if is_linear or is_conv2d:
                            lora_name = prefix + "." + name + "." + child_name
                            lora_name = lora_name.replace(".", "_")

                            if lora_name not in modules_dim:
                                # logger.info(f"skipped {lora_name} (not found in modules_dim)")
                                skipped.append(lora_name)
                                continue

                            dim = modules_dim[lora_name]
                            alpha = modules_alpha[lora_name]
                            lora = LoRAModule(
                                lora_name,
                                child_module,
                                self.multiplier,
                                dim,
                                alpha,
                            )
                            loras.append(lora)
            return loras, skipped

        text_encoders = text_encoder if type(text_encoder) == list else [text_encoder]

        # create LoRA for text encoder
        # Creating all modules every time is wasteful; this should be reconsidered
        self.text_encoder_loras: List[LoRAModule] = []
        skipped_te = []
        for i, text_encoder in enumerate(text_encoders):
            if len(text_encoders) > 1:
                index = i + 1
            else:
                index = None

            text_encoder_loras, skipped = create_modules(False, index, text_encoder, LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE)
            self.text_encoder_loras.extend(text_encoder_loras)
            skipped_te += skipped
        logger.info(f"create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.")
        if len(skipped_te) > 0:
            logger.warning(f"skipped {len(skipped_te)} modules because of missing weight for text encoder.")

        # extend U-Net target modules to include Conv2d 3x3
        target_modules = LoRANetwork.UNET_TARGET_REPLACE_MODULE + LoRANetwork.UNET_TARGET_REPLACE_MODULE_CONV2D_3X3

        self.unet_loras: List[LoRAModule]
        self.unet_loras, skipped_un = create_modules(True, None, unet, target_modules)
        logger.info(f"create LoRA for U-Net: {len(self.unet_loras)} modules.")
        if len(skipped_un) > 0:
            logger.warning(f"skipped {len(skipped_un)} modules because of missing weight for U-Net.")

        # assertion
        names = set()
        for lora in self.text_encoder_loras + self.unet_loras:
            names.add(lora.lora_name)
        for lora_name in modules_dim.keys():
            assert lora_name in names, f"{lora_name} is not found in created LoRA modules."

        # make load_state_dict work
        for lora in self.text_encoder_loras + self.unet_loras:
            self.add_module(lora.lora_name, lora)

    # SDXL: convert SDXL Stability AI's U-Net modules to Diffusers
    def convert_unet_modules(self, modules_dim, modules_alpha):
        converted_count = 0
        not_converted_count = 0

        map_keys = list(UNET_CONVERSION_MAP.keys())
        map_keys.sort()

        for key in list(modules_dim.keys()):
            if key.startswith(LoRANetwork.LORA_PREFIX_UNET + "_"):
                search_key = key.replace(LoRANetwork.LORA_PREFIX_UNET + "_", "")
                position = bisect.bisect_right(map_keys, search_key)
                map_key = map_keys[position - 1]
                if search_key.startswith(map_key):
                    new_key = key.replace(map_key, UNET_CONVERSION_MAP[map_key])
                    modules_dim[new_key] = modules_dim[key]
                    modules_alpha[new_key] = modules_alpha[key]
                    del modules_dim[key]
                    del modules_alpha[key]
                    converted_count += 1
                else:
                    not_converted_count += 1
        assert (
            converted_count == 0 or not_converted_count == 0
        ), f"some modules are not converted: {converted_count} converted, {not_converted_count} not converted"
        return converted_count
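
    # How the bisect lookup above works (illustrative example with made-up keys):
    # in the sorted key list, the entry just before bisect_right(search_key) is the
    # largest key <= search_key; if search_key starts with that entry, it is the
    # matching SD-style prefix.
    #   >>> keys = sorted(["input_blocks_1_0", "input_blocks_1_1", "middle_block_1"])
    #   >>> keys[bisect.bisect_right(keys, "input_blocks_1_0_in_layers_2") - 1]
    #   'input_blocks_1_0'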

    def set_multiplier(self, multiplier):
        self.multiplier = multiplier
        for lora in self.text_encoder_loras + self.unet_loras:
            lora.multiplier = self.multiplier

    def apply_to(self, multiplier=1.0, apply_text_encoder=True, apply_unet=True):
        if apply_text_encoder:
            logger.info("enable LoRA for text encoder")
            for lora in self.text_encoder_loras:
                lora.apply_to(multiplier)
        if apply_unet:
            logger.info("enable LoRA for U-Net")
            for lora in self.unet_loras:
                lora.apply_to(multiplier)

    def unapply_to(self):
        for lora in self.text_encoder_loras + self.unet_loras:
            lora.unapply_to()

    def merge_to(self, multiplier=1.0):
        logger.info("merge LoRA weights to original weights")
        for lora in tqdm(self.text_encoder_loras + self.unet_loras):
            lora.merge_to(multiplier)
        logger.info("weights are merged")

    def restore_from(self, multiplier=1.0):
        logger.info("restore original weights (unmerge LoRA weights)")
        for lora in tqdm(self.text_encoder_loras + self.unet_loras):
            lora.restore_from(multiplier)
        logger.info("weights are restored")

    def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):
        # convert SDXL Stability AI's state dict to a Diffusers-based state dict
        map_keys = list(UNET_CONVERSION_MAP.keys())  # prefixes of U-Net modules
        map_keys.sort()
        for key in list(state_dict.keys()):
            if key.startswith(LoRANetwork.LORA_PREFIX_UNET + "_"):
                search_key = key.replace(LoRANetwork.LORA_PREFIX_UNET + "_", "")
                position = bisect.bisect_right(map_keys, search_key)
                map_key = map_keys[position - 1]
                if search_key.startswith(map_key):
                    new_key = key.replace(map_key, UNET_CONVERSION_MAP[map_key])
                    state_dict[new_key] = state_dict[key]
                    del state_dict[key]

        # in case of V2, some weights have a different shape, so we need to convert them
        # because V2 LoRA is based on a U-Net created with use_linear_projection=False
        my_state_dict = self.state_dict()
        for key in state_dict.keys():
            if state_dict[key].size() != my_state_dict[key].size():
                # logger.info(f"convert {key} from {state_dict[key].size()} to {my_state_dict[key].size()}")
                state_dict[key] = state_dict[key].view(my_state_dict[key].size())

        return super().load_state_dict(state_dict, strict)
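
# Example of the key rewrite performed in load_state_dict above (illustrative):
# a Stability AI-style key such as
#   "lora_unet_input_blocks_1_0_in_layers_2.lora_down.weight"
# becomes the Diffusers-style
#   "lora_unet_down_blocks_0_resnets_0_conv1.lora_down.weight"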


if __name__ == "__main__":
    # sample code to use LoRANetwork
    import os
    import argparse
    from diffusers import StableDiffusionPipeline, StableDiffusionXLPipeline
    import torch

    device = get_preferred_device()

    parser = argparse.ArgumentParser()
    parser.add_argument("--model_id", type=str, default=None, help="model id for huggingface")
    parser.add_argument("--lora_weights", type=str, default=None, help="path to LoRA weights")
    parser.add_argument("--sdxl", action="store_true", help="use SDXL model")
    parser.add_argument("--prompt", type=str, default="A photo of cat", help="prompt text")
    parser.add_argument("--negative_prompt", type=str, default="", help="negative prompt text")
    parser.add_argument("--seed", type=int, default=0, help="random seed")
    args = parser.parse_args()
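
    # Sample invocation (a sketch; the model id and LoRA path are placeholders):
    #   python networks/lora_diffusers.py --model_id runwayml/stable-diffusion-v1-5 \
    #       --lora_weights /path/to/lora.safetensors --prompt "A photo of cat" --seed 0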

    image_prefix = args.model_id.replace("/", "_") + "_"

    # load Diffusers model
    logger.info(f"load model from {args.model_id}")
    pipe: Union[StableDiffusionPipeline, StableDiffusionXLPipeline]
    if args.sdxl:
        # use_safetensors=True does not work with 0.18.2
        pipe = StableDiffusionXLPipeline.from_pretrained(args.model_id, variant="fp16", torch_dtype=torch.float16)
    else:
        pipe = StableDiffusionPipeline.from_pretrained(args.model_id, variant="fp16", torch_dtype=torch.float16)
    pipe.to(device)
    pipe.set_use_memory_efficient_attention_xformers(True)

    text_encoders = [pipe.text_encoder, pipe.text_encoder_2] if args.sdxl else [pipe.text_encoder]

    # load LoRA weights
    logger.info(f"load LoRA weights from {args.lora_weights}")
    if os.path.splitext(args.lora_weights)[1] == ".safetensors":
        from safetensors.torch import load_file

        lora_sd = load_file(args.lora_weights)
    else:
        lora_sd = torch.load(args.lora_weights)

    # create the network from the LoRA weights, then load the weights into it
    logger.info("create LoRA network")
    lora_network: LoRANetwork = create_network_from_weights(text_encoders, pipe.unet, lora_sd, multiplier=1.0)

    logger.info("load LoRA network weights")
    lora_network.load_state_dict(lora_sd)

    lora_network.to(device, dtype=pipe.unet.dtype)  # required for apply_to; merge_to works without this

    # back up U-Net / text encoder weights if necessary
    def detach_and_move_to_cpu(state_dict):
        for k, v in state_dict.items():
            state_dict[k] = v.detach().cpu()
        return state_dict

    org_unet_sd = pipe.unet.state_dict()
    detach_and_move_to_cpu(org_unet_sd)

    org_text_encoder_sd = pipe.text_encoder.state_dict()
    detach_and_move_to_cpu(org_text_encoder_sd)

    if args.sdxl:
        org_text_encoder_2_sd = pipe.text_encoder_2.state_dict()
        detach_and_move_to_cpu(org_text_encoder_2_sd)

    def seed_everything(seed):
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        np.random.seed(seed)
        random.seed(seed)

    # create image with original weights
    logger.info("create image with original weights")
    seed_everything(args.seed)
    image = pipe(args.prompt, negative_prompt=args.negative_prompt).images[0]
    image.save(image_prefix + "original.png")

    # apply LoRA network to the model: slower than merge_to, but can be reverted easily
    logger.info("apply LoRA network to the model")
    lora_network.apply_to(multiplier=1.0)

    logger.info("create image with applied LoRA")
    seed_everything(args.seed)
    image = pipe(args.prompt, negative_prompt=args.negative_prompt).images[0]
    image.save(image_prefix + "applied_lora.png")

    # unapply LoRA network from the model
    logger.info("unapply LoRA network from the model")
    lora_network.unapply_to()

    logger.info("create image with unapplied LoRA")
    seed_everything(args.seed)
    image = pipe(args.prompt, negative_prompt=args.negative_prompt).images[0]
    image.save(image_prefix + "unapplied_lora.png")

    # merge LoRA network into the model: faster than apply_to, but requires a backup
    # of the original weights (or restore_from)
    logger.info("merge LoRA network to the model")
    lora_network.merge_to(multiplier=1.0)

    logger.info("create image with LoRA")
    seed_everything(args.seed)
    image = pipe(args.prompt, negative_prompt=args.negative_prompt).images[0]
    image.save(image_prefix + "merged_lora.png")

    # restore (unmerge) LoRA weights: numerically unstable.
    # Due to floating-point error, the restored weights may not exactly match the
    # originals; restoring from a saved state_dict is more reliable.
    logger.info("restore (unmerge) LoRA weights")
    lora_network.restore_from(multiplier=1.0)

    logger.info("create image without LoRA")
    seed_everything(args.seed)
    image = pipe(args.prompt, negative_prompt=args.negative_prompt).images[0]
    image.save(image_prefix + "unmerged_lora.png")

    # restore original weights
    logger.info("restore original weights")
    pipe.unet.load_state_dict(org_unet_sd)
    pipe.text_encoder.load_state_dict(org_text_encoder_sd)
    if args.sdxl:
        pipe.text_encoder_2.load_state_dict(org_text_encoder_2_sd)

    logger.info("create image with restored original weights")
    seed_everything(args.seed)
    image = pipe(args.prompt, negative_prompt=args.negative_prompt).images[0]
    image.save(image_prefix + "restore_original.png")

    # use the convenience function to merge LoRA weights
    logger.info("merge LoRA weights with convenience function")
    merge_lora_weights(pipe, lora_sd, multiplier=1.0)

    logger.info("create image with merged LoRA weights")
    seed_everything(args.seed)
    image = pipe(args.prompt, negative_prompt=args.negative_prompt).images[0]
    image.save(image_prefix + "convenience_merged_lora.png")