ACCC1380 committed
Commit 69aad76
1 Parent(s): 254087d

Upload lora-scripts/sd-scripts/library/custom_train_functions.py with huggingface_hub

lora-scripts/sd-scripts/library/custom_train_functions.py ADDED
@@ -0,0 +1,548 @@
import torch
import argparse
import random
import re
from typing import List, Optional, Union
from .utils import setup_logging

setup_logging()
import logging

logger = logging.getLogger(__name__)


def prepare_scheduler_for_custom_training(noise_scheduler, device):
    if hasattr(noise_scheduler, "all_snr"):
        return

    alphas_cumprod = noise_scheduler.alphas_cumprod
    sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)
    sqrt_one_minus_alphas_cumprod = torch.sqrt(1.0 - alphas_cumprod)
    alpha = sqrt_alphas_cumprod
    sigma = sqrt_one_minus_alphas_cumprod
    all_snr = (alpha / sigma) ** 2

    noise_scheduler.all_snr = all_snr.to(device)
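
# Usage sketch (illustrative, not executed by this module; assumes a diffusers
# DDPMScheduler and a placeholder device). This caches the per-timestep SNR so
# the loss-weighting helpers below can index it:
#
#   from diffusers import DDPMScheduler
#   noise_scheduler = DDPMScheduler(num_train_timesteps=1000)
#   prepare_scheduler_for_custom_training(noise_scheduler, "cuda")
#   assert noise_scheduler.all_snr.shape == (1000,)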


def fix_noise_scheduler_betas_for_zero_terminal_snr(noise_scheduler):
    # fix beta: zero terminal SNR
    logger.info("fix noise scheduler betas: https://arxiv.org/abs/2305.08891")

    def enforce_zero_terminal_snr(betas):
        # Convert betas to alphas_bar_sqrt
        alphas = 1 - betas
        alphas_bar = alphas.cumprod(0)
        alphas_bar_sqrt = alphas_bar.sqrt()

        # Store old values.
        alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
        alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
        # Shift so last timestep is zero.
        alphas_bar_sqrt -= alphas_bar_sqrt_T
        # Scale so first timestep is back to old value.
        alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

        # Convert alphas_bar_sqrt to betas
        alphas_bar = alphas_bar_sqrt**2
        alphas = alphas_bar[1:] / alphas_bar[:-1]
        alphas = torch.cat([alphas_bar[0:1], alphas])
        betas = 1 - alphas
        return betas

    betas = noise_scheduler.betas
    betas = enforce_zero_terminal_snr(betas)
    alphas = 1.0 - betas
    alphas_cumprod = torch.cumprod(alphas, dim=0)

    # logger.info(f"original: {noise_scheduler.betas}")
    # logger.info(f"fixed: {betas}")

    noise_scheduler.betas = betas
    noise_scheduler.alphas = alphas
    noise_scheduler.alphas_cumprod = alphas_cumprod
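
# Sketch of the expected effect (illustrative, not executed here): after the
# fix, the cumulative alpha at the last timestep is exactly zero, i.e. the
# terminal SNR is zero as in https://arxiv.org/abs/2305.08891:
#
#   fix_noise_scheduler_betas_for_zero_terminal_snr(noise_scheduler)
#   assert noise_scheduler.alphas_cumprod[-1] == 0.0  # terminal SNR is now zero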


def apply_snr_weight(loss, timesteps, noise_scheduler, gamma, v_prediction=False):
    snr = torch.stack([noise_scheduler.all_snr[t] for t in timesteps])
    min_snr_gamma = torch.minimum(snr, torch.full_like(snr, gamma))
    if v_prediction:
        snr_weight = torch.div(min_snr_gamma, snr + 1).float().to(loss.device)
    else:
        snr_weight = torch.div(min_snr_gamma, snr).float().to(loss.device)
    loss = loss * snr_weight
    return loss
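
# Usage sketch inside a training step (illustrative; `model_pred` and `target`
# are placeholders for the network output and its target):
#
#   loss = torch.nn.functional.mse_loss(model_pred, target, reduction="none")
#   loss = loss.mean([1, 2, 3])  # per-sample loss, shape (batch_size,)
#   loss = apply_snr_weight(loss, timesteps, noise_scheduler, gamma=5.0)
#   loss = loss.mean()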


def scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler):
    scale = get_snr_scale(timesteps, noise_scheduler)
    loss = loss * scale
    return loss


def get_snr_scale(timesteps, noise_scheduler):
    snr_t = torch.stack([noise_scheduler.all_snr[t] for t in timesteps])  # batch_size
    snr_t = torch.minimum(snr_t, torch.ones_like(snr_t) * 1000)  # if timestep is 0, snr_t is inf, so limit it to 1000
    scale = snr_t / (snr_t + 1)
    # # show debug info
    # logger.info(f"timesteps: {timesteps}, snr_t: {snr_t}, scale: {scale}")
    return scale


def add_v_prediction_like_loss(loss, timesteps, noise_scheduler, v_pred_like_loss):
    scale = get_snr_scale(timesteps, noise_scheduler)
    # logger.info(f"add v-prediction like loss: {v_pred_like_loss}, scale: {scale}, loss: {loss}, time: {timesteps}")
    loss = loss + loss / scale * v_pred_like_loss
    return loss
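
# Usage sketch (illustrative; the multiplier value is a placeholder). Both
# helpers take a per-sample loss of shape (batch_size,) and a scheduler
# prepared by prepare_scheduler_for_custom_training. The scale is SNR/(SNR+1),
# so add_v_prediction_like_loss adds a term weighted toward low-SNR
# (high-noise) timesteps:
#
#   loss = scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler)
#   loss = add_v_prediction_like_loss(loss, timesteps, noise_scheduler, v_pred_like_loss=0.5)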


def apply_debiased_estimation(loss, timesteps, noise_scheduler):
    snr_t = torch.stack([noise_scheduler.all_snr[t] for t in timesteps])  # batch_size
    snr_t = torch.minimum(snr_t, torch.ones_like(snr_t) * 1000)  # if timestep is 0, snr_t is inf, so limit it to 1000
    weight = 1 / torch.sqrt(snr_t)
    loss = weight * loss
    return loss
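
# Usage sketch (illustrative): debiased estimation weights the per-sample loss
# by 1/sqrt(SNR(t)), so it plugs into the same spot as min-SNR weighting:
#
#   loss = loss.mean([1, 2, 3])  # (batch_size,)
#   loss = apply_debiased_estimation(loss, timesteps, noise_scheduler)
#   loss = loss.mean()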


# TODO this overlaps with train_util; consolidate into one of the two


def add_custom_train_arguments(parser: argparse.ArgumentParser, support_weighted_captions: bool = True):
    parser.add_argument(
        "--min_snr_gamma",
        type=float,
        default=None,
        help="gamma for reducing the weight of high-loss timesteps. Lower numbers have a stronger effect. 5 is recommended by the paper.",
    )
    parser.add_argument(
        "--scale_v_pred_loss_like_noise_pred",
        action="store_true",
        help="scale v-prediction loss like noise prediction loss",
    )
    parser.add_argument(
        "--v_pred_like_loss",
        type=float,
        default=None,
        help="add v-prediction like loss multiplied by this value",
    )
    parser.add_argument(
        "--debiased_estimation_loss",
        action="store_true",
        help="debiased estimation loss",
    )
    if support_weighted_captions:
        parser.add_argument(
            "--weighted_captions",
            action="store_true",
            default=False,
            help="Enable weighted captions in the standard style, e.g. [token], (token), (token:1.3). Do not put commas inside parentheses, or shuffle/dropout may break the weighting.",
        )
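
# Usage sketch (illustrative): these flags attach to any training script's
# argument parser; the option names below are this module's actual names:
#
#   parser = argparse.ArgumentParser()
#   add_custom_train_arguments(parser)
#   args = parser.parse_args(["--min_snr_gamma", "5", "--weighted_captions"])
#   assert args.min_snr_gamma == 5.0 and args.weighted_captions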


re_attention = re.compile(
    r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""",
    re.X,
)


def parse_prompt_attention(text):
    r"""
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text
    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge runs of identical weights
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res


def get_prompts_with_weights(tokenizer, prompt: List[str], max_length: int):
    r"""
    Tokenize a list of prompts and return its tokens with weights of each token.

    No padding, starting or ending token is included.
    """
    tokens = []
    weights = []
    truncated = False
    for text in prompt:
        texts_and_weights = parse_prompt_attention(text)
        text_token = []
        text_weight = []
        for word, weight in texts_and_weights:
            # tokenize and discard the starting and the ending token
            token = tokenizer(word).input_ids[1:-1]
            text_token += token
            # copy the weight by length of token
            text_weight += [weight] * len(token)
            # stop if the text is too long (longer than truncation limit)
            if len(text_token) > max_length:
                truncated = True
                break
        # truncate
        if len(text_token) > max_length:
            truncated = True
            text_token = text_token[:max_length]
            text_weight = text_weight[:max_length]
        tokens.append(text_token)
        weights.append(text_weight)
    if truncated:
        logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
    return tokens, weights


def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, no_boseos_middle=True, chunk_length=77):
    r"""
    Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
    """
    max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
    weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
    for i in range(len(tokens)):
        tokens[i] = [bos] + tokens[i] + [eos] * (max_length - 1 - len(tokens[i]))
        if no_boseos_middle:
            weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
        else:
            w = []
            if len(weights[i]) == 0:
                w = [1.0] * weights_length
            else:
                for j in range(max_embeddings_multiples):
                    w.append(1.0)  # weight for starting token in this chunk
                    w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
                    w.append(1.0)  # weight for ending token in this chunk
                w += [1.0] * (weights_length - len(w))
            weights[i] = w[:]

    return tokens, weights
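
# Chunk-layout sketch (illustrative): with chunk_length=77 each chunk carries
# 75 content tokens, so a long prompt is padded to max_length = 75 * k + 2,
# e.g. for k=2:
#
#   tokens, weights = get_prompts_with_weights(tokenizer, ["a (long:1.2) prompt"], 75 * 2)
#   tokens, weights = pad_tokens_and_weights(
#       tokens, weights, 75 * 2 + 2, tokenizer.bos_token_id, tokenizer.eos_token_id
#   )
#   # tokens[0] == [bos] + ids + [eos, eos, ...], padded to length 152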


def get_unweighted_text_embeddings(
    tokenizer,
    text_encoder,
    text_input: torch.Tensor,
    chunk_length: int,
    clip_skip: int,
    eos: int,
    pad: int,
    no_boseos_middle: Optional[bool] = True,
):
    """
    When the length of tokens exceeds the capacity of the text encoder,
    the input is split into chunks and sent to the text encoder individually.
    """
    max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
    if max_embeddings_multiples > 1:
        text_embeddings = []
        for i in range(max_embeddings_multiples):
            # extract the i-th chunk
            text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()

            # cover the head and the tail by the starting and the ending tokens
            text_input_chunk[:, 0] = text_input[0, 0]
            if pad == eos:  # v1
                text_input_chunk[:, -1] = text_input[0, -1]
            else:  # v2
                for j in range(len(text_input_chunk)):
                    if text_input_chunk[j, -1] != eos and text_input_chunk[j, -1] != pad:  # the chunk ends with a normal token
                        text_input_chunk[j, -1] = eos
                    if text_input_chunk[j, 1] == pad:  # the chunk is only BOS followed by PAD
                        text_input_chunk[j, 1] = eos

            if clip_skip is None or clip_skip == 1:
                text_embedding = text_encoder(text_input_chunk)[0]
            else:
                enc_out = text_encoder(text_input_chunk, output_hidden_states=True, return_dict=True)
                text_embedding = enc_out["hidden_states"][-clip_skip]
                text_embedding = text_encoder.text_model.final_layer_norm(text_embedding)

            if no_boseos_middle:
                if i == 0:
                    # discard the ending token
                    text_embedding = text_embedding[:, :-1]
                elif i == max_embeddings_multiples - 1:
                    # discard the starting token
                    text_embedding = text_embedding[:, 1:]
                else:
                    # discard both starting and ending tokens
                    text_embedding = text_embedding[:, 1:-1]

            text_embeddings.append(text_embedding)
        text_embeddings = torch.concat(text_embeddings, axis=1)
    else:
        if clip_skip is None or clip_skip == 1:
            text_embeddings = text_encoder(text_input)[0]
        else:
            enc_out = text_encoder(text_input, output_hidden_states=True, return_dict=True)
            text_embeddings = enc_out["hidden_states"][-clip_skip]
            text_embeddings = text_encoder.text_model.final_layer_norm(text_embeddings)
    return text_embeddings


def get_weighted_text_embeddings(
    tokenizer,
    text_encoder,
    prompt: Union[str, List[str]],
    device,
    max_embeddings_multiples: Optional[int] = 3,
    no_boseos_middle: Optional[bool] = False,
    clip_skip=None,
):
    r"""
    Prompts can be assigned with local weights using brackets. For example,
    prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
    and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.

    Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.

    Args:
        prompt (`str` or `List[str]`):
            The prompt or prompts to guide the image generation.
        max_embeddings_multiples (`int`, *optional*, defaults to `3`):
            The max multiple length of prompt embeddings compared to the max output length of text encoder.
        no_boseos_middle (`bool`, *optional*, defaults to `False`):
            When the text tokens span multiple chunks of the text encoder's capacity, whether to keep the
            starting and ending tokens of each middle chunk.
    """
    max_length = (tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
    if isinstance(prompt, str):
        prompt = [prompt]

    prompt_tokens, prompt_weights = get_prompts_with_weights(tokenizer, prompt, max_length - 2)

    # round up the longest length of tokens to a multiple of (model_max_length - 2)
    max_length = max([len(token) for token in prompt_tokens])

    max_embeddings_multiples = min(
        max_embeddings_multiples,
        (max_length - 1) // (tokenizer.model_max_length - 2) + 1,
    )
    max_embeddings_multiples = max(1, max_embeddings_multiples)
    max_length = (tokenizer.model_max_length - 2) * max_embeddings_multiples + 2

    # pad the length of tokens and weights
    bos = tokenizer.bos_token_id
    eos = tokenizer.eos_token_id
    pad = tokenizer.pad_token_id
    prompt_tokens, prompt_weights = pad_tokens_and_weights(
        prompt_tokens,
        prompt_weights,
        max_length,
        bos,
        eos,
        no_boseos_middle=no_boseos_middle,
        chunk_length=tokenizer.model_max_length,
    )
    prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=device)

    # get the embeddings
    text_embeddings = get_unweighted_text_embeddings(
        tokenizer,
        text_encoder,
        prompt_tokens,
        tokenizer.model_max_length,
        clip_skip,
        eos,
        pad,
        no_boseos_middle=no_boseos_middle,
    )
    prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=device)

    # assign weights to the prompts and normalize in the sense of mean
    previous_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
    text_embeddings = text_embeddings * prompt_weights.unsqueeze(-1)
    current_mean = text_embeddings.float().mean(axis=[-2, -1]).to(text_embeddings.dtype)
    text_embeddings = text_embeddings * (previous_mean / current_mean).unsqueeze(-1).unsqueeze(-1)

    return text_embeddings
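
# Usage sketch (illustrative; assumes a transformers CLIPTokenizer/CLIPTextModel
# pair as in Stable Diffusion v1, with the model id as a placeholder):
#
#   from transformers import CLIPTokenizer, CLIPTextModel
#   tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to("cuda")
#   embeds = get_weighted_text_embeddings(tokenizer, text_encoder, "a (very beautiful:1.2) masterpiece", "cuda")
#   # embeds.shape == (1, 77, 768) for a prompt that fits in one chunk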


# https://wandb.ai/johnowhitaker/multires_noise/reports/Multi-Resolution-Noise-for-Diffusion-Model-Training--VmlldzozNjYyOTU2
def pyramid_noise_like(noise, device, iterations=6, discount=0.4):
    b, c, w, h = noise.shape  # EDIT: w and h get over-written, rename for a different variant!
    u = torch.nn.Upsample(size=(w, h), mode="bilinear").to(device)
    for i in range(iterations):
        r = random.random() * 2 + 2  # Rather than always going 2x,
        wn, hn = max(1, int(w / (r**i))), max(1, int(h / (r**i)))
        noise += u(torch.randn(b, c, wn, hn).to(device)) * discount**i
        if wn == 1 or hn == 1:
            break  # Lowest resolution is 1x1
    return noise / noise.std()  # Scaled back to roughly unit variance
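
# Usage sketch (illustrative): replaces the plain Gaussian noise in a training
# step with multi-resolution noise:
#
#   noise = torch.randn_like(latents)
#   noise = pyramid_noise_like(noise, latents.device, iterations=6, discount=0.4)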


# https://www.crosslabs.org//blog/diffusion-with-offset-noise
def apply_noise_offset(latents, noise, noise_offset, adaptive_noise_scale):
    if noise_offset is None:
        return noise
    if adaptive_noise_scale is not None:
        # latent shape: (batch_size, channels, height, width)
        # abs mean value for each channel
        latent_mean = torch.abs(latents.mean(dim=(2, 3), keepdim=True))

        # multiply adaptive noise scale to the mean value and add it to the noise offset
        noise_offset = noise_offset + adaptive_noise_scale * latent_mean
        noise_offset = torch.clamp(noise_offset, 0.0, None)  # in case the adaptive noise scale is negative

    noise = noise + noise_offset * torch.randn((latents.shape[0], latents.shape[1], 1, 1), device=latents.device)
    return noise
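
# Usage sketch (illustrative; the offset value is a placeholder, e.g. 0.1 as in
# the linked post; adaptive_noise_scale may simply be None):
#
#   noise = torch.randn_like(latents)
#   noise = apply_noise_offset(latents, noise, noise_offset=0.1, adaptive_noise_scale=None)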


def apply_masked_loss(loss, batch):
    # mask image is -1 to 1. we need to convert it to 0 to 1
    mask_image = batch["conditioning_images"].to(dtype=loss.dtype)[:, 0].unsqueeze(1)  # use R channel

    # resize to the same size as the loss
    mask_image = torch.nn.functional.interpolate(mask_image, size=loss.shape[2:], mode="area")
    mask_image = mask_image / 2 + 0.5
    loss = loss * mask_image
    return loss
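
# Usage sketch (illustrative): `batch["conditioning_images"]` is assumed to
# hold mask images in [-1, 1]; the loss must still be unreduced, shape
# (batch_size, c, h, w), when the mask is applied:
#
#   loss = torch.nn.functional.mse_loss(model_pred, target, reduction="none")
#   loss = apply_masked_loss(loss, batch)
#   loss = loss.mean([1, 2, 3]).mean()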


"""
##########################################
# Perlin Noise
def rand_perlin_2d(device, shape, res, fade=lambda t: 6 * t**5 - 15 * t**4 + 10 * t**3):
    delta = (res[0] / shape[0], res[1] / shape[1])
    d = (shape[0] // res[0], shape[1] // res[1])

    grid = (
        torch.stack(
            torch.meshgrid(torch.arange(0, res[0], delta[0], device=device), torch.arange(0, res[1], delta[1], device=device)),
            dim=-1,
        )
        % 1
    )
    angles = 2 * torch.pi * torch.rand(res[0] + 1, res[1] + 1, device=device)
    gradients = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1)

    tile_grads = (
        lambda slice1, slice2: gradients[slice1[0] : slice1[1], slice2[0] : slice2[1]]
        .repeat_interleave(d[0], 0)
        .repeat_interleave(d[1], 1)
    )
    dot = lambda grad, shift: (
        torch.stack((grid[: shape[0], : shape[1], 0] + shift[0], grid[: shape[0], : shape[1], 1] + shift[1]), dim=-1)
        * grad[: shape[0], : shape[1]]
    ).sum(dim=-1)

    n00 = dot(tile_grads([0, -1], [0, -1]), [0, 0])
    n10 = dot(tile_grads([1, None], [0, -1]), [-1, 0])
    n01 = dot(tile_grads([0, -1], [1, None]), [0, -1])
    n11 = dot(tile_grads([1, None], [1, None]), [-1, -1])
    t = fade(grid[: shape[0], : shape[1]])
    return 1.414 * torch.lerp(torch.lerp(n00, n10, t[..., 0]), torch.lerp(n01, n11, t[..., 0]), t[..., 1])


def rand_perlin_2d_octaves(device, shape, res, octaves=1, persistence=0.5):
    noise = torch.zeros(shape, device=device)
    frequency = 1
    amplitude = 1
    for _ in range(octaves):
        noise += amplitude * rand_perlin_2d(device, shape, (frequency * res[0], frequency * res[1]))
        frequency *= 2
        amplitude *= persistence
    return noise


def perlin_noise(noise, device, octaves):
    _, c, w, h = noise.shape
    perlin = lambda: rand_perlin_2d_octaves(device, (w, h), (4, 4), octaves)
    noise_perlin = []
    for _ in range(c):
        noise_perlin.append(perlin())
    noise_perlin = torch.stack(noise_perlin).unsqueeze(0)  # (1, c, w, h)
    noise += noise_perlin  # broadcast for each batch
    return noise / noise.std()  # Scaled back to roughly unit variance
"""