ACCC1380 committed
Commit
008ad02
1 Parent(s): f50857f

Upload lora-scripts/sd-scripts/library/utils.py with huggingface_hub

lora-scripts/sd-scripts/library/utils.py ADDED
@@ -0,0 +1,266 @@
import logging
import sys
import threading
import torch
from torchvision import transforms
from typing import *
from diffusers import EulerAncestralDiscreteScheduler
import diffusers.schedulers.scheduling_euler_ancestral_discrete
from diffusers.schedulers.scheduling_euler_ancestral_discrete import EulerAncestralDiscreteSchedulerOutput


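# Fire-and-forget helper: run f(*args, **kwargs) on a new thread and return immediately.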
def fire_in_thread(f, *args, **kwargs):
    threading.Thread(target=f, args=args, kwargs=kwargs).start()


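# Register the console logging CLI options (--console_log_level, --console_log_file,
# --console_log_simple); they are consumed by setup_logging() below.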
def add_logging_arguments(parser):
    parser.add_argument(
        "--console_log_level",
        type=str,
        default=None,
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Set the logging level, default is INFO / ログレベルを設定する。デフォルトはINFO",
    )
    parser.add_argument(
        "--console_log_file",
        type=str,
        default=None,
        help="Log to a file instead of stderr / 標準エラー出力ではなくファイルにログを出力する",
    )
    parser.add_argument("--console_log_simple", action="store_true", help="Simple log output / シンプルなログ出力")


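# Configure the root logger once (returns early if handlers already exist, unless reset=True).
# Writes to --console_log_file when given; otherwise tries rich.logging.RichHandler
# (unless --console_log_simple is set) and falls back to a plain StreamHandler.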
def setup_logging(args=None, log_level=None, reset=False):
    if logging.root.handlers:
        if reset:
            # remove all handlers
            for handler in logging.root.handlers[:]:
                logging.root.removeHandler(handler)
        else:
            return

    # log_level can be set by the caller or by the args, the caller has priority. If not set, use INFO
    if log_level is None and args is not None:
        log_level = args.console_log_level
    if log_level is None:
        log_level = "INFO"
    log_level = getattr(logging, log_level)

    msg_init = None
    if args is not None and args.console_log_file:
        handler = logging.FileHandler(args.console_log_file, mode="w")
    else:
        handler = None
        if not args or not args.console_log_simple:
            try:
                from rich.logging import RichHandler
                from rich.console import Console

                handler = RichHandler(console=Console(stderr=True))
            except ImportError:
                # print("rich is not installed, using basic logging")
                msg_init = "rich is not installed, using basic logging"

    if handler is None:
        handler = logging.StreamHandler(sys.stdout)  # same as print
        handler.propagate = False

    formatter = logging.Formatter(
        fmt="%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    handler.setFormatter(formatter)
    logging.root.setLevel(log_level)
    logging.root.addHandler(handler)

    if msg_init is not None:
        logger = logging.getLogger(__name__)
        logger.info(msg_init)


# TODO make inf_utils.py


# region Gradual Latent hires fix


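# Parameter container for the Gradual Latent hires fix. The resizing schedule values
# (ratio, start_timesteps, every_n_steps, ratio_step) are only stored here for the
# calling generation code; this class itself implements the latent upscaling
# (interpolate) and the optional unsharp mask.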
class GradualLatent:
    def __init__(
        self,
        ratio,
        start_timesteps,
        every_n_steps,
        ratio_step,
        s_noise=1.0,
        gaussian_blur_ksize=None,
        gaussian_blur_sigma=0.5,
        gaussian_blur_strength=0.5,
        unsharp_target_x=True,
    ):
        self.ratio = ratio
        self.start_timesteps = start_timesteps
        self.every_n_steps = every_n_steps
        self.ratio_step = ratio_step
        self.s_noise = s_noise
        self.gaussian_blur_ksize = gaussian_blur_ksize
        self.gaussian_blur_sigma = gaussian_blur_sigma
        self.gaussian_blur_strength = gaussian_blur_strength
        self.unsharp_target_x = unsharp_target_x

    def __str__(self) -> str:
        return (
            f"GradualLatent(ratio={self.ratio}, start_timesteps={self.start_timesteps}, "
            + f"every_n_steps={self.every_n_steps}, ratio_step={self.ratio_step}, s_noise={self.s_noise}, "
            + f"gaussian_blur_ksize={self.gaussian_blur_ksize}, gaussian_blur_sigma={self.gaussian_blur_sigma}, gaussian_blur_strength={self.gaussian_blur_strength}, "
            + f"unsharp_target_x={self.unsharp_target_x})"
        )

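    # Unsharp mask: add back a scaled difference between x and a Gaussian-blurred copy
    # of x to sharpen it. Returns x unchanged when gaussian_blur_ksize is None.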
    def apply_unshark_mask(self, x: torch.Tensor):
        if self.gaussian_blur_ksize is None:
            return x
        blurred = transforms.functional.gaussian_blur(x, self.gaussian_blur_ksize, self.gaussian_blur_sigma)
        # mask = torch.sigmoid((x - blurred) * self.gaussian_blur_strength)
        mask = (x - blurred) * self.gaussian_blur_strength
        sharpened = x + mask
        return sharpened

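    # Bicubic resize of the latent to `resized_size`; bfloat16 inputs are upcast to
    # float32 for the interpolation and cast back to the original dtype afterwards.
    # An unsharp mask is applied after the resize when enabled.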
    def interpolate(self, x: torch.Tensor, resized_size, unsharp=True):
        org_dtype = x.dtype
        if org_dtype == torch.bfloat16:
            x = x.float()

        x = torch.nn.functional.interpolate(x, size=resized_size, mode="bicubic", align_corners=False).to(dtype=org_dtype)

        # apply unsharp mask / アンシャープマスクを適用する
        if unsharp and self.gaussian_blur_ksize:
            x = self.apply_unshark_mask(x)

        return x


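# EulerAncestralDiscreteScheduler variant with Gradual Latent support: once
# set_gradual_latent_params() has set a target size, step() upscales the latent to
# that size before adding the ancestral noise for the step.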
class EulerAncestralDiscreteSchedulerGL(EulerAncestralDiscreteScheduler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.resized_size = None
        self.gradual_latent = None

    def set_gradual_latent_params(self, size, gradual_latent: GradualLatent):
        self.resized_size = size
        self.gradual_latent = gradual_latent

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[EulerAncestralDiscreteSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from the learned diffusion model.
            timestep (`float`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            generator (`torch.Generator`, *optional*):
                A random number generator.
            return_dict (`bool`):
                Whether or not to return a
                [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or tuple.

        Returns:
            [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] or `tuple`:
                If return_dict is `True`,
                [`~schedulers.scheduling_euler_ancestral_discrete.EulerAncestralDiscreteSchedulerOutput`] is returned,
                otherwise a tuple is returned where the first element is the sample tensor.

        """

        if isinstance(timestep, int) or isinstance(timestep, torch.IntTensor) or isinstance(timestep, torch.LongTensor):
            raise ValueError(
                (
                    "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to"
                    " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass"
                    " one of the `scheduler.timesteps` as a timestep."
                ),
            )

        if not self.is_scale_input_called:
            # logger.warning(
            print(
                "The `scale_model_input` function should be called before `step` to ensure correct denoising. "
                "See `StableDiffusionPipeline` for a usage example."
            )

        if self.step_index is None:
            self._init_step_index(timestep)

        sigma = self.sigmas[self.step_index]

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            pred_original_sample = sample - sigma * model_output
        elif self.config.prediction_type == "v_prediction":
            # * c_out + input * c_skip
            pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")

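        # Ancestral split of the step: sigma_down is the deterministic part and sigma_up
        # is the std of the fresh noise added afterwards (sigma_down**2 + sigma_up**2 == sigma_to**2).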
        sigma_from = self.sigmas[self.step_index]
        sigma_to = self.sigmas[self.step_index + 1]
        sigma_up = (sigma_to**2 * (sigma_from**2 - sigma_to**2) / sigma_from**2) ** 0.5
        sigma_down = (sigma_to**2 - sigma_up**2) ** 0.5

        # 2. Convert to an ODE derivative
        derivative = (sample - pred_original_sample) / sigma

        dt = sigma_down - sigma

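        # Gradual Latent: when a target size has been set for this step, upscale the latent
        # (and draw the noise at the new resolution) before adding the ancestral noise.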
        device = model_output.device
        if self.resized_size is None:
            prev_sample = sample + derivative * dt

            noise = diffusers.schedulers.scheduling_euler_ancestral_discrete.randn_tensor(
                model_output.shape, dtype=model_output.dtype, device=device, generator=generator
            )
            s_noise = 1.0
        else:
            print("resized_size", self.resized_size, "model_output.shape", model_output.shape, "sample.shape", sample.shape)
            s_noise = self.gradual_latent.s_noise

            if self.gradual_latent.unsharp_target_x:
                prev_sample = sample + derivative * dt
                prev_sample = self.gradual_latent.interpolate(prev_sample, self.resized_size)
            else:
                sample = self.gradual_latent.interpolate(sample, self.resized_size)
                derivative = self.gradual_latent.interpolate(derivative, self.resized_size, unsharp=False)
                prev_sample = sample + derivative * dt

            noise = diffusers.schedulers.scheduling_euler_ancestral_discrete.randn_tensor(
                (model_output.shape[0], model_output.shape[1], self.resized_size[0], self.resized_size[1]),
                dtype=model_output.dtype,
                device=device,
                generator=generator,
            )

        prev_sample = prev_sample + noise * sigma_up * s_noise

        # upon completion increase step index by one
        self._step_index += 1

        if not return_dict:
            return (prev_sample,)

        return EulerAncestralDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


# endregion