ACCC1380 committed
Commit
254087d
1 Parent(s): a82af9a

Upload lora-scripts/sd-scripts/library/slicing_vae.py with huggingface_hub

lora-scripts/sd-scripts/library/slicing_vae.py ADDED
@@ -0,0 +1,682 @@
# Modified from Diffusers to reduce VRAM usage

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch
import torch.nn as nn

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
from diffusers.models.vae import DecoderOutput, DiagonalGaussianDistribution
from diffusers.models.autoencoder_kl import AutoencoderKLOutput
from .utils import setup_logging

setup_logging()
import logging

logger = logging.getLogger(__name__)
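
# Note (added summary, not in the original upload): the classes below reduce VRAM usage by
# keeping activations on the CPU and slicing them along the height (H) axis; each slice is
# moved to the GPU only for the convolution/activation that needs it, then moved back.
# Slices overlap by one row so that the implicit padding of the 3x3 convolutions does not
# create seams, and the overlap is trimmed again when the slices are re-concatenated
# (see slice_h / cat_h below). GroupNorm is not sliced, because slicing changes its
# statistics; it is run unsliced on the CPU instead.
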
def slice_h(x, num_slices):
    # slice H with pad 1 on both sides: to eliminate the side effect of the padding of conv2d
    # works with either NCHW or NHWC
    size = (x.shape[2] + num_slices - 1) // num_slices
    sliced = []
    for i in range(num_slices):
        if i == 0:
            sliced.append(x[:, :, : size + 1, :])
        else:
            end = size * (i + 1) + 1
            if x.shape[2] - end < 3:  # if the last slice is too thin, conv2d cannot be applied, so use the rest of the tensor
                end = x.shape[2]
            sliced.append(x[:, :, size * i - 1 : end, :])
            if end >= x.shape[2]:
                break
    return sliced


def cat_h(sliced):
    # concatenate the slices, dropping the padding rows
    cat = []
    for i, x in enumerate(sliced):
        if i == 0:
            cat.append(x[:, :, :-1, :])
        elif i == len(sliced) - 1:
            cat.append(x[:, :, 1:, :])
        else:
            cat.append(x[:, :, 1:-1, :])
        del x
    x = torch.cat(cat, dim=2)
    return x

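
# Worked example for slice_h / cat_h above (illustrative note, not part of the original file):
# with x.shape[2] == 16 and num_slices == 4, size = (16 + 3) // 4 = 4 and slice_h returns
# slices of height 5, 6, 6 and 5 (rows 0:5, 3:9, 7:13, 11:16), i.e. every interior slice
# carries one extra row of context on each side. After a padded 3x3 convolution, cat_h trims
# those overlap rows again (4 + 4 + 4 + 4 = 16 rows), so the result matches applying the
# convolution to the full, unsliced tensor.
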
def resblock_forward(_self, num_slices, input_tensor, temb, **kwargs):
    assert _self.upsample is None and _self.downsample is None
    assert _self.norm1.num_groups == _self.norm2.num_groups
    assert temb is None

    # make sure norms are on cpu
    org_device = input_tensor.device
    cpu_device = torch.device("cpu")
    _self.norm1.to(cpu_device)
    _self.norm2.to(cpu_device)

    # workaround: GroupNorm does not run in fp16 on the CPU
    org_dtype = input_tensor.dtype
    if org_dtype == torch.float16:
        _self.norm1.to(torch.float32)
        _self.norm2.to(torch.float32)

    # move all tensors to the CPU
    input_tensor = input_tensor.to(cpu_device)
    hidden_states = input_tensor

    # this seems to give different results somehow...
    # def sliced_norm1(norm, x):
    #     num_div = 4 if up_block_idx <= 2 else x.shape[1] // norm.num_groups
    #     sliced_tensor = torch.chunk(x, num_div, dim=1)
    #     sliced_weight = torch.chunk(norm.weight, num_div, dim=0)
    #     sliced_bias = torch.chunk(norm.bias, num_div, dim=0)
    #     logger.info(sliced_tensor[0].shape, num_div, sliced_weight[0].shape, sliced_bias[0].shape)
    #     normed_tensor = []
    #     for i in range(num_div):
    #         n = torch.group_norm(sliced_tensor[i], norm.num_groups, sliced_weight[i], sliced_bias[i], norm.eps)
    #         normed_tensor.append(n)
    #         del n
    #     x = torch.cat(normed_tensor, dim=1)
    #     return num_div, x

    # splitting the norm changes the result, so only this part is not sliced. Running it on the
    # GPU would run out of VRAM, so it is computed on the CPU; fortunately it is not too slow there.
    if org_dtype == torch.float16:
        hidden_states = hidden_states.to(torch.float32)
    hidden_states = _self.norm1(hidden_states)  # run on cpu
    if org_dtype == torch.float16:
        hidden_states = hidden_states.to(torch.float16)

    sliced = slice_h(hidden_states, num_slices)
    del hidden_states

    for i in range(len(sliced)):
        x = sliced[i]
        sliced[i] = None

        # move only the part being computed to the GPU (same pattern below)
        x = x.to(org_device)
        x = _self.nonlinearity(x)
        x = _self.conv1(x)
        x = x.to(cpu_device)
        sliced[i] = x
        del x

    hidden_states = cat_h(sliced)
    del sliced

    if org_dtype == torch.float16:
        hidden_states = hidden_states.to(torch.float32)
    hidden_states = _self.norm2(hidden_states)  # run on cpu
    if org_dtype == torch.float16:
        hidden_states = hidden_states.to(torch.float16)

    sliced = slice_h(hidden_states, num_slices)
    del hidden_states

    for i in range(len(sliced)):
        x = sliced[i]
        sliced[i] = None

        x = x.to(org_device)
        x = _self.nonlinearity(x)
        x = _self.dropout(x)
        x = _self.conv2(x)
        x = x.to(cpu_device)
        sliced[i] = x
        del x

    hidden_states = cat_h(sliced)
    del sliced

    # make shortcut
    if _self.conv_shortcut is not None:
        sliced = list(torch.chunk(input_tensor, num_slices, dim=2))  # conv_shortcut has no padding, so a plain slice works
        del input_tensor

        for i in range(len(sliced)):
            x = sliced[i]
            sliced[i] = None

            x = x.to(org_device)
            x = _self.conv_shortcut(x)
            x = x.to(cpu_device)
            sliced[i] = x
            del x

        input_tensor = torch.cat(sliced, dim=2)
        del sliced

    output_tensor = (input_tensor + hidden_states) / _self.output_scale_factor

    output_tensor = output_tensor.to(org_device)  # the next layer computes on the GPU
    return output_tensor

class SlicingEncoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
        num_slices=2,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )
        self.mid_block.attentions[0].set_use_memory_efficient_attention_xformers(True)  # use Diffusers' xformers attention for now

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        # replace forward of ResBlocks
        def wrapper(func, module, num_slices):
            def forward(*args, **kwargs):
                return func(module, num_slices, *args, **kwargs)

            return forward

        self.num_slices = num_slices
        div = num_slices / (2 ** (len(self.down_blocks) - 1))  # deeper layers do not need to be sliced as finely, so reduce the divisor accordingly
        # logger.info(f"initial divisor: {div}")
        if div >= 2:
            div = int(div)
            for resnet in self.mid_block.resnets:
                resnet.forward = wrapper(resblock_forward, resnet, div)
            # midblock doesn't have downsample

        for i, down_block in enumerate(self.down_blocks[::-1]):
            if div >= 2:
                div = int(div)
                # logger.info(f"down block: {i} divisor: {div}")
                for resnet in down_block.resnets:
                    resnet.forward = wrapper(resblock_forward, resnet, div)
                if down_block.downsamplers is not None:
                    # logger.info("has downsample")
                    for downsample in down_block.downsamplers:
                        downsample.forward = wrapper(self.downsample_forward, downsample, div * 2)
            div *= 2

    def forward(self, x):
        sample = x
        del x

        org_device = sample.device
        cpu_device = torch.device("cpu")

        # sample = self.conv_in(sample)
        sample = sample.to(cpu_device)
        sliced = slice_h(sample, self.num_slices)
        del sample

        for i in range(len(sliced)):
            x = sliced[i]
            sliced[i] = None

            x = x.to(org_device)
            x = self.conv_in(x)
            x = x.to(cpu_device)
            sliced[i] = x
            del x

        sample = cat_h(sliced)
        del sliced

        sample = sample.to(org_device)

        # down
        for down_block in self.down_blocks:
            sample = down_block(sample)

        # middle
        sample = self.mid_block(sample)

        # post-process
        # this part could also be made memory-efficient, but it probably does not use much memory, so it is left as is
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample

    def downsample_forward(self, _self, num_slices, hidden_states):
        assert hidden_states.shape[1] == _self.channels
        assert _self.use_conv and _self.padding == 0
        logger.info(f"downsample forward {num_slices} {hidden_states.shape}")

        org_device = hidden_states.device
        cpu_device = torch.device("cpu")

        hidden_states = hidden_states.to(cpu_device)
        pad = (0, 1, 0, 1)
        hidden_states = torch.nn.functional.pad(hidden_states, pad, mode="constant", value=0)

        # slice with an even size because of stride 2
        # slice with pad 1 on both sides: to eliminate the side effect of the padding of conv2d
        size = (hidden_states.shape[2] + num_slices - 1) // num_slices
        size = size + 1 if size % 2 == 1 else size

        sliced = []
        for i in range(num_slices):
            if i == 0:
                sliced.append(hidden_states[:, :, : size + 1, :])
            else:
                end = size * (i + 1) + 1
                if hidden_states.shape[2] - end < 4:  # if the last slice is too small, use the rest of the tensor
                    end = hidden_states.shape[2]
                sliced.append(hidden_states[:, :, size * i - 1 : end, :])
                if end >= hidden_states.shape[2]:
                    break
        del hidden_states

        for i in range(len(sliced)):
            x = sliced[i]
            sliced[i] = None

            x = x.to(org_device)
            x = _self.conv(x)
            x = x.to(cpu_device)

            # only this part looks different from the rest; blame Copilot
            if i == 0:
                hidden_states = x
            else:
                hidden_states = torch.cat([hidden_states, x], dim=2)

        hidden_states = hidden_states.to(org_device)
        # logger.info(f"downsample forward done {hidden_states.shape}")
        return hidden_states

class SlicingDecoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        num_slices=2,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )
        self.mid_block.attentions[0].set_use_memory_efficient_attention_xformers(True)  # use Diffusers' xformers attention for now

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        # replace forward of ResBlocks
        def wrapper(func, module, num_slices):
            def forward(*args, **kwargs):
                return func(module, num_slices, *args, **kwargs)

            return forward

        self.num_slices = num_slices
        div = num_slices / (2 ** (len(self.up_blocks) - 1))
        logger.info(f"initial divisor: {div}")
        if div >= 2:
            div = int(div)
            for resnet in self.mid_block.resnets:
                resnet.forward = wrapper(resblock_forward, resnet, div)
            # midblock doesn't have upsample

        for i, up_block in enumerate(self.up_blocks):
            if div >= 2:
                div = int(div)
                # logger.info(f"up block: {i} divisor: {div}")
                for resnet in up_block.resnets:
                    resnet.forward = wrapper(resblock_forward, resnet, div)
                if up_block.upsamplers is not None:
                    # logger.info("has upsample")
                    for upsample in up_block.upsamplers:
                        upsample.forward = wrapper(self.upsample_forward, upsample, div * 2)
            div *= 2

    def forward(self, z):
        sample = z
        del z
        sample = self.conv_in(sample)

        # middle
        sample = self.mid_block(sample)

        # up
        for i, up_block in enumerate(self.up_blocks):
            sample = up_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)

        # conv_out with slicing because of VRAM usage
        # (conv_out uses a lot of VRAM, so it is applied slice by slice)
        org_device = sample.device
        cpu_device = torch.device("cpu")
        sample = sample.to(cpu_device)

        sliced = slice_h(sample, self.num_slices)
        del sample
        for i in range(len(sliced)):
            x = sliced[i]
            sliced[i] = None

            x = x.to(org_device)
            x = self.conv_out(x)
            x = x.to(cpu_device)
            sliced[i] = x
        sample = cat_h(sliced)
        del sliced

        sample = sample.to(org_device)
        return sample

    def upsample_forward(self, _self, num_slices, hidden_states, output_size=None):
        assert hidden_states.shape[1] == _self.channels
        assert _self.use_conv_transpose == False and _self.use_conv

        org_dtype = hidden_states.dtype
        org_device = hidden_states.device
        cpu_device = torch.device("cpu")

        hidden_states = hidden_states.to(cpu_device)
        sliced = slice_h(hidden_states, num_slices)
        del hidden_states

        for i in range(len(sliced)):
            x = sliced[i]
            sliced[i] = None

            x = x.to(org_device)

            # Cast to float32 as the 'upsample_nearest2d_out_frame' op does not support bfloat16
            # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch
            # https://github.com/pytorch/pytorch/issues/86679
            # (one hopes PyTorch 2 fixes this...)
            if org_dtype == torch.bfloat16:
                x = x.to(torch.float32)

            x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")

            if org_dtype == torch.bfloat16:
                x = x.to(org_dtype)

            x = _self.conv(x)

            # the overlap to trim is now 2 rows because the slices have been upsampled
            # (use len(sliced) - 1 so the last slice is trimmed correctly even if slice_h returned fewer slices)
            if i == 0:
                x = x[:, :, :-2, :]
            elif i == len(sliced) - 1:
                x = x[:, :, 2:, :]
            else:
                x = x[:, :, 2:-2, :]

            x = x.to(cpu_device)
            sliced[i] = x
            del x

        hidden_states = torch.cat(sliced, dim=2)
        # logger.info(f"us hidden_states {hidden_states.shape}")
        del sliced

        hidden_states = hidden_states.to(org_device)
        return hidden_states

class SlicingAutoencoderKL(ModelMixin, ConfigMixin):
    r"""Variational Autoencoder (VAE) model with KL loss from the paper Auto-Encoding Variational Bayes by Diederik P. Kingma
    and Max Welling.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all models (such as downloading or saving).

    Parameters:
        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        latent_channels (`int`, *optional*, defaults to `4`): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): TODO
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        num_slices: int = 16,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = SlicingEncoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
            num_slices=num_slices,
        )

        # pass init params to Decoder
        self.decoder = SlicingDecoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
            num_slices=num_slices,
        )

        self.quant_conv = torch.nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = torch.nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False

    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    # note: this is slicing along the batch dimension, which is confusingly different from the H slicing above
    def enable_slicing(self):
        r"""
        Enable sliced VAE decoding.

        When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
        steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.use_slicing = True

    def disable_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_slicing` was previously invoked, this method will go back to computing
        decoding in one step.
        """
        self.use_slicing = False

    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        r"""
        Args:
            sample (`torch.FloatTensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
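

# ---------------------------------------------------------------------------
# Usage sketch (added note, not part of the original upload). Assuming a checkpoint whose
# encoder/decoder parameter names match this module's layout (the standard Stable Diffusion
# VAE with four down/up blocks), one could build a sliced VAE roughly as below. The
# checkpoint id, the block/channel configuration and xformers availability (required by the
# encoder/decoder constructors) are assumptions, not something this file guarantees:
#
#     from diffusers import AutoencoderKL
#
#     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")  # example checkpoint
#     sliced_vae = SlicingAutoencoderKL(
#         down_block_types=("DownEncoderBlock2D",) * 4,
#         up_block_types=("UpDecoderBlock2D",) * 4,
#         block_out_channels=(128, 256, 512, 512),
#         layers_per_block=2,
#         latent_channels=4,
#         sample_size=512,
#         num_slices=16,
#     )
#     sliced_vae.load_state_dict(vae.state_dict())  # assumes matching parameter names/shapes
#     sliced_vae.to("cuda").eval()
#
#     image = torch.randn(1, 3, 1024, 1024, device="cuda")
#     with torch.no_grad():
#         latents = sliced_vae.encode(image).latent_dist.sample()
#         decoded = sliced_vae.decode(latents).sample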