sczhou committed
Commit 0334511 • 1 Parent(s): de59540

init hugging face.

Files changed (5)
  1. CodeFormer +1 -0
  2. README.md +3 -2
  3. app.py +239 -0
  4. packages.txt +3 -0
  5. requirements.txt +20 -0
CodeFormer ADDED
@@ -0,0 +1 @@
+ Subproject commit c5b4593074ba6214284d6acd5f1719b6c5d739af
README.md CHANGED
@@ -1,12 +1,13 @@
  ---
  title: CodeFormer
  emoji: 🦀
- colorFrom: pink
- colorTo: pink
+ colorFrom: blue
+ colorTo: green
  sdk: gradio
  sdk_version: 3.3
  app_file: app.py
  pinned: false
+ license: apache-2.0
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,239 @@
+ import sys
+ sys.path.append('CodeFormer')
+ import os
+ import cv2
+ import torch
+ import torch.nn.functional as F
+ import gradio as gr
+
+ from torchvision.transforms.functional import normalize
+
+ from basicsr.utils import imwrite, img2tensor, tensor2img
+ from basicsr.utils.download_util import load_file_from_url
+ from facelib.utils.face_restoration_helper import FaceRestoreHelper
+ from basicsr.archs.rrdbnet_arch import RRDBNet
+ from basicsr.utils.realesrgan_utils import RealESRGANer
+
+ from basicsr.utils.registry import ARCH_REGISTRY
+
+
+ os.system("pip freeze")
+
+ pretrain_model_url = {
+     'codeformer': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth',
+     'detection': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/detection_Resnet50_Final.pth',
+     'parsing': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/parsing_parsenet.pth',
+     'realesrgan': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/RealESRGAN_x2plus.pth'
+ }
+ # download weights
+ if not os.path.exists('CodeFormer/weights/CodeFormer/codeformer.pth'):
+     load_file_from_url(url=pretrain_model_url['codeformer'], model_dir='CodeFormer/weights/CodeFormer', progress=True, file_name=None)
+ if not os.path.exists('CodeFormer/weights/facelib/detection_Resnet50_Final.pth'):
+     load_file_from_url(url=pretrain_model_url['detection'], model_dir='CodeFormer/weights/facelib', progress=True, file_name=None)
+ if not os.path.exists('CodeFormer/weights/facelib/parsing_parsenet.pth'):
+     load_file_from_url(url=pretrain_model_url['parsing'], model_dir='CodeFormer/weights/facelib', progress=True, file_name=None)
+ if not os.path.exists('CodeFormer/weights/realesrgan/RealESRGAN_x2plus.pth'):
+     load_file_from_url(url=pretrain_model_url['realesrgan'], model_dir='CodeFormer/weights/realesrgan', progress=True, file_name=None)
+
+ # download images
+ torch.hub.download_url_to_file(
+     'https://replicate.com/api/models/sczhou/codeformer/files/fa3fe3d1-76b0-4ca8-ac0d-0a925cb0ff54/06.png',
+     '01.png')
+ torch.hub.download_url_to_file(
+     'https://replicate.com/api/models/sczhou/codeformer/files/a1daba8e-af14-4b00-86a4-69cec9619b53/04.jpg',
+     '02.jpg')
+ torch.hub.download_url_to_file(
+     'https://replicate.com/api/models/sczhou/codeformer/files/542d64f9-1712-4de7-85f7-3863009a7c3d/03.jpg',
+     '03.jpg')
+ torch.hub.download_url_to_file(
+     'https://replicate.com/api/models/sczhou/codeformer/files/a11098b0-a18a-4c02-a19a-9a7045d68426/010.jpg',
+     '04.jpg')
+ torch.hub.download_url_to_file(
+     'https://replicate.com/api/models/sczhou/codeformer/files/7cf19c2c-e0cf-4712-9af8-cf5bdbb8d0ee/012.jpg',
+     '05.jpg')
+
+ def imread(img_path):
+     img = cv2.imread(img_path)
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+     return img
+
+ # set enhancer with RealESRGAN
+ def set_realesrgan():
+     half = True if torch.cuda.is_available() else False
+     model = RRDBNet(
+         num_in_ch=3,
+         num_out_ch=3,
+         num_feat=64,
+         num_block=23,
+         num_grow_ch=32,
+         scale=2,
+     )
+     upsampler = RealESRGANer(
+         scale=2,
+         model_path="CodeFormer/weights/realesrgan/RealESRGAN_x2plus.pth",
+         model=model,
+         tile=400,
+         tile_pad=40,
+         pre_pad=0,
+         half=half,
+     )
+     return upsampler
+
+ upsampler = set_realesrgan()
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ codeformer_net = ARCH_REGISTRY.get("CodeFormer")(
+     dim_embd=512,
+     codebook_size=1024,
+     n_head=8,
+     n_layers=9,
+     connect_list=["32", "64", "128", "256"],
+ ).to(device)
+ ckpt_path = "CodeFormer/weights/CodeFormer/codeformer.pth"
+ checkpoint = torch.load(ckpt_path)["params_ema"]
+ codeformer_net.load_state_dict(checkpoint)
+ codeformer_net.eval()
+
+ os.makedirs('output', exist_ok=True)
+
+ def inference(image, background_enhance, face_upsample, upscale, codeformer_fidelity):
+     """Run a single prediction on the model"""
+     # take the default setting for the demo
+     has_aligned = False
+     only_center_face = False
+     draw_box = False
+     detection_model = "retinaface_resnet50"
+
+     face_helper = FaceRestoreHelper(
+         upscale,
+         face_size=512,
+         crop_ratio=(1, 1),
+         det_model=detection_model,
+         save_ext="png",
+         use_parse=True,
+         device=device,
+     )
+     bg_upsampler = upsampler if background_enhance else None
+     face_upsampler = upsampler if face_upsample else None
+
+     img = cv2.imread(str(image), cv2.IMREAD_COLOR)
+
+     if has_aligned:
+         # the input faces are already cropped and aligned
+         img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
+         face_helper.cropped_faces = [img]
+     else:
+         face_helper.read_image(img)
+         # get face landmarks for each face
+         num_det_faces = face_helper.get_face_landmarks_5(
+             only_center_face=only_center_face, resize=640, eye_dist_threshold=5
+         )
+         print(f"\tdetect {num_det_faces} faces")
+         # align and warp each face
+         face_helper.align_warp_face()
+
+     # face restoration for each cropped face
+     for idx, cropped_face in enumerate(face_helper.cropped_faces):
+         # prepare data
+         cropped_face_t = img2tensor(
+             cropped_face / 255.0, bgr2rgb=True, float32=True
+         )
+         normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
+         cropped_face_t = cropped_face_t.unsqueeze(0).to(device)
+
+         try:
+             with torch.no_grad():
+                 output = codeformer_net(
+                     cropped_face_t, w=codeformer_fidelity, adain=True
+                 )[0]
+                 restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
+             del output
+             torch.cuda.empty_cache()
+         except Exception as error:
+             print(f"\tFailed inference for CodeFormer: {error}")
+             restored_face = tensor2img(
+                 cropped_face_t, rgb2bgr=True, min_max=(-1, 1)
+             )
+
+         restored_face = restored_face.astype("uint8")
+         face_helper.add_restored_face(restored_face)
+
+     # paste_back
+     if not has_aligned:
+         # upsample the background
+         if bg_upsampler is not None:
+             # Now only support RealESRGAN for upsampling background
+             bg_img = bg_upsampler.enhance(img, outscale=upscale)[0]
+         else:
+             bg_img = None
+         face_helper.get_inverse_affine(None)
+         # paste each restored face to the input image
+         if face_upsample and face_upsampler is not None:
+             restored_img = face_helper.paste_faces_to_input_image(
+                 upsample_img=bg_img,
+                 draw_box=draw_box,
+                 face_upsampler=face_upsampler,
+             )
+         else:
+             restored_img = face_helper.paste_faces_to_input_image(
+                 upsample_img=bg_img, draw_box=draw_box
+             )
+
+     # save restored img
+     save_path = 'output/out.png'
+     imwrite(restored_img, str(save_path))
+
+     restored_img = cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB)
+     return restored_img
+
+
+ title = "CodeFormer: Robust Face Restoration and Enhancement Network"
+ description = r"""<center><img src='https://user-images.githubusercontent.com/14334509/189166076-94bb2cac-4f4e-40fb-a69f-66709e3d98f5.png' alt='CodeFormer logo'></center>
+ <b>Official Gradio demo</b> for <a href='https://github.com/sczhou/CodeFormer' target='_blank'><b>Towards Robust Blind Face Restoration with Codebook Lookup Transformer</b></a>.<br>
+ 🔥 CodeFormer is a robust face restoration algorithm for old photos or AI-generated faces.<br>
+ 🤗 Try CodeFormer for improved stable-diffusion generation!<br>
+ """
+ article = r"""
+ If CodeFormer is helpful, please help to ⭐ the <a href='https://github.com/sczhou/CodeFormer' target='_blank'>GitHub repo</a>. Thanks!
+ [![GitHub Stars](https://img.shields.io/github/stars/sczhou/CodeFormer?style=social)](https://github.com/sczhou/CodeFormer)
+
+ ---
+
+ 📝 Citation
+ If our work is useful for your research, please consider citing:
+ ```bibtex
+ @article{zhou2022codeformer,
+     author = {Zhou, Shangchen and Chan, Kelvin C.K. and Li, Chongyi and Loy, Chen Change},
+     title = {Towards Robust Blind Face Restoration with Codebook Lookup TransFormer},
+     journal = {arXiv preprint arXiv:2206.11253},
+     year = {2022}
+ }
+ ```
+
+ 📧 Contact
+ If you have any questions, please feel free to reach out to me at <b>[email protected]</b>.
+
+ ![visitors](https://visitor-badge.glitch.me/badge?page_id=sczhou/CodeFormer)
+ """
+
+ gr.Interface(
+     inference, [
+         gr.inputs.Image(type="filepath", label="Input"),
+         gr.inputs.Checkbox(default=True, label="Background_Enhance"),
+         gr.inputs.Checkbox(default=True, label="Face_Upsample"),
+         gr.inputs.Number(default=2, label="Rescaling_Factor"),
+         gr.Slider(0, 1, value=0.5, step=0.01, label='Codeformer_Fidelity (0 for better quality, 1 for better identity)')
+     ], [
+         gr.outputs.Image(type="numpy", label="Output"),
+     ],
+     title=title,
+     description=description,
+     article=article,
+     examples=[
+         ['01.png', True, True, 2, 0.7],
+         ['02.jpg', True, True, 2, 0.7],
+         ['03.jpg', True, True, 2, 0.7],
+         ['04.jpg', True, True, 2, 0.1],
+         ['05.jpg', True, True, 2, 0.1]
+     ]
+ ).launch()
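For readers skimming the diff, here is a minimal sketch of calling the `inference` function defined above directly, mirroring what `gr.Interface` passes to it for the first `examples` entry. It assumes the module-level setup in app.py (weight downloads, RealESRGAN and CodeFormer construction) has already executed in the same process.

```python
# Hypothetical direct call (assumes app.py's module-level setup has already run).
restored_rgb = inference(
    "01.png",  # image: a file path, as provided by gr.inputs.Image(type="filepath")
    True,      # background_enhance: upsample the background with RealESRGAN
    True,      # face_upsample: also upsample the restored faces
    2,         # upscale: rescaling factor handed to FaceRestoreHelper
    0.7,       # codeformer_fidelity: 0 for better quality, 1 for better identity
)
# restored_rgb is an RGB numpy array; app.py also writes a copy to output/out.png.
```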
packages.txt ADDED
@@ -0,0 +1,3 @@
+ ffmpeg
+ libsm6
+ libxext6
requirements.txt ADDED
@@ -0,0 +1,20 @@
+ addict
+ future
+ lmdb
+ numpy
+ opencv-python
+ Pillow
+ pyyaml
+ requests
+ scikit-image
+ scipy
+ tb-nightly
+ torch>=1.7.1
+ torchvision
+ tqdm
+ yapf
+ lpips
+ gdown  # supports downloading the large file from Google Drive
+ # cmake
+ # dlib
+ # conda install -c conda-forge dlib