jjourney1125 committed
Commit 5b4d580 • 1 Parent(s): 6a52ec3

Update modules

app.py CHANGED
@@ -285,22 +285,22 @@ def test(img_lq, model, args, window_size):
 if __name__ == '__main__':
 
     parser = argparse.ArgumentParser()
-    parser.add_argument('--task', type=str, default='real_sr', help='classical_sr, lightweight_sr, real_sr, '
+    parser.add_argument('--task', type=str, default='compressed_sr', help='classical_sr, lightweight_sr, real_sr, '
                                                                     'gray_dn, color_dn, jpeg_car, color_jpeg_car')
     parser.add_argument('--scale', type=int, default=4, help='scale factor: 1, 2, 3, 4, 8') # 1 for dn and jpeg car
     parser.add_argument('--noise', type=int, default=15, help='noise level: 15, 25, 50')
-    parser.add_argument('--jpeg', type=int, default=40, help='scale factor: 10, 20, 30, 40')
-    parser.add_argument('--training_patch_size', type=int, default=128, help='patch size used in training Swin2SR. '
+    parser.add_argument('--jpeg', type=int, default=10, help='scale factor: 10, 20, 30, 40')
+    parser.add_argument('--training_patch_size', type=int, default=48, help='patch size used in training Swin2SR. '
                                              'Just used to differentiate two different settings in Table 2 of the paper. '
                                              'Images are NOT tested patch by patch.')
     parser.add_argument('--large_model', action='store_true', help='use large model, only provided for real image sr')
     parser.add_argument('--model_path', type=str,
-                        default='experiments/pretrained_models/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth')
+                        default='experiments/pretrained_models/Swin2SR_CompressedSR_X4_48.pth')
     parser.add_argument('--folder_lq', type=str, default="test", help='input low-quality test image folder')
     parser.add_argument('--folder_gt', type=str, default=None, help='input ground-truth test image folder')
     parser.add_argument('--tile', type=int, default=None, help='Tile size, None for no tile during testing (testing as a whole)')
     parser.add_argument('--tile_overlap', type=int, default=32, help='Overlapping of different tiles')
-    parser.add_argument('--save_img_only', default=False, action='store_true', help='save image and do not evaluate')
+    parser.add_argument('--save_img_only', default=True, action='store_true', help='save image and do not evaluate')
     args = parser.parse_args()
 
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -320,15 +320,24 @@ if __name__ == '__main__':
 
     #main(img)
 
-    title = "Swin2SR"
-    description = "Gradio demo for Swin2SR."
+    title = "Super-Resolution Demo Swin2SR Official 🚀🚀🔥"
+    description = '''
+<br>
+
+**This Demo expects low-quality and low-resolution JPEG compressed images**
+
+**We are looking for collaborators! Collaborator를 찾고 있습니다!** 🇬🇧 🇪🇸 🇰🇷 🇫🇷 🇷🇴 🇩🇪 🇨🇳
+
+**Please check our github project: https://github.com/mv-lab/swin2sr**
+</br>
+'''
     article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2209.11345' target='_blank'>Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration</a> | <a href='https://github.com/mv-lab/swin2sr' target='_blank'>Github Repo</a></p>"
 
-    examples=[['butterflyx4.png']]
+    examples= glob.glob("samples/*.jpg")
     gr.Interface(
         main,
         gr.inputs.Image(type="pil", label="Input"),
-        "image",
+        gr.inputs.Image(type="pil", label="Ouput"),
         title=title,
         description=description,
         article=article,
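
The first hunk retargets the demo from the real-world SR setting to compressed-image SR: --task now defaults to compressed_sr, --jpeg drops from 40 to 10, --training_patch_size drops from 128 to 48, --model_path points at Swin2SR_CompressedSR_X4_48.pth, and --save_img_only defaults to True. A minimal sketch, not part of the commit, that isolates just these changed defaults and shows what parsing no CLI flags yields:

import argparse

# Rebuild only the arguments whose defaults change in this commit.
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='compressed_sr')
parser.add_argument('--jpeg', type=int, default=10)
parser.add_argument('--training_patch_size', type=int, default=48)
parser.add_argument('--model_path', type=str,
                    default='experiments/pretrained_models/Swin2SR_CompressedSR_X4_48.pth')
# Note: default=True combined with action='store_true' means save_img_only is
# True whether or not the flag is passed, so the demo always skips evaluation.
parser.add_argument('--save_img_only', default=True, action='store_true')

args = parser.parse_args([])                 # empty argv = the demo's defaults
print(args.task)                             # compressed_sr
print(args.jpeg, args.training_patch_size)   # 10 48
print(args.save_img_only)                    # True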
butterflyx4.png DELETED
Binary file (10.4 kB)
 
ali_eye.jpg → samples/ali_eye.jpg RENAMED
File without changes
samples/chain-eye.jpg ADDED
samples/gojou-eyes.jpg ADDED
samples/vagabond.jpg ADDED
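
Read together with the sample files listed above, the UI hunk wires the Space to serve the new samples/ folder as its example gallery. Below is a sketch of the post-commit wiring, not the verbatim file: the main stand-in, the examples= keyword, and the .launch() call are assumptions (the hunk ends before the gr.Interface(...) call closes), while the components, title, description, and article follow the added lines; gr.inputs.Image is the legacy Gradio component namespace app.py already uses, and the output label keeps the commit's spelling.

import glob
import gradio as gr

def main(img):
    # Hypothetical stand-in for the Swin2SR inference function defined earlier
    # in app.py; it just echoes the input so the sketch runs end-to-end.
    return img

title = "Super-Resolution Demo Swin2SR Official 🚀🚀🔥"
description = '''
<br>

**This Demo expects low-quality and low-resolution JPEG compressed images**

**We are looking for collaborators! Collaborator를 찾고 있습니다!** 🇬🇧 🇪🇸 🇰🇷 🇫🇷 🇷🇴 🇩🇪 🇨🇳

**Please check our github project: https://github.com/mv-lab/swin2sr**
</br>
'''
article = ("<p style='text-align: center'>"
           "<a href='https://arxiv.org/abs/2209.11345' target='_blank'>"
           "Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration</a> | "
           "<a href='https://github.com/mv-lab/swin2sr' target='_blank'>Github Repo</a></p>")

# Replaces the single bundled butterflyx4.png example: this now picks up
# samples/ali_eye.jpg, chain-eye.jpg, gojou-eyes.jpg and vagabond.jpg
# (glob returns the paths in arbitrary order).
examples = glob.glob("samples/*.jpg")

gr.Interface(
    main,
    gr.inputs.Image(type="pil", label="Input"),   # low-quality JPEG input
    gr.inputs.Image(type="pil", label="Ouput"),   # output component, label as committed
    title=title,
    description=description,
    article=article,
    examples=examples,                            # assumed; not visible in the hunk
).launch()                                        # assumed; not visible in the hunk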