johnnv committed
Commit cda9ccb
1 Parent(s): 2e1faab

add random crop

Files changed (2)
  1. app.py +43 -27
  2. requirements.txt +0 -1
app.py CHANGED
@@ -1,12 +1,13 @@
 from __future__ import annotations
-import cv2
+
+import random
+
 import gradio as gr
 import matplotlib
 import matplotlib.pyplot as plt
 import numpy as np
 import torch
 from CCAgT_utils.categories import CategoriesInfos
-from CCAgT_utils.slice import __create_xy_slice
 from CCAgT_utils.types.mask import Mask
 from CCAgT_utils.visualization import plot
 from PIL import Image
@@ -68,42 +69,56 @@ def colorize(
     return mask.colorized(CategoriesInfos()) / 255
 
 
-def check_and_resize(
-    image: np.ndarray,
-) -> np.ndarray:
+# Copied from https://github.com/albumentations-team/albumentations/blob/b1af92ab8e57279f5acd5987770a86a8d6b6b0e5/albumentations/augmentations/crops/functional.py#L35
+def get_random_crop_coords(
+    height: int,
+    width: int,
+    crop_height: int,
+    crop_width: int,
+    h_start: float,
+    w_start: float,
+):
+    y1 = int((height - crop_height + 1) * h_start)
+    y2 = y1 + crop_height
+    x1 = int((width - crop_width + 1) * w_start)
+    x2 = x1 + crop_width
+    return x1, y1, x2, y2
+
 
-    if image.shape[0] > 1200 or image.shape[1] > 1600:
-        r = 1600.0 / image.shape[1]
-        dim = (1600, int(image.shape[0] * r))
-        return cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
+# Copied from https://github.com/albumentations-team/albumentations/blob/b1af92ab8e57279f5acd5987770a86a8d6b6b0e5/albumentations/augmentations/crops/functional.py#L46
 
-    return image
+
+def random_crop(
+    img: np.ndarray,
+    crop_height: int,
+    crop_width: int,
+    h_start: float,
+    w_start: float,
+) -> np.ndarray:
+    height, width = img.shape[:2]
+
+    x1, y1, x2, y2 = get_random_crop_coords(
+        height, width, crop_height, crop_width, h_start, w_start,
+    )
+    img = img[y1:y2, x1:x2]
+    return img
 
 
 def process_big_images(
     image: Image.Image,
 ) -> Mask:
     '''Process and post-processing for images bigger than 400x300'''
-    img = check_and_resize(np.asarray(image))
-    mask = np.zeros(shape=(img.shape[0], img.shape[1]), dtype=np.uint8)
+    img = np.asarray(image)
 
-    for bbox in __create_xy_slice(image.size[1], image.size[0], 300, 400):
-        part = cv2.copyMakeBorder(
-            img,
-            bbox.y_init,
-            bbox.y_end,
-            bbox.x_init,
-            bbox.x_end,
-            cv2.BORDER_REFLECT,
-        )
-        target_size = (part.shape[0], part.shape[1])
+    if img.shape[0] > 300 or img.shape[1] > 400:
+        img = random_crop(img, 300, 400, random.random(), random.random())
 
-        outputs = segment(Image.fromarray(part))
-        msk = post_processing(outputs, target_size)
+    target_size = (img.shape[0], img.shape[1])
 
-        mask[bbox.slice_y, bbox.slice_x] = msk[bbox.slice_y, bbox.slice_x]
+    outputs = segment(Image.fromarray(img))
+    msk = post_processing(outputs, target_size)
 
-    return Mask(mask)
+    return Mask(msk)
 
 
 def image_with_mask(
@@ -156,7 +171,8 @@ This is demo for the SegFormer fine-tuned on sub-dataset from
 [CCAgT dataset](https://huggingface.co/datasets/lapix/CCAgT). This model
 was trained to segment cervical cells silver-stained (AgNOR technique)
 images with resolution of 400x300. The model was available at HF hub at
-[{model_hub_name}](https://huggingface.co/{model_hub_name}).
+[{model_hub_name}](https://huggingface.co/{model_hub_name}). If the input
+image is bigger than 400x300, the demo will randomly crop it.
 """
 examples = [
     [f'https://hf.co/{model_hub_name}/resolve/main/sampleA.png'],
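For reference, the cropping path introduced above can be exercised in isolation. The sketch below re-declares the two helpers copied from Albumentations (type hints omitted) and applies them to a hypothetical 1200x1600 input; the input size, the fixed seed, and the print are illustrative assumptions, not part of app.py:

```python
import random

import numpy as np


def get_random_crop_coords(height, width, crop_height, crop_width, h_start, w_start):
    # h_start/w_start are random fractions in [0, 1) mapped onto the valid top-left range.
    y1 = int((height - crop_height + 1) * h_start)
    y2 = y1 + crop_height
    x1 = int((width - crop_width + 1) * w_start)
    x2 = x1 + crop_width
    return x1, y1, x2, y2


def random_crop(img, crop_height, crop_width, h_start, w_start):
    height, width = img.shape[:2]
    x1, y1, x2, y2 = get_random_crop_coords(height, width, crop_height, crop_width, h_start, w_start)
    return img[y1:y2, x1:x2]


random.seed(0)  # fixed seed only so the example is reproducible

# A hypothetical 1200x1600 RGB input, larger than the 400x300 window the model expects.
image = np.zeros((1200, 1600, 3), dtype=np.uint8)
crop = random_crop(image, 300, 400, random.random(), random.random())
print(crop.shape)  # (300, 400, 3) -- only this window is passed to segment()
```

Compared with the tiling approach that was removed (`__create_xy_slice` plus `cv2.copyMakeBorder`), a single random 400x300 crop keeps the input at the model's native resolution and drops the opencv-python dependency, so only one window of a large image is segmented per run.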
requirements.txt CHANGED
@@ -1,6 +1,5 @@
 CCAgT-utils
 matplotlib
 numpy
-opencv-python
 torch
 transformers