DOFOFFICIAL committed
Commit b812c98
1 parent: 5210b6a

Update usage/interfere.py

Files changed (1): usage/interfere.py (+2, -37)
usage/interfere.py CHANGED
@@ -9,13 +9,10 @@ import numpy as np
 from torchvision import transforms
 from PIL import Image
 
-from transformers import undefined
-
 num_cls = 2
 classes = ['female', 'male']
 
-#############################
-# model struct
+
 def model_struct():
     model = torchvision.models.vgg16(pretrained=True)
 
@@ -37,29 +34,6 @@ def model_struct():
     model = model.cuda() #GPU
 
     return model
-
-
-#############################
-# graphic lib
-def dim(imgpath):
-    img = cv2.imread(imgpath, 1)
-    height, width, channels = img.shape
-    return height, width, channels
-
-def crop(imgfrom, imgto, x = 0, w = 64, y = 0, h = 64):
-    img = cv2.imread(imgfrom, 1)
-    img2 = img[y:y+h, x:x+w]
-    return cv2.imwrite(imgto, img2)
-
-def resize(imgfrom, imgto, width, height):
-    img = cv2.imread(imgfrom, 1)
-    img2 = cv2.resize(img, (width, height))
-    return cv2.imwrite(imgto, img2)
-
-def rgb32to24(imgfrom, imgto):
-    img = cv2.imread(imgfrom, 1)
-    img2 = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
-    return cv2.imwrite(imgto, img2)
 
 def cmpgraph_64x64(imgfrom, imgto):
     height, width, channels = dim(imgfrom)
@@ -79,10 +53,7 @@ def cmpgraph_64x64(imgfrom, imgto):
     img2 = cv2.resize(img, (64,64))
     img3 = cv2.cvtColor(img2, cv2.COLOR_BGRA2BGR)
     return cv2.imwrite(imgto, img3)
-
 
-#############################
-# model usage
 def predict_class(img_path, model):
     img = Image.open(img_path)
     transform = transforms.Compose([transforms.ToTensor()])
@@ -109,14 +80,8 @@ def predictmain(model, filepath):
 
 
 if __name__ == '__main__':
-    # transfomer usage
-    model = undefined.from_pretrained("undefined")
-    model.load_adapter("DOFOFFICIAL/animeGender-dvgg-0.7", source="hf")
-    model.to("cuda")
-    model.eval()
-
     # local usage
-    model = modelload("model_animeGender-dvgg-0.7.pth")
+    model = modelload("animeGender-dvgg-0.7.pth")
 
     # use your picture to interfere
     cmpgraph_64x64("path.png", "path(1).png")
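
For reference, after this commit the __main__ block only keeps the local loading path. Below is a minimal usage sketch, not a verified API: it assumes usage/interfere.py is importable as "interfere", that modelload, cmpgraph_64x64, and predictmain(model, filepath) keep the signatures visible in the hunk headers above, and that dim() (removed in the second hunk but still called from cmpgraph_64x64's unchanged context line) is defined or restored at import time. The weights filename is the one the commit introduces.

# Sketch only; names and arguments are taken from the diff above, not a verified interface.
from interfere import modelload, cmpgraph_64x64, predictmain

model = modelload("animeGender-dvgg-0.7.pth")   # local .pth weights, as in the new __main__
cmpgraph_64x64("path.png", "path(1).png")       # resize to 64x64 and drop the alpha channel
predictmain(model, "path(1).png")               # assumed to classify into 'female' / 'male'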