Commit 5210b6a
Parent(s): 908fb48
Update usage/interfere_cpu.py

Files changed: usage/interfere_cpu.py (+1, -32)

usage/interfere_cpu.py  CHANGED
@@ -9,13 +9,9 @@ import numpy as np
 from torchvision import transforms
 from PIL import Image
 
-from transformers import undefined
-
 num_cls = 2
 classes = ['female', 'male']
 
-#############################
-# model struct
 def model_struct():
     model = torchvision.models.vgg16(pretrained=True)
 
@@ -35,29 +31,6 @@ def model_struct():
         param.requires_grad = True
 
     return model
-
-
-#############################
-# graphic lib
-def dim(imgpath):
-    img = cv2.imread(imgpath, 1)
-    height, width, channels = img.shape
-    return height, width, channels
-
-def crop(imgfrom, imgto, x = 0, w = 64, y = 0, h = 64):
-    img = cv2.imread(imgfrom, 1)
-    img2 = img[y:y+h, x:x+w]
-    return cv2.imwrite(imgto, img2)
-
-def resize(imgfrom, imgto, width, height):
-    img = cv2.imread(imgfrom, 1)
-    img2 = cv2.resize(img, (width, height))
-    return cv2.imwrite(imgto, img2)
-
-def rgb32to24(imgfrom, imgto):
-    img = cv2.imread(imgfrom, 1)
-    img2 = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
-    return cv2.imwrite(imgto, img2)
 
 def cmpgraph_64x64(imgfrom, imgto):
     height, width, channels = dim(imgfrom)
@@ -104,12 +77,8 @@ def predictmain(model, filepath):
 
 
 if __name__ == '__main__':
-    # transfomer usage
-    model = undefined.from_pretrained("undefined")
-    model.load_adapter("DOFOFFICIAL/animeGender-dvgg-0.7", source="hf")
-
     # local usage
-    model = modelload("
+    model = modelload("animeGender-dvgg-0.7.pth")
 
     # use your picture to interfere
     cmpgraph_64x64("path.png", "path(1).png")
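For reference, after this commit the script's local-usage path reduces to rebuilding the VGG16 classifier, loading the animeGender-dvgg-0.7.pth checkpoint on CPU, and classifying one image. The sketch below is a minimal approximation of that flow, not the repository's exact interfere_cpu.py: the bodies of modelload and predictmain are not shown in this diff, so their implementations here (a 2-class head on VGG16, a 64x64 resize, an argmax over the logits) are assumptions; only the checkpoint name, the class list ['female', 'male'], and the vgg16(pretrained=True) backbone come from the diff itself.

# Minimal CPU-inference sketch; helper bodies are assumed, not taken from the repo.
import torch
import torchvision
from torchvision import transforms
from PIL import Image

num_cls = 2
classes = ['female', 'male']

def model_struct():
    # VGG16 backbone as in the diff; swapping the last classifier layer for a
    # 2-class head is an assumption about how the checkpoint was produced.
    model = torchvision.models.vgg16(pretrained=True)
    model.classifier[6] = torch.nn.Linear(4096, num_cls)
    return model

def modelload(path):
    # Assumed helper: rebuild the architecture and load the saved weights on CPU.
    model = model_struct()
    model.load_state_dict(torch.load(path, map_location='cpu'))
    model.eval()
    return model

def predictmain(model, filepath):
    # Assumed helper: preprocess a 64x64 RGB image and return the predicted label.
    preprocess = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
    ])
    batch = preprocess(Image.open(filepath).convert('RGB')).unsqueeze(0)  # (1, 3, 64, 64)
    with torch.no_grad():
        logits = model(batch)
    return classes[int(logits.argmax(dim=1))]

if __name__ == '__main__':
    # local usage, mirroring the line added by this commit
    model = modelload("animeGender-dvgg-0.7.pth")
    print(predictmain(model, "path(1).png"))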