test.py
import cv2
import argparse
import torch
from torch.autograd import Variable
from PIL import Image
from utils import *  # expected to provide data_transform, label2id and the CNN class

def predict(img_detect, model):
    """Classify a single BGR face crop and return its label name."""
    img_detect = cv2.resize(img_detect, (32, 32))  # resize to the 32x32 input the model expects
    img = Image.fromarray(img_detect)  # note: OpenCV loads BGR; training is assumed to use the same channel order
    img = data_transform(img)  # convert to a normalized tensor
    img = img.view(1, 3, 32, 32)  # add the batch dimension: (1, 3, 32, 32)
    img = Variable(img)
    model.eval()  # evaluation mode (disables dropout / batch-norm updates)
    # move model and input to the GPU
    model = model.cuda()
    img = img.cuda()
    output = model(img)
    predicted = torch.argmax(output)  # index of the most likely class
    return label2id[predicted.item()]  # map the class index to its label name

# Load the trained weights onto the GPU
model = CNN()
model = model.cuda()
model.load_state_dict(torch.load('weights/Face-Mask-Model.pt'))

# Read the test image
img = cv2.imread('./dataset/without_mask/0_0_dongchengpeng_0005')
if img is None:
    raise FileNotFoundError('Could not read the test image')

# Optional: detect faces with a Haar cascade and classify each crop
# face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# faces = face_cascade.detectMultiScale(gray, 1.3, 5)
# for (x, y, w, h) in faces:
#     face_crop = img[y+2:y+h-2, x+2:x+w-2]
#     label = predict(face_crop, model)
#     print(label)
#     # putface(img, label, x, y, w, h)

# Classify the whole image and display it
result = predict(img, model)
print(result)
cv2.imshow("Img", img)
cv2.waitKey()
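
The wildcard import above assumes that utils.py exposes data_transform, label2id, and the CNN class (plus torch and Variable as used here). For reference, a minimal sketch of what such a utils.py could contain is shown below; the class order, normalization constants, and layer sizes are illustrative assumptions and will only match the checkpoint in weights/Face-Mask-Model.pt if they mirror the original training code.

# utils.py -- illustrative sketch only; values below are assumptions
import torch
import torch.nn as nn
from torch.autograd import Variable  # re-exported for test.py; a no-op wrapper in modern PyTorch
from torchvision import transforms

# Despite its name, label2id is indexed by the predicted class index in test.py,
# so it maps index -> label name. The class order here is an assumption.
label2id = {0: 'with_mask', 1: 'without_mask'}

# Preprocessing applied to each 32x32 PIL image; the mean/std values are placeholders.
data_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])

class CNN(nn.Module):
    """Small CNN for 32x32 RGB inputs with two output classes (architecture assumed)."""
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, padding=1),   # 32x32 -> 32x32
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),                              # -> 16x16
            nn.Conv2d(16, 32, kernel_size=3, padding=1),  # -> 16x16
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),                              # -> 8x8
        )
        self.classifier = nn.Linear(32 * 8 * 8, 2)

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, 1)
        return self.classifier(x)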