face_model.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
#import tensorflow as tf
import numpy as np
import mxnet as mx
import random
import cv2
import sklearn
from sklearn.decomposition import PCA
from time import sleep
from easydict import EasyDict as edict
from mtcnn_detector import MtcnnDetector
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src', 'common'))
import face_image
import face_preprocess

def do_flip(data):
  # Horizontally flip every channel of a CHW image in place.
  for idx in range(data.shape[0]):
    data[idx,:,:] = np.fliplr(data[idx,:,:])

def get_model(ctx, image_size, model_str, layer):
  # model_str is "prefix,epoch"; load that checkpoint and cut the symbol at
  # the requested layer so the module outputs that layer's activations.
  _vec = model_str.split(',')
  assert len(_vec) == 2
  prefix = _vec[0]
  epoch = int(_vec[1])
  print('loading', prefix, epoch)
  sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
  all_layers = sym.get_internals()
  sym = all_layers[layer+'_output']
  model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
  # Bind for single-image inference: batch size 1, 3-channel input.
  model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
  model.set_params(arg_params, aux_params)
  return model

class FaceModel:
  def __init__(self, args):
    self.args = args
    # Pick the compute context from args.gpu (negative means CPU).
    if args.gpu >= 0:
      ctx = mx.gpu(args.gpu)
    else:
      ctx = mx.cpu()
    # Accept image_size either as a "112,112" string or as a (h, w) sequence.
    _vec = args.image_size.split(',') if isinstance(args.image_size, str) else args.image_size
    assert len(_vec) >= 2
    image_size = (int(_vec[0]), int(_vec[1]))
    self.model = None
    if len(args.model) > 0:
      # 'fc1' is the output layer of the insightface checkpoints.
      self.model = get_model(ctx, image_size, args.model, 'fc1')
    self.det_minsize = 50
    self.det_threshold = [0.6, 0.7, 0.8]
    #self.det_factor = 0.9
    self.image_size = image_size
    mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')
    if args.det == 0:
      # Full three-stage MTCNN detection with the standard thresholds.
      detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1,
                               accurate_landmark=True, threshold=self.det_threshold)
    else:
      # Relaxed thresholds: keep almost any candidate (useful for pre-cropped faces).
      detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1,
                               accurate_landmark=True, threshold=[0.0, 0.0, 0.2])
    self.detector = detector

  def get_input(self, face_img):
    # Detect the face, then align it and pack it into an MXNet DataBatch.
    ret = self.detector.detect_face(face_img, det_type=self.args.det)
    if ret is None:
      return None
    bbox, points = ret
    if bbox.shape[0] == 0:
      return None
    bbox = bbox[0, 0:4]
    points = points[0, :].reshape((2, 5)).T
    #print(bbox)
    #print(points)
    # Similarity-transform alignment to the canonical 112x112 crop.
    nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')
    nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
    # HWC -> CHW, then add the batch dimension expected by the module.
    aligned = np.transpose(nimg, (2, 0, 1))
    input_blob = np.expand_dims(aligned, axis=0)
    data = mx.nd.array(input_blob)
    db = mx.io.DataBatch(data=(data,))
    return db

  def get_ga(self, data):
    # Run the gender-age network: the 202-dim output is 2 gender logits
    # followed by 100 age bins of 2 logits each; the predicted age is the
    # number of bins whose second logit wins.
    self.model.forward(data, is_train=False)
    ret = self.model.get_outputs()[0].asnumpy()
    g = ret[:, 0:2].flatten()
    gender = np.argmax(g)
    a = ret[:, 2:202].reshape((100, 2))
    a = np.argmax(a, axis=1)
    age = int(sum(a))
    return gender, age
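

# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file): one
# way to drive FaceModel end to end -- build the argparse namespace that
# __init__ reads (gpu, image_size, model, det), align a face with get_input,
# then predict gender and age with get_ga. The checkpoint prefix/epoch and
# the image path below are placeholders, not real files.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='FaceModel gender/age demo')
  parser.add_argument('--image-size', default='112,112', help='input size as "height,width"')
  parser.add_argument('--model', default='./model/model,0', help='checkpoint as "prefix,epoch" (placeholder)')
  parser.add_argument('--gpu', default=-1, type=int, help='gpu id; negative runs on CPU')
  parser.add_argument('--det', default=0, type=int, help='0: full MTCNN detection, 1: relaxed thresholds')
  parser.add_argument('--image', default='test.jpg', help='path to a test image (placeholder)')
  args = parser.parse_args()

  model = FaceModel(args)
  img = cv2.imread(args.image)   # BGR image as loaded by OpenCV
  db = model.get_input(img)      # returns None if no face is found
  if db is None:
    print('no face detected in', args.image)
  else:
    gender, age = model.get_ga(db)
    # gender is the argmax of two logits (conventionally 1 = male in the
    # upstream gender-age models); age is the summed bin prediction.
    print('gender:', gender, 'age:', age)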