Face gender and age detection implemented with the OpenCV DNN module. The project is fairly simple and straightforward; the pipeline consists mainly of:
[1] - Detect the face bounding boxes in the image (e.g., with the dlib library).
[2] - Predict the gender and age of each detected face.
[3] - Visualize the detection results.
1. Models
The models used are as follows:
[1] - Face detection model
Either dlib's CNN face detector (mmod_human_face_detector.dat), or, alternatively, the ResNet-based face detection model that ships with the OpenCV DNN module (opencv_face_detector_uint8.pb / opencv_face_detector.pbtxt).
[2] - Gender classification model (Caffe)
Two gender classes: Male, Female.
[3] - Age prediction model (Caffe)
Eight age groups: (0-2), (4-6), (8-12), (15-20), (25-32), (38-43), (48-53), (60-100). A minimal sketch of how the two Caffe classifiers are called follows this list.
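Both classifiers are plain Caffe image-classification networks. As a minimal sketch (using the same filenames and mean values as the full script in section 2; face_crop.jpg is a hypothetical, already-cropped face image), the face crop is converted into a 227x227 blob and the argmax over the output vector indexes into the class lists above:

import cv2

genderNet = cv2.dnn.readNet("gender_net.caffemodel", "gender_deploy.prototxt")
genderList = ['Male', 'Female']
MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)  # BGR channel means

face = cv2.imread("face_crop.jpg")  # hypothetical pre-cropped face image
blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MEAN_VALUES, swapRB=False)
genderNet.setInput(blob)
preds = genderNet.forward()                           # shape (1, 2), class scores
print(genderList[preds[0].argmax()], preds[0].max())  # label + confidence

The age network is called in exactly the same way, with ageList and its eight classes in place of genderList.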
2. Implementation
# -*- coding: utf-8 -*-
import os
import cv2
import time
import matplotlib.pyplot as plt
import dlib
class AgeGenderDetector():
    def __init__(self):
        # Load models and networks
        # Age
        self.ageList = ['(0-2)',
                        '(4-6)',
                        '(8-12)',
                        '(15-20)',
                        '(25-32)',
                        '(38-43)',
                        '(48-53)',
                        '(60-100)']
        ageProto = "age_deploy.prototxt"
        ageModel = "age_net.caffemodel"
        self.ageNet = cv2.dnn.readNet(ageModel, ageProto)
        # Gender
        self.genderList = ['Male', 'Female']
        genderProto = "gender_deploy.prototxt"
        genderModel = "gender_net.caffemodel"
        self.genderNet = cv2.dnn.readNet(genderModel, genderProto)
        # Face detection - OpenCV DNN
        faceProto = "opencv_face_detector.pbtxt"
        faceModel = "opencv_face_detector_uint8.pb"
        self.faceNet = cv2.dnn.readNet(faceModel, faceProto)
        # Face detection - dlib
        dlib_cnn_face_model = "mmod_human_face_detector.dat"
        self.dlib_cnn_face_detector = dlib.cnn_face_detection_model_v1(dlib_cnn_face_model)
        self.mean_values = (78.4263377603, 87.7689143744, 114.895847746)
        self.padding = 20
    def dlib_cnn_face_detect(self, img):
        detections = self.dlib_cnn_face_detector(img, 1)
        bboxes = []
        if len(detections) > 0:
            for i, d in enumerate(detections):
                x1 = d.rect.left()
                y1 = d.rect.top()
                x2 = d.rect.right()
                y2 = d.rect.bottom()
                bboxes.append([x1, y1, x2, y2])  # box as [x1, y1, x2, y2]
        return bboxes
    def opencv_dnn_face_detect(self, img_cv2, conf_threshold=0.7):
        height, width, _ = img_cv2.shape
        blob = cv2.dnn.blobFromImage(img_cv2,
                                     1.0,
                                     (300, 300),
                                     [104, 117, 123],
                                     True,
                                     False)
        self.faceNet.setInput(blob)
        detections = self.faceNet.forward()
        bboxes = []
        for i in range(detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > conf_threshold:
                x1 = int(detections[0, 0, i, 3] * width)
                y1 = int(detections[0, 0, i, 4] * height)
                x2 = int(detections[0, 0, i, 5] * width)
                y2 = int(detections[0, 0, i, 6] * height)
                bboxes.append([x1, y1, x2, y2])  # box as [x1, y1, x2, y2]
        return bboxes
    def detect(self, img_file):
        start = time.time()
        img_cv2 = cv2.imread(img_file)
        img_rgb = cv2.cvtColor(img_cv2, cv2.COLOR_BGR2RGB)
        # Face box detection
        # face_bboxes = self.opencv_dnn_face_detect(img_cv2)
        face_bboxes = self.dlib_cnn_face_detect(img_rgb)  # RGB order
        if not face_bboxes:
            print("No face detected, checking next image")
            return False
        for bbox in face_bboxes:
            # print(bbox)
            # Single face, cropped with padding and clipped to the image borders
            face = img_cv2[
                max(0, bbox[1]-self.padding):
                min(bbox[3] + self.padding, img_cv2.shape[0]-1),
                max(0, bbox[0]-self.padding):
                min(bbox[2] + self.padding, img_cv2.shape[1]-1)]
            # Gender
            blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227),
                                         self.mean_values, swapRB=False)
            self.genderNet.setInput(blob)
            genderPreds = self.genderNet.forward()
            gender = self.genderList[genderPreds[0].argmax()]
            # print("Gender Output : {}".format(genderPreds))
            print("Gender : {}, conf = {:.3f}".format(
                gender, genderPreds[0].max()))
            # Age
            self.ageNet.setInput(blob)
            agePreds = self.ageNet.forward()
            age = self.ageList[agePreds[0].argmax()]
            # print("Age Output : {}".format(agePreds))
            print("Age : {}, conf = {:.3f}".format(age, agePreds[0].max()))
            # Visualization
            label = "{}, {}".format(gender, age)
            cv2.rectangle(img_cv2,
                          (bbox[0], bbox[1]), (bbox[2], bbox[3]),
                          (0, 255, 0),
                          int(round(img_cv2.shape[0]/150)),
                          8)
            cv2.putText(img_cv2,
                        label,
                        (bbox[0], bbox[1]-10),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.6,
                        (0, 255, 255),
                        2,
                        cv2.LINE_AA)
            # cv2.imwrite("age-gender-out-{}".format(os.path.basename(img_file)), img_cv2)
        print("time : {:.3f}".format(time.time() - start))
        plt.figure(figsize=(10, 8))
        plt.axis("off")
        plt.imshow(img_cv2[:, :, ::-1])
        plt.show()
        return True
if __name__ == '__main__':
    print("[INFO] Starting AgeGender Demo...")
    img_path = "/path/to/images/"
    img_files = [os.path.join(img_path, img_name)
                 for img_name in os.listdir(img_path)]
    age_gender_detector = AgeGenderDetector()
    for img_file in img_files:
        print("Processing file: {}".format(img_file))
        age_gender_detector.detect(img_file)
    print("[INFO] Done.")
Example output:
Gender : Female, conf = 0.984
Age : (25-32), conf = 0.500
time : 0.474
Gender : Female, conf = 0.999
Age : (25-32), conf = 0.408
time : 0.211
3. References
Main reference:
[1] - Age and Gender Classification Using Convolutional Neural Networks