Installing Dlib
Installing Dlib in a Docker Anaconda environment - 追憶行
In my environment, the installation took about 30 minutes.
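Once the build finishes, a quick way to confirm the installation is to import the module and print its version (a minimal check; the printed version will of course depend on your environment):

import dlib

# If this import succeeds, the extension was built and installed correctly.
print(dlib.__version__)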
Setting up the detection model
The facial landmark detection model can be downloaded from the Dlib site:
http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
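The download is a bzip2 archive, so it has to be decompressed to get the .dat file the predictor expects. A minimal sketch using only the standard library (the local file names here are just an example):

import bz2
import urllib.request

url = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
archive = "shape_predictor_68_face_landmarks.dat.bz2"
model = "shape_predictor_68_face_landmarks.dat"

# Download the compressed model file
urllib.request.urlretrieve(url, archive)

# Decompress the bz2 archive into the .dat file that dlib loads
with bz2.open(archive, "rb") as src, open(model, "wb") as dst:
    dst.write(src.read())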
# Detection models
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
detector = dlib.get_frontal_face_detector()
Running the detection
First the face itself is detected, and then the facial landmarks are detected within each detected face region.
img = cv2.imread("img/woman-2299736_640.jpg")

# Detect frontal faces
dets = detector(img, 1)

for k, d in enumerate(dets):
    # Detect facial landmarks within the detected face rectangle
    shape = predictor(img, d)
Plotting the detection results
As the name shape_predictor_68_face_landmarks suggests, 68 landmark points are detected; the first 17 (indices 0-16) appear to trace the face outline, and the remaining 51 correspond to the inner facial parts (eyebrows, eyes, nose, and mouth).
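For reference, the 68 points follow the iBUG 300-W annotation scheme, and the index ranges below are the commonly cited grouping (a convention of that dataset, not something exposed by the dlib API):

# Index ranges of the 68-point layout (0-based, end-exclusive)
LANDMARK_GROUPS = {
    "jawline":       range(0, 17),
    "right_eyebrow": range(17, 22),
    "left_eyebrow":  range(22, 27),
    "nose":          range(27, 36),
    "right_eye":     range(36, 42),
    "left_eye":      range(42, 48),
    "outer_lip":     range(48, 60),
    "inner_lip":     range(60, 68),
}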
# Line and point settings for plotting
color_f = (0, 0, 225)
color_l_out = (255, 0, 0)
color_l_in = (0, 255, 0)
line_w = 3
circle_r = 3
fontType = cv2.FONT_HERSHEY_SIMPLEX
fontSize = 1

# Draw the detected face rectangle
cv2.rectangle(img, (d.left(), d.top()), (d.right(), d.bottom()), (255, 255, 255), line_w)

num_of_points_out = 17
num_of_points_in = shape.num_parts - num_of_points_out
gx_out = 0
gy_out = 0
gx_in = 0
gy_in = 0
for shape_point_count in range(shape.num_parts):
    shape_point = shape.part(shape_point_count)
    print("No.{} position: ({},{})".format(shape_point_count, shape_point.x, shape_point.y))
    if shape_point_count < num_of_points_out:
        # Draw the detected face outline points
        cv2.circle(img, (shape_point.x, shape_point.y), circle_r, color_l_out, line_w)
        gx_out = gx_out + shape_point.x / num_of_points_out
        gy_out = gy_out + shape_point.y / num_of_points_out
    else:
        # Draw the detected inner facial landmarks
        cv2.circle(img, (shape_point.x, shape_point.y), circle_r, color_l_in, line_w)
        gx_in = gx_in + shape_point.x / num_of_points_in
        gy_in = gy_in + shape_point.y / num_of_points_in

# Estimate the face yaw from the offset between the two centroids
theta = math.asin(2 * (gx_in - gx_out) / (d.right() - d.left()))
degree = theta * 180 / math.pi
print("theta:{} (degree:{})".format(theta, degree))

if degree < 0:
    textPrefix = " left "
else:
    textPrefix = " right "
textShow = textPrefix + str(round(abs(degree), 1)) + " deg."

# Draw the estimated tilt on the image
cv2.putText(img, textShow, (d.left(), d.top()), fontType, fontSize, (255, 255, 255), line_w)

plt.imshow(img)
plt.xticks([]), plt.yticks([])  # hide tick values on the X and Y axes
plt.show()
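The yaw estimate treats the horizontal offset between the inner-landmark centroid and the outline centroid as the sine of the rotation angle, scaled by half the width of the detection box (this reading of the formula is my interpretation, not something stated in the dlib documentation). A quick numerical check with made-up values:

import math

# Illustrative numbers only (not taken from the sample image):
# detection box 400 px wide, inner centroid 50 px right of the outline centroid.
box_width = 400
offset = 50
yaw_deg = math.degrees(math.asin(2 * offset / box_width))
print(round(yaw_deg, 1))  # -> 14.5, which the code above would label " right "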
Original image
Detection result
Full code
import cv2
import dlib
import math
from matplotlib import pyplot as plt

# Detection models
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
detector = dlib.get_frontal_face_detector()

img = cv2.imread("img/woman-2299736_640.jpg")

# Detect frontal faces
dets = detector(img, 1)

for k, d in enumerate(dets):
    # Detect facial landmarks within the detected face rectangle
    shape = predictor(img, d)

    # Line and point settings for plotting
    color_f = (0, 0, 225)
    color_l_out = (255, 0, 0)
    color_l_in = (0, 255, 0)
    line_w = 3
    circle_r = 3
    fontType = cv2.FONT_HERSHEY_SIMPLEX
    fontSize = 1

    # Draw the detected face rectangle
    cv2.rectangle(img, (d.left(), d.top()), (d.right(), d.bottom()), (255, 255, 255), line_w)

    num_of_points_out = 17
    num_of_points_in = shape.num_parts - num_of_points_out
    gx_out = 0
    gy_out = 0
    gx_in = 0
    gy_in = 0
    for shape_point_count in range(shape.num_parts):
        shape_point = shape.part(shape_point_count)
        print("No.{} position: ({},{})".format(shape_point_count, shape_point.x, shape_point.y))
        if shape_point_count < num_of_points_out:
            # Draw the detected face outline points
            cv2.circle(img, (shape_point.x, shape_point.y), circle_r, color_l_out, line_w)
            gx_out = gx_out + shape_point.x / num_of_points_out
            gy_out = gy_out + shape_point.y / num_of_points_out
        else:
            # Draw the detected inner facial landmarks
            cv2.circle(img, (shape_point.x, shape_point.y), circle_r, color_l_in, line_w)
            gx_in = gx_in + shape_point.x / num_of_points_in
            gy_in = gy_in + shape_point.y / num_of_points_in

    # Estimate the face yaw from the offset between the two centroids
    theta = math.asin(2 * (gx_in - gx_out) / (d.right() - d.left()))
    degree = theta * 180 / math.pi
    print("theta:{} (degree:{})".format(theta, degree))

    if degree < 0:
        textPrefix = " left "
    else:
        textPrefix = " right "
    textShow = textPrefix + str(round(abs(degree), 1)) + " deg."

    # Draw the estimated tilt on the image
    cv2.putText(img, textShow, (d.left(), d.top()), fontType, fontSize, (255, 255, 255), line_w)

plt.imshow(img)
plt.xticks([]), plt.yticks([])  # hide tick values on the X and Y axes
plt.show()
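Note that cv2.imread loads images in BGR channel order while matplotlib assumes RGB, so the colors in the plt.imshow output appear channel-swapped. If faithful colors matter, a small tweak to the display step fixes this:

# OpenCV stores images as BGR; convert to RGB before handing the image to matplotlib
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.xticks([]), plt.yticks([])
plt.show()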
Links