# pose_estimator.py (forked from yinguobing/head-pose-estimation)
"""Estimate head pose according to the facial landmarks"""
import numpy as np
import cv2


class PoseEstimator:
    """Estimate head pose according to the facial landmarks."""

    def __init__(self, img_size=(480, 640)):
        self.size = img_size

        # 3D model points of six key facial landmarks, in a generic
        # world unit with the nose tip at the origin.
        self.model_points = np.array([
            (0.0, 0.0, 0.0),            # Nose tip
            (0.0, -330.0, -65.0),       # Chin
            (-225.0, 170.0, -135.0),    # Left eye left corner
            (225.0, 170.0, -135.0),     # Right eye right corner
            (-150.0, -150.0, -125.0),   # Mouth left corner
            (150.0, -150.0, -125.0)     # Mouth right corner
        ]) / 4.5
        self.model_points_68 = self._get_full_model_points()

        # Camera internals: a pinhole model that approximates the focal
        # length with the image width and puts the optical center at the
        # image center.
        self.focal_length = self.size[1]
        self.camera_center = (self.size[1] / 2, self.size[0] / 2)
        self.camera_matrix = np.array(
            [[self.focal_length, 0, self.camera_center[0]],
             [0, self.focal_length, self.camera_center[1]],
             [0, 0, 1]], dtype="double")

        # Assuming no lens distortion.
        self.dist_coeffs = np.zeros((4, 1))

        # Initial rotation and translation vectors, used as the extrinsic
        # guess in solve_pose_by_68_points(). Set them to None to solve
        # the first frame from scratch instead.
        self.r_vec = np.array([[0.01891013], [0.08560084], [-3.14392813]])
        self.t_vec = np.array(
            [[-14.97821226], [-10.62040383], [-2053.03596872]])

    def _get_full_model_points(self, filename='assets/model.txt'):
        """Read the full set of 68 3D model points from file.

        The file is expected to hold one value per line: all 68 x
        coordinates, then all 68 y coordinates, then all 68 z coordinates.
        """
        raw_value = []
        with open(filename) as file:
            for line in file:
                raw_value.append(line)
        model_points = np.array(raw_value, dtype=np.float32)
        model_points = np.reshape(model_points, (3, -1)).T

        # Flip the z axis.
        model_points[:, -1] *= -1

        return model_points

    def solve_pose(self, image_points):
        """Solve pose from the 6 key image points.

        Return (rotation_vector, translation_vector) as pose.
        """
        (_, rotation_vector, translation_vector) = cv2.solvePnP(
            self.model_points, image_points, self.camera_matrix,
            self.dist_coeffs)
        return (rotation_vector, translation_vector)

    def solve_pose_by_68_points(self, image_points):
        """Solve pose from all 68 image points.

        Return (rotation_vector, translation_vector) as pose.
        """
        # Solve once from scratch when no extrinsic guess is available yet.
        if self.r_vec is None:
            (_, rotation_vector, translation_vector) = cv2.solvePnP(
                self.model_points_68, image_points, self.camera_matrix,
                self.dist_coeffs)
            self.r_vec = rotation_vector
            self.t_vec = translation_vector

        # Solve again with the stored pose as the initial guess, which
        # speeds up convergence and stabilizes the estimate.
        (_, rotation_vector, translation_vector) = cv2.solvePnP(
            self.model_points_68,
            image_points,
            self.camera_matrix,
            self.dist_coeffs,
            rvec=self.r_vec,
            tvec=self.t_vec,
            useExtrinsicGuess=True)

        return (rotation_vector, translation_vector)

    def draw_annotation_box(self, image, rotation_vector, translation_vector,
                            color=(255, 255, 255), line_width=2):
        """Draw a 3D box as an annotation of the pose."""
        # Build the 3D box: a smaller rectangle at the rear and a larger
        # one at the front, each traced back to its starting corner.
        point_3d = []
        rear_size = 75
        rear_depth = 0
        point_3d.append((-rear_size, -rear_size, rear_depth))
        point_3d.append((-rear_size, rear_size, rear_depth))
        point_3d.append((rear_size, rear_size, rear_depth))
        point_3d.append((rear_size, -rear_size, rear_depth))
        point_3d.append((-rear_size, -rear_size, rear_depth))

        front_size = 100
        front_depth = 100
        point_3d.append((-front_size, -front_size, front_depth))
        point_3d.append((-front_size, front_size, front_depth))
        point_3d.append((front_size, front_size, front_depth))
        point_3d.append((front_size, -front_size, front_depth))
        point_3d.append((-front_size, -front_size, front_depth))

        # np.float was removed in NumPy 1.24; the builtin float is the
        # equivalent 64-bit type.
        point_3d = np.array(point_3d, dtype=float).reshape(-1, 3)

        # Map the 3D points to 2D image points.
        (point_2d, _) = cv2.projectPoints(point_3d,
                                          rotation_vector,
                                          translation_vector,
                                          self.camera_matrix,
                                          self.dist_coeffs)
        point_2d = np.int32(point_2d.reshape(-1, 2))

        # Draw both rectangles, then the remaining edges connecting them.
        cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[1]), tuple(point_2d[6]),
                 color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[2]), tuple(point_2d[7]),
                 color, line_width, cv2.LINE_AA)
        cv2.line(image, tuple(point_2d[3]), tuple(point_2d[8]),
                 color, line_width, cv2.LINE_AA)

    def get_pose_marks(self, marks):
        """Pick the 6 marks used for pose estimation out of the 68 marks."""
        pose_marks = []
        pose_marks.append(marks[30])    # Nose tip
        pose_marks.append(marks[8])     # Chin
        pose_marks.append(marks[36])    # Left eye left corner
        pose_marks.append(marks[45])    # Right eye right corner
        pose_marks.append(marks[48])    # Mouth left corner
        pose_marks.append(marks[54])    # Mouth right corner
        return pose_marks
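

if __name__ == '__main__':
    # Minimal smoke test; not part of the original module. The landmark
    # coordinates below are made-up pixel positions for a roughly frontal
    # face in a 640x480 frame. In real use they would come from a facial
    # landmark detector.
    estimator = PoseEstimator(img_size=(480, 640))
    image_points = np.array([
        (320.0, 240.0),    # Nose tip
        (320.0, 320.0),    # Chin
        (260.0, 205.0),    # Left eye left corner
        (380.0, 205.0),    # Right eye right corner
        (288.0, 285.0),    # Mouth left corner
        (352.0, 285.0)     # Mouth right corner
    ], dtype="double")

    r_vec, t_vec = estimator.solve_pose(image_points)
    print("Rotation vector:\n", r_vec)
    print("Translation vector:\n", t_vec)

    # Visualize the pose by drawing the annotation box on a blank frame.
    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    estimator.draw_annotation_box(frame, r_vec, t_vec)
    cv2.imwrite("pose_preview.png", frame)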