feat: basic (perspective?) avatar
This commit is contained in:
parent
7212e2e8ec
commit
ea1732eddf
|
@ -1 +1,9 @@
|
|||
from .crown import Crown
|
||||
from .head import Head
|
||||
from .left_ear import LeftEar
|
||||
from .left_eye import LeftEye
|
||||
from .left_moustache import LeftMoustache
|
||||
from .mouth import Mouth
|
||||
from .right_ear import RightEar
|
||||
from .right_eye import RightEye
|
||||
from .right_moustache import RightMoustache
|
||||
|
|
74
src/bodyparts/crown.py
Normal file
74
src/bodyparts/crown.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from environment import Environment
|
||||
|
||||
|
||||
class Crown:
    """The crown body part, rendered as a perspective-warped texture."""

    def __init__(self, env: Environment) -> None:
        """Initialize the crown.

        Args:
            env: Shared environment holding the camera frame, the face
                detection results and the avatar canvas.

        Raises:
            FileNotFoundError: If the texture asset cannot be loaded.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel, used as a mask in draw().
        self.image = cv2.imread("assets/crown.png", cv2.IMREAD_UNCHANGED)
        if self.image is None:
            raise FileNotFoundError("Could not load 'assets/crown.png'")
        # Width / height aspect ratio of the texture.
        self.ratio = self.image.shape[1] / self.image.shape[0]
        # Source quad: texture corners, clockwise from the top-left.
        self.bounding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )
        # Backward-compatible alias for the historical misspelling.
        self.bouding_box = self.bounding_box

    def draw(self) -> None:
        """Draw the crown onto the avatar canvas."""
        if not self.env.results.multi_face_landmarks:
            return
        for _ in self.env.results.multi_face_landmarks:
            # Destination quad in head-local axes (x/y/z are the head's
            # orientation vectors), projected to 2D by dropping z.
            box = np.array(
                [
                    30 * self.ratio * self.env.x + 135 * self.env.y + 90 * self.env.z,
                    -30 * self.ratio * self.env.x + 135 * self.env.y + 90 * self.env.z,
                    -30 * self.ratio * self.env.x + 75 * self.env.y + 90 * self.env.z,
                    30 * self.ratio * self.env.x + 75 * self.env.y + 90 * self.env.z,
                ]
            )[:, :2]

            # Translate the quad to the face center, in pixel coordinates.
            self.translated_box = (
                box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
            ).astype(np.float32)

            # Homography mapping the texture corners onto the screen quad.
            transform_mat = cv2.getPerspectiveTransform(
                self.bounding_box,
                self.translated_box,
            )

            # Warp the texture into frame-sized space.
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )

            # Copy only the opaque pixels (alpha != 0) onto the avatar.
            mask = warped[:, :, 3] != 0
            self.env.avatar[mask] = warped[mask][:, :3]

    def draw_debug(self) -> None:
        """Outline the destination quad on the camera frame."""
        if not hasattr(self, "translated_box"):
            # draw() has not produced a quad yet (no face detected so far).
            return
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
|
|
@ -72,14 +72,3 @@ class Head:
|
|||
(255, 255, 255),
|
||||
2,
|
||||
)
|
||||
|
||||
# TODO: faire un POC où je place juste un cvGetPerspectiveTransform suivi d'un cvWarpPerspective
|
||||
# -> comme sur cet exemple, mais où l'image de gauche et droite sont inversées
|
||||
# https://docs.adaptive-vision.com/studio/filters/GeometricImageTransformations/cvGetPerspectiveTransform.html
|
||||
|
||||
# instrisics ? -> https://github.dev/google/mediapipe/blob/master/mediapipe/modules/face_geometry/libs/effect_renderer.cc#L573-L599
|
||||
|
||||
# TODO: https://github.com/Rassibassi/mediapipeFacegeometryPython/blob/main/head_posture_rt.py
|
||||
# -> pnp -> pose estimation -> paramètres extrinsèques
|
||||
# -> + param intrasèque (supposé connu, check site mediapipe)
|
||||
# -> placer dans l'espace les textures -> et projeter dans le plan image
|
||||
|
|
74
src/bodyparts/left_ear.py
Normal file
74
src/bodyparts/left_ear.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from environment import Environment
|
||||
|
||||
|
||||
class LeftEar:
    """The left ear body part, rendered as a perspective-warped texture."""

    def __init__(self, env: Environment) -> None:
        """Initialize the left ear.

        Args:
            env: Shared environment holding the camera frame, the face
                detection results and the avatar canvas.

        Raises:
            FileNotFoundError: If the texture asset cannot be loaded.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel, used as a mask in draw().
        self.image = cv2.imread("assets/earL.png", cv2.IMREAD_UNCHANGED)
        if self.image is None:
            raise FileNotFoundError("Could not load 'assets/earL.png'")
        # Width / height aspect ratio of the texture.
        self.ratio = self.image.shape[1] / self.image.shape[0]
        # Source quad: texture corners, clockwise from the top-left.
        self.bounding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )
        # Backward-compatible alias for the historical misspelling.
        self.bouding_box = self.bounding_box

    def draw(self) -> None:
        """Draw the left ear onto the avatar canvas."""
        if not self.env.results.multi_face_landmarks:
            return
        for _ in self.env.results.multi_face_landmarks:
            # Destination quad in head-local axes, projected to 2D.
            box = np.array(
                [
                    100 * self.env.x + 250 * self.env.y + 70 * self.env.z,
                    30 * self.env.x + 250 * self.env.y + 70 * self.env.z,
                    30 * self.env.x + 50 * self.env.y + 70 * self.env.z,
                    100 * self.env.x + 50 * self.env.y + 70 * self.env.z,
                ]
            )[:, :2]

            # Translate the quad to the face center, in pixel coordinates.
            self.translated_box = (
                box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
            ).astype(np.float32)

            # Homography mapping the texture corners onto the screen quad.
            transform_mat = cv2.getPerspectiveTransform(
                self.bounding_box,
                self.translated_box,
            )

            # Warp the texture into frame-sized space.
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )

            # Copy only the opaque pixels (alpha != 0) onto the avatar.
            mask = warped[:, :, 3] != 0
            self.env.avatar[mask] = warped[mask][:, :3]

    def draw_debug(self) -> None:
        """Outline the destination quad on the camera frame."""
        if not hasattr(self, "translated_box"):
            # draw() has not produced a quad yet (no face detected so far).
            return
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
|
74
src/bodyparts/left_eye.py
Normal file
74
src/bodyparts/left_eye.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from environment import Environment
|
||||
|
||||
|
||||
class LeftEye:
    """The left eye body part, rendered as a perspective-warped texture."""

    def __init__(self, env: Environment) -> None:
        """Initialize the left eye.

        Args:
            env: Shared environment holding the camera frame, the face
                detection results and the avatar canvas.

        Raises:
            FileNotFoundError: If the texture asset cannot be loaded.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel, used as a mask in draw().
        self.image = cv2.imread("assets/eyeL.png", cv2.IMREAD_UNCHANGED)
        if self.image is None:
            raise FileNotFoundError("Could not load 'assets/eyeL.png'")
        # Width / height aspect ratio of the texture.
        self.ratio = self.image.shape[1] / self.image.shape[0]
        # Source quad: texture corners, clockwise from the top-left.
        self.bounding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )
        # Backward-compatible alias for the historical misspelling.
        self.bouding_box = self.bounding_box

    def draw(self) -> None:
        """Draw the left eye onto the avatar canvas."""
        if not self.env.results.multi_face_landmarks:
            return
        for _ in self.env.results.multi_face_landmarks:
            # Destination quad in head-local axes, projected to 2D.
            box = np.array(
                [
                    40 * self.env.x + 70 * self.env.y + 80 * self.env.z,
                    25 * self.env.x + 70 * self.env.y + 80 * self.env.z,
                    25 * self.env.x + 15 * self.env.y + 80 * self.env.z,
                    40 * self.env.x + 15 * self.env.y + 80 * self.env.z,
                ]
            )[:, :2]

            # Translate the quad to the face center, in pixel coordinates.
            self.translated_box = (
                box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
            ).astype(np.float32)

            # Homography mapping the texture corners onto the screen quad.
            transform_mat = cv2.getPerspectiveTransform(
                self.bounding_box,
                self.translated_box,
            )

            # Warp the texture into frame-sized space.
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )

            # Copy only the opaque pixels (alpha != 0) onto the avatar.
            mask = warped[:, :, 3] != 0
            self.env.avatar[mask] = warped[mask][:, :3]

    def draw_debug(self) -> None:
        """Outline the destination quad on the camera frame."""
        if not hasattr(self, "translated_box"):
            # draw() has not produced a quad yet (no face detected so far).
            return
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
|
74
src/bodyparts/left_moustache.py
Normal file
74
src/bodyparts/left_moustache.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from environment import Environment
|
||||
|
||||
|
||||
class LeftMoustache:
    """The left moustache body part, rendered as a perspective-warped texture."""

    def __init__(self, env: Environment) -> None:
        """Initialize the left moustache.

        Args:
            env: Shared environment holding the camera frame, the face
                detection results and the avatar canvas.

        Raises:
            FileNotFoundError: If the texture asset cannot be loaded.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel, used as a mask in draw().
        self.image = cv2.imread("assets/moustacheL.png", cv2.IMREAD_UNCHANGED)
        if self.image is None:
            raise FileNotFoundError("Could not load 'assets/moustacheL.png'")
        # Width / height aspect ratio of the texture.
        self.ratio = self.image.shape[1] / self.image.shape[0]
        # Source quad: texture corners, clockwise from the top-left.
        self.bounding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )
        # Backward-compatible alias for the historical misspelling.
        self.bouding_box = self.bounding_box

    def draw(self) -> None:
        """Draw the left moustache onto the avatar canvas."""
        if not self.env.results.multi_face_landmarks:
            return
        for _ in self.env.results.multi_face_landmarks:
            # Destination quad in head-local axes, projected to 2D.
            box = np.array(
                [
                    270 * self.env.x + 20 * self.env.y + 80 * self.env.z,
                    70 * self.env.x + 20 * self.env.y + 80 * self.env.z,
                    70 * self.env.x - 130 * self.env.y + 80 * self.env.z,
                    270 * self.env.x - 130 * self.env.y + 80 * self.env.z,
                ]
            )[:, :2]

            # Translate the quad to the face center, in pixel coordinates.
            self.translated_box = (
                box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
            ).astype(np.float32)

            # Homography mapping the texture corners onto the screen quad.
            transform_mat = cv2.getPerspectiveTransform(
                self.bounding_box,
                self.translated_box,
            )

            # Warp the texture into frame-sized space.
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )

            # Copy only the opaque pixels (alpha != 0) onto the avatar.
            mask = warped[:, :, 3] != 0
            self.env.avatar[mask] = warped[mask][:, :3]

    def draw_debug(self) -> None:
        """Outline the destination quad on the camera frame."""
        if not hasattr(self, "translated_box"):
            # draw() has not produced a quad yet (no face detected so far).
            return
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
|
74
src/bodyparts/mouth.py
Normal file
74
src/bodyparts/mouth.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from environment import Environment
|
||||
|
||||
|
||||
class Mouth:
    """The mouth body part, rendered as a perspective-warped texture."""

    def __init__(self, env: Environment) -> None:
        """Initialize the mouth.

        Args:
            env: Shared environment holding the camera frame, the face
                detection results and the avatar canvas.

        Raises:
            FileNotFoundError: If the texture asset cannot be loaded.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel, used as a mask in draw().
        self.image = cv2.imread("assets/mouth.png", cv2.IMREAD_UNCHANGED)
        if self.image is None:
            raise FileNotFoundError("Could not load 'assets/mouth.png'")
        # Width / height aspect ratio of the texture.
        self.ratio = self.image.shape[1] / self.image.shape[0]
        # Source quad: texture corners, clockwise from the top-left.
        self.bounding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )
        # Backward-compatible alias for the historical misspelling.
        self.bouding_box = self.bounding_box

    def draw(self) -> None:
        """Draw the mouth onto the avatar canvas."""
        if not self.env.results.multi_face_landmarks:
            return
        for _ in self.env.results.multi_face_landmarks:
            # Destination quad in head-local axes, projected to 2D.
            box = np.array(
                [
                    15 * self.ratio * self.env.x - 30 * self.env.y + 80 * self.env.z,
                    -15 * self.ratio * self.env.x - 30 * self.env.y + 80 * self.env.z,
                    -15 * self.ratio * self.env.x - 50 * self.env.y + 80 * self.env.z,
                    15 * self.ratio * self.env.x - 50 * self.env.y + 80 * self.env.z,
                ]
            )[:, :2]

            # Translate the quad to the face center, in pixel coordinates.
            self.translated_box = (
                box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
            ).astype(np.float32)

            # Homography mapping the texture corners onto the screen quad.
            transform_mat = cv2.getPerspectiveTransform(
                self.bounding_box,
                self.translated_box,
            )

            # Warp the texture into frame-sized space.
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )

            # Copy only the opaque pixels (alpha != 0) onto the avatar.
            mask = warped[:, :, 3] != 0
            self.env.avatar[mask] = warped[mask][:, :3]

    def draw_debug(self) -> None:
        """Outline the destination quad on the camera frame."""
        if not hasattr(self, "translated_box"):
            # draw() has not produced a quad yet (no face detected so far).
            return
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
|
74
src/bodyparts/right_ear.py
Normal file
74
src/bodyparts/right_ear.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from environment import Environment
|
||||
|
||||
|
||||
class RightEar:
    """The right ear body part, rendered as a perspective-warped texture."""

    def __init__(self, env: Environment) -> None:
        """Initialize the right ear.

        Args:
            env: Shared environment holding the camera frame, the face
                detection results and the avatar canvas.

        Raises:
            FileNotFoundError: If the texture asset cannot be loaded.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel, used as a mask in draw().
        self.image = cv2.imread("assets/earR.png", cv2.IMREAD_UNCHANGED)
        if self.image is None:
            raise FileNotFoundError("Could not load 'assets/earR.png'")
        # Width / height aspect ratio of the texture.
        self.ratio = self.image.shape[1] / self.image.shape[0]
        # Source quad: texture corners, clockwise from the top-left.
        self.bounding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )
        # Backward-compatible alias for the historical misspelling.
        self.bouding_box = self.bounding_box

    def draw(self) -> None:
        """Draw the right ear onto the avatar canvas."""
        if not self.env.results.multi_face_landmarks:
            return
        for _ in self.env.results.multi_face_landmarks:
            # Destination quad in head-local axes, projected to 2D.
            box = np.array(
                [
                    -30 * self.env.x + 250 * self.env.y + 70 * self.env.z,
                    -100 * self.env.x + 250 * self.env.y + 70 * self.env.z,
                    -100 * self.env.x + 50 * self.env.y + 70 * self.env.z,
                    -30 * self.env.x + 50 * self.env.y + 70 * self.env.z,
                ]
            )[:, :2]

            # Translate the quad to the face center, in pixel coordinates.
            self.translated_box = (
                box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
            ).astype(np.float32)

            # Homography mapping the texture corners onto the screen quad.
            transform_mat = cv2.getPerspectiveTransform(
                self.bounding_box,
                self.translated_box,
            )

            # Warp the texture into frame-sized space.
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )

            # Copy only the opaque pixels (alpha != 0) onto the avatar.
            mask = warped[:, :, 3] != 0
            self.env.avatar[mask] = warped[mask][:, :3]

    def draw_debug(self) -> None:
        """Outline the destination quad on the camera frame."""
        if not hasattr(self, "translated_box"):
            # draw() has not produced a quad yet (no face detected so far).
            return
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
|
74
src/bodyparts/right_eye.py
Normal file
74
src/bodyparts/right_eye.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from environment import Environment
|
||||
|
||||
|
||||
class RightEye:
    """The right eye body part, rendered as a perspective-warped texture."""

    def __init__(self, env: Environment) -> None:
        """Initialize the right eye.

        Args:
            env: Shared environment holding the camera frame, the face
                detection results and the avatar canvas.

        Raises:
            FileNotFoundError: If the texture asset cannot be loaded.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel, used as a mask in draw().
        self.image = cv2.imread("assets/eyeR.png", cv2.IMREAD_UNCHANGED)
        if self.image is None:
            raise FileNotFoundError("Could not load 'assets/eyeR.png'")
        # Width / height aspect ratio of the texture.
        self.ratio = self.image.shape[1] / self.image.shape[0]
        # Source quad: texture corners, clockwise from the top-left.
        self.bounding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )
        # Backward-compatible alias for the historical misspelling.
        self.bouding_box = self.bounding_box

    def draw(self) -> None:
        """Draw the right eye onto the avatar canvas."""
        if not self.env.results.multi_face_landmarks:
            return
        for _ in self.env.results.multi_face_landmarks:
            # Destination quad in head-local axes, projected to 2D.
            # (Slicing here matches the sibling body-part classes.)
            box = np.array(
                [
                    -25 * self.env.x + 70 * self.env.y + 80 * self.env.z,
                    -40 * self.env.x + 70 * self.env.y + 80 * self.env.z,
                    -40 * self.env.x + 15 * self.env.y + 80 * self.env.z,
                    -25 * self.env.x + 15 * self.env.y + 80 * self.env.z,
                ]
            )[:, :2]

            # Translate the quad to the face center, in pixel coordinates.
            self.translated_box = (
                box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
            ).astype(np.float32)

            # Homography mapping the texture corners onto the screen quad.
            transform_mat = cv2.getPerspectiveTransform(
                self.bounding_box,
                self.translated_box,
            )

            # Warp the texture into frame-sized space.
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )

            # Copy only the opaque pixels (alpha != 0) onto the avatar.
            mask = warped[:, :, 3] != 0
            self.env.avatar[mask] = warped[mask][:, :3]

    def draw_debug(self) -> None:
        """Outline the destination quad on the camera frame."""
        if not hasattr(self, "translated_box"):
            # draw() has not produced a quad yet (no face detected so far).
            return
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
|
74
src/bodyparts/right_moustache.py
Normal file
74
src/bodyparts/right_moustache.py
Normal file
|
@ -0,0 +1,74 @@
|
|||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from environment import Environment
|
||||
|
||||
|
||||
class RightMoustache:
    """The right moustache body part, rendered as a perspective-warped texture."""

    def __init__(self, env: Environment) -> None:
        """Initialize the right moustache.

        Args:
            env: Shared environment holding the camera frame, the face
                detection results and the avatar canvas.

        Raises:
            FileNotFoundError: If the texture asset cannot be loaded.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel, used as a mask in draw().
        self.image = cv2.imread("assets/moustacheR.png", cv2.IMREAD_UNCHANGED)
        if self.image is None:
            raise FileNotFoundError("Could not load 'assets/moustacheR.png'")
        # Width / height aspect ratio of the texture.
        self.ratio = self.image.shape[1] / self.image.shape[0]
        # Source quad: texture corners, clockwise from the top-left.
        self.bounding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )
        # Backward-compatible alias for the historical misspelling.
        self.bouding_box = self.bounding_box

    def draw(self) -> None:
        """Draw the right moustache onto the avatar canvas."""
        if not self.env.results.multi_face_landmarks:
            return
        for _ in self.env.results.multi_face_landmarks:
            # Destination quad in head-local axes, projected to 2D.
            box = np.array(
                [
                    -70 * self.env.x + 20 * self.env.y + 80 * self.env.z,
                    -270 * self.env.x + 20 * self.env.y + 80 * self.env.z,
                    -270 * self.env.x - 130 * self.env.y + 80 * self.env.z,
                    -70 * self.env.x - 130 * self.env.y + 80 * self.env.z,
                ]
            )[:, :2]

            # Translate the quad to the face center, in pixel coordinates.
            self.translated_box = (
                box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
            ).astype(np.float32)

            # Homography mapping the texture corners onto the screen quad.
            transform_mat = cv2.getPerspectiveTransform(
                self.bounding_box,
                self.translated_box,
            )

            # Warp the texture into frame-sized space.
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )

            # Copy only the opaque pixels (alpha != 0) onto the avatar.
            mask = warped[:, :, 3] != 0
            self.env.avatar[mask] = warped[mask][:, :3]

    def draw_debug(self) -> None:
        """Outline the destination quad on the camera frame."""
        if not hasattr(self, "translated_box"):
            # draw() has not produced a quad yet (no face detected so far).
            return
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
|
|
@ -4,7 +4,17 @@ import cv2
|
|||
import mediapipe as mp
|
||||
import numpy as np
|
||||
|
||||
from bodyparts import Head
|
||||
from bodyparts import (
|
||||
Crown,
|
||||
Head,
|
||||
LeftEar,
|
||||
LeftEye,
|
||||
LeftMoustache,
|
||||
Mouth,
|
||||
RightEar,
|
||||
RightEye,
|
||||
RightMoustache,
|
||||
)
|
||||
from utils import (
|
||||
LANDMARKS_BOTTOM_SIDE,
|
||||
LANDMARKS_LEFT_SIDE,
|
||||
|
@ -39,16 +49,15 @@ class Environment:
|
|||
|
||||
# create body parts
|
||||
self.body_parts = [
|
||||
# Body(self),
|
||||
# Ear(False, self),
|
||||
# Ear(True, self),
|
||||
LeftEar(self),
|
||||
RightEar(self),
|
||||
Head(self),
|
||||
# Moustache(False, self),
|
||||
# Moustache(True, self),
|
||||
# Eye(False, self),
|
||||
# Eye(True, self),
|
||||
# Crown(self),
|
||||
# Mouth(self),
|
||||
RightMoustache(self),
|
||||
LeftMoustache(self),
|
||||
LeftEye(self),
|
||||
RightEye(self),
|
||||
Crown(self),
|
||||
Mouth(self),
|
||||
]
|
||||
|
||||
def start(self) -> None:
|
||||
|
@ -73,15 +82,17 @@ class Environment:
|
|||
self.draw_axis()
|
||||
|
||||
# draw keypoints on top of frame
|
||||
self.draw_keypoints()
|
||||
# self.draw_keypoints()
|
||||
|
||||
# draw avatar
|
||||
self.draw_avatar()
|
||||
|
||||
# show frame
|
||||
# cv2.imshow("Camera", self.frame)
|
||||
cv2.imshow("Camera", cv2.flip(self.frame, 1))
|
||||
|
||||
# show avatar
|
||||
# cv2.imshow("Avatar", self.avatar)
|
||||
cv2.imshow("Avatar", cv2.flip(self.avatar, 1))
|
||||
|
||||
def detect_keypoints(self) -> None:
|
||||
|
@ -167,6 +178,7 @@ class Environment:
|
|||
"""Draw the avatar on the screen."""
|
||||
# clear avatar frame
|
||||
self.avatar = np.zeros(self.frame.shape, dtype=np.uint8)
|
||||
self.avatar[:, :] = (0, 255, 0)
|
||||
|
||||
# draw each body part
|
||||
for part in self.body_parts:
|
||||
|
|
|
@ -28,4 +28,8 @@ if __name__ == "__main__":
|
|||
datefmt="(%F %T)",
|
||||
)
|
||||
|
||||
# import cProfile
|
||||
|
||||
# cProfile.run("main()", sort="cumtime")
|
||||
|
||||
main()
|
||||
|
|
Loading…
Reference in a new issue