Compare commits
No commits in common. "ff99203af6dc58acebd941aa6e43710079c91d66" and "eb2cf131d58af6f404d7dafa0457db54fadff7b8" have entirely different histories.
ff99203af6...eb2cf131d5
README.md
@@ -1,23 +1 @@
Create your own branches

# random stuff

https://scikit-image.org/docs/stable/auto_examples/transform/plot_geometric.html#sphx-glr-auto-examples-transform-plot-geometric-py
https://scikit-image.org/docs/stable/auto_examples/transform/plot_transform_types.html#sphx-glr-auto-examples-transform-plot-transform-types-py
https://github.com/google/mediapipe/issues/1379
https://en.wikipedia.org/wiki/3D_projection#Perspective_projection


TODO: build a POC that simply applies a cvGetPerspectiveTransform followed by a cvWarpPerspective
-> as in this example, but with the left and right images swapped
https://docs.adaptive-vision.com/studio/filters/GeometricImageTransformations/cvGetPerspectiveTransform.html
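
A minimal sketch of that POC in Python: cv2.getPerspectiveTransform and cv2.warpPerspective are the OpenCV counterparts of the cvGetPerspectiveTransform / cvWarpPerspective filters linked above. The file name and corner coordinates here are made-up assumptions, not values from the project:

import cv2
import numpy as np

image = cv2.imread("input.png")  # hypothetical input image
h, w = image.shape[:2]

# source quad: the four corners of the full image
src = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
# destination quad: an arbitrary convex quadrilateral (illustrative values)
dst = np.float32([[50, 80], [w - 120, 40], [w - 60, h - 50], [90, h - 90]])

# 3x3 homography mapping src onto dst, then resample the image through it
matrix = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(image, matrix, (w, h))

cv2.imwrite("warped.png", warped)

Swapping src and dst (or inverting the matrix) would give the reversed left/right mapping mentioned in the note.
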
intrinsics? -> https://github.dev/google/mediapipe/blob/master/mediapipe/modules/face_geometry/libs/effect_renderer.cc#L573-L599

TODO: https://github.com/Rassibassi/mediapipeFacegeometryPython/blob/main/head_posture_rt.py
-> PnP -> pose estimation -> extrinsic parameters
-> + intrinsic parameters (assumed known, check the mediapipe site)
-> place the textures in 3D space -> and project them onto the image plane (sketched below)


https://github.com/Rassibassi/mediapipeDemos
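
A rough sketch of that PnP pipeline: cv2.solvePnP recovers the extrinsic parameters (head rotation and translation) from 2D landmarks and known intrinsics, and cv2.projectPoints then maps 3D texture anchors into the image plane. The 3D model points, pixel coordinates, and intrinsics below are illustrative assumptions, not MediaPipe's actual values:

import cv2
import numpy as np

# assumed 3D face-model reference points (a common 6-point approximation)
model_points = np.array([
    [0.0, 0.0, 0.0],           # nose tip
    [0.0, -330.0, -65.0],      # chin
    [-225.0, 170.0, -135.0],   # left eye, left corner
    [225.0, 170.0, -135.0],    # right eye, right corner
    [-150.0, -150.0, -125.0],  # mouth, left corner
    [150.0, -150.0, -125.0],   # mouth, right corner
])

# matching 2D detections in pixels (would come from the face-mesh landmarks)
image_points = np.array([
    [320.0, 240.0], [315.0, 340.0], [250.0, 200.0],
    [390.0, 205.0], [280.0, 290.0], [360.0, 295.0],
])

# intrinsics assumed known: focal length ~ frame width, principal point at center
width, height = 640, 480
camera_matrix = np.array([
    [width, 0, width / 2],
    [0, width, height / 2],
    [0, 0, 1],
], dtype=np.float64)
dist_coeff = np.zeros((4, 1))  # assume no lens distortion

# PnP -> pose estimation -> extrinsic parameters
ok, rotation_vec, translation_vec = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeff)

# place a texture corner in model space and project it into the image plane
texture_corner_3d = np.array([[0.0, 400.0, -100.0]])  # e.g. above the forehead (made up)
projected, _ = cv2.projectPoints(texture_corner_3d, rotation_vec, translation_vec, camera_matrix, dist_coeff)
print(projected.squeeze())  # pixel position where that texture corner should be drawn
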
flake.lock
@@ -1,43 +0,0 @@
{
  "nodes": {
    "flake-utils": {
      "locked": {
        "lastModified": 1667395993,
        "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1673796341,
        "narHash": "sha256-1kZi9OkukpNmOaPY7S5/+SlCDOuYnP3HkXHvNDyLQcc=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "6dccdc458512abce8d19f74195bb20fdb067df50",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix
@@ -1,24 +0,0 @@
{
  description = "Proj APP";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    flake-utils.url = "github:numtide/flake-utils";
  };

  outputs = { self, nixpkgs, flake-utils }:
    flake-utils.lib.eachDefaultSystem (system:
      let pkgs = nixpkgs.legacyPackages.${system};
      in {
        devShell = pkgs.mkShell {
          buildInputs = with pkgs; [
            poetry
            python3
            (python310Packages.opencv4.override {
              enableGtk2 = true;
              gtk2 = pkgs.gtk2;
            })
          ];
        };
      });
}
shell.nix (new file)
@@ -0,0 +1,12 @@
{ pkgs ? import <nixpkgs> { } }:

pkgs.mkShell {
  buildInputs = with pkgs; [
    poetry
    python3
    (python310Packages.opencv4.override {
      enableGtk2 = true;
      gtk2 = pkgs.gtk2;
    })
  ];
}
src/active_bodypart.py
@@ -1,87 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import numpy as np
from mediapipe.python.solutions.drawing_utils import _normalized_to_pixel_coordinates

from bodypart import BodyPart

if TYPE_CHECKING:
    from environment import Environment


def landmark2vec(landmark, width, height):
    """Convert a landmark to numpy vector."""
    return np.array(
        _normalized_to_pixel_coordinates(
            np.clip(landmark.x, 0, 1),
            np.clip(landmark.y, 0, 1),
            width,
            height,
        )
    )


class ActiveBodyPart(BodyPart):
    """An active body part."""

    def __init__(
        self,
        env: Environment,
        image_path: str,
        position: np.ndarray,
        height: float,
        keypoints: tuple[int, int, int, int],
    ) -> None:
        """Initialize the active part."""
        super().__init__(env, image_path, position, height)
        self.keypoints = keypoints

    def modulate_height(self) -> None:
        """Modulate the height of the part."""
        if self.env.results.multi_face_landmarks:
            face_landmarks = self.env.results.multi_face_landmarks[0]

            left = landmark2vec(
                face_landmarks.landmark[self.keypoints[0]],
                self.env.camera_width,
                self.env.camera_height,
            )
            right = landmark2vec(
                face_landmarks.landmark[self.keypoints[1]],
                self.env.camera_width,
                self.env.camera_height,
            )
            top = landmark2vec(
                face_landmarks.landmark[self.keypoints[3]],
                self.env.camera_width,
                self.env.camera_height,
            )
            bottom = landmark2vec(
                face_landmarks.landmark[self.keypoints[2]],
                self.env.camera_width,
                self.env.camera_height,
            )

            vertical = np.linalg.norm(top - bottom)
            horizontal = np.linalg.norm(right - left)

            active_ratio = np.clip(horizontal / vertical * 4, 0.1, 1)

            self.nominal_box = (
                np.array(
                    [
                        self.ratio * self.env.x + self.env.y * active_ratio,
                        self.ratio * -self.env.x + self.env.y * active_ratio,
                        self.ratio * -self.env.x + -self.env.y * active_ratio,
                        self.ratio * self.env.x + -self.env.y * active_ratio,
                    ]
                )
                * self.height
            )

    def draw(self) -> None:
        """Draw the active part on the screen."""
        self.modulate_height()
        super().draw()
src/bodypart.py
@@ -1,127 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class BodyPart:
    """A basic body part."""

    def __init__(
        self,
        env: Environment,
        image_path: str,
        position: np.ndarray,
        height: float,
        wavy=None,
    ) -> None:
        """Initialize the part."""
        self.env = env

        self.image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]

        self.height = height
        self.position = position

        self.old_bounding_box = np.zeros((4, 1, 2))
        self.nominal_box = (
            np.array(
                [
                    self.ratio * self.env.x + self.env.y,
                    self.ratio * -self.env.x + self.env.y,
                    self.ratio * -self.env.x + -self.env.y,
                    self.ratio * self.env.x + -self.env.y,
                ]
            )
            * height
        )
        self.image_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

        self.wavy = wavy

    def draw(self) -> None:
        """Draw the part on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:

            for face_landmarks in self.env.results.multi_face_landmarks:
                # compute bounding box from position and height
                bounding_box = self.nominal_box + self.position + self.env.center

                # project bounding box to camera
                (bounding_box, _) = cv2.projectPoints(
                    bounding_box,
                    self.env.mp_rotation_vector,
                    self.env.mp_translation_vector,
                    self.env.camera_matrix,
                    self.env.dist_coeff,
                )

                # interpolation with self.old_bounding_box
                bounding_box = (bounding_box + self.old_bounding_box) / 2
                self.old_bounding_box = bounding_box

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.image_box,
                    bounding_box.astype(np.float32),
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                if self.wavy:
                    # move left side of bounding box up and down sinusoidally
                    self.sin_box = self.wavy(bounding_box.copy(), self.env.frame_count)

                    # compute affine transform
                    sin_mat = cv2.getAffineTransform(
                        bounding_box[[0, 1, 3]].astype(np.float32),
                        self.sin_box[[0, 1, 3]].astype(np.float32),
                    )

                    # apply affine transform to image
                    warped = cv2.warpAffine(
                        warped,
                        sin_mat,
                        self.env.frame.shape[1::-1],
                    )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        cv2.polylines(
            self.env.frame,
            [self.old_bounding_box.squeeze().astype(int)],
            True,
            (255, 255, 255),
            2,
        )
        if self.wavy:
            cv2.polylines(
                self.env.frame,
                [self.sin_box.squeeze().astype(int)],
                True,
                (0, 0, 255),
                2,
            )
src/bodyparts/__init__.py (new file)
@@ -0,0 +1,9 @@
from .crown import Crown
from .head import Head
from .left_ear import LeftEar
from .left_eye import LeftEye
from .left_moustache import LeftMoustache
from .mouth import Mouth
from .right_ear import RightEar
from .right_eye import RightEye
from .right_moustache import RightMoustache
src/bodyparts/crown.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class Crown:
    """The crown body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the crown."""
        self.env = env
        self.image = cv2.imread("assets/crown.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the crown on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                box = np.array(
                    [
                        30 * self.ratio * self.env.x + 135 * self.env.y + 90 * self.env.z,
                        -30 * self.ratio * self.env.x + 135 * self.env.y + 90 * self.env.z,
                        -30 * self.ratio * self.env.x + 75 * self.env.y + 90 * self.env.z,
                        30 * self.ratio * self.env.x + 75 * self.env.y + 90 * self.env.z,
                    ]
                )[:, :2]

                self.translated_box = (
                    box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/bodyparts/head.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class Head:
    """The head body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the head."""
        self.env = env
        self.image = cv2.imread("assets/head.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the head on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                head_box = np.array(
                    [
                        100 * self.ratio * self.env.x + 100 * self.env.y + 80 * self.env.z,
                        100 * self.ratio * -self.env.x + 100 * self.env.y + 80 * self.env.z,
                        100 * self.ratio * -self.env.x + 100 * -self.env.y + 80 * self.env.z,
                        100 * self.ratio * self.env.x + 100 * -self.env.y + 80 * self.env.z,
                    ]
                )[:, :2]

                self.translated_head_box = (
                    head_box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_head_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_head_box[i].astype(np.int32)),
                tuple(self.translated_head_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/bodyparts/left_ear.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class LeftEar:
    """The left ear body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the left ear."""
        self.env = env
        self.image = cv2.imread("assets/earL.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the left ear on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                box = np.array(
                    [
                        100 * self.env.x + 250 * self.env.y + 70 * self.env.z,
                        30 * self.env.x + 250 * self.env.y + 70 * self.env.z,
                        30 * self.env.x + 50 * self.env.y + 70 * self.env.z,
                        100 * self.env.x + 50 * self.env.y + 70 * self.env.z,
                    ]
                )[:, :2]

                self.translated_box = (
                    box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/bodyparts/left_eye.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class LeftEye:
    """The left eye body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the left eye."""
        self.env = env
        self.image = cv2.imread("assets/eyeL.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the left eye on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                box = np.array(
                    [
                        40 * self.env.x + 70 * self.env.y + 80 * self.env.z,
                        25 * self.env.x + 70 * self.env.y + 80 * self.env.z,
                        25 * self.env.x + 15 * self.env.y + 80 * self.env.z,
                        40 * self.env.x + 15 * self.env.y + 80 * self.env.z,
                    ]
                )[:, :2]

                self.translated_box = (
                    box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/bodyparts/left_moustache.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class LeftMoustache:
    """The left moustache body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the left moustache."""
        self.env = env
        self.image = cv2.imread("assets/moustacheL.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the left moustache on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                box = np.array(
                    [
                        270 * self.env.x + 20 * self.env.y + 80 * self.env.z,
                        70 * self.env.x + 20 * self.env.y + 80 * self.env.z,
                        70 * self.env.x + -130 * self.env.y + 80 * self.env.z,
                        270 * self.env.x + -130 * self.env.y + 80 * self.env.z,
                    ]
                )[:, :2]

                self.translated_box = (
                    box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/bodyparts/mouth.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class Mouth:
    """The mouth body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the mouth."""
        self.env = env
        self.image = cv2.imread("assets/mouth.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the mouth on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                box = np.array(
                    [
                        15 * self.ratio * self.env.x + -30 * self.env.y + 80 * self.env.z,
                        -15 * self.ratio * self.env.x + -30 * self.env.y + 80 * self.env.z,
                        -15 * self.ratio * self.env.x + -50 * self.env.y + 80 * self.env.z,
                        15 * self.ratio * self.env.x + -50 * self.env.y + 80 * self.env.z,
                    ]
                )[:, :2]

                self.translated_box = (
                    box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/bodyparts/right_ear.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class RightEar:
    """The right ear body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the right ear."""
        self.env = env
        self.image = cv2.imread("assets/earR.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the right ear on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                box = np.array(
                    [
                        -30 * self.env.x + 250 * self.env.y + 70 * self.env.z,
                        -100 * self.env.x + 250 * self.env.y + 70 * self.env.z,
                        -100 * self.env.x + 50 * self.env.y + 70 * self.env.z,
                        -30 * self.env.x + 50 * self.env.y + 70 * self.env.z,
                    ]
                )[:, :2]

                self.translated_box = (
                    box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/bodyparts/right_eye.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class RightEye:
    """The right eye body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the right eye."""
        self.env = env
        self.image = cv2.imread("assets/eyeR.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the right eye on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                box = np.array(
                    [
                        -25 * self.env.x + 70 * self.env.y + 80 * self.env.z,
                        -40 * self.env.x + 70 * self.env.y + 80 * self.env.z,
                        -40 * self.env.x + 15 * self.env.y + 80 * self.env.z,
                        -25 * self.env.x + 15 * self.env.y + 80 * self.env.z,
                    ]
                )

                self.translated_box = (
                    box[:, :2] + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/bodyparts/right_moustache.py (new file)
@@ -0,0 +1,74 @@
from __future__ import annotations

from typing import TYPE_CHECKING

import cv2
import numpy as np

if TYPE_CHECKING:
    from environment import Environment


class RightMoustache:
    """The right moustache body part."""

    def __init__(self, env: Environment) -> None:
        """Initialize the right moustache."""
        self.env = env
        self.image = cv2.imread("assets/moustacheR.png", cv2.IMREAD_UNCHANGED)
        self.ratio = self.image.shape[1] / self.image.shape[0]
        self.bouding_box = np.array(
            [
                [0, 0],
                [self.image.shape[1], 0],
                [self.image.shape[1], self.image.shape[0]],
                [0, self.image.shape[0]],
            ],
            dtype=np.float32,
        )

    def draw(self) -> None:
        """Draw the right moustache on the screen."""
        # compute position
        if self.env.results.multi_face_landmarks:
            for face_landmarks in self.env.results.multi_face_landmarks:
                box = np.array(
                    [
                        -70 * self.env.x + 20 * self.env.y + 80 * self.env.z,
                        -270 * self.env.x + 20 * self.env.y + 80 * self.env.z,
                        -270 * self.env.x + -130 * self.env.y + 80 * self.env.z,
                        -70 * self.env.x + -130 * self.env.y + 80 * self.env.z,
                    ]
                )[:, :2]

                self.translated_box = (
                    box + self.env.center[:2] * np.array([self.env.camera_width, self.env.camera_height])
                ).astype(np.float32)

                # get perspective transform
                transform_mat = cv2.getPerspectiveTransform(
                    self.bouding_box,
                    self.translated_box,
                )

                # apply perspective transform to image
                warped = cv2.warpPerspective(
                    self.image,
                    transform_mat,
                    self.env.frame.shape[1::-1],
                )

                # replace non black pixels of warped image by frame
                self.env.avatar[warped[:, :, 3] != 0] = warped[warped[:, :, 3] != 0][:, :3]

    def draw_debug(self) -> None:
        """Draw debug information on the screen."""
        # link points
        for i in range(4):
            cv2.line(
                self.env.frame,
                tuple(self.translated_box[i].astype(np.int32)),
                tuple(self.translated_box[(i + 1) % 4].astype(np.int32)),
                (255, 255, 255),
                2,
            )
src/environment.py
@@ -4,36 +4,24 @@ import cv2
import mediapipe as mp
import numpy as np

from active_bodypart import ActiveBodyPart
from bodypart import BodyPart
from face_geometry import PCF, get_metric_landmarks

NOSE_LANDMARK = 4
UNREFINED_LANDMARKS = 468


def earL_wavy(box, t):
    box[0, 0, 0] -= np.sin(t / 7) * 11
    box[1, 0, 0] -= np.sin(t / 7) * 11
    return box


def earR_wavy(box, t):
    box[0, 0, 0] += np.sin(t / 7 + 1) * 11
    box[1, 0, 0] += np.sin(t / 7 + 1) * 11
    return box


def moustacheL_wavy(box, t):
    box[0, 0, 1] += np.sin(t / 7) * 11
    box[3, 0, 1] += np.sin(t / 7) * 11
    return box


def moustacheR_wavy(box, t):
    box[1, 0, 1] += np.sin(t / 7 + 1) * 11
    box[2, 0, 1] += np.sin(t / 7 + 1) * 11
    return box
from bodyparts import (
    Crown,
    Head,
    LeftEar,
    LeftEye,
    LeftMoustache,
    Mouth,
    RightEar,
    RightEye,
    RightMoustache,
)
from utils import (
    LANDMARKS_BOTTOM_SIDE,
    LANDMARKS_LEFT_SIDE,
    LANDMARKS_RIGHT_SIDE,
    LANDMARKS_TOP_SIDE,
    landmark2vec,
)


class Environment:
@@ -50,108 +38,28 @@
        # store reference to webcam
        self.cam = camera

        self.frame_count = 0

        # mediapipe stuff
        self.mp_drawing = mp.solutions.drawing_utils  # type: ignore
        self.mp_drawing_styles = mp.solutions.drawing_styles  # type: ignore
        self.mp_face_mesh = mp.solutions.face_mesh  # type: ignore
        self.refine_landmarks = True

        # get screen size from webcam
        self.camera_width = camera.get(3)
        self.camera_height = camera.get(4)

        # setup face axis
        self.x = np.array([7, 0, 0])  # TODO: replace 7s by 1s
        self.y = np.array([0, 7, 0])
        self.z = np.array([0, 0, 7])

        # create body parts
        self.body_parts = [
            BodyPart(
                self,
                "assets/earL.png",
                np.array([6, 11, -0.5]),
                1,
                wavy=earL_wavy,
            ),
            BodyPart(
                self,
                "assets/earR.png",
                np.array([-6, 11, -0.5]),
                1,
                wavy=earR_wavy,
            ),
            BodyPart(
                self,
                "assets/head.png",
                np.array([0, 0, 0]),
                1,
            ),
            BodyPart(
                self,
                "assets/moustacheL.png",
                np.array([13, -6, 0.1]),
                1,
                wavy=moustacheL_wavy,
            ),
            BodyPart(
                self,
                "assets/moustacheR.png",
                np.array([-13, -6, 0.1]),
                1,
                wavy=moustacheR_wavy,
            ),
            ActiveBodyPart(
                self,
                "assets/eyeL.png",
                np.array([2.5, 2.5, 0.1]),
                0.3,
                (145, 159, 133, 33),
            ),
            ActiveBodyPart(
                self,
                "assets/eyeR.png",
                np.array([-2.5, 2.5, 0.1]),
                0.3,
                (374, 386, 362, 263),
            ),
            BodyPart(
                self,
                "assets/crown.png",
                np.array([0, 6.5, 0.5]),
                0.3,
            ),
            ActiveBodyPart(
                self,
                "assets/mouth.png",
                np.array([0, -3.5, 0.1]),
                0.15,
                (14, 13, 308, 78),
            ),
            LeftEar(self),
            RightEar(self),
            Head(self),
            RightMoustache(self),
            LeftMoustache(self),
            LeftEye(self),
            RightEye(self),
            Crown(self),
            Mouth(self),
        ]

        # pseudo camera internals
        self.dist_coeff = np.zeros((4, 1))
        self.focal_length = self.camera_width
        self.center = (self.camera_width / 2, self.camera_height / 2)
        self.camera_matrix = np.array(
            [
                [self.focal_length, 0, self.center[0]],
                [0, self.focal_length, self.center[1]],
                [0, 0, 1],
            ],
            dtype="double",
        )
        self.pcf = PCF(
            near=1,
            far=10000,
            frame_height=self.camera_height,
            frame_width=self.camera_width,
            fy=self.focal_length,
        )

    def start(self) -> None:
        """Start the environment."""
        while self.cam.isOpened():
@@ -166,8 +74,6 @@
                logging.debug("Ignoring empty camera frame.")
                continue

            self.frame_count += 1

            # detect keypoints on frame
            self.detect_keypoints()

@@ -176,7 +82,7 @@
            self.draw_axis()

            # draw keypoints on top of frame
            self.draw_keypoints()
            # self.draw_keypoints()

            # draw avatar
            self.draw_avatar()
@@ -193,8 +99,7 @@
        """Detect the keypoints on the frame."""
        with self.mp_face_mesh.FaceMesh(
            max_num_faces=1,
            refine_landmarks=self.refine_landmarks,
            static_image_mode=False,
            refine_landmarks=True,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5,
        ) as face_mesh:
@@ -217,46 +122,46 @@
    def compute_face_axis(self) -> None:
        """Compute the face axis."""
        if self.results.multi_face_landmarks:
            # get landmarks, suppose only one face is detected
            face_landmarks = self.results.multi_face_landmarks[0]
            for face_landmarks in self.results.multi_face_landmarks:
                # retrieve points
                left_points = np.array([landmark2vec(face_landmarks.landmark[i]) for i in LANDMARKS_LEFT_SIDE])
                right_points = np.array([landmark2vec(face_landmarks.landmark[i]) for i in LANDMARKS_RIGHT_SIDE])
                bottom_points = np.array([landmark2vec(face_landmarks.landmark[i]) for i in LANDMARKS_BOTTOM_SIDE])
                top_points = np.array([landmark2vec(face_landmarks.landmark[i]) for i in LANDMARKS_TOP_SIDE])

            # convert landmarks to numpy array
            landmarks = np.array([(lm.x, lm.y, lm.z) for lm in face_landmarks.landmark])
            landmarks = landmarks.T
                # compute center
                self.center = np.mean(np.concatenate((left_points, right_points, bottom_points, top_points)), axis=0)

            # remove refined landmarks
            if self.refine_landmarks:
                landmarks = landmarks[:, :UNREFINED_LANDMARKS]
                # compute axis
                self.x = np.mean(right_points - left_points, axis=0)
                self.y = np.mean(top_points - bottom_points, axis=0)
                self.z = np.cross(self.x, self.y)

            # get pose from landmarks
            metric_landmarks, pose_transform_mat = get_metric_landmarks(landmarks, self.pcf)

            # extract rotation and translation vectors
            pose_transform_mat[1:3, :] = -pose_transform_mat[1:3, :]
            self.mp_rotation_vector, _ = cv2.Rodrigues(pose_transform_mat[:3, :3])
            self.mp_translation_vector = pose_transform_mat[:3, 3, None]

            # retrieve center of face
            self.center = metric_landmarks[:, NOSE_LANDMARK].T
                # normalize axis
                self.x = self.x / np.linalg.norm(self.x)
                self.y = self.y / np.linalg.norm(self.y)
                self.z = self.z / np.linalg.norm(self.z)

    def draw_axis(self) -> None:
        """Draw the face axis on the frame."""
        # project axis
        (nose_pointers, _) = cv2.projectPoints(
            np.array([np.zeros(3), self.x, self.y, self.z]) + self.center,
            self.mp_rotation_vector,
            self.mp_translation_vector,
            self.camera_matrix,
            self.dist_coeff,
        )
        for (axis, color, letter) in [
            (self.x, (0, 0, 255), "X"),
            (self.y, (0, 255, 0), "Y"),
            (self.z, (255, 0, 0), "Z"),
        ]:
            # compute start and end of axis
            start = (
                int(self.center[0] * self.camera_width),
                int(self.center[1] * self.camera_height),
            )
            end = (
                int(self.center[0] * self.camera_width + axis[0] * 100),
                int(self.center[1] * self.camera_height + axis[1] * 100),
            )

            # extract projected vectors
            nose_tip_2D, nose_tip_2D_x, nose_tip_2D_y, nose_tip_2D_z = nose_pointers.squeeze().astype(int)

            # draw axis
            cv2.line(self.frame, nose_tip_2D, nose_tip_2D_x, (0, 0, 255), 2)
            cv2.line(self.frame, nose_tip_2D, nose_tip_2D_y, (0, 255, 0), 2)
            cv2.line(self.frame, nose_tip_2D, nose_tip_2D_z, (255, 0, 0), 2)
            # draw axis + letter
            cv2.line(self.frame, start, end, color, 2)
            cv2.putText(self.frame, letter, end, cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    def draw_keypoints(self) -> None:
        """Draw the keypoints on the screen."""
@@ -266,7 +171,7 @@
            self.mp_drawing.draw_landmarks(
                self.frame,
                face_landmarks,
                landmark_drawing_spec=self.mp_drawing_styles.DrawingSpec((0, 0, 0), 0, 0),
                landmark_drawing_spec=self.mp_drawing_styles.DrawingSpec((0, 0, 255), 0, 0),
            )

    def draw_avatar(self) -> None:
src/face_geometry.py
File diff suppressed because it is too large (2659 lines).
src/utils.py (new file)
@@ -0,0 +1,32 @@
import numpy as np
from mediapipe.python.solutions.drawing_utils import _normalized_to_pixel_coordinates

# def landmark2vec(landmark, screen):
#     """Convert a landmark to a pygame Vector2."""
#     return pg.Vector2(
#         _normalized_to_pixel_coordinates(
#             np.clip(landmark.x, 0, 1),
#             np.clip(landmark.y, 0, 1),
#             screen.get_width(),
#             screen.get_height(),
#         )  # type: ignore
#     )


def landmark2vec(landmark) -> np.ndarray:
    """Convert a landmark to numpy array."""
    return np.clip(
        [
            landmark.x,
            landmark.y,
            landmark.z,
        ],
        a_min=0,
        a_max=1,
    )


LANDMARKS_LEFT_SIDE = [109, 67, 103, 54, 21, 162, 127, 234, 93, 132, 58, 172, 136, 150, 149, 176, 148]
LANDMARKS_RIGHT_SIDE = [338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288, 397, 365, 379, 378, 400, 377]
LANDMARKS_BOTTOM_SIDE = [93, 132, 152, 361, 323]
LANDMARKS_TOP_SIDE = [127, 162, 10, 389, 356]