feat: POC horrible code

This commit is contained in:
Laureηt 2022-12-29 15:49:17 +01:00
parent e05df6256a
commit 0aa7c42781
Signed by: Laurent
SSH key fingerprint: SHA256:kZEpW8cMJ54PDeCvOhzreNr4FSh6R13CMGH/POoO8DI
4 changed files with 311 additions and 53 deletions

2
.vscode/launch.json vendored
View file

@@ -9,8 +9,6 @@
"console": "integratedTerminal",
"justMyCode": true,
"env": {
"QT_QPA_PLATFORM": "xcb",
"SDL_VIDEODRIVER": "x11",
"PYGAME_HIDE_SUPPORT_PROMPT": "hide"
}
}

134
src/bodyparts/test.py Normal file
View file

@@ -0,0 +1,134 @@
from __future__ import annotations
from typing import TYPE_CHECKING
import cv2
import numpy as np
import pygame as pg
if TYPE_CHECKING:
from environment import Environment
class Test:
    """Experimental body part: perspective-warps the head texture onto the
    detected face quad and blends it into the OpenCV frame, with a red
    debug outline of the quad."""

    def __init__(self, env: Environment) -> None:
        """Store the environment and load the head texture.

        Args:
            env: shared environment holding the frame, face axes and results.
        """
        self.env = env
        # IMREAD_UNCHANGED keeps the alpha channel (needed for blending below).
        self.image = cv2.imread("assets/head.png", cv2.IMREAD_UNCHANGED)

    def draw(self, screen: pg.Surface) -> None:
        """Project the head texture onto each detected face in env.frame.

        Args:
            screen: pygame surface (unused here; kept for the body-part
                draw interface).
        """
        # compute position
        if not self.env.results.multi_face_landmarks:
            return
        width = self.env.screen.get_width()
        height = self.env.screen.get_height()
        for _face_landmarks in self.env.results.multi_face_landmarks:
            # Texture aspect ratio, stretches the quad horizontally.
            ratio = self.image.shape[1] / self.image.shape[0]
            # Four corners of the head quad in face-axis space (x/y only).
            head_box = np.array(
                [
                    100 * ratio * self.env.x + 100 * self.env.y + 80 * self.env.z,
                    100 * ratio * -self.env.x + 100 * self.env.y + 80 * self.env.z,
                    100 * ratio * -self.env.x + 100 * -self.env.y + 80 * self.env.z,
                    100 * ratio * self.env.x + 100 * -self.env.y + 80 * self.env.z,
                ]
            )[:, :2]
            # Pixel-space corners, computed ONCE and reused for both the
            # debug outline and the perspective transform (the original
            # re-derived every coordinate at each use site).
            corners = [
                (
                    int(self.env.center.x * width + head_box[i][0]),
                    int(self.env.center.y * height + head_box[i][1]),
                )
                for i in range(4)
            ]
            # link points: draw the quad outline, wrapping back to corner 0
            for i in range(4):
                cv2.line(
                    self.env.frame,
                    corners[i],
                    corners[(i + 1) % 4],
                    (0, 0, 255),
                    2,
                )
            # Source quad: the full texture rectangle (w, h order for cv2).
            src = np.array(
                [
                    [0, 0],
                    [self.image.shape[1], 0],
                    [self.image.shape[1], self.image.shape[0]],
                    [0, self.image.shape[0]],
                ],
                dtype=np.float32,
            )
            dst = np.array(corners, dtype=np.float32)
            # get perspective transform mapping texture rect -> screen quad
            transform_mat = cv2.getPerspectiveTransform(src, dst)
            # apply perspective transform to image (dsize is (width, height))
            warped = cv2.warpPerspective(
                self.image,
                transform_mat,
                self.env.frame.shape[1::-1],
            )
            # Blend: copy the RGB of every warped pixel whose ALPHA is
            # non-zero (the original comment said "non black", which is
            # not what the code tests). Mask computed once, used twice.
            mask = warped[:, :, 3] != 0
            self.env.frame[mask] = warped[mask][:, :3]

126
src/bodyparts/test.py.old Normal file
View file

@@ -0,0 +1,126 @@
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
import pygame as pg
import skimage
import skimage.io
import skimage.transform
from skimage import data, img_as_float
if TYPE_CHECKING:
from environment import Environment
LANDMARKS = [6]
# https://scikit-image.org/docs/stable/auto_examples/transform/plot_geometric.html#sphx-glr-auto-examples-transform-plot-geometric-py
# https://scikit-image.org/docs/stable/auto_examples/transform/plot_transform_types.html#sphx-glr-auto-examples-transform-plot-transform-types-py
# https://github.com/google/mediapipe/issues/1379
# https://en.wikipedia.org/wiki/3D_projection#Perspective_projection
class Test:
    """Superseded prototype (kept as .old): spins scikit-image's demo image
    with a projective transform and blits it at the screen centre."""

    def __init__(self, env: Environment) -> None:
        """Store the environment, load a downscaled head texture, and zero
        the rotation angles (radians).

        Args:
            env: shared environment holding the face-mesh results.
        """
        self.env = env
        # Drop the alpha channel, then shrink the texture 10x per axis.
        self.image = skimage.io.imread("assets/head.png")[:, :, :3]
        self.image = skimage.transform.resize(
            self.image, (self.image.shape[0] // 10, self.image.shape[1] // 10), anti_aliasing=False
        )
        self.anglex = 0
        self.angley = 0
        self.anglez = 0

    def draw(self, screen: pg.Surface) -> None:
        """Rotate the demo image by the accumulated angles and blit it
        centred on *screen*, once per detected face.

        Args:
            screen: pygame surface to draw on.
        """
        # compute position
        if not self.env.results.multi_face_landmarks:
            return
        for _face_landmarks in self.env.results.multi_face_landmarks:
            body_pos = pg.Vector2(*screen.get_size()) / 2
            # NOTE(review): rotates skimage's demo image, not self.image.
            img = img_as_float(data.chelsea())
            # Advance z by 2 degrees per frame (the original applied two
            # identical 1-degree increments back to back).
            self.anglez += 2 / 180 * np.pi
            # Shift the rotation pivot towards the image centre.
            translation = np.array(
                [
                    [1, 0, -img.shape[1] / 2],
                    [0, 1, img.shape[0] / 2],
                    [0, 0, 1],
                ]
            )
            rotation_x = np.array(
                [
                    [1, 0, 0],
                    [0, np.cos(self.anglex), -np.sin(self.anglex)],
                    [0, np.sin(self.anglex), np.cos(self.anglex)],
                ]
            )
            rotation_y = np.array(
                [
                    [np.cos(self.angley), 0, np.sin(self.angley)],
                    [0, 1, 0],
                    [-np.sin(self.angley), 0, np.cos(self.angley)],
                ]
            )
            rotation_z = np.array(
                [
                    [np.cos(self.anglez), -np.sin(self.anglez), 0],
                    [np.sin(self.anglez), np.cos(self.anglez), 0],
                    [0, 0, 1],
                ]
            )
            # Compose: rotate about x, y, z, then translate. (A dead
            # intermediate assignment and an unused anti-translation
            # matrix from the original were removed.)
            matrix = translation @ rotation_x @ rotation_y @ rotation_z
            tform = skimage.transform.ProjectiveTransform(matrix=matrix)
            tf_img = skimage.transform.warp(img, tform.inverse)
            # skimage arrays are (row, col, ch); pygame wants (x, y, ch),
            # hence the transpose before building the surface.
            surface = pg.surfarray.make_surface(
                (tf_img * 255).astype(np.uint8).transpose(1, 0, 2)
            )
            surface.set_colorkey((0, 0, 0))
            texture_scaled = pg.transform.scale(
                surface,
                (surface.get_width() / 2, surface.get_height() / 2),
            )
            texture_pos = body_pos - pg.Vector2(
                texture_scaled.get_width() / 2,
                texture_scaled.get_height() / 2,
            )
            screen.blit(texture_scaled, texture_pos)

View file

@@ -45,16 +45,17 @@ class Environment:
# create body parts
self.body_parts = {
"body": Body(self),
"left_ear": Ear(False, self),
"right_ear": Ear(True, self),
"head": Head(self),
"left_moustache": Moustache(False, self),
"right_moustache": Moustache(True, self),
"left_eye": Eye(False, self),
"right_eye": Eye(True, self),
"crown": Crown(self),
"mouth": Mouth(self),
# "body": Body(self),
# "left_ear": Ear(False, self),
# "right_ear": Ear(True, self),
# "head": Head(self),
# "left_moustache": Moustache(False, self),
# "right_moustache": Moustache(True, self),
# "left_eye": Eye(False, self),
# "right_eye": Eye(True, self),
# "crown": Crown(self),
# "mouth": Mouth(self),
"test": Test(self),
}
def start(self) -> None:
@@ -82,12 +83,15 @@ class Environment:
# compute face axis
self.compute_face_axis()
# draw keypoints on top of frame
self.draw_keypoints()
# draw keypoints on top of frame
# self.draw_keypoints()
# draw avatar
self.draw_avatar()
# tmp
cv2.imshow("MediaPipe Face Mesh", cv2.flip(self.frame, 1))
def detect_keypoints(self) -> None:
"""Detect the keypoints on the frame."""
with self.mp_face_mesh.FaceMesh(
@@ -121,7 +125,7 @@ class Environment:
right = landmark3vec(face_landmarks.landmark[454], self.screen)
bottom = landmark3vec(face_landmarks.landmark[152], self.screen)
top = landmark3vec(face_landmarks.landmark[10], self.screen)
center = (left + right + bottom + top) / 4
self.center = (left + right + bottom + top) / 4
# compute axis
self.x = (right - left) / 2
@@ -139,36 +143,36 @@ class Environment:
self.angle_z = self.y.angle_to(pg.math.Vector3(0, 0, 1)) - 90
# draw axis on opencv screen
cv2.line(
self.frame,
(int(center.x * self.screen.get_width()), int(center.y * self.screen.get_height())),
(
int(center.x * self.screen.get_width() + self.x.x * 100),
int(center.y * self.screen.get_height() + self.x.y * 100),
),
(0, 0, 255),
2,
)
cv2.line(
self.frame,
(int(center.x * self.screen.get_width()), int(center.y * self.screen.get_height())),
(
int(center.x * self.screen.get_width() + self.y.x * 100),
int(center.y * self.screen.get_height() + self.y.y * 100),
),
(0, 255, 0),
2,
)
cv2.line(
self.frame,
(int(center.x * self.screen.get_width()), int(center.y * self.screen.get_height())),
(
int(center.x * self.screen.get_width() + self.z.x * 100),
int(center.y * self.screen.get_height() + self.z.y * 100),
),
(255, 0, 0),
2,
)
# cv2.line(
# self.frame,
# (int(self.center.x * self.screen.get_width()), int(self.center.y * self.screen.get_height())),
# (
# int(self.center.x * self.screen.get_width() + self.x.x * 100),
# int(self.center.y * self.screen.get_height() + self.x.y * 100),
# ),
# (0, 0, 255),
# 2,
# )
# cv2.line(
# self.frame,
# (int(self.center.x * self.screen.get_width()), int(self.center.y * self.screen.get_height())),
# (
# int(self.center.x * self.screen.get_width() + self.y.x * 100),
# int(self.center.y * self.screen.get_height() + self.y.y * 100),
# ),
# (0, 255, 0),
# 2,
# )
# cv2.line(
# self.frame,
# (int(self.center.x * self.screen.get_width()), int(self.center.y * self.screen.get_height())),
# (
# int(self.center.x * self.screen.get_width() + self.z.x * 100),
# int(self.center.y * self.screen.get_height() + self.z.y * 100),
# ),
# (255, 0, 0),
# 2,
# )
def draw_keypoints(self) -> None:
"""Draw the keypoints on the screen."""
@@ -180,25 +184,21 @@ class Environment:
face_landmarks,
landmark_drawing_spec=self.mp_drawing_styles.DrawingSpec((0, 0, 255), 0, 0),
)
# self.mp_drawing.draw_landmarks(
# self.frame,
# face_landmarks,
# [(469, 470), (470, 471)],
# None,
# )
# flip the image horizontally for a selfie-view display
# cv2.imshow("MediaPipe Face Mesh", self.frame)
cv2.imshow("MediaPipe Face Mesh", cv2.flip(self.frame, 1))
def draw_avatar(self) -> None:
    """Render the avatar: clear the screen, draw every body part,
    mirror the result horizontally, and present the frame."""
    # clear image with green background (acts as the chroma-key colour)
    self.screen.fill(pg.Color("green"))
    # draw each body part onto the screen surface
    for part in self.body_parts.values():
        part.draw(self.screen)
    # self.screen.blit(self.screen, (0, 0))
    # flip screen horizontally for a mirror (selfie-style) view
    self.screen.blit(pg.transform.flip(self.screen, True, False), (0, 0))
    # display screen: swap pygame's display buffer to show the new frame
    pg.display.flip()