feat: switch to objects

Laureηt 2022-11-26 19:23:02 +01:00
parent d6d53166e8
commit 58cac43a37
2 changed files with 131 additions and 53 deletions

src/environment.py Normal file

@@ -0,0 +1,125 @@
import logging

import cv2
import mediapipe as mp
import pygame as pg

from parts.eye import Eye


class Environment:
    def __init__(self, camera: cv2.VideoCapture) -> None:
        # store reference to webcam
        self.cam = camera

        # mediapipe stuff
        self.mp_drawing = mp.solutions.drawing_utils  # type: ignore
        self.mp_drawing_styles = mp.solutions.drawing_styles  # type: ignore
        self.mp_face_mesh = mp.solutions.face_mesh  # type: ignore

        # init pygame
        pg.init()

        # get window size from the webcam frame size (cast to int for pygame)
        self.screen_width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.screen_height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # create screen
        self.screen: pg.Surface = pg.display.set_mode(
            (self.screen_width, self.screen_height),
            pg.DOUBLEBUF | pg.HWSURFACE,
        )  # type: ignore
        pg.display.set_caption("Projet APP")

        # create body parts
        self.body_parts = {
            "left_eye": Eye(False),
            "right_eye": Eye(True),
        }
    def start(self) -> None:
        while self.cam.isOpened():
            # read webcam
            success, self.frame = self.cam.read()
            if not success:
                logging.debug("Ignoring empty camera frame.")
                continue

            # stop if q is pressed (opencv)
            if cv2.waitKey(5) & 0xFF == ord("q"):
                break

            # quit if q is pressed (pygame)
            if any(
                [event.type == pg.KEYDOWN and event.key == pg.K_q for event in pg.event.get()],
            ):
                break

            # detect keypoints on frame
            self.detect_keypoints()

            # draw keypoints on top of frame
            self.draw_keypoints()

            # draw avatar
            self.draw_avatar()
    def detect_keypoints(self) -> None:
        with self.mp_face_mesh.FaceMesh(
            max_num_faces=1,
            refine_landmarks=True,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5,
        ) as face_mesh:
            # perf, mark image as not writeable to pass by reference
            self.frame.flags.writeable = False
            # convert the BGR image to RGB before processing
            self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
            # process the image to detect keypoints
            self.results = face_mesh.process(self.frame)
            # re-enable writeable flag
            self.frame.flags.writeable = True
            # convert the image back to BGR format
            self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)
    def draw_keypoints(self) -> None:
        # draw the face mesh annotations on the image
        if self.results.multi_face_landmarks:
            for face_landmarks in self.results.multi_face_landmarks:
                self.mp_drawing.draw_landmarks(
                    image=self.frame,
                    landmark_list=face_landmarks,
                    connections=self.mp_face_mesh.FACEMESH_TESSELATION,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=self.mp_drawing_styles.get_default_face_mesh_tesselation_style(),
                )
                self.mp_drawing.draw_landmarks(
                    image=self.frame,
                    landmark_list=face_landmarks,
                    connections=self.mp_face_mesh.FACEMESH_CONTOURS,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=self.mp_drawing_styles.get_default_face_mesh_contours_style(),
                )
                self.mp_drawing.draw_landmarks(
                    image=self.frame,
                    landmark_list=face_landmarks,
                    connections=self.mp_face_mesh.FACEMESH_IRISES,
                    landmark_drawing_spec=None,
                    connection_drawing_spec=self.mp_drawing_styles.get_default_face_mesh_iris_connections_style(),
                )

        # flip the image horizontally for a selfie-view display
        cv2.imshow("MediaPipe Face Mesh", cv2.flip(self.frame, 1))
    def draw_avatar(self) -> None:
        self.screen.fill(pg.Color("green"))
        for part in self.body_parts.values():
            part.draw(self.screen)
        pg.display.flip()
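Environment only relies on Eye through its constructor flag and a draw(surface) method; parts/eye.py is not part of this diff, so the following is a minimal, hypothetical sketch of that assumed interface (names and drawing logic are placeholders, not the actual implementation):

import pygame as pg

class Eye:
    """Hypothetical stand-in for parts.eye.Eye as used by Environment."""

    def __init__(self, is_right: bool) -> None:
        # boolean flag passed by Environment: Eye(False) for the left eye, Eye(True) for the right
        self.is_right = is_right

    def draw(self, screen: pg.Surface) -> None:
        # placeholder rendering: a circle on the left or right half of the screen
        x = screen.get_width() * (3 if self.is_right else 1) // 4
        y = screen.get_height() // 2
        pg.draw.circle(screen, pg.Color("white"), (x, y), 20)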


@@ -1,67 +1,20 @@
 import logging
 import cv2
-import mediapipe as mp
+from environment import Environment
 def main() -> None:
     """Main function."""
-    mp_drawing = mp.solutions.drawing_utils  # type: ignore
-    mp_drawing_styles = mp.solutions.drawing_styles  # type: ignore
-    mp_face_mesh = mp.solutions.face_mesh  # type: ignore
     # open webcam
     cap = cv2.VideoCapture(0)
-    with mp_face_mesh.FaceMesh(
-        max_num_faces=2, refine_landmarks=True, min_detection_confidence=0.5, min_tracking_confidence=0.5
-    ) as face_mesh:
+    # create env
+    env = Environment(cap)
-        while cap.isOpened():
-            # read webcam
-            success, image = cap.read()
-            if not success:
-                print("Ignoring empty camera frame.")
-                # If loading a video, use 'break' instead of 'continue'
-                continue
-            # to improve performance, optionally mark the image as not writeable to pass by reference.
-            image.flags.writeable = False
-            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-            results = face_mesh.process(image)
-            # draw the face mesh annotations on the image.
-            image.flags.writeable = True
-            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
-            if results.multi_face_landmarks:
-                for face_landmarks in results.multi_face_landmarks:
-                    mp_drawing.draw_landmarks(
-                        image=image,
-                        landmark_list=face_landmarks,
-                        connections=mp_face_mesh.FACEMESH_TESSELATION,
-                        landmark_drawing_spec=None,
-                        connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style(),
-                    )
-                    mp_drawing.draw_landmarks(
-                        image=image,
-                        landmark_list=face_landmarks,
-                        connections=mp_face_mesh.FACEMESH_CONTOURS,
-                        landmark_drawing_spec=None,
-                        connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_contours_style(),
-                    )
-                    mp_drawing.draw_landmarks(
-                        image=image,
-                        landmark_list=face_landmarks,
-                        connections=mp_face_mesh.FACEMESH_IRISES,
-                        landmark_drawing_spec=None,
-                        connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_iris_connections_style(),
-                    )
-            # flip the image horizontally for a selfie-view display.
-            cv2.imshow("MediaPipe Face Mesh", cv2.flip(image, 1))
-            if cv2.waitKey(5) & 0xFF == 27:
-                break
+    # start env
+    env.start()
     # close webcam
     cap.release()
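For readability, the kept and added lines above reduce the main module to roughly the following after this commit (reconstructed from the diff; blank lines and anything outside the hunk, such as a __main__ guard, are not shown and may differ):

import logging
import cv2

from environment import Environment


def main() -> None:
    """Main function."""
    # open webcam
    cap = cv2.VideoCapture(0)

    # create env
    env = Environment(cap)

    # start env
    env.start()

    # close webcam
    cap.release()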