diff --git a/src/environment.py b/src/environment.py
index ef429d9..16cb381 100644
--- a/src/environment.py
+++ b/src/environment.py
@@ -8,7 +8,16 @@ from parts.eye import Eye
 
 
 class Environment:
+    """The environment is the main class of the application.
+
+    It is responsible for the following:
+    - detecting the keypoints
+    - drawing the keypoints
+    - drawing the avatar
+    """
+
     def __init__(self, camera: cv2.VideoCapture) -> None:
+        """Initialize the environment."""
         # store reference to webcam
         self.cam = camera
 
@@ -33,11 +42,12 @@ class Environment:
 
         # create body parts
         self.body_parts = {
-            "left_eye": Eye(False),
-            "right_eye": Eye(True),
+            "left_eye": Eye(False, self),
+            "right_eye": Eye(True, self),
         }
 
     def start(self) -> None:
+        """Start the environment."""
         while self.cam.isOpened():
             # read webcam
             success, self.frame = self.cam.read()
@@ -65,6 +75,7 @@ class Environment:
             self.draw_avatar()
 
     def detect_keypoints(self) -> None:
+        """Detect the keypoints on the frame."""
         with self.mp_face_mesh.FaceMesh(
             max_num_faces=1,
             refine_landmarks=True,
@@ -72,7 +83,7 @@ class Environment:
             min_tracking_confidence=0.5,
         ) as face_mesh:
 
-            # perf, mark image as not writeable to pass by reference
+            # perf, mark image as not writeable, to pass by reference
             self.frame.flags.writeable = False
 
             # convert the BGR image to RGB before processing
@@ -88,6 +99,7 @@ class Environment:
             self.frame = cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR)
 
     def draw_keypoints(self) -> None:
+        """Draw the keypoints on the screen."""
         # draw the face mesh annotations on the image.
         if self.results.multi_face_landmarks:
             for face_landmarks in self.results.multi_face_landmarks:
@@ -117,6 +129,7 @@ class Environment:
         cv2.imshow("MediaPipe Face Mesh", cv2.flip(self.frame, 1))
 
     def draw_avatar(self) -> None:
+        """Draw the avatar on the screen."""
         self.screen.fill(pg.Color("green"))
 
         for part in self.body_parts.values():
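
Note on the call-site change: Eye(False) / Eye(True) become Eye(False, self) / Eye(True, self), so the constructor in parts/eye.py has to accept the environment as a second argument. The Eye class is not part of this diff, so the following is only a minimal sketch under that assumption; the parameter names is_right and env are hypothetical.

# parts/eye.py -- sketch only, assumed counterpart to the call-site change above
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # imported for type checking only, to avoid a circular import
    # (environment.py already imports Eye at runtime)
    from environment import Environment


class Eye:
    def __init__(self, is_right: bool, env: "Environment") -> None:
        """Store which eye this is and keep a reference back to the environment."""
        self.is_right = is_right  # hypothetical name for the existing bool flag
        self.env = env  # lets the part read frame/keypoint state when drawing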