I'm streaming video from a webcam and would like to use MediaPipe to estimate the user's face landmarks, then replace/blur the face with a custom yellow image, cover the eyes with heart (❤) emojis, and cover the lips with a smile drawing.
The code I currently have (below) only draws landmark lines.
Could someone please help me alter it to achieve the result described above?
Thanks in advance!
Current image result:
Desired image result:
The current code:
import cv2
import mediapipe as mp

## initialize pose estimator
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)

cap = cv2.VideoCapture(0)
while cap.isOpened():
    # read frame
    _, frame = cap.read()
    try:
        # resize the frame for portrait video
        # frame = cv2.resize(frame, (350, 600))
        # convert to RGB
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # process the frame for pose detection
        pose_results = pose.process(frame_rgb)
        # print(pose_results.pose_landmarks)
        # draw skeleton on the frame
        mp_drawing.draw_landmarks(frame, pose_results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        # display the frame
        cv2.imshow('Output', frame)
    except:
        break
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
A heart can be drawn using the parametric equation found here: https://www.quora.com/How-can-I-draw-a-heart-using-Python
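For reference, the parametric curve that draw_heart below implements is:

x(t) = 16·sin³(t)
y(t) = 13·cos(t) − 5·cos(2t) − 2·cos(3t) − cos(4t)

scaled by size and shifted to the target center, with y flipped because image coordinates grow downwards.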
import cv2
import mediapipe as mp
import numpy as np

def draw_heart(image, center, size, color):
    # sample the parametric heart curve and connect the samples with line segments
    t = np.arange(0, 2 * np.pi, 0.1)
    x = size * (16 * np.sin(t) ** 3)
    y = size * (13 * np.cos(t) - 5 * np.cos(2 * t) - 2 * np.cos(3 * t) - np.cos(4 * t))
    x = (x + center[0]).astype(int)
    y = (center[1] - y).astype(int)  # flip y: image coordinates grow downwards
    for i in range(len(x) - 1):
        cv2.line(image, (int(x[i]), int(y[i])), (int(x[i + 1]), int(y[i + 1])), color, 2)

def landmark_to_image_coords(landmark, frame):
    # landmarks are normalized to [0, 1]; scale to pixel coordinates
    return (int(landmark.x * frame.shape[1]), int(landmark.y * frame.shape[0]))

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    pose_results = pose.process(frame_rgb)
    # only draw when a pose was actually detected
    if pose_results.pose_landmarks:
        # mp_drawing.draw_landmarks(frame, pose_results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
        landmarks = pose_results.pose_landmarks.landmark
        nose = landmarks[mp_pose.PoseLandmark.NOSE]
        left_eye = landmarks[mp_pose.PoseLandmark.LEFT_EYE]
        right_eye = landmarks[mp_pose.PoseLandmark.RIGHT_EYE]
        mouth_left = landmarks[mp_pose.PoseLandmark.MOUTH_LEFT]
        mouth_right = landmarks[mp_pose.PoseLandmark.MOUTH_RIGHT]

        # cover the face with a filled yellow circle,
        # sized from twice the nose-to-eye distance
        nose_to_eye = ((nose.x - left_eye.x) ** 2 + (nose.y - left_eye.y) ** 2) ** 0.5
        radius = int(2 * nose_to_eye * frame.shape[1])
        cv2.circle(frame, landmark_to_image_coords(nose, frame), radius, (0, 255, 255), -1)

        # draw a red heart over each eye
        red = (0, 0, 255)
        heart_size = 1
        left_eye_coords = landmark_to_image_coords(left_eye, frame)
        right_eye_coords = landmark_to_image_coords(right_eye, frame)
        draw_heart(frame, left_eye_coords, heart_size, red)
        draw_heart(frame, right_eye_coords, heart_size, red)

        # draw a simple smile: a line between the mouth corners, plus short
        # upturned strokes at each corner (y decreases upwards in image coordinates)
        mouth_left_c = landmark_to_image_coords(mouth_left, frame)
        mouth_right_c = landmark_to_image_coords(mouth_right, frame)
        cv2.line(frame, mouth_left_c, mouth_right_c, red, 2)
        cv2.line(frame, mouth_left_c, (mouth_left_c[0] - 20, mouth_left_c[1] - 20), (0, 255, 0), 2)
        cv2.line(frame, mouth_right_c, (mouth_right_c[0] + 20, mouth_right_c[1] - 20), (0, 255, 0), 2)

    cv2.imshow('Output', frame)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
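The code above draws the heart from line segments. If you want to paste an actual ❤ emoji image (or your custom yellow face image) instead, a minimal sketch is below; it assumes a hypothetical heart.png with an alpha channel next to the script, and alpha-blends it onto the frame at a given center point:

import cv2
import numpy as np

def overlay_png(frame, png, center):
    # png must be BGRA, e.g. cv2.imread('heart.png', cv2.IMREAD_UNCHANGED)
    h, w = png.shape[:2]
    x0, y0 = center[0] - w // 2, center[1] - h // 2
    # skip frames where the overlay would fall outside the image
    if x0 < 0 or y0 < 0 or x0 + w > frame.shape[1] or y0 + h > frame.shape[0]:
        return
    # per-pixel alpha blend of the PNG onto the region of interest
    alpha = png[:, :, 3:4].astype(float) / 255.0
    roi = frame[y0:y0 + h, x0:x0 + w].astype(float)
    frame[y0:y0 + h, x0:x0 + w] = (alpha * png[:, :, :3] + (1 - alpha) * roi).astype(np.uint8)

# usage inside the loop, replacing the draw_heart calls:
# heart_png = cv2.imread('heart.png', cv2.IMREAD_UNCHANGED)  # load once, before the loop
# overlay_png(frame, heart_png, left_eye_coords)
# overlay_png(frame, heart_png, right_eye_coords)

The same helper works for the yellow face image (resize it with cv2.resize from the nose-to-eye distance first), and if you would rather blur the face than cover it, you can apply cv2.GaussianBlur to the rectangular region around the nose instead of drawing the filled circle.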