Writing frame 1 / 6218
Writing frame 2 / 6218
Writing frame 3 / 6218
Writing frame 4 / 6218
Traceback (most recent call last):
File "/Users/main/Desktop/pypred/src/main.py", line 55, in <module>
face_encodings = fr.face_encodings(rgb_frame, face_locations)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/face_recognition/api.py", line 214, in face_encodings
return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/face_recognition/api.py", line 214, in <listcomp>
return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: compute_face_descriptor(): incompatible function arguments. The following argument types are supported:
1. (self: _dlib_pybind11.face_recognition_model_v1, img: numpy.ndarray[(rows,cols,3),numpy.uint8], face: _dlib_pybind11.full_object_detection, num_jitters: int = 0, padding: float = 0.25) -> _dlib_pybind11.vector
2. (self: _dlib_pybind11.face_recognition_model_v1, img: numpy.ndarray[(rows,cols,3),numpy.uint8], num_jitters: int = 0) -> _dlib_pybind11.vector
3. (self: _dlib_pybind11.face_recognition_model_v1, img: numpy.ndarray[(rows,cols,3),numpy.uint8], faces: _dlib_pybind11.full_object_detections, num_jitters: int = 0, padding: float = 0.25) -> _dlib_pybind11.vectors
4. (self: _dlib_pybind11.face_recognition_model_v1, batch_img: List[numpy.ndarray[(rows,cols,3),numpy.uint8]], batch_faces: List[_dlib_pybind11.full_object_detections], num_jitters: int = 0, padding: float = 0.25) -> _dlib_pybind11.vectorss
5. (self: _dlib_pybind11.face_recognition_model_v1, batch_img: List[numpy.ndarray[(rows,cols,3),numpy.uint8]], num_jitters: int = 0) -> _dlib_pybind11.vectors
Invoked with: <_dlib_pybind11.face_recognition_model_v1 object at 0x1046a0270>, array([[[40, 32, 31],
[40, 32, 31],
[40, 32, 31],
...,
[61, 91, 49],
[61, 91, 49],
[61, 91, 49]],
[[40, 32, 31],
[40, 32, 31],
[40, 32, 31],
...,
[61, 91, 49],
[61, 91, 49],
[61, 91, 49]],
[[40, 32, 31],
[40, 32, 31],
[40, 32, 31],
...,
[61, 91, 49],
[61, 91, 49],
[61, 91, 49]],
...,
[[17, 30, 16],
[13, 26, 12],
[11, 24, 10],
...,
[15, 16, 9],
[15, 16, 9],
[15, 16, 9]],
[[17, 30, 16],
[13, 26, 12],
[11, 24, 10],
...,
[15, 16, 9],
[15, 16, 9],
[15, 16, 9]],
[[17, 30, 16],
[13, 26, 12],
[11, 24, 10],
...,
[15, 16, 9],
[15, 16, 9],
[16, 17, 10]]], dtype=uint8), <_dlib_pybind11.full_object_detection object at 0x10772cf30>, 1`
That is the error thrown by the following code.
# Find a known face ("joe.png") in each frame of a video and write a
# labeled copy to output.avi.
#
# NOTE: filename kept exactly as in the original ("moneky.mp4").
input_movie = cv2.VideoCapture("moneky.mp4")
length = int(input_movie.get(cv2.CAP_PROP_FRAME_COUNT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
# assumes the source video is 1280x720 @ 29.97fps — TODO confirm against input
output_movie = cv2.VideoWriter('output.avi', fourcc, 29.97, (1280, 720))

# Load the reference face and precompute its encoding once.
joe_image = fr.load_image_file("joe.png")
lmm_face_encoding = fr.face_encodings(joe_image)[0]
known_faces = [
    lmm_face_encoding
]

face_locations = []
face_encodings = []
face_names = []
frame_number = 0

while True:
    # Grab a single frame of video
    ret, frame = input_movie.read()
    frame_number += 1

    # Quit when the input video file ends
    if not ret:
        break

    # Convert BGR (OpenCV) to RGB (face_recognition).
    # BUGFIX: do NOT use frame[:, :, ::-1] — that returns a negative-stride,
    # non-contiguous view, and dlib's compute_face_descriptor only accepts
    # contiguous uint8 arrays, raising the TypeError seen in the traceback.
    # cv2.cvtColor returns a fresh contiguous array.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Find all the faces and face encodings in the current frame of video
    face_locations = fr.face_locations(rgb_frame)
    face_encodings = fr.face_encodings(rgb_frame, face_locations)

    face_names = []
    for face_encoding in face_encodings:
        # See if the face is a match for the known face(s)
        match = fr.compare_faces(known_faces, face_encoding, tolerance=0.50)
        # With only one known face, a simple positional check suffices.
        name = None
        if match[0]:
            name = "joe rogan"
        face_names.append(name)

    # Label the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        if not name:
            continue
        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 25), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)

    # Write the resulting image to the output video file
    print("Writing frame {} / {}".format(frame_number, length))
    output_movie.write(frame)

# All done!
input_movie.release()
# BUGFIX: release the writer too, so the output container is finalized;
# without this the AVI may be truncated/corrupt.
output_movie.release()
cv2.destroyAllWindows()
I know that's a lot to put into one question. As the error shows, when I run the file it starts capturing frames, but once it gets to frame 4 it throws the traceback above.
I've been looking for other solutions online, but I cannot find any that really address my case. This code is from the face_recognition GitHub repo, in the example "facerec_from_video_file.py": https://github.com/ageitgey/face_recognition/blob/master/examples/facerec_from_video_file.py
The solution for me was changing `rgb_frame = frame[:, :, ::-1]`
to `rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)`. The slicing form produces a negative-stride (non-contiguous) view of the array, which dlib's `compute_face_descriptor` rejects, while `cv2.cvtColor` returns a new contiguous array.