python, numpy, 3d, robotics, euler-angles

Calculation of Yaw, Pitch and Roll


So basically I have written code that calculates the rvec, yaw, pitch and roll of an ArUco marker and displays them in real time.

import numpy as np
import cv2
import sys
import time
import math

ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
    "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
    "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
    "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
    "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
    "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
    "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
    "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
    "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
    "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
    "DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
    "DICT_7X7_50": cv2.aruco.DICT_7X7_50,
    "DICT_7X7_100": cv2.aruco.DICT_7X7_100,
    "DICT_7X7_250": cv2.aruco.DICT_7X7_250,
    "DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
    "DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
    "DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
    "DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
    "DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
    "DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
}

# Global variables to store the latest rvec and tvec
latest_rvec = None
latest_tvec = None


def aruco_display(corners, ids, rejected, image):
    if len(corners) > 0:
        ids = ids.flatten()
        for (markerCorner, markerID) in zip(corners, ids):
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners

            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))

            cv2.line(image, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(image, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(image, bottomLeft, topLeft, (0, 255, 0), 2)

            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)

            cv2.putText(image, str(markerID), (topLeft[0], topLeft[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 2)
            print("[Inference] ArUco marker ID: {}".format(markerID))

    return image


def pose_estimation(frame, aruco_dict_type, matrix_coefficients, distortion_coefficients):
    global latest_rvec, latest_tvec
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = cv2.aruco.getPredefinedDictionary(aruco_dict_type)
    parameters = cv2.aruco.DetectorParameters()
    detector = cv2.aruco.ArucoDetector(aruco_dict, parameters)

    corners, ids, rejected_img_points = detector.detectMarkers(frame)

    if len(corners) > 0:
        for i in range(0, len(ids)):
            # Define the ArUco marker object points (planar marker; side length in the
            # same units as the camera calibration, here 5.292)
            markerSize = 5.292
            objPoints = np.array([
                [-markerSize / 2, markerSize / 2, 0],
                [markerSize / 2, markerSize / 2, 0],
                [markerSize / 2, -markerSize / 2, 0],
                [-markerSize / 2, -markerSize / 2, 0]
            ], dtype=np.float32)

            # Get the corner points of the detected marker
            imgPoints = corners[i].reshape((4, 2))

            # Use solvePnP to estimate the pose
            success, rvec, tvec = cv2.solvePnP(objPoints, imgPoints, matrix_coefficients, distortion_coefficients)

            if success:
                # Update the latest rvec and tvec
                latest_rvec = rvec
                latest_tvec = tvec

                # Draw a square around the markers
                cv2.aruco.drawDetectedMarkers(frame, corners)

                # Draw Axis
                cv2.drawFrameAxes(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)

                # Convert rotation vector to rotation matrix
                rmat, _ = cv2.Rodrigues(rvec)

                # Get Euler angles
                euler_angles = rotationMatrixToEulerAngles(rmat)

                # Convert to degrees
                euler_angles_deg = np.degrees(euler_angles)

                # Ensure yaw is between 0 and 360 degrees
                yaw = euler_angles_deg[1] % 360

                # Extract pitch and roll
                pitch = euler_angles_deg[0]
                roll = euler_angles_deg[2]

                # Convert rvec to degrees
                rvec_deg = np.degrees(rvec.flatten())

                # Display rvec (in degrees), tvec, and Euler angles on the frame
                rvec_str = f"rvec (deg): ({rvec_deg[0]:.2f}, {rvec_deg[1]:.2f}, {rvec_deg[2]:.2f})"
                tvec_str = f"tvec: ({tvec[0][0]:.2f}, {tvec[1][0]:.2f}, {tvec[2][0]:.2f})"
                euler_str = f"Yaw: {yaw:.2f}, Pitch: {pitch:.2f}, Roll: {roll:.2f}"

                cv2.putText(frame, rvec_str, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                cv2.putText(frame, tvec_str, (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)
                cv2.putText(frame, euler_str, (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)

    return frame


def rotationMatrixToEulerAngles(R):
    sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    singular = sy < 1e-6

    if not singular:
        x = math.atan2(R[2, 1], R[2, 2])
        y = math.atan2(-R[2, 0], sy)
        z = math.atan2(R[1, 0], R[0, 0])
    else:
        x = math.atan2(-R[1, 2], R[1, 1])
        y = math.atan2(-R[2, 0], sy)
        z = 0

    return np.array([x, y, z])


def save_vectors():
    global latest_rvec, latest_tvec
    if latest_rvec is not None and latest_tvec is not None:
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        filename = f"vectors_{timestamp}.txt"
        with open(filename, 'w') as f:
            f.write(f"rvec: {latest_rvec.flatten()}\n")
            f.write(f"tvec: {latest_tvec.flatten()}\n")
        print(f"Vectors saved to {filename}")
    else:
        print("No vectors to save. Detect a marker first.")


aruco_type = "DICT_6X6_250"

# these are camera intrinsic properties and need to be changed for the camera used.
intrinsic_camera = np.array(((933.15867, 0, 657.59), (0, 933.1586, 400.36993), (0, 0, 1)))
distortion = np.array((-0.43948, 0.18514, 0, 0))

cap = cv2.VideoCapture(0)

cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break

    output = pose_estimation(img, ARUCO_DICT[aruco_type], intrinsic_camera, distortion)

    cv2.imshow('Estimated Pose', output)

    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    elif key == ord(' '):  # Space key
        save_vectors()

cap.release()
cv2.destroyAllWindows()

But now, when I place the ArUco marker in front of my camera and move it along the x, y and z axes, the yaw, pitch and roll change erratically. For example, when I rotate the marker by about 10 degrees about the x-axis, the roll changes by roughly 100 degrees, while the corresponding component of rvec changes by more or less 10 degrees only, and the same applies to the other angles. So please can anyone tell me:

  1. What is the difference between rvec and Euler angles? Why are their values different?
  2. What is the correct approach to find yaw, pitch and roll?

Thanks in advance!!


Solution

    Rotation Vector

    Any rotation is a rotation about some axis by some angle (the axis–angle representation), so every rotation can be described by a single vector: the direction of the vector is the axis of rotation, and the length of the vector is the angle of rotation.
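
    As a quick illustration (a minimal sketch with scipy, not from the original post): a rotation of 10 degrees about the x-axis is simply a vector of length 10 degrees (expressed in radians) pointing along x.

    import numpy as np
    from scipy.spatial.transform import Rotation

    angle = np.deg2rad(10)             # rotation angle, 10 degrees in radians
    axis = np.array([1.0, 0.0, 0.0])   # rotation axis: the x-axis

    rotvec = angle * axis              # rotation vector = axis * angle
    rotation = Rotation.from_rotvec(rotvec)

    print(np.linalg.norm(rotation.as_rotvec()))     # length of the vector = the angle (about 0.1745 rad)
    print(rotation.as_euler('xyz', degrees=True))   # [10. 0. 0.] for this single-axis rotation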

    Euler Angle

    Represents a rotation with three angle values, an axis order, and an intrinsic/extrinsic convention.

    example:

    [pi/2, pi, pi/4], xyz, extrinsic.

    You have a frame_0 with x_0, y_0, and z_0 axes.

    You get frame_1 by rotating frame_0 about x_0 by pi/2 radians, giving the x_1, y_1, and z_1 axes.

    You get frame_2 by rotating frame_1 about y_0 by pi radians, giving the x_2, y_2, and z_2 axes.

    Why the y axis? Because the axis order is xyz.

    Why y_0 and not y_1? Because the convention is extrinsic: every rotation is performed about the fixed axes of the original frame_0.

    Finally, you get frame_3 by rotating frame_2 about z_0 by pi/4 radians, giving the x_3, y_3, and z_3 axes.

    see more: https://math.stackexchange.com/questions/2943074/confusion-about-order-of-rotations-for-euler-angles
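
    A short scipy sketch (not part of the original answer) that checks this: composing the three single-axis rotations about the fixed frame_0 axes gives the same result as asking scipy for the 'xyz' extrinsic Euler sequence directly.

    import numpy as np
    from scipy.spatial.transform import Rotation

    angles = [np.pi/2, np.pi, np.pi/4]   # rotations about x_0, y_0 and z_0 (the fixed frame_0 axes)

    # Compose the three single-axis rotations about the fixed axes.
    # For extrinsic rotations, each later rotation is applied on the left.
    rx = Rotation.from_euler('x', angles[0])
    ry = Rotation.from_euler('y', angles[1])
    rz = Rotation.from_euler('z', angles[2])
    composed = rz * ry * rx

    # In scipy, a lower-case axis sequence means extrinsic, so this matches.
    direct = Rotation.from_euler('xyz', angles)

    print(np.allclose(composed.as_matrix(), direct.as_matrix()))   # True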

    Python

    To translate between a rotation vector and Euler angles, you can use scipy.spatial.transform.Rotation:

    import numpy
    from scipy.spatial.transform import Rotation
    
    rotvec = numpy.array([ numpy.pi/2, numpy.pi, numpy.pi/2 ])
    rotation = Rotation.from_rotvec(rotvec)
    
    print(f"Euler Angle : {rotation.as_euler('zyx')}")
    print(f"RPY         : {rotation.as_euler('xyz')}")
    print(f"Quaternion  : {rotation.as_quat()}")
    print(f"Matrix      : \n{rotation.as_matrix()}") 
    
    Euler Angle : [-2.07266036 -0.23854245 -2.07266036]
    RPY         : [ 2.53825394 -0.96713372  2.53825394]
    Quaternion  : [ 0.38307161  0.76614321  0.38307161 -0.34574104]
    Matrix      : 
    [[-0.46743855  0.85186257 -0.2362866 ]
     [ 0.32208827  0.41302458  0.85186257]
     [ 0.82326202  0.32208827 -0.46743855]]
    

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html
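
    Applied to your own code, the same idea converts the rvec from cv2.solvePnP into yaw, pitch and roll directly. This is only a sketch, not from the original answer: rvec_to_euler is a hypothetical helper, and the 'xyz' sequence and the roll/pitch/yaw labelling are assumptions, so pick the sequence and convention that match your setup.

    import numpy as np
    from scipy.spatial.transform import Rotation

    def rvec_to_euler(rvec, seq='xyz'):
        # Convert an OpenCV rotation vector (as returned by cv2.solvePnP) to Euler angles in degrees.
        rotation = Rotation.from_rotvec(np.asarray(rvec, dtype=float).flatten())
        return rotation.as_euler(seq, degrees=True)

    # Example with a made-up rvec; in your script, pass the rvec returned by cv2.solvePnP.
    rvec = np.array([[0.1], [1.2], [-0.3]])
    roll, pitch, yaw = rvec_to_euler(rvec)   # with 'xyz': rotations about x, y, z (assumed labelling)
    print(f"Yaw: {yaw:.2f}, Pitch: {pitch:.2f}, Roll: {roll:.2f}")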

    Debugging

    https://www.andre-gaschler.com/rotationconverter/