Tags: c++, opencv, calibration

Stereo Calibration Between an RGB Webcam and a ToF Camera


I'm working on a stereo calibration between a CubeEyeS100D ToF camera and a standard USB RGB webcam. I'm performing this calibration so that I can overlay the RGB image onto the point cloud I generate from the ToF data. The problem I'm encountering is that the rectified images my calibration produces are either completely wrong or translated by far too many pixels. I'm working on Windows with C++ and CMake, and I'm using OpenCV to perform the calibration. The output of my program looks like this:

Output of calibration
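
For context, the end goal is to use the R and T from this calibration to look up an RGB colour for every ToF point. A minimal sketch of that step is below; the function and variable names are only illustrative and not part of my project:

#include <opencv2/opencv.hpp>
#include <vector>

// Illustrative sketch: colour a ToF point cloud from the RGB image using the
// stereo extrinsics R and T estimated by the calibration code below.
void colourPointCloud(const std::vector<cv::Point3f>& tofPoints,   // points in the ToF camera frame (mm)
                      const cv::Mat& rgbImage,
                      const cv::Matx33f& K_rgb,
                      const cv::Vec<float, 5>& dist_rgb,
                      const cv::Mat& R, const cv::Vec3d& T,
                      std::vector<cv::Vec3b>& colours)
{
    // Transform the ToF points into the RGB camera frame and project them onto the image plane.
    cv::Mat rvec;
    cv::Rodrigues(R, rvec);                                         // 3x3 rotation matrix -> rotation vector
    std::vector<cv::Point2f> pixels;
    cv::projectPoints(tofPoints, rvec, T, K_rgb, dist_rgb, pixels);

    colours.assign(tofPoints.size(), cv::Vec3b(0, 0, 0));
    for (size_t i = 0; i < pixels.size(); ++i) {
        int x = cvRound(pixels[i].x);
        int y = cvRound(pixels[i].y);
        if (x >= 0 && y >= 0 && x < rgbImage.cols && y < rgbImage.rows) {
            colours[i] = rgbImage.at<cv::Vec3b>(y, x);              // sample the BGR colour at the projected pixel
        }
    }
}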

This is my code:

void stereoCalibration()
{
    //Load in calibration images (paths need to be correct!) (RGB is on the right):
    std::vector<cv::String> fileNames_left, fileNames_right;
    cv::glob("../../../TestIntrin/right/", fileNames_right, false);
    cv::glob("../../../TestIntrin/left/", fileNames_left, false);
    //Initialise size of chessboard (needs to be correct!):
    const cv::Size BoardSize{ 6, 4 };
    std::vector<std::vector<cv::Point2f>> imagePoints_left(fileNames_left.size());
    std::vector<std::vector<cv::Point2f>> imagePoints_right(fileNames_right.size());
    //create vectors to contain corners
    std::vector<std::vector<cv::Point2f>> corners_left(fileNames_left.size());
    std::vector<std::vector<cv::Point2f>> corners_right(fileNames_right.size());

    cv::Mat img_left, img_right, gray_left, gray_right;
    try {
        if (fileNames_left.size() != fileNames_right.size()) {
            throw std::runtime_error("Vectors are not of the same size.");
        }
        if (fileNames_left.empty() || fileNames_right.empty() || fileNames_left.back() == "" || fileNames_right.back() == "") {
            throw std::runtime_error("Last element in one or both vectors is not filled.");
        }
    }
    catch (const std::runtime_error& e) {
        std::cerr << e.what() << std::endl;
        return;
    }

    // Detect feature points
    for (int i = 0; i < fileNames_left.size(); ++i) {
        // 1. Read in the images and call cv::findChessboardCorners()
        img_left = cv::imread(fileNames_left[i]);
        img_right = cv::imread(fileNames_right[i]);
        //std::cout << "RGB res: " << img_right.size() << " " << img_right.type() <<  " Tof res: " << img_left.size() << " " << img_left.type() << std::endl;
        cv::cvtColor(img_left, gray_left, cv::COLOR_BGR2GRAY);
        cv::cvtColor(img_right, gray_right, cv::COLOR_BGR2GRAY);

        bool found_left = cv::findChessboardCorners(gray_left, BoardSize, corners_left[i], cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FILTER_QUADS);
        bool found_right = cv::findChessboardCorners(gray_right, BoardSize, corners_right[i], cv::CALIB_CB_ADAPTIVE_THRESH | cv::CALIB_CB_FILTER_QUADS);

        // 2. Use cv::cornerSubPix() to refine the found corner detections
        if (found_left && found_right) {
            cv::cornerSubPix(gray_left, corners_left[i], cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.1));
            cv::cornerSubPix(gray_right, corners_right[i], cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.1));
        }
        else if (!found_left) {
            std::cout << "Didnt find left " << corners_left[i] << std::endl;
        }
        else if (!found_right) {
            std::cout << "Didn't find right " << corners_right[i] << std::endl;
        }
        else {
            std::cout << "DRATS" << std::endl;
        }

        // Display (Can be removed after later but is good for quality assurance)
        //cv::drawChessboardCorners(img_left, BoardSize, corners_left[i], found_left);
        //cv::drawChessboardCorners(img_right, BoardSize, corners_right[i], found_right);
        //cv::imshow("chessboard detection left", img_left);
        //cv::imshow("chessboard detection right", img_right);
        //cv::waitKey(0);
    }

    std::vector<std::vector<cv::Point3f>> object_points;
    // 3. Generate checkerboard (world) coordinates Q. The board has 7 x 5
    // fields with a size of 30 x 30 mm
    int square_size = 30;
    std::vector<cv::Point3f> objp;
    for (int i = 0; i < BoardSize.height; i++) {
        for (int j = 0; j < BoardSize.width; j++) {
            objp.push_back(cv::Point3f(j * square_size, i * square_size, 0));
        }
    }

    // Puts objp objects into the vector object_points.
    for (auto v : corners_left) {
        object_points.push_back(objp);
    }

    std::vector<std::vector<cv::Point2f>> left_img_points, right_img_points;
    for (int i = 0; i < corners_left.size(); i++) {
        std::vector<cv::Point2f> v1, v2;
        for (int j = 0; j < corners_left[i].size(); j++) {
            v1.push_back(cv::Point2f((double)corners_left[i][j].x, (double)corners_left[i][j].y));
            v2.push_back(cv::Point2f((double)corners_right[i][j].x, (double)corners_right[i][j].y));
        }
        left_img_points.push_back(v1);
        right_img_points.push_back(v2);
    }

    // Load in the camera matrices and the distortion coefficients
    cv::Mat tmpr, tmpl;
    cv::Vec<float, 5> k_left, k_right;
    cv::FileStorage fileKr("../../../TestIntrin/Config/RGBIntrinsic.xml", cv::FileStorage::READ);
    fileKr["K"] >> tmpr;
    fileKr.release();
    cv::FileStorage filekr("../../../TestIntrin/Config/RGBDistortion.xml", cv::FileStorage::READ);
    filekr["k"] >> k_right;
    filekr.release();
    cv::FileStorage fileKl("../../../TestIntrin/Config/ToFIntrinsic.xml", cv::FileStorage::READ);
    fileKl["K"] >> tmpl;
    fileKl.release();
    cv::FileStorage filekl("../../../TestIntrin/Config/ToFDistortion.xml", cv::FileStorage::READ);
    filekl["k"] >> k_left;
    filekl.release();
    cv::Matx33f K_right(tmpr);
    cv::Matx33f K_left(tmpl);

    cv::Matx33f newK_right = cv::getOptimalNewCameraMatrix(K_right, k_right, img_right.size(), 1, img_right.size());
    cv::Matx33f newK_left = cv::getOptimalNewCameraMatrix(K_left, k_left, img_left.size(), 1, img_left.size());


    //Stereo calibrate
    cv::Mat R, F, E;
    cv::Vec3d T;
    int flag = 0;
    flag |= cv::CALIB_FIX_INTRINSIC;
    cv::TermCriteria criteria_stereo = cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 30, 0.001);
    cv::stereoCalibrate(object_points, left_img_points, right_img_points, newK_left, k_left, newK_right, k_right, img_left.size(), R, T, E, F, flag, criteria_stereo);

    //stereo rectify
    cv::Mat R1, R2, P1, P2, Q;
    cv::stereoRectify(newK_left, k_left, newK_right, k_right, img_right.size(), R, T, R1, R2, P1, P2, Q);

    std::cout << "R: " << R << "\n T: " << T << "\n R1: " << R1 << "\n R2: " << R2 << "\n P1: " << P1 << "\n P2: " << P2 << "\n Q: " << Q << std::endl;

    //Create maps to rectify images with
    cv::Mat lmapx, lmapy, rmapx, rmapy;
    cv::Mat imgU1, imgU2;

    cv::initUndistortRectifyMap(newK_left, k_left, R1, P1, img_left.size(), CV_32F, lmapx, lmapy);
    cv::initUndistortRectifyMap(newK_right, k_right, R2, P2, img_right.size(), CV_32F, rmapx, rmapy);
    cv::remap(img_left, imgU1, lmapx, lmapy, cv::INTER_LANCZOS4, cv::BORDER_CONSTANT, 0);
    cv::remap(img_right, imgU2, rmapx, rmapy, cv::INTER_LANCZOS4, cv::BORDER_CONSTANT, 0);
    cv::Mat concat;
    cv::hconcat(imgU1, imgU2, concat);
    //Show undistorted images
    cv::imshow("rectified left", imgU1);
    cv::imshow("rectified right", imgU2);
    cv::imshow("Concat", concat);
    cv::waitKey(0);

    cv::FileStorage fileRGB("../../../TestIntrin/Config/RGBStereo.xml", cv::FileStorage::WRITE);
    fileRGB << "mapx" << rmapx;
    fileRGB << "mapy" << rmapy;
    cv::FileStorage fileToF("../../../TestIntrin/Config/ToFStereo.xml", cv::FileStorage::WRITE);
    fileToF << "mapx" << lmapx;
    fileToF << "mapy" << lmapy;
}
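
The maps written at the end are meant to be applied to new frames with cv::remap later on, roughly as in the sketch below. The paths and keys match what the function above writes; the wrapper function rectifyRGB is just an illustrative name:

#include <opencv2/opencv.hpp>

// Sketch: load the rectification maps saved by stereoCalibration() and apply them to a new RGB frame.
cv::Mat rectifyRGB(const cv::Mat& rgbFrame)
{
    cv::FileStorage fs("../../../TestIntrin/Config/RGBStereo.xml", cv::FileStorage::READ);
    cv::Mat mapx, mapy;
    fs["mapx"] >> mapx;
    fs["mapy"] >> mapy;
    fs.release();

    cv::Mat rgbRectified;
    cv::remap(rgbFrame, rgbRectified, mapx, mapy, cv::INTER_LINEAR);
    return rgbRectified;
}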

I have already performed an intrinsic calibration on both the RGB camera and the ToF camera, and I have saved both camera matrices and distortion coefficients, which I load in this program. I have taken 30 images in which the chessboard is visible to both cameras at the same time.
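
The intrinsic files are plain cv::FileStorage XML files and were written roughly along these lines (shown for the RGB camera; saveRGBIntrinsics is just an illustrative wrapper, but the keys "K" and "k" match what stereoCalibration() reads back):

#include <opencv2/opencv.hpp>

// Sketch of how the RGB intrinsics were stored (the keys must match what the calibration code reads).
void saveRGBIntrinsics(const cv::Mat& K_rgb, const cv::Mat& k_rgb)
{
    cv::FileStorage fsK("../../../TestIntrin/Config/RGBIntrinsic.xml", cv::FileStorage::WRITE);
    fsK << "K" << K_rgb;    // 3x3 camera matrix from cv::calibrateCamera()
    fsK.release();

    cv::FileStorage fsk("../../../TestIntrin/Config/RGBDistortion.xml", cv::FileStorage::WRITE);
    fsk << "k" << k_rgb;    // distortion coefficients (k1, k2, p1, p2, k3)
    fsk.release();
}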

I tried the calibration with images from two RGB cameras and the code worked perfectly. So my thought is that there might be something I'm missing when performing the calibration between a ToF camera and an RGB camera.

I can't upload the files I use for the calibration here, unfortunately.


Solution

  • I figured out the problem I was having: the images I used were saved in the .png format, which the method for some reason wasn't compatible with.

    I changed them to the .jpg format, and now the calibration works perfectly.

    Thank you for your help and time @DrBwts!
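
    If anyone else hits this, a quick sanity check is to print what cv::imread actually returns for each calibration image (whether it loaded at all, how many channels, what bit depth). This is just a small sketch, not part of the calibration code above, and checkImages is only an illustrative name:

    #include <opencv2/opencv.hpp>
    #include <iostream>
    #include <vector>

    // Quick check of what cv::imread returns for every calibration image.
    void checkImages(const std::vector<cv::String>& fileNames)
    {
        for (const auto& path : fileNames) {
            cv::Mat img = cv::imread(path, cv::IMREAD_UNCHANGED);
            if (img.empty()) {
                std::cerr << "Failed to load: " << path << std::endl;
                continue;
            }
            std::cout << path << " -> size " << img.size()
                      << ", channels " << img.channels()
                      << ", depth " << img.depth() << std::endl;   // depth 0 = 8-bit, 2 = 16-bit unsigned
        }
    }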