I'm trying to create a point cloud from the depth data of my ToF camera and overlay my RGB image onto it. To do this I created a stereo calibration with OpenCV between the amplitude image and the RGB image. The correction maps are saved as .xml files that I load into my program when I need them. The code I have outputs a point cloud, but it is not correct: not all of the RGB image appears, and the point cloud comes out as a cone of the same image repeated smaller and smaller, as can be seen in the image below.
Here is the code that I have:
void openCVViewer::displayPCLRGBData2(float xData[], float yData[], float depth[], int frameWidth, cv::Mat img)
{
// Convert ToF image data to cv::Mats to rectify
cv::Mat depthMap(640, 480, CV_32FC1, depth); //= arrToMat(depth, frameWidth);
// Load maps for rectification
cv::Mat rmapx, rmapy, lmapx, lmapy, ToFCamMat;
cv::FileStorage rgbFile;
rgbFile.open(savePath1 + "RGBStereo.xml", cv::FileStorage::READ);
if (!rgbFile.isOpened())
{
std::cerr << "Failed to find RGBStereo map" << std::endl;
}
rgbFile["mapx"] >> rmapx;
rgbFile["mapy"] >> rmapy;
rgbFile.release();
cv::FileStorage tofFile;
tofFile.open(savePath1 + "ToFStereo.xml", cv::FileStorage::READ);
if (!tofFile.isOpened())
{
std::cerr << "Failed to find ToFStereo map" << std::endl;
}
tofFile["mapx"] >> lmapx;
tofFile["mapy"] >> lmapy;
tofFile.release();
cv::FileStorage ToFIn;
ToFIn.open(savePath1 + "ToFIntrinsic.xml", cv::FileStorage::READ);
//ToFIn.open(savePath1 + "RGBIntrinsic.xml", cv::FileStorage::READ);
if(!ToFIn.isOpened()){
std::cerr << "Failed to finde ToFIntrinsic.xml" << std::endl;
}
ToFIn["K"] >> ToFCamMat;
ToFIn.release();
float cx = ToFCamMat.at<float>(0,2);
float cy = ToFCamMat.at<float>(1,2);
float fx = ToFCamMat.at<float>(0,0);
float fy = ToFCamMat.at<float>(1,1);
std:: cout << "cx: " << cx << " cy: " << cy << std::endl;
cv::Mat xcoor(480,640, CV_32F, xData);
cv::Mat ycoor(480,640, CV_32F, yData);
// Rectify images
cv::Mat recy, recx, recImg, recDep;
cv::remap(depthMap, recDep, lmapx, lmapy, cv::INTER_LINEAR);
cv::remap(img, recImg, rmapx, rmapy, cv::INTER_LINEAR);
cv::remap(xcoor, recx, lmapx, lmapy, cv::INTER_LINEAR);
cv::remap(ycoor, recy, lmapx, lmapy, cv::INTER_LINEAR);
float* depthArr = new float[recDep.cols * recDep.rows];
std::memcpy(depthArr, recDep.ptr<float>(0), (recDep.cols * recDep.rows) * sizeof(float));
// Convert color
cv::cvtColor(recImg, recImg, cv::COLOR_BGR2RGB);
// Create the aligned point cloud
pcl::PointCloud<pcl::PointXYZRGB>::Ptr alignedPointCloud(new pcl::PointCloud<pcl::PointXYZRGB>);
std::cout << "img rows: " << recImg.rows << " img cols: " << recImg.cols << std::endl;
int size = (recImg.rows * recImg.cols);
// Loop through each point in the point cloud
for (size_t i = 0; i < size; ++i) {
// Retrieve the corresponding pixel coordinates in the corrected RGB image
int x = static_cast<int>(lmapx.at<float>(i));
int y = static_cast<int>(lmapy.at<float>(i));
// Skip points with invalid pixel coordinates
if (x < 0 || x >= recImg.cols || y < 0 || y >= recImg.rows)
continue;
// Retrieve the RGB color at the pixel coordinates from the corrected RGB image
cv::Vec3b color = recImg.at<cv::Vec3b>(y, x);
// Calculate the x and y coordinates based on the pixel and depth values
float px = (x - cx) * depthArr[i] / fx;
float py = (y - cy) * depthArr[i] / fy;
// Create a PCL point and assign the coordinates and color
pcl::PointXYZRGB alignedPoint;
alignedPoint.x = px;
alignedPoint.y = py;
alignedPoint.z = depthArr[i];
alignedPoint.r = color[2];
alignedPoint.g = color[1];
alignedPoint.b = color[0];
// Add the aligned point to the aligned point cloud
alignedPointCloud->push_back(alignedPoint);
}
// Visualize the point cloud
pcl::visualization::CloudViewer viewer("Point Cloud Viewer");
viewer.showCloud(alignedPointCloud);
while (!viewer.wasStopped()) {
// Do nothing
}
}
I figured it out. I had written:
cv::Mat depthMap(640, 480, CV_32FC1, depth);
but it should have been:
cv::Mat depthMap(480, 640, CV_32FC1, depth);
The cv::Mat constructor takes (rows, cols), i.e. (height, width) — swapping them makes OpenCV stride through the 640x480 buffer with the wrong row length, which is what produced the repeated "cone" pattern.