I recently started working with OpenCV and I am not getting the result I expect; I don't know where I am going wrong. I have two uncalibrated images and have to compute a disparity map for them without any other supporting data (such as the camera matrix).
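(For reference, the code below assumes a setup roughly along these lines; the include list and the image file names are placeholders for whatever is actually used, and SURF needs the xfeatures2d module from opencv_contrib.)

#include <cstdio>
#include <cmath>
#include <algorithm>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/xfeatures2d.hpp> // SURF lives in opencv_contrib
using namespace cv;
using namespace cv::xfeatures2d;
using namespace std;
// somewhere inside main(): left/right input images (file names are placeholders)
Mat h1 = imread("left.jpg", IMREAD_GRAYSCALE);
Mat h2 = imread("right.jpg", IMREAD_GRAYSCALE);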
//-- Step 1: Detect keypoints and compute descriptors using SURF
int minHessian = 2080;
Ptr<SURF> detector = SURF::create(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
detector->detectAndCompute(h1, noArray(), keypoints_1, descriptors_1);
detector->detectAndCompute(h2, noArray(), keypoints_2, descriptors_2);
//-- Step 2: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector<DMatch> matches;
matcher.match(descriptors_1, descriptors_2, matches);
double max_dist = 0;
double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++) {
double dist = matches[i].distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Keep only the "good" matches, i.e. those whose distance is small compared to min_dist
//-- (standard tutorial-style filter; the code below uses good_matches)
std::vector<DMatch> good_matches;
for (int i = 0; i < descriptors_1.rows; i++) {
if (matches[i].distance <= std::max(2 * min_dist, 0.02))
good_matches.push_back(matches[i]);
}
Mat img_matches;
drawMatches(h1, keypoints_1, h2, keypoints_2, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Good Matches", img_matches);
imwrite("Good Matches.jpg", img_matches);
for (int i = 0; i < (int) good_matches.size(); i++) {
printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i,good_matches[i].queryIdx, good_matches[i].trainIdx);
}
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
for (int i = 0; i < good_matches.size(); i++) {
//-- Get the keypoints from the good matches
obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
}
//-- Fundamental matrix from the good matches (RANSAC)
cv::Mat F = cv::findFundamentalMat(obj, scene, cv::FM_RANSAC);
//-- Rectification homographies; stereoRectifyUncalibrated fills them in as 3x3 matrices
cv::Mat H1, H2;
cv::stereoRectifyUncalibrated(obj, scene, F, h1.size(), H1, H2);
imshow("h1", h1);
cv::Mat rectified1(h1.size(), h1.type());
cv::warpPerspective(h1, rectified1, H1, h1.size());
cv::imshow("rectified1.jpg", rectified1);
cv::imwrite("rectified1.jpg", rectified1);
imshow("h2", h2);
cv::Mat rectified2(h2.size(), h2.type());
cv::warpPerspective(h2, rectified2, H2, h2.size());
cv::imshow("rectified2.jpg", rectified2);
cv::imwrite("rectified2.jpg", rectified2);
//-- Overlay the rectified pair to visually check the row alignment
Mat test;
addWeighted(rectified1, 0.5, rectified2, 0.5, 0.0, test);
imshow("test", test);
//-- Depth map
int ndisparities = 16*5;
double minVal;
double maxVal;
Ptr<StereoSGBM> sgbm = StereoSGBM::create(16, //int minDisparity
ndisparities, //int numDisparities
1, //int blockSize
0, //int P1
0, //int P2
0, //int disp12MaxDiff
0, //int preFilterCap
0, //int uniquenessRatio
0, //int speckleWindowSize
0, //int speckleRange
StereoSGBM::MODE_HH); //int mode
//-- 3. Calculate the disparity image via SGBM
Mat disparity2;
sgbm->compute(rectified1, rectified2, disparity2);
minMaxLoc(disparity2, &minVal, &maxVal);
printf("Min disp: %f Max value: %f \n", minVal, maxVal);
//-- Scale and shift so that minVal maps to 0 and maxVal to 255 for display
disparity2.convertTo(disparity2, CV_8UC1, 255 / (maxVal - minVal), -minVal * 255 / (maxVal - minVal));
cv::imshow("Disparity Map sgbm", disparity2);
imwrite("out2.jpg", disparity2);
waitKey(0);
[image: rectified left and right images]
I think the rectified images are okay and the problem is in the SGBM parameters. Is there any way to tune them?
Yes, your rectified images look OK, and yes, it is hard to find good parameters. I tried:
Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, //int minDisparity
80, //int numDisparities
5, //int blockSize (SADWindowSize) = 3
600, //int P1 = 0
2400, //int P2 = 0
0, //int disp12MaxDiff = 0
0, //int preFilterCap = 0
0, //int uniquenessRatio = 0
0, //int speckleWindowSize = 0
0, //int speckleRange = 0
StereoSGBM::MODE_SGBM); //int mode (replaces the old bool fullDP = false)
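If you want a less ad hoc starting point, the stereo_match.cpp sample shipped with OpenCV derives P1 and P2 from the block size and the number of image channels; a sketch along those lines (sgbmWinSize and cn are just illustrative names, and the other values are the sample's defaults, not tuned for your images):

int sgbmWinSize = 5; // block size, typically an odd value in 3..11
int cn = rectified1.channels(); // 1 for grayscale input
sgbm->setBlockSize(sgbmWinSize);
sgbm->setP1(8 * cn * sgbmWinSize * sgbmWinSize);
sgbm->setP2(32 * cn * sgbmWinSize * sgbmWinSize);
sgbm->setUniquenessRatio(10);
sgbm->setSpeckleWindowSize(100);
sgbm->setSpeckleRange(32);
sgbm->setDisp12MaxDiff(1);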