
OpenCV 3.0: SURF detection and FlannBasedMatcher tracking rectangle issue


I am trying to implement SURF detection and tracking using a FlannBasedMatcher. My code works properly for the detection part, but the issue is with tracking.

[Screenshot: the tracking rectangle is drawn away from the target object]

You can see in the above image that the tracking rectangle is not focusing on the right object. Moreover, the rectangle stays static even when I move my camera around. I am not sure where I am going wrong.

Here is the code I have implemented:

void surf_detection::surf_detect(){

    UMat img_extractor, snap_extractor;

    // Use the cropped region as the object template if one exists,
    // otherwise fall back to the full snapshot
    if (crop_image_.empty())
        cv_snapshot.copyTo(dst);
    else
        crop_image_.copyTo(dst);
    //dst = QImagetocv(crop_image_);

    imshow("dst", dst);

    Ptr<SURF> detector = SURF::create(minHessian);
    Ptr<DescriptorExtractor> extractor = SURF::create(minHessian);

    cvtColor(dst, src, CV_BGR2GRAY);          // object image
    cvtColor(frame, gray_image, CV_BGR2GRAY); // current scene frame

    detector->detect(src, keypoints_1);
    //printf("Object: %d keypoints detected\n", (int)keypoints_1.size());
    detector->detect(gray_image, keypoints_2);
    //printf("Scene: %d keypoints detected\n", (int)keypoints_2.size());

    extractor->compute(src, keypoints_1, img_extractor);
    //printf("Object: %d descriptors extracted\n", img_extractor.rows);
    extractor->compute(gray_image, keypoints_2, snap_extractor);

    std::vector<Point2f> scene_corners(4);
    std::vector<Point2f> obj_corners(4);

    // Corners of the object image, to be projected into the scene
    obj_corners[0] = cvPoint(0, 0);
    obj_corners[1] = cvPoint(src.cols, 0);
    obj_corners[2] = cvPoint(src.cols, src.rows);
    obj_corners[3] = cvPoint(0, src.rows);

    // Match object descriptors (query) against scene descriptors (train)
    vector<DMatch> matches;
    matcher.match(img_extractor, snap_extractor, matches);

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for (int i = 0; i < img_extractor.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    //printf("-- Max dist : %f \n", max_dist);
    //printf("-- Min dist : %f \n", min_dist);

    //-- Keep only matches whose distance is at most 2 * min_dist
    vector<DMatch> good_matches;

    for (int i = 0; i < img_extractor.rows; i++)
    {
        if (matches[i].distance <= max(2 * min_dist, 0.02))
        {
            good_matches.push_back(matches[i]);
        }
    }

    UMat img_matches;
    drawMatches(src, keypoints_1, gray_image, keypoints_2,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    if (good_matches.size() >= 4){

        for (int i = 0; i < good_matches.size(); i++){

            //-- Get the keypoints from the good matches
            obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
            scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
        }
    }

    H = findHomography(obj, scene, CV_RANSAC);

    // Project the object corners into the scene and draw the bounding box
    perspectiveTransform(obj_corners, scene_corners, H);

    line(img_matches, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 4);

    imshow("Good matches", img_matches);
}

Solution

  • Your matches are correct; you are simply displaying them wrong. The transformed scene corners refer to the gray_image coordinate system, but you're drawing them in the img_matches coordinate system.

    So, basically, you need to translate them by the width of src, since drawMatches composes the two images side by side, with src on the left and gray_image on the right:

    line(img_matches, scene_corners[0] + Point2f(src.cols,0), scene_corners[1] + Point2f(src.cols,0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[1] + Point2f(src.cols,0), scene_corners[2] + Point2f(src.cols,0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[2] + Point2f(src.cols,0), scene_corners[3] + Point2f(src.cols,0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[3] + Point2f(src.cols,0), scene_corners[0] + Point2f(src.cols,0), Scalar(0, 255, 0), 4);
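
    If you prefer not to repeat the offset four times, here is a minimal sketch of the same fix written as a loop, using scene_corners, src, and img_matches exactly as defined above:

    Point2f offset((float)src.cols, 0.f);  // width of the left (object) image inside img_matches
    for (size_t i = 0; i < scene_corners.size(); i++)
        line(img_matches, scene_corners[i] + offset,
             scene_corners[(i + 1) % scene_corners.size()] + offset,
             Scalar(0, 255, 0), 4);        // draw each edge, wrapping back to corner 0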
    

    See also this related answer.