Tags: c, image-processing, opencv, eye-detection

OpenCV eye tracking loses track of the eyes in some frames


The code below tracks both eyes and overlays an image once both eyes are detected. I am using Haar cascade XML files to detect the face and both eyes and then overlay a picture across them.
My problem is that I get results like this:

[Images 1–10: consecutive output frames showing the intermittent tracking result.]

I am posting my code below

#include "cv.h"
#include "highgui.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <sys/stat.h>
#include <ctype.h>
#include <string>
using namespace cv;
using namespace std;

void detectAndDisplay( Mat frame );
String face_cascade_name = "/root/opencv/newtutorial/haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "/root/opencv/newtutorial/haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
RNG rng(12345);
IplImage *disp,*neg_img,*cpy_img,*imga;
IplImage *pic;
IplImage  *image_n = 0;
int make=0;
CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
namespace {
    void makevdo(){
        bool flag = false;
        fstream fin;
        char filename_new[200],filename_new_after[200];
        int n=0;
        Mat frame;
        double frameRate = 25.0;
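        // Write the processed frames back out as a Motion-JPEG .mov at 25 fps.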
        CvVideoWriter *vdowriter = cvCreateVideoWriter( "/root/opencv/newtutorial/test_converted_next.mov", CV_FOURCC('j','p','e','g'), frameRate, Size(640,480) );
        while(flag==false){
            sprintf(filename_new,"/root/opencv/newtutorial/mydirnext/filename%.3d.jpg",n);
            sprintf(filename_new_after,"/root/opencv/newtutorial/framesaftertrack/filename%.3d.jpg",n);
            fin.open(filename_new,ios::in);
            if( fin.is_open() )
            {
                frame = imread(filename_new);
                pic = cvLoadImage("/root/opencv/newtutorial/pic.png");
                image_n = cvLoadImage(filename_new,1);
                disp = cvCreateImage( cvGetSize(image_n), 8, 3 );
                cpy_img = cvCreateImage( cvGetSize(image_n), 8, 3 );
                neg_img = cvCreateImage( cvGetSize(image_n), 8, 3 );
                std::vector<Rect> faces;
                Mat frame_gray;
                cvtColor( frame, frame_gray, CV_BGR2GRAY );
                equalizeHist( frame_gray, frame_gray );
                face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
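                // Source quad q: the four corners of the overlay picture (top-left, top-right,
                // bottom-right, bottom-left); it is later mapped onto the eye-spanning quad p.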
                CvPoint2D32f q[4];
                q[0].x= (float) pic->width * 0;
                q[0].y= (float) pic->height * 0;
                q[1].x= (float) pic->width;
                q[1].y= (float) pic->height * 0;
                q[2].x= (float) pic->width;
                q[2].y= (float) pic->height;
                q[3].x= (float) pic->width * 0;
                q[3].y= (float) pic->height;
                for( int i = 0; i < (int)faces.size(); i++ )
                {
                    // Draw an ellipse around the detected face (on the Mat frame; the saved output uses image_n).
                    Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
                    ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
                    Mat faceROI = frame_gray( faces[i] );
                    std::vector<Rect> eyes;
                    eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
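                    // Overlay only when exactly two eyes are detected and their boxes do not overlap horizontally.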
                    if((int)eyes.size()==2 && (((eyes[0].x + eyes[0].width*0.5 ) < eyes[1].x) || ((eyes[1].x + eyes[1].width*0.5 ) < eyes[0].x))){
                        CvPoint2D32f p[4];
                        IplImage* blank  = cvCreateImage( cvGetSize(pic), 8, 3);
                        cvZero(blank);
                        cvNot(blank,blank);
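                        // blank is now an all-white mask the same size as the overlay picture.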
                        if(eyes[0].x<eyes[1].x){
                            Point center0( faces[i].x + eyes[0].x + eyes[0].width*0.5, faces[i].y + eyes[0].y + eyes[0].height*0.5 ); 
                            float radius0 =  (eyes[0].width + eyes[0].height)*0.25 ;
                            Point center1( faces[i].x + eyes[1].x + eyes[1].width*0.5, faces[i].y + eyes[1].y + eyes[1].height*0.5 ); 
                            float radius1 =  (eyes[1].width + eyes[1].height)*0.25 ;
                            p[2].x= (float) center0.x - radius0;
                            p[2].y= (float) center0.y + radius0;
                            p[1].x= (float) center0.x - radius0;
                            p[1].y= (float) center0.y - radius0;
                            p[3].x= (float) center1.x + radius1;
                            p[3].y= (float) center1.y + radius1;
                            p[0].x= (float) center1.x + radius1;
                            p[0].y= (float) center1.y - radius1;
                        }
                        else{
                            Point center0( faces[i].x + eyes[1].x + eyes[1].width*0.5, faces[i].y + eyes[1].y + eyes[1].height*0.5 ); 
                            float radius0 =  (eyes[1].width + eyes[1].height)*0.25 ;
                            Point center1( faces[i].x + eyes[0].x + eyes[0].width*0.5, faces[i].y + eyes[0].y + eyes[0].height*0.5 ); 
                            float radius1 =  (eyes[0].width + eyes[0].height)*0.25 ;
                            p[2].x= (float) center0.x - radius0;
                            p[2].y= (float) center0.y + radius0;
                            p[1].x= (float) center0.x - radius0;
                            p[1].y= (float) center0.y - radius0;
                            p[3].x= (float) center1.x + radius1;
                            p[3].y= (float) center1.y + radius1;
                            p[0].x= (float) center1.x + radius1;
                            p[0].y= (float) center1.y - radius1;
                        }

                        cvGetPerspectiveTransform(q,p,warp_matrix);
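                        // Warp the overlay picture and the all-white mask into frame-sized buffers,
                        // blank out the eye region of the frame with the inverted mask, then OR the
                        // warped picture into that region.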
                        cvZero(neg_img);
                        cvZero(cpy_img);
                        cvWarpPerspective( pic, neg_img, warp_matrix);
                        cvWarpPerspective( blank, cpy_img, warp_matrix);
                        cvNot(cpy_img,cpy_img);
                        cvAnd(cpy_img,image_n,cpy_img);
                        cvOr(cpy_img,neg_img,image_n);
                    }
                }
                cvSaveImage(filename_new_after,image_n);
                cvWriteFrame(vdowriter,image_n);
                cout << "Read file " << filename_new << endl;
            }
            else{
                flag=true;
            }
            fin.close();
            n++;
        }
        cvReleaseVideoWriter(&vdowriter);
        cvReleaseImage(&pic);
        cvReleaseImage(&imga);
        cvReleaseImage(&disp);
        cvReleaseImage(&neg_img);
        cvReleaseImage(&cpy_img);
        cvReleaseImage(&image_n);
    }

    int process(VideoCapture& capture) {
        char strFrame[]="/root/opencv/newtutorial/mydirnext";
        if(mkdir(strFrame,0777)==-1)
        {
            cout<<"Directory already exists, removing it"<<endl;
            if(system("rm -r /root/opencv/newtutorial/mydirnext")==0){
                cout << "Directory successfully deleted"<<endl;
            }
            if(mkdir(strFrame,0777)==-1){
                cout << "Error again creating directory" << endl;
            }
        }
        int n = 0;
        char filename[200];
        string window_name = "video | q or esc to quit";
        cout << "press space to save a picture. q or esc to quit" << endl;
        namedWindow(window_name, CV_WINDOW_KEEPRATIO);
        Mat frame;
        for (;;) {
            capture >> frame;
            if (frame.empty())
                continue;
            imshow(window_name, frame);
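            // Save every captured frame as a numbered JPEG for makevdo() to pick up later.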
            sprintf(filename,"/root/opencv/newtutorial/mydirnext/filename%.3d.jpg",n++);
            imwrite(filename,frame);
            char key = (char)waitKey(5);
            switch (key) {
            case 27:
                if(make==0){
                    make=1;
                    makevdo();
                }
            return 0;
            default:
            break;
            }
        }
        return 0;
    }
}
int main(int ac, char** av) {
    if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
    if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
    if (ac != 2) {
        return 1;
    }
    std::string arg = av[1];
    VideoCapture capture(arg);
    if (!capture.isOpened())
        capture.open(atoi(arg.c_str()));
    if (!capture.isOpened()) {
        cerr << "Failed to open a video device or video file!\n" << endl;
        return 1;
    }
    return process(capture);
}

Sorry for posting so many images, SO, but there was no other way to explain my problem.


Solution

  • Start by extracting everything into functions that can be given sensible names; no function should be longer than about 10 lines. The images are not helpful at all. Remove the overlay and instead draw what the algorithm claims to recognize, whenever it does so (a sketch follows below).

    After you've done that, feed it sequences of the same images: first the ones where it recognizes the eyes, then those where it doesn't. That way you can make a better guess at where the problem is. But first clean up the messy code.
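
    As a starting point, here is a minimal sketch (not the poster's code) of such a debug view. It is meant to be dropped into the question's file, so it assumes the same includes, using-directives and loaded cascades; drawDetections is a name invented here.

    // Hypothetical helper: draw the raw face/eye detections instead of overlaying a picture.
    void drawDetections( Mat& frame, CascadeClassifier& face_cascade, CascadeClassifier& eyes_cascade )
    {
        Mat gray;
        cvtColor( frame, gray, CV_BGR2GRAY );
        equalizeHist( gray, gray );

        std::vector<Rect> faces;
        face_cascade.detectMultiScale( gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

        for( size_t i = 0; i < faces.size(); i++ )
        {
            // Red box around every face the cascade reports.
            rectangle( frame, faces[i].tl(), faces[i].br(), Scalar( 0, 0, 255 ), 2 );

            std::vector<Rect> eyes;
            eyes_cascade.detectMultiScale( gray( faces[i] ), eyes, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

            for( size_t j = 0; j < eyes.size(); j++ )
            {
                // Green box around every eye candidate, shifted into frame coordinates.
                Rect eye = eyes[j] + Point( faces[i].x, faces[i].y );
                rectangle( frame, eye.tl(), eye.br(), Scalar( 0, 255, 0 ), 2 );
            }
        }
    }

    Run your frame sequence through that and save or count the detections per frame: whether zero, one, or three eye candidates show up in the "bad" frames tells you if the cascade itself or your exactly-two-eyes filter is what makes the overlay disappear.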